1 /*
   2  * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "logging/log.hpp"
  29 #include "logging/logStream.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/binaryTreeDictionary.inline.hpp"
  32 #include "memory/filemap.hpp"
  33 #include "memory/freeList.inline.hpp"
  34 #include "memory/metachunk.hpp"
  35 #include "memory/metaspace.hpp"
  36 #include "memory/metaspaceGCThresholdUpdater.hpp"
  37 #include "memory/metaspaceShared.hpp"
  38 #include "memory/metaspaceTracer.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "memory/universe.hpp"
  41 #include "runtime/atomic.hpp"
  42 #include "runtime/globals.hpp"
  43 #include "runtime/init.hpp"
  44 #include "runtime/java.hpp"
  45 #include "runtime/mutex.hpp"
  46 #include "runtime/orderAccess.inline.hpp"
  47 #include "services/memTracker.hpp"
  48 #include "services/memoryService.hpp"
  49 #include "utilities/align.hpp"
  50 #include "utilities/copy.hpp"
  51 #include "utilities/debug.hpp"
  52 #include "utilities/macros.hpp"
  53 
  54 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  55 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
  56 
  57 // Helper function that performs a number of consistency checks on a chunk.
  58 DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
  59 
  60 // Given a Metachunk, update its in-use information (both in the
  61 // chunk and the occupancy map).
  62 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
  63 
  64 size_t const allocation_from_dictionary_limit = 4 * K;
  65 
  66 MetaWord* last_allocated = 0;
  67 
  68 size_t Metaspace::_compressed_class_space_size;
  69 const MetaspaceTracer* Metaspace::_tracer = NULL;
  70 
  71 DEBUG_ONLY(bool Metaspace::_frozen = false;)
  72 
  73 enum ChunkSizes {    // in words.
  74   ClassSpecializedChunk = 128,
  75   SpecializedChunk = 128,
  76   ClassSmallChunk = 256,
  77   SmallChunk = 512,
  78   ClassMediumChunk = 4 * K,
  79   MediumChunk = 8 * K
  80 };
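// Illustrative byte sizes (not part of the original code; assuming a 64-bit VM
// where one word is 8 bytes): SpecializedChunk and ClassSpecializedChunk are
// 128 words = 1 KB, ClassSmallChunk is 256 words = 2 KB, SmallChunk is
// 512 words = 4 KB, ClassMediumChunk is 4 K words = 32 KB, and MediumChunk is
// 8 K words = 64 KB.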
  81 
  82 // Returns the word size of a non-humongous chunk of the given type.
  83 size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
  84   assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
  85   size_t size = 0;
  86   if (is_class) {
  87     switch(chunktype) {
  88       case SpecializedIndex: size = ClassSpecializedChunk; break;
  89       case SmallIndex: size = ClassSmallChunk; break;
  90       case MediumIndex: size = ClassMediumChunk; break;
  91       default:
  92         ShouldNotReachHere();
  93     }
  94   } else {
  95     switch(chunktype) {
  96       case SpecializedIndex: size = SpecializedChunk; break;
  97       case SmallIndex: size = SmallChunk; break;
  98       case MediumIndex: size = MediumChunk; break;
  99       default:
 100         ShouldNotReachHere();
 101     }
 102   }
 103   return size;
 104 }
 105 
 106 ChunkIndex get_chunk_type_by_size(size_t size, bool is_class) {
 107   if (is_class) {
 108     if (size == ClassSpecializedChunk) {
 109       return SpecializedIndex;
 110     } else if (size == ClassSmallChunk) {
 111       return SmallIndex;
 112     } else if (size == ClassMediumChunk) {
 113       return MediumIndex;
 114     } else if (size > ClassMediumChunk) {
 115       // A valid humongous chunk size is a multiple of the smallest chunk size.
 116       assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
 117       return HumongousIndex;
 118     }
 119   } else {
 120     if (size == SpecializedChunk) {
 121       return SpecializedIndex;
 122     } else if (size == SmallChunk) {
 123       return SmallIndex;
 124     } else if (size == MediumChunk) {
 125       return MediumIndex;
 126     } else if (size > MediumChunk) {
 127       // A valid humongous chunk size is a multiple of the smallest chunk size.
 128       assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
 129       return HumongousIndex;
 130     }
 131   }
 132   ShouldNotReachHere();
 133   return (ChunkIndex)-1;
 134 }
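// Illustrative uses of the mapping above (not part of the original code):
//   get_chunk_type_by_size(SmallChunk, false)       -> SmallIndex
//   get_chunk_type_by_size(ClassMediumChunk, true)  -> MediumIndex
//   get_chunk_type_by_size(4 * MediumChunk, false)  -> HumongousIndex (32 K words,
//                                                      a multiple of SpecializedChunk)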
 135 
 136 
 137 static ChunkIndex next_chunk_index(ChunkIndex i) {
 138   assert(i < NumberOfInUseLists, "Out of bounds");
 139   return (ChunkIndex) (i+1);
 140 }
 141 
 142 static ChunkIndex prev_chunk_index(ChunkIndex i) {
 143   assert(i > ZeroIndex, "Out of bounds");
 144   return (ChunkIndex) (i-1);
 145 }
 146 
 147 static const char* scale_unit(size_t scale) {
 148   switch(scale) {
 149     case 1: return "BYTES";
 150     case K: return "KB";
 151     case M: return "MB";
 152     case G: return "GB";
 153     default:
 154       ShouldNotReachHere();
 155       return NULL;
 156   }
 157 }
 158 
 159 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 160 uint MetaspaceGC::_shrink_factor = 0;
 161 bool MetaspaceGC::_should_concurrent_collect = false;
 162 
 163 typedef class FreeList<Metachunk> ChunkList;
 164 
 165 // Manages the global free lists of chunks.
 166 class ChunkManager : public CHeapObj<mtInternal> {
 167   friend class TestVirtualSpaceNodeTest;
 168 
 169   // Free list of chunks of different sizes.
 170   //   SpecializedChunk
 171   //   SmallChunk
 172   //   MediumChunk
 173   ChunkList _free_chunks[NumberOfFreeLists];
 174 
 175   // Whether or not this is the class chunkmanager.
 176   const bool _is_class;
 177 
 178   // Returns the non-humongous chunk list for the given index.
 179   ChunkList* free_chunks(ChunkIndex index);
 180 
 181   // Returns non-humongous chunk list for the given chunk word size.
 182   ChunkList* find_free_chunks_list(size_t word_size);
 183 
 184   //   HumongousChunk
 185   ChunkTreeDictionary _humongous_dictionary;
 186 
 187   // Returns the humongous chunk dictionary.
 188   ChunkTreeDictionary* humongous_dictionary() {
 189     return &_humongous_dictionary;
 190   }
 191 
 192   // Size, in metaspace words, of all chunks managed by this ChunkManager
 193   size_t _free_chunks_total;
 194   // Number of chunks in this ChunkManager
 195   size_t _free_chunks_count;
 196 
 197   // Update counters after a chunk was added or removed.
 198   void account_for_added_chunk(const Metachunk* c);
 199   void account_for_removed_chunk(const Metachunk* c);
 200 
 201   // Debug support
 202 
 203   size_t sum_free_chunks();
 204   size_t sum_free_chunks_count();
 205 
 206   void locked_verify_free_chunks_total();
 207   void slow_locked_verify_free_chunks_total() {
 208     if (VerifyMetaspace) {
 209       locked_verify_free_chunks_total();
 210     }
 211   }
 212   void locked_verify_free_chunks_count();
 213   void slow_locked_verify_free_chunks_count() {
 214     if (VerifyMetaspace) {
 215       locked_verify_free_chunks_count();
 216     }
 217   }
 218   void verify_free_chunks_count();
 219 
 220   // Given a pointer to a chunk, attempts to merge it with neighboring
 221   // free chunks to form a bigger chunk. Returns true if successful.
 222   bool attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type);
 223 
 224   // Helper for chunk merging:
 225   //  Given an address range with 1-n chunks which are all supposed to be
 226   //  free and hence currently managed by this ChunkManager, remove them
 227   //  from this ChunkManager and mark them as invalid.
 228   // - This does not correct the occupancy map.
 229   // - This does not adjust the counters in ChunkManager.
 230   // - This does not adjust the container count in the containing VirtualSpaceNode.
 231   // Returns number of chunks removed.
 232   int remove_chunks_in_area(MetaWord* p, size_t word_size);
 233 
 234   // Helper for chunk splitting: given a target chunk size and a larger free chunk,
 235   // split up the larger chunk into n smaller chunks, at least one of which should be
 236   // the target chunk of target chunk size. The smaller chunks, including the target
 237   // chunk, are returned to the freelist. The pointer to the target chunk is returned.
 238   // Note that this chunk is supposed to be removed from the freelist right away.
 239   Metachunk* split_chunk(size_t target_chunk_word_size, Metachunk* chunk);
 240 
 241  public:
 242 
 243   struct ChunkManagerStatistics {
 244     size_t num_by_type[NumberOfFreeLists];
 245     size_t single_size_by_type[NumberOfFreeLists];
 246     size_t total_size_by_type[NumberOfFreeLists];
 247     size_t num_humongous_chunks;
 248     size_t total_size_humongous_chunks;
 249   };
 250 
 251   void locked_get_statistics(ChunkManagerStatistics* stat) const;
 252   void get_statistics(ChunkManagerStatistics* stat) const;
 253   static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale);
 254 
 255 
 256   ChunkManager(bool is_class)
 257       : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
 258     _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
 259     _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
 260     _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
 261   }
 262 
 263   // Allocate (remove and return) a chunk from the global freelist.
 264   Metachunk* chunk_freelist_allocate(size_t word_size);
 265 
 266   // Map a size to a list index assuming that there are lists
 267   // for special, small, medium, and humongous chunks.
 268   ChunkIndex list_index(size_t size);
 269 
 270   // Map a given index to the chunk size.
 271   size_t size_by_index(ChunkIndex index) const;
 272 
 273   bool is_class() const { return _is_class; }
 274 
 275   // Convenience accessors.
 276   size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); }
 277   size_t small_chunk_word_size() const { return size_by_index(SmallIndex); }
 278   size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); }
 279 
 280   // Take a chunk from the ChunkManager. The chunk is expected to be in
 281   // the chunk manager (the freelist if non-humongous, the dictionary if
 282   // humongous).
 283   void remove_chunk(Metachunk* chunk);
 284 
 285   // Return a single chunk of type index to the ChunkManager.
 286   void return_single_chunk(ChunkIndex index, Metachunk* chunk);
 287 
 288   // Add the simple linked list of chunks to the freelist of chunks
 289   // of type index.
 290   void return_chunk_list(ChunkIndex index, Metachunk* chunk);
 291 
 292   // Total of the space in the free chunks list
 293   size_t free_chunks_total_words();
 294   size_t free_chunks_total_bytes();
 295 
 296   // Number of chunks in the free chunks list
 297   size_t free_chunks_count();
 298 
 299   // Remove from a list by size.  Selects list based on size of chunk.
 300   Metachunk* free_chunks_get(size_t chunk_word_size);
 301 
 302 #define index_bounds_check(index)                                         \
 303   assert(is_valid_chunktype(index), "Bad index: %d", (int) index)
 304 
 305   size_t num_free_chunks(ChunkIndex index) const {
 306     index_bounds_check(index);
 307 
 308     if (index == HumongousIndex) {
 309       return _humongous_dictionary.total_free_blocks();
 310     }
 311 
 312     ssize_t count = _free_chunks[index].count();
 313     return count == -1 ? 0 : (size_t) count;
 314   }
 315 
 316   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 317     index_bounds_check(index);
 318 
 319     size_t word_size = 0;
 320     if (index == HumongousIndex) {
 321       word_size = _humongous_dictionary.total_size();
 322     } else {
 323       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 324       word_size = size_per_chunk_in_words * num_free_chunks(index);
 325     }
 326 
 327     return word_size * BytesPerWord;
 328   }
 329 
 330   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 331     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 332                                          num_free_chunks(SmallIndex),
 333                                          num_free_chunks(MediumIndex),
 334                                          num_free_chunks(HumongousIndex),
 335                                          size_free_chunks_in_bytes(SpecializedIndex),
 336                                          size_free_chunks_in_bytes(SmallIndex),
 337                                          size_free_chunks_in_bytes(MediumIndex),
 338                                          size_free_chunks_in_bytes(HumongousIndex));
 339   }
 340 
 341   // Debug support
 342   void verify();
 343   void slow_verify() {
 344     if (VerifyMetaspace) {
 345       verify();
 346     }
 347   }
 348   void locked_verify();
 349   void slow_locked_verify() {
 350     if (VerifyMetaspace) {
 351       locked_verify();
 352     }
 353   }
 354   void verify_free_chunks_total();
 355 
 356   void locked_print_free_chunks(outputStream* st);
 357   void locked_print_sum_free_chunks(outputStream* st);
 358 
 359   void print_on(outputStream* st) const;
 360 
 361   // Prints composition for both the non-class and (if available)
 362   // the class chunk manager.
 363   static void print_all_chunkmanagers(outputStream* out, size_t scale = 1);
 364 };
 365 
 366 class SmallBlocks : public CHeapObj<mtClass> {
 367   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 368   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 369 
 370  private:
 371   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 372 
 373   FreeList<Metablock>& list_at(size_t word_size) {
 374     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 375     return _small_lists[word_size - _small_block_min_size];
 376   }
 377 
 378  public:
 379   SmallBlocks() {
 380     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 381       uint k = i - _small_block_min_size;
 382       _small_lists[k].set_size(i);
 383     }
 384   }
 385 
 386   size_t total_size() const {
 387     size_t result = 0;
 388     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 389       uint k = i - _small_block_min_size;
 390       result = result + _small_lists[k].count() * _small_lists[k].size();
 391     }
 392     return result;
 393   }
 394 
 395   static uint small_block_max_size() { return _small_block_max_size; }
 396   static uint small_block_min_size() { return _small_block_min_size; }
 397 
 398   MetaWord* get_block(size_t word_size) {
 399     if (list_at(word_size).count() > 0) {
 400       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 401       return new_block;
 402     } else {
 403       return NULL;
 404     }
 405   }
 406   void return_block(Metablock* free_chunk, size_t word_size) {
 407     list_at(word_size).return_chunk_at_head(free_chunk, false);
 408     assert(list_at(word_size).count() > 0, "Should have a chunk");
 409   }
 410 
 411   void print_on(outputStream* st) const {
 412     st->print_cr("SmallBlocks:");
 413     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 414       uint k = i - _small_block_min_size;
 415       st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
 416     }
 417   }
 418 };
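// Usage sketch for SmallBlocks (illustrative, not part of the original code):
// return_block() on a Metablock of exactly _small_block_min_size words puts it
// at the head of _small_lists[0]; a later get_block() for the same word size
// pops that block again. Word sizes at or above _small_block_max_size are not
// handled here and are expected to go to the BlockFreelist dictionary instead.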
 419 
 420 // Used to manage the free list of Metablocks (a block corresponds
 421 // to the allocation of a quantum of metadata).
 422 class BlockFreelist : public CHeapObj<mtClass> {
 423   BlockTreeDictionary* const _dictionary;
 424   SmallBlocks* _small_blocks;
 425 
 426   // Only allocate and split from freelist if the size of the allocation
 427   // is at least 1/4th the size of the available block.
 428   const static int WasteMultiplier = 4;
 429 
 430   // Accessors
 431   BlockTreeDictionary* dictionary() const { return _dictionary; }
 432   SmallBlocks* small_blocks() {
 433     if (_small_blocks == NULL) {
 434       _small_blocks = new SmallBlocks();
 435     }
 436     return _small_blocks;
 437   }
 438 
 439  public:
 440   BlockFreelist();
 441   ~BlockFreelist();
 442 
 443   // Get a block from, and return a block to, the free list
 444   MetaWord* get_block(size_t word_size);
 445   void return_block(MetaWord* p, size_t word_size);
 446 
 447   size_t total_size() const  {
 448     size_t result = dictionary()->total_size();
 449     if (_small_blocks != NULL) {
 450       result = result + _small_blocks->total_size();
 451     }
 452     return result;
 453   }
 454 
 455   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 456   void print_on(outputStream* st) const;
 457 };
 458 
 459 // Helper for Occupancy Bitmap. A type trait that yields an all-bits-set unsigned constant.
 460 template <typename T> struct all_ones  { static const T value; };
 461 template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
 462 template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
 463 
 464 // The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
 465 // keeps information about
 466 // - where a chunk starts
 467 // - whether a chunk is in-use or free
 468 // A bit in this bitmap represents one range of memory in the smallest
 469 // chunk size (SpecializedChunk or ClassSpecializedChunk).
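// Worked example (illustrative, not part of the original code): a non-class
// VirtualSpaceNode of 256 K words with a smallest chunk size of
// SpecializedChunk = 128 words needs 256 K / 128 = 2048 bits, i.e. 256 bytes,
// per bit layer.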
 470 class OccupancyMap : public CHeapObj<mtInternal> {
 471 
 472   // The address range this map covers.
 473   const MetaWord* const _reference_address;
 474   const size_t _word_size;
 475 
 476   // The word size of a specialized chunk, aka the number of words one
 477   // bit in this map represents.
 478   const size_t _smallest_chunk_word_size;
 479 
 480   // map data
 481   // Data are organized in two bit layers:
 482   // The first layer is the chunk-start-map. Here, a bit is set to mark
 483   // the corresponding region as the head of a chunk.
 484   // The second layer is the in-use-map. Here, a set bit indicates that
 485   // the corresponding region belongs to a chunk which is in use.
 486   uint8_t* _map[2];
 487 
 488   enum { layer_chunk_start_map = 0, layer_in_use_map = 1 };
 489 
 490   // length, in bytes, of bitmap data
 491   size_t _map_size;
 492 
 493   // Returns true if bit at position pos at bit-layer layer is set.
 494   bool get_bit_at_position(unsigned pos, unsigned layer) const {
 495     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 496     const unsigned byteoffset = pos / 8;
 497     assert(byteoffset < _map_size,
 498            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 499     const unsigned mask = 1 << (pos % 8);
 500     return (_map[layer][byteoffset] & mask) > 0;
 501   }
 502 
 503   // Changes bit at position pos at bit-layer layer to value v.
 504   void set_bit_at_position(unsigned pos, unsigned layer, bool v) {
 505     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 506     const unsigned byteoffset = pos / 8;
 507     assert(byteoffset < _map_size,
 508            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 509     const unsigned mask = 1 << (pos % 8);
 510     if (v) {
 511       _map[layer][byteoffset] |= mask;
 512     } else {
 513       _map[layer][byteoffset] &= ~mask;
 514     }
 515   }
 516 
 517   // Optimized case of is_any_bit_set_in_region for 32/64bit aligned access:
 518   // pos is 32/64 aligned and num_bits is 32/64.
 519   // This is the typical case when coalescing to medium chunks, whose size is
 520   // 32 or 64 times the specialized chunk size (depending on the class or
 521   // non-class case), so they occupy 32 or 64 bits which should be 32/64-bit
 522   // aligned, because chunks are chunk-size aligned.
 523   template <typename T>
 524   bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const {
 525     assert(_map_size > 0, "not initialized");
 526     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 527     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos);
 528     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits);
 529     const size_t byteoffset = pos / 8;
 530     assert(byteoffset <= (_map_size - sizeof(T)),
 531            "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 532     const T w = *(T*)(_map[layer] + byteoffset);
 533     return w > 0 ? true : false;
 534   }
 535 
 536   // Returns true if any bit in region [pos1, pos1 + num_bits) is set in bit-layer layer.
 537   bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const {
 538     if (pos % 32 == 0 && num_bits == 32) {
 539       return is_any_bit_set_in_region_3264<uint32_t>(pos, num_bits, layer);
 540     } else if (pos % 64 == 0 && num_bits == 64) {
 541       return is_any_bit_set_in_region_3264<uint64_t>(pos, num_bits, layer);
 542     } else {
 543       for (unsigned n = 0; n < num_bits; n ++) {
 544         if (get_bit_at_position(pos + n, layer)) {
 545           return true;
 546         }
 547       }
 548     }
 549     return false;
 550   }
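  // Example (illustrative, not part of the original code): the region of a
  // non-class medium chunk (8 K words) covers 8192 / 128 = 64 map bits and,
  // because chunks are chunk-size aligned, is tested by the 64-bit fast path
  // above with a single load; a class medium chunk (4 K words) covers 32 bits
  // and takes the 32-bit path.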
 551 
 552   // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer.
 553   bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const {
 554     assert(word_size % _smallest_chunk_word_size == 0,
 555         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 556     const unsigned pos = get_bitpos_for_address(p);
 557     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 558     return is_any_bit_set_in_region(pos, num_bits, layer);
 559   }
 560 
 561   // Optimized case of set_bits_of_region for 32/64bit aligned access:
 562   // pos is 32/64 aligned and num_bits is 32/64.
 563   // This is the typical case when coalescing to medium chunks, whose size
 564   // is 32 or 64 times the specialized chunk size (depending on the class or
 565   // non-class case), so they occupy 32 or 64 bits which should be 32/64-bit
 566   // aligned, because chunks are chunk-size aligned.
 567   template <typename T>
 568   void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 569     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).",
 570            (unsigned)(sizeof(T) * 8), pos);
 571     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.",
 572            num_bits, (unsigned)(sizeof(T) * 8));
 573     const size_t byteoffset = pos / 8;
 574     assert(byteoffset <= (_map_size - sizeof(T)),
 575            "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 576     T* const pw = (T*)(_map[layer] + byteoffset);
 577     *pw = v ? all_ones<T>::value : (T) 0;
 578   }
 579 
 580   // Set all bits in a region starting at pos to a value.
 581   void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 582     assert(_map_size > 0, "not initialized");
 583     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 584     if (pos % 32 == 0 && num_bits == 32) {
 585       set_bits_of_region_T<uint32_t>(pos, num_bits, layer, v);
 586     } else if (pos % 64 == 0 && num_bits == 64) {
 587       set_bits_of_region_T<uint64_t>(pos, num_bits, layer, v);
 588     } else {
 589       for (unsigned n = 0; n < num_bits; n ++) {
 590         set_bit_at_position(pos + n, layer, v);
 591       }
 592     }
 593   }
 594 
 595   // Helper: sets all bits in a region [p, p+word_size).
 596   void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) {
 597     assert(word_size % _smallest_chunk_word_size == 0,
 598         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 599     const unsigned pos = get_bitpos_for_address(p);
 600     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 601     set_bits_of_region(pos, num_bits, layer, v);
 602   }
 603 
 604   // Helper: given an address, return the bit position representing that address.
 605   unsigned get_bitpos_for_address(const MetaWord* p) const {
 606     assert(_reference_address != NULL, "not initialized");
 607     assert(p >= _reference_address && p < _reference_address + _word_size,
 608            "Address %p out of range for occupancy map [%p..%p).",
 609             p, _reference_address, _reference_address + _word_size);
 610     assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)),
 611            "Address not aligned (%p).", p);
 612     const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size;
 613     assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity.");
 614     return (unsigned) d;
 615   }
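  // Example (illustrative, not part of the original code): with
  // _smallest_chunk_word_size == 128, the address _reference_address + 3 * 128
  // maps to bit position 3.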
 616 
 617  public:
 618 
 619   OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) :
 620     _reference_address(reference_address), _word_size(word_size),
 621     _smallest_chunk_word_size(smallest_chunk_word_size) {
 622     assert(reference_address != NULL, "invalid reference address");
 623     assert(is_aligned(reference_address, smallest_chunk_word_size),
 624            "Reference address not aligned to smallest chunk size.");
 625     assert(is_aligned(word_size, smallest_chunk_word_size),
 626            "Word_size shall be a multiple of the smallest chunk size.");
 627     // Calculate bitmap size: one bit per smallest_chunk_word_size'd area.
 628     size_t num_bits = word_size / smallest_chunk_word_size;
 629     _map_size = (num_bits + 7) / 8;
 630     assert(_map_size * 8 >= num_bits, "sanity");
 631     _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal);
 632     _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal);
 633     assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed.");
 634     memset(_map[1], 0, _map_size);
 635     memset(_map[0], 0, _map_size);
 636     // Sanity test: the first respectively last possible chunk start address in
 637     // the covered range shall map to the first and last bit in the bitmap.
 638     assert(get_bitpos_for_address(reference_address) == 0,
 639       "First chunk address in range must map to fist bit in bitmap.");
 640     assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1,
 641       "Last chunk address in range must map to last bit in bitmap.");
 642   }
 643 
 644   ~OccupancyMap() {
 645     os::free(_map[0]);
 646     os::free(_map[1]);
 647   }
 648 
 649   // Returns true if at address x a chunk is starting.
 650   bool chunk_starts_at_address(MetaWord* p) const {
 651     const unsigned pos = get_bitpos_for_address(p);
 652     return get_bit_at_position(pos, layer_chunk_start_map);
 653   }
 654 
 655   void set_chunk_starts_at_address(MetaWord* p, bool v) {
 656     const unsigned pos = get_bitpos_for_address(p);
 657     set_bit_at_position(pos, layer_chunk_start_map, v);
 658   }
 659 
 660   // Removes all chunk-start-bits inside a region, typically as a
 661   // result of a chunk merge.
 662   void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) {
 663     set_bits_of_region(p, word_size, layer_chunk_start_map, false);
 664   }
 665 
 666   // Returns true if there are live (in use) chunks in the region limited
 667   // by [p, p+word_size).
 668   bool is_region_in_use(MetaWord* p, size_t word_size) const {
 669     return is_any_bit_set_in_region(p, word_size, layer_in_use_map);
 670   }
 671 
 672   // Marks the region starting at p with the size word_size as in use
 673   // or free, depending on v.
 674   void set_region_in_use(MetaWord* p, size_t word_size, bool v) {
 675     set_bits_of_region(p, word_size, layer_in_use_map, v);
 676   }
 677 
 678 #ifdef ASSERT
 679   // Verify occupancy map for the address range [from, to).
 680   // We need to tell it the address range, because the memory the
 681   // occupancy map is covering may not be fully committed yet.
 682   void verify(MetaWord* from, MetaWord* to) {
 683     Metachunk* chunk = NULL;
 684     int nth_bit_for_chunk = 0;
 685     MetaWord* chunk_end = NULL;
 686     for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) {
 687       const unsigned pos = get_bitpos_for_address(p);
 688       // Check the chunk-starts-info:
 689       if (get_bit_at_position(pos, layer_chunk_start_map)) {
 690         // Chunk start marked in bitmap.
 691         chunk = (Metachunk*) p;
 692         if (chunk_end != NULL) {
 693           assert(chunk_end == p, "Unexpected chunk start found at %p (expected "
 694                  "the next chunk to start at %p).", p, chunk_end);
 695         }
 696         assert(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p);
 697         if (chunk->get_chunk_type() != HumongousIndex) {
 698           guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p);
 699         }
 700         chunk_end = p + chunk->word_size();
 701         nth_bit_for_chunk = 0;
 702         assert(chunk_end <= to, "Chunk end overlaps test address range.");
 703       } else {
 704         // No chunk start marked in bitmap.
 705         assert(chunk != NULL, "Chunk should start at start of address range.");
 706         assert(p < chunk_end, "Did not find expected chunk start at %p.", p);
 707         nth_bit_for_chunk ++;
 708       }
 709       // Check the in-use-info:
 710       const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map);
 711       if (in_use_bit) {
 712         assert(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u).",
 713                chunk, nth_bit_for_chunk);
 714       } else {
 715         assert(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u).",
 716                chunk, nth_bit_for_chunk);
 717       }
 718     }
 719   }
 720 
 721   // Verify that a given chunk is correctly accounted for in the bitmap.
 722   void verify_for_chunk(Metachunk* chunk) {
 723     assert(chunk_starts_at_address((MetaWord*) chunk),
 724            "No chunk start marked in map for chunk %p.", chunk);
 725     // For chunks larger than the minimal chunk size, no other chunk
 726     // must start in its area.
 727     if (chunk->word_size() > _smallest_chunk_word_size) {
 728       assert(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size,
 729                                        chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map),
 730              "No chunk must start within another chunk.");
 731     }
 732     if (!chunk->is_tagged_free()) {
 733       assert(is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 734              "Chunk %p is in use but marked as free in map (%d %d).",
 735              chunk, chunk->get_chunk_type(), chunk->get_origin());
 736     } else {
 737       assert(!is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 738              "Chunk %p is free but marked as in-use in map (%d %d).",
 739              chunk, chunk->get_chunk_type(), chunk->get_origin());
 740     }
 741   }
 742 
 743 #endif // ASSERT
 744 
 745 };
 746 
 747 // A VirtualSpaceList node.
 748 class VirtualSpaceNode : public CHeapObj<mtClass> {
 749   friend class VirtualSpaceList;
 750 
 751   // Link to next VirtualSpaceNode
 752   VirtualSpaceNode* _next;
 753 
 754   // Whether this node is contained in class or metaspace.
 755   const bool _is_class;
 756 
 757   // total in the VirtualSpace
 758   MemRegion _reserved;
 759   ReservedSpace _rs;
 760   VirtualSpace _virtual_space;
 761   MetaWord* _top;              // Address of next available space in _virtual_space.
 762   // count of chunks contained in this VirtualSpace
 763   uintx _container_count;
 764 
 765   OccupancyMap* _occupancy_map;
 766 
 767   // Convenience functions to access the _virtual_space
 768   char* low()  const { return virtual_space()->low(); }
 769   char* high() const { return virtual_space()->high(); }
 770 
 771   // The first Metachunk will be allocated at the bottom of the
 772   // VirtualSpace
 773   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 774 
 775   // Committed but unused space in the virtual space
 776   size_t free_words_in_vs() const;
 777 
 778   // True if this node belongs to class metaspace.
 779   bool is_class() const { return _is_class; }
 780 
 781   // Helper function for take_from_committed: allocate padding chunks
 782   // until top is at the given address.
 783   void allocate_padding_chunks_until_top_is_at(MetaWord* target_top);
 784 
 785  public:
 786 
 787   VirtualSpaceNode(bool is_class, size_t byte_size);
 788   VirtualSpaceNode(bool is_class, ReservedSpace rs) :
 789     _is_class(is_class), _top(NULL), _next(NULL), _rs(rs), _container_count(0), _occupancy_map(NULL) {}
 790   ~VirtualSpaceNode();
 791 
 792   // Convenience functions for logical bottom and end
 793   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 794   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 795 
 796   const OccupancyMap* occupancy_map() const { return _occupancy_map; }
 797   OccupancyMap* occupancy_map() { return _occupancy_map; }
 798 
 799   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 800 
 801   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 802   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 803 
 804   bool is_pre_committed() const { return _virtual_space.special(); }
 805 
 807   // Accessors
 808   VirtualSpaceNode* next() { return _next; }
 809   void set_next(VirtualSpaceNode* v) { _next = v; }
 810 
 811   void set_reserved(MemRegion const v) { _reserved = v; }
 812   void set_top(MetaWord* v) { _top = v; }
 813 
 814   // Accessors
 815   MemRegion* reserved() { return &_reserved; }
 816   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 817 
 818   // Returns true if "word_size" is available in the VirtualSpace
 819   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 820 
 821   MetaWord* top() const { return _top; }
 822   void inc_top(size_t word_size) { _top += word_size; }
 823 
 824   uintx container_count() { return _container_count; }
 825   void inc_container_count();
 826   void dec_container_count();
 827 #ifdef ASSERT
 828   uintx container_count_slow();
 829   void verify_container_count();
 830 #endif
 831 
 832   // used and capacity in this single entry in the list
 833   size_t used_words_in_vs() const;
 834   size_t capacity_words_in_vs() const;
 835 
 836   bool initialize();
 837 
 838   // get space from the virtual space
 839   Metachunk* take_from_committed(size_t chunk_word_size);
 840 
 841   // Allocate a chunk from the virtual space and return it.
 842   Metachunk* get_chunk_vs(size_t chunk_word_size);
 843 
 844   // Expands/shrinks the committed space in a virtual space.  Delegates
 845   // to VirtualSpace.
 846   bool expand_by(size_t min_words, size_t preferred_words);
 847 
 848   // In preparation for deleting this node, remove all the chunks
 849   // in the node from any freelist.
 850   void purge(ChunkManager* chunk_manager);
 851 
 852   // If an allocation doesn't fit in the current node a new node is created.
 853   // Allocate chunks out of the remaining committed space in this node
 854   // to avoid wasting that memory.
 855   // The remaining space can always be chunked up completely because all
 856   // chunk sizes are multiples of the smallest chunk size.
 857   void retire(ChunkManager* chunk_manager);
 858 
 859 
 860   void print_on(outputStream* st) const;
 861   void print_map(outputStream* st, bool is_class) const;
 862 
 863   // Debug support
 864   DEBUG_ONLY(void mangle();)
 865   // Verify counters, all chunks in this list node and the occupancy map.
 866   DEBUG_ONLY(void verify();)
 867   // Verify that all free chunks in this node are ideally merged
 868   // (there should not be multiple small chunks where a large chunk could exist).
 869   DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
 870 
 871 };
 872 
 873 #define assert_is_aligned(value, alignment)                  \
 874   assert(is_aligned((value), (alignment)),                   \
 875          SIZE_FORMAT_HEX " is not aligned to "               \
 876          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 877 
 878 // Decide if large pages should be committed when the memory is reserved.
 879 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 880   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 881     size_t words = bytes / BytesPerWord;
 882     bool is_class = false; // We never reserve large pages for the class space.
 883     if (MetaspaceGC::can_expand(words, is_class) &&
 884         MetaspaceGC::allowed_expansion() >= words) {
 885       return true;
 886     }
 887   }
 888 
 889   return false;
 890 }
 891 
 892 // bytes is the size of the associated virtual space.
 893 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
 894   _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
 895   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 896   bool large_pages = should_commit_large_pages_when_reserving(bytes);
 897   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 898 
 899   if (_rs.is_reserved()) {
 900     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 901     assert(_rs.size() != 0, "Catch if we get a 0 size");
 902     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
 903     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
 904 
 905     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 906   }
 907 }
 908 
 909 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 910   DEBUG_ONLY(this->verify();)
 911   Metachunk* chunk = first_chunk();
 912   Metachunk* invalid_chunk = (Metachunk*) top();
 913   while (chunk < invalid_chunk ) {
 914     assert(chunk->is_tagged_free(), "Should be tagged free");
 915     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 916     chunk_manager->remove_chunk(chunk);
 917     chunk->remove_sentinel();
 918     assert(chunk->next() == NULL &&
 919            chunk->prev() == NULL,
 920            "Was not removed from its list");
 921     chunk = (Metachunk*) next;
 922   }
 923 }
 924 
 925 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
 926 
 927   if (bottom() == top()) {
 928     return;
 929   }
 930 
 931   const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
 932   const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
 933   const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
 934 
 935   int line_len = 100;
 936   const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
 937   line_len = (int)(section_len / spec_chunk_size);
 938 
 939   static const int NUM_LINES = 4;
 940 
 941   char* lines[NUM_LINES];
 942   for (int i = 0; i < NUM_LINES; i ++) {
 943     lines[i] = (char*)os::malloc(line_len, mtInternal);
 944   }
 945   int pos = 0;
 946   const MetaWord* p = bottom();
 947   const Metachunk* chunk = (const Metachunk*)p;
 948   const MetaWord* chunk_end = p + chunk->word_size();
 949   while (p < top()) {
 950     if (pos == line_len) {
 951       pos = 0;
 952       for (int i = 0; i < NUM_LINES; i ++) {
 953         st->fill_to(22);
 954         st->print_raw(lines[i], line_len);
 955         st->cr();
 956       }
 957     }
 958     if (pos == 0) {
 959       st->print(PTR_FORMAT ":", p2i(p));
 960     }
 961     if (p == chunk_end) {
 962       chunk = (Metachunk*)p;
 963       chunk_end = p + chunk->word_size();
 964     }
 965     // Line 1: chunk starting points (a dot if that area is a chunk start).
 966     lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
 967 
 968     // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
 969     // chunk is in use.
 970     const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
 971     if (chunk->word_size() == spec_chunk_size) {
 972       lines[1][pos] = chunk_is_free ? 'x' : 'X';
 973     } else if (chunk->word_size() == small_chunk_size) {
 974       lines[1][pos] = chunk_is_free ? 's' : 'S';
 975     } else if (chunk->word_size() == med_chunk_size) {
 976       lines[1][pos] = chunk_is_free ? 'm' : 'M';
 977     } else if (chunk->word_size() > med_chunk_size) {
 978       lines[1][pos] = chunk_is_free ? 'h' : 'H';
 979     } else {
 980       ShouldNotReachHere();
 981     }
 982 
 983     // Line 3: chunk origin
 984     const ChunkOrigin origin = chunk->get_origin();
 985     lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
 986 
 987     // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
 988     //         but were never used.
 989     lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
 990 
 991     p += spec_chunk_size;
 992     pos ++;
 993   }
 994   if (pos > 0) {
 995     for (int i = 0; i < NUM_LINES; i ++) {
 996       st->fill_to(22);
 997       st->print_raw(lines[i], line_len);
 998       st->cr();
 999     }
1000   }
1001   for (int i = 0; i < NUM_LINES; i ++) {
1002     os::free(lines[i]);
1003   }
1004 }
1005 
1006 
1007 #ifdef ASSERT
1008 uintx VirtualSpaceNode::container_count_slow() {
1009   uintx count = 0;
1010   Metachunk* chunk = first_chunk();
1011   Metachunk* invalid_chunk = (Metachunk*) top();
1012   while (chunk < invalid_chunk ) {
1013     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1014     do_verify_chunk(chunk);
1015     // Don't count the chunks on the free lists.  Those are
1016     // still part of the VirtualSpaceNode but not currently
1017     // counted.
1018     if (!chunk->is_tagged_free()) {
1019       count++;
1020     }
1021     chunk = (Metachunk*) next;
1022   }
1023   return count;
1024 }
1025 #endif
1026 
1027 #ifdef ASSERT
1028 // Verify counters, all chunks in this list node and the occupancy map.
1029 void VirtualSpaceNode::verify() {
1030   uintx num_in_use_chunks = 0;
1031   Metachunk* chunk = first_chunk();
1032   Metachunk* invalid_chunk = (Metachunk*) top();
1033 
1034   // Iterate the chunks in this node and verify each chunk.
1035   while (chunk < invalid_chunk ) {
1036     DEBUG_ONLY(do_verify_chunk(chunk);)
1037     if (!chunk->is_tagged_free()) {
1038       num_in_use_chunks ++;
1039     }
1040     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1041     chunk = (Metachunk*) next;
1042   }
1043   assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
1044          ", counter: " UINTX_FORMAT ".", num_in_use_chunks, _container_count);
1045   // Also verify the occupancy map.
1046   occupancy_map()->verify(this->bottom(), this->top());
1047 }
1048 #endif // ASSERT
1049 
1050 #ifdef ASSERT
1051 // Verify that all free chunks in this node are ideally merged
1052 // (there should not be multiple small chunks where a large chunk could exist).
1053 void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() {
1054   Metachunk* chunk = first_chunk();
1055   Metachunk* invalid_chunk = (Metachunk*) top();
1056   // Shorthands.
1057   const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
1058   const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
1059   int num_free_chunks_since_last_med_boundary = -1;
1060   int num_free_chunks_since_last_small_boundary = -1;
1061   while (chunk < invalid_chunk ) {
1062     // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary.
1063     // Reset the counter when encountering a non-free chunk.
1064     if (chunk->get_chunk_type() != HumongousIndex) {
1065       if (chunk->is_tagged_free()) {
1066         // Count successive free, non-humongous chunks.
1067         if (is_aligned(chunk, size_small)) {
1068           assert(num_free_chunks_since_last_small_boundary <= 1,
1069                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_small, size_small);
1070           num_free_chunks_since_last_small_boundary = 0;
1071         } else if (num_free_chunks_since_last_small_boundary != -1) {
1072           num_free_chunks_since_last_small_boundary ++;
1073         }
1074         if (is_aligned(chunk, size_med)) {
1075           assert(num_free_chunks_since_last_med_boundary <= 1,
1076                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_med, size_med);
1077           num_free_chunks_since_last_med_boundary = 0;
1078         } else if (num_free_chunks_since_last_med_boundary != -1) {
1079           num_free_chunks_since_last_med_boundary ++;
1080         }
1081       } else {
1082         // Encountering a non-free chunk, reset counters.
1083         num_free_chunks_since_last_med_boundary = -1;
1084         num_free_chunks_since_last_small_boundary = -1;
1085       }
1086     } else {
1087       // One cannot merge areas with a humongous chunk in the middle. Reset counters.
1088       num_free_chunks_since_last_med_boundary = -1;
1089       num_free_chunks_since_last_small_boundary = -1;
1090     }
1091 
1092     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1093     chunk = (Metachunk*) next;
1094   }
1095 }
1096 #endif // ASSERT
1097 
1098 // List of VirtualSpaces for metadata allocation.
1099 class VirtualSpaceList : public CHeapObj<mtClass> {
1100   friend class VirtualSpaceNode;
1101 
1102   enum VirtualSpaceSizes {
1103     VirtualSpaceSize = 256 * K
1104   };
1105 
1106   // Head of the list
1107   VirtualSpaceNode* _virtual_space_list;
1108   // virtual space currently being used for allocations
1109   VirtualSpaceNode* _current_virtual_space;
1110 
1111   // Is this VirtualSpaceList used for the compressed class space
1112   bool _is_class;
1113 
1114   // Sum of reserved and committed memory in the virtual spaces
1115   size_t _reserved_words;
1116   size_t _committed_words;
1117 
1118   // Number of virtual spaces
1119   size_t _virtual_space_count;
1120 
1121   ~VirtualSpaceList();
1122 
1123   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
1124 
1125   void set_virtual_space_list(VirtualSpaceNode* v) {
1126     _virtual_space_list = v;
1127   }
1128   void set_current_virtual_space(VirtualSpaceNode* v) {
1129     _current_virtual_space = v;
1130   }
1131 
1132   void link_vs(VirtualSpaceNode* new_entry);
1133 
1134   // Get another virtual space and add it to the list.  This
1135   // is typically prompted by a failed attempt to allocate a chunk
1136   // and is typically followed by the allocation of a chunk.
1137   bool create_new_virtual_space(size_t vs_word_size);
1138 
1139   // Chunk up the unused committed space in the current
1140   // virtual space and add the chunks to the free list.
1141   void retire_current_virtual_space();
1142 
1143  public:
1144   VirtualSpaceList(size_t word_size);
1145   VirtualSpaceList(ReservedSpace rs);
1146 
1147   size_t free_bytes();
1148 
1149   Metachunk* get_new_chunk(size_t chunk_word_size,
1150                            size_t suggested_commit_granularity);
1151 
1152   bool expand_node_by(VirtualSpaceNode* node,
1153                       size_t min_words,
1154                       size_t preferred_words);
1155 
1156   bool expand_by(size_t min_words,
1157                  size_t preferred_words);
1158 
1159   VirtualSpaceNode* current_virtual_space() {
1160     return _current_virtual_space;
1161   }
1162 
1163   bool is_class() const { return _is_class; }
1164 
1165   bool initialization_succeeded() { return _virtual_space_list != NULL; }
1166 
1167   size_t reserved_words()  { return _reserved_words; }
1168   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
1169   size_t committed_words() { return _committed_words; }
1170   size_t committed_bytes() { return committed_words() * BytesPerWord; }
1171 
1172   void inc_reserved_words(size_t v);
1173   void dec_reserved_words(size_t v);
1174   void inc_committed_words(size_t v);
1175   void dec_committed_words(size_t v);
1176   void inc_virtual_space_count();
1177   void dec_virtual_space_count();
1178 
1179   bool contains(const void* ptr);
1180 
1181   // Unlink empty VirtualSpaceNodes and free them.
1182   void purge(ChunkManager* chunk_manager);
1183 
1184   void print_on(outputStream* st) const;
1185   void print_map(outputStream* st) const;
1186 
1187   class VirtualSpaceListIterator : public StackObj {
1188     VirtualSpaceNode* _virtual_spaces;
1189    public:
1190     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
1191       _virtual_spaces(virtual_spaces) {}
1192 
1193     bool repeat() {
1194       return _virtual_spaces != NULL;
1195     }
1196 
1197     VirtualSpaceNode* get_next() {
1198       VirtualSpaceNode* result = _virtual_spaces;
1199       if (_virtual_spaces != NULL) {
1200         _virtual_spaces = _virtual_spaces->next();
1201       }
1202       return result;
1203     }
1204   };
1205 };
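// Iteration sketch (illustrative, not part of the original code; assumes it
// runs inside a VirtualSpaceList member function so the nested iterator and
// virtual_space_list() are directly accessible):
//
//   VirtualSpaceListIterator iter(virtual_space_list());
//   while (iter.repeat()) {
//     VirtualSpaceNode* node = iter.get_next();
//     // ... visit node ...
//   }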
1206 
1207 class Metadebug : AllStatic {
1208   // Debugging support for Metaspaces
1209   static int _allocation_fail_alot_count;
1210 
1211  public:
1212 
1213   static void init_allocation_fail_alot_count();
1214 #ifdef ASSERT
1215   static bool test_metadata_failure();
1216 #endif
1217 };
1218 
1219 int Metadebug::_allocation_fail_alot_count = 0;
1220 
1221 //  SpaceManager - used by Metaspace to handle allocations
1222 class SpaceManager : public CHeapObj<mtClass> {
1223   friend class ClassLoaderMetaspace;
1224   friend class Metadebug;
1225 
1226  private:
1227 
1228   // protects allocations
1229   Mutex* const _lock;
1230 
1231   // Type of metadata allocated.
1232   const Metaspace::MetadataType   _mdtype;
1233 
1234   // Type of metaspace
1235   const Metaspace::MetaspaceType  _space_type;
1236 
1237   // List of chunks in use by this SpaceManager.  Allocations
1238   // are done from the current chunk.  The list is used for deallocating
1239   // chunks when the SpaceManager is freed.
1240   Metachunk* _chunks_in_use[NumberOfInUseLists];
1241   Metachunk* _current_chunk;
1242 
1243   // Maximum number of small chunks to allocate to a SpaceManager
1244   static uint const _small_chunk_limit;
1245 
1246   // Maximum number of specialized chunks to allocate for anonymous and delegating
1247   // metadata space to a SpaceManager
1248   static uint const _anon_and_delegating_metadata_specialize_chunk_limit;
1249 
1250   // Sum of all space in allocated chunks
1251   size_t _allocated_blocks_words;
1252 
1253   // Sum of all allocated chunks
1254   size_t _allocated_chunks_words;
1255   size_t _allocated_chunks_count;
1256 
1257   // Free lists of blocks are per SpaceManager since they
1258   // are assumed to be in chunks in use by the SpaceManager
1259   // and all chunks in use by a SpaceManager are freed when
1260   // the class loader using the SpaceManager is collected.
1261   BlockFreelist* _block_freelists;
1262 
1263  private:
1264   // Accessors
1265   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
1266   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
1267     _chunks_in_use[index] = v;
1268   }
1269 
1270   BlockFreelist* block_freelists() const { return _block_freelists; }
1271 
1272   Metaspace::MetadataType mdtype() { return _mdtype; }
1273 
1274   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
1275   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
1276 
1277   Metachunk* current_chunk() const { return _current_chunk; }
1278   void set_current_chunk(Metachunk* v) {
1279     _current_chunk = v;
1280   }
1281 
1282   Metachunk* find_current_chunk(size_t word_size);
1283 
1284   // Add chunk to the list of chunks in use
1285   void add_chunk(Metachunk* v, bool make_current);
1286   void retire_current_chunk();
1287 
1288   Mutex* lock() const { return _lock; }
1289 
1290  protected:
1291   void initialize();
1292 
1293  public:
1294   SpaceManager(Metaspace::MetadataType mdtype,
1295                Metaspace::MetaspaceType space_type,
1296                Mutex* lock);
1297   ~SpaceManager();
1298 
1299   enum ChunkMultiples {
1300     MediumChunkMultiple = 4
1301   };
1302 
1303   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
1304   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
1305   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
1306 
1307   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
1308 
1309   // Accessors
1310   bool is_class() const { return _mdtype == Metaspace::ClassType; }
1311 
1312   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
1313   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
1314   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
1315 
1316   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
1317 
1318   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
1319 
1320   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
1321   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
1322   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
1323   size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
1324   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
1325 
1326   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
1327 
1328   // Increment the per Metaspace and global running sums for Metachunks
1329   // by the given size.  This is used when a Metachunk is added to
1330   // the in-use list.
1331   void inc_size_metrics(size_t words);
1332   // Increment the per Metaspace and global running sums for Metablocks by the given
1333   // size.  This is used when a Metablock is allocated.
1334   void inc_used_metrics(size_t words);
1335   // Delete the portion of the running sums for this SpaceManager. That is,
1336   // the global running sums for the Metachunks and Metablocks are
1337   // decremented for all the Metachunks in-use by this SpaceManager.
1338   void dec_total_from_size_metrics();
1339 
1340   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
1341   // or return the unadjusted size if the requested size is humongous.
1342   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
1343   size_t adjust_initial_chunk_size(size_t requested) const;
1344 
1345   // Get the initial chunk size for this metaspace type.
1346   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
1347 
1348   size_t sum_capacity_in_chunks_in_use() const;
1349   size_t sum_used_in_chunks_in_use() const;
1350   size_t sum_free_in_chunks_in_use() const;
1351   size_t sum_waste_in_chunks_in_use() const;
1352   size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;
1353 
1354   size_t sum_count_in_chunks_in_use();
1355   size_t sum_count_in_chunks_in_use(ChunkIndex i);
1356 
1357   Metachunk* get_new_chunk(size_t chunk_word_size);
1358 
1359   // Block allocation and deallocation.
1360   // Allocates a block from the current chunk
1361   MetaWord* allocate(size_t word_size);
1362 
1363   // Helper for allocations
1364   MetaWord* allocate_work(size_t word_size);
1365 
1366   // Returns a block to the per manager freelist
1367   void deallocate(MetaWord* p, size_t word_size);
1368 
1369   // Based on the allocation size and a minimum chunk size,
1370   // calculate the chunk size to return (used when expanding space for chunk allocation).
1371   size_t calc_chunk_size(size_t allocation_word_size);
1372 
1373   // Called when an allocation from the current chunk fails.
1374   // Gets a new chunk (may require getting a new virtual space),
1375   // and allocates from that chunk.
1376   MetaWord* grow_and_allocate(size_t word_size);
1377 
1378   // Notify memory usage to MemoryService.
1379   void track_metaspace_memory_usage();
1380 
1381   // debugging support.
1382 
1383   void dump(outputStream* const out) const;
1384   void print_on(outputStream* st) const;
1385   void locked_print_chunks_in_use_on(outputStream* st) const;
1386 
1387   void verify();
1388   void verify_chunk_size(Metachunk* chunk);
1389 #ifdef ASSERT
1390   void verify_allocated_blocks_words();
1391 #endif
1392 
1393   // This adjusts the given size so that it is at least the minimum allocation size in
1394   // words for data in metaspace.  Essentially the minimum size is currently 3 words.
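  // Worked example (illustrative, assuming 64-bit MetaWords, a three-word Metablock and
  // 8-byte chunk object alignment): a request of word_size == 1 gives byte_size == 8,
  // MAX2(8, sizeof(Metablock)) == 24, which stays 24 after alignment, so 3 words are returned.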
1395   size_t get_allocation_word_size(size_t word_size) {
1396     size_t byte_size = word_size * BytesPerWord;
1397 
1398     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
1399     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
1400 
1401     size_t raw_word_size = raw_bytes_size / BytesPerWord;
1402     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
1403 
1404     return raw_word_size;
1405   }
1406 };
1407 
1408 uint const SpaceManager::_small_chunk_limit = 4;
1409 uint const SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit = 4;
1410 
1411 void VirtualSpaceNode::inc_container_count() {
1412   assert_lock_strong(MetaspaceExpand_lock);
1413   _container_count++;
1414 }
1415 
1416 void VirtualSpaceNode::dec_container_count() {
1417   assert_lock_strong(MetaspaceExpand_lock);
1418   _container_count--;
1419 }
1420 
1421 #ifdef ASSERT
1422 void VirtualSpaceNode::verify_container_count() {
1423   assert(_container_count == container_count_slow(),
1424          "Inconsistency in container_count _container_count " UINTX_FORMAT
1425          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
1426 }
1427 #endif
1428 
1429 // BlockFreelist methods
1430 
1431 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
1432 
1433 BlockFreelist::~BlockFreelist() {
1434   delete _dictionary;
1435   if (_small_blocks != NULL) {
1436     delete _small_blocks;
1437   }
1438 }
1439 
1440 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
1441   assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
1442 
1443   Metablock* free_chunk = ::new (p) Metablock(word_size);
1444   if (word_size < SmallBlocks::small_block_max_size()) {
1445     small_blocks()->return_block(free_chunk, word_size);
1446   } else {
1447     dictionary()->return_chunk(free_chunk);
1448   }
1449   log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
1450             SIZE_FORMAT, p2i(free_chunk), word_size);
1451 }
1452 
1453 MetaWord* BlockFreelist::get_block(size_t word_size) {
1454   assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
1455 
1456   // Try small_blocks first.
1457   if (word_size < SmallBlocks::small_block_max_size()) {
1458     // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
1459     // this space manager.
1460     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
1461     if (new_block != NULL) {
1462       log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1463               p2i(new_block), word_size);
1464       return new_block;
1465     }
1466   }
1467 
1468   if (word_size < BlockFreelist::min_dictionary_size()) {
1469     // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
1470     return NULL;
1471   }
1472 
1473   Metablock* free_block = dictionary()->get_chunk(word_size);
1474   if (free_block == NULL) {
1475     return NULL;
1476   }
1477 
1478   const size_t block_size = free_block->size();
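  // If the block found is more than WasteMultiplier times the requested size, using it
  // would waste too much space; put it back and report a miss. Otherwise use it and,
  // below, split off any unused tail that is at least SmallBlocks::small_block_min_size()
  // and return that tail to the freelist.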
1479   if (block_size > WasteMultiplier * word_size) {
1480     return_block((MetaWord*)free_block, block_size);
1481     return NULL;
1482   }
1483 
1484   MetaWord* new_block = (MetaWord*)free_block;
1485   assert(block_size >= word_size, "Incorrect size of block from freelist");
1486   const size_t unused = block_size - word_size;
1487   if (unused >= SmallBlocks::small_block_min_size()) {
1488     return_block(new_block + word_size, unused);
1489   }
1490 
1491   log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1492             p2i(new_block), word_size);
1493   return new_block;
1494 }
1495 
1496 void BlockFreelist::print_on(outputStream* st) const {
1497   dictionary()->print_free_lists(st);
1498   if (_small_blocks != NULL) {
1499     _small_blocks->print_on(st);
1500   }
1501 }
1502 
1503 // VirtualSpaceNode methods
1504 
1505 VirtualSpaceNode::~VirtualSpaceNode() {
1506   _rs.release();
1507   if (_occupancy_map != NULL) {
1508     delete _occupancy_map;
1509   }
1510 #ifdef ASSERT
1511   size_t word_size = sizeof(*this) / BytesPerWord;
1512   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
1513 #endif
1514 }
1515 
1516 size_t VirtualSpaceNode::used_words_in_vs() const {
1517   return pointer_delta(top(), bottom(), sizeof(MetaWord));
1518 }
1519 
1520 // Space committed in the VirtualSpace
1521 size_t VirtualSpaceNode::capacity_words_in_vs() const {
1522   return pointer_delta(end(), bottom(), sizeof(MetaWord));
1523 }
1524 
1525 size_t VirtualSpaceNode::free_words_in_vs() const {
1526   return pointer_delta(end(), top(), sizeof(MetaWord));
1527 }
1528 
1529 // Given an address larger than top(), allocate padding chunks until top is at the given address.
1530 void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) {
1531 
1532   assert(target_top > top(), "Sanity");
1533 
1534   // Padding chunks are added to the freelist.
1535   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1536 
1537   // shorthands
1538   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1539   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1540   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1541 
1542   while (top() < target_top) {
1543 
1544     // We could make this coding more generic, but right now we only deal with two possible chunk sizes
1545     // for padding chunks, so it is not worth it.
1546     size_t padding_chunk_word_size = small_word_size;
1547     if (is_aligned(top(), small_word_size * sizeof(MetaWord)) == false) {
1548       assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
1549       padding_chunk_word_size = spec_word_size;
1550     }
1551     MetaWord* here = top();
1552     assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
1553     inc_top(padding_chunk_word_size);
1554 
1555     // Create new padding chunk.
1556     ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
1557     assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
1558 
1559     Metachunk* const padding_chunk =
1560       ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
1561     assert(padding_chunk == (Metachunk*)here, "Sanity");
1562     DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
1563     log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
1564                                        PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
1565                                        (is_class() ? "class space " : "metaspace"),
1566                                        p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
1567 
1568     // Mark chunk start in occupancy map.
1569     occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
1570 
1571     // Chunks are born as in-use (see Metachunk ctor). So, before returning
1572     // the padding chunk to its chunk manager, mark it as in use (ChunkManager
1573     // will assert that).
1574     do_update_in_use_info_for_chunk(padding_chunk, true);
1575 
1576     // Return Chunk to freelist.
1577     inc_container_count();
1578     chunk_manager->return_single_chunk(padding_chunk_type, padding_chunk);
1579     // Please note: at this point, ChunkManager::return_single_chunk()
1580     // may already have merged the padding chunk with neighboring chunks, so
1581     // it may have vanished. Do not reference the padding
1582     // chunk beyond this point.
1583   }
1584 
1585   assert(top() == target_top, "Sanity");
1586 
1587 } // allocate_padding_chunks_until_top_is_at()
1588 
1589 // Allocates the chunk from the virtual space only.
1590 // This interface is also used internally for debugging.  Not all
1591 // chunks removed here are necessarily used for allocation.
1592 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1593   // Non-humongous chunks are to be allocated aligned to their chunk
1594   // size. So, start addresses of medium chunks are aligned to medium
1595   // chunk size, those of small chunks to small chunk size and so
1596   // forth. This facilitates merging of free chunks and reduces
1597   // fragmentation. Chunk sizes are spec < small < medium, with each
1598   // larger chunk size being a multiple of the next smaller chunk
1599   // size.
1600   // Because of this alignment, we may need to create a number of padding
1601   // chunks. These chunks are created and added to the freelist.
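  // Illustrative example (non-class space, with the chunk sizes defined in this file:
  // spec = 128, small = 512, medium = 8K words): if top() sits on a 512-word boundary
  // that is not 8K-aligned and a medium chunk is requested, padding chunks are carved
  // out and returned to the freelist until top() reaches the next 8K-word boundary.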
1602 
1603   // The chunk manager to which we will give our padding chunks.
1604   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1605 
1606   // shorthands
1607   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1608   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1609   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1610 
1611   assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
1612          chunk_word_size >= med_word_size, "Invalid chunk size requested.");
1613 
1614   // Chunk alignment (in bytes) == chunk size unless humongous.
1615   // Humongous chunks are aligned to the smallest chunk size (spec).
1616   const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
1617                                            spec_word_size : chunk_word_size) * sizeof(MetaWord);
1618 
1619   // Do we have enough space to create the requested chunk plus
1620   // any padding chunks needed?
1621   MetaWord* const next_aligned =
1622     static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
1623   if (!is_available((next_aligned - top()) + chunk_word_size)) {
1624     return NULL;
1625   }
1626 
1627   // Before allocating the requested chunk, allocate padding chunks if necessary.
1628   // We only need to do this for small or medium chunks: specialized chunks are the
1629   // smallest size, hence always aligned. Humongous chunks are allocated unaligned
1630   // (implicitly, also aligned to smallest chunk size).
1631   if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top())  {
1632     log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
1633         (is_class() ? "class space " : "metaspace"),
1634         top(), next_aligned);
1635     allocate_padding_chunks_until_top_is_at(next_aligned);
1636     // Now, top should be aligned correctly.
1637     assert_is_aligned(top(), required_chunk_alignment);
1638   }
1639 
1640   // Now, top should be aligned correctly.
1641   assert_is_aligned(top(), required_chunk_alignment);
1642 
1643   // Bottom of the new chunk
1644   MetaWord* chunk_limit = top();
1645   assert(chunk_limit != NULL, "Not safe to call this method");
1646 
1647   // The virtual spaces are always expanded by the
1648   // commit granularity to enforce the following condition.
1649   // Without this the is_available check will not work correctly.
1650   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1651       "The committed memory doesn't match the expanded memory.");
1652 
1653   if (!is_available(chunk_word_size)) {
1654     LogTarget(Debug, gc, metaspace, freelist) lt;
1655     if (lt.is_enabled()) {
1656       LogStream ls(lt);
1657       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1658       // Dump some information about the virtual space that is nearly full
1659       print_on(&ls);
1660     }
1661     return NULL;
1662   }
1663 
1664   // Take the space (bump top on the current virtual space).
1665   inc_top(chunk_word_size);
1666 
1667   // Initialize the chunk
1668   ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
1669   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
1670   assert(result == (Metachunk*)chunk_limit, "Sanity");
1671   occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
1672   do_update_in_use_info_for_chunk(result, true);
1673 
1674   inc_container_count();
1675 
1676   if (VerifyMetaspace) {
1677     DEBUG_ONLY(chunk_manager->locked_verify());
1678     DEBUG_ONLY(this->verify());
1679   }
1680 
1681   DEBUG_ONLY(do_verify_chunk(result));
1682 
1683   result->inc_use_count();
1684 
1685   return result;
1686 }
1687 
1688 
1689 // Expand the virtual space (commit more of the reserved space)
1690 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1691   size_t min_bytes = min_words * BytesPerWord;
1692   size_t preferred_bytes = preferred_words * BytesPerWord;
1693 
1694   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1695 
1696   if (uncommitted < min_bytes) {
1697     return false;
1698   }
1699 
1700   size_t commit = MIN2(preferred_bytes, uncommitted);
1701   bool result = virtual_space()->expand_by(commit, false);
1702 
1703   if (result) {
1704     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
1705               (is_class() ? "class" : "non-class"), commit);
1706   } else {
1707     log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
1708               (is_class() ? "class" : "non-class"), commit);
1709   }
1710 
1711   assert(result, "Failed to commit memory");
1712 
1713   return result;
1714 }
1715 
1716 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1717   assert_lock_strong(MetaspaceExpand_lock);
1718   Metachunk* result = take_from_committed(chunk_word_size);
1719   return result;
1720 }
1721 
1722 bool VirtualSpaceNode::initialize() {
1723 
1724   if (!_rs.is_reserved()) {
1725     return false;
1726   }
1727 
1728   // These are necessary restrictions to make sure that the virtual space always
1729   // grows in steps of Metaspace::commit_alignment(). If both base and size are
1730   // aligned only the middle alignment of the VirtualSpace is used.
1731   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
1732   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
1733 
1734   // ReservedSpaces marked as special will have the entire memory
1735   // pre-committed. Setting a committed size will make sure that
1736   // committed_size and actual_committed_size agree.
1737   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1738 
1739   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1740                                             Metaspace::commit_alignment());
1741   if (result) {
1742     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1743         "Checking that the pre-committed memory was registered by the VirtualSpace");
1744 
1745     set_top((MetaWord*)virtual_space()->low());
1746     set_reserved(MemRegion((HeapWord*)_rs.base(),
1747                  (HeapWord*)(_rs.base() + _rs.size())));
1748 
1749     assert(reserved()->start() == (HeapWord*) _rs.base(),
1750            "Reserved start was not set properly " PTR_FORMAT
1751            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1752     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1753            "Reserved size was not set properly " SIZE_FORMAT
1754            " != " SIZE_FORMAT, reserved()->word_size(),
1755            _rs.size() / BytesPerWord);
1756   }
1757 
1758   // Initialize Occupancy Map.
1759   const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
1760   _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
1761 
1762   return result;
1763 }
1764 
1765 void VirtualSpaceNode::print_on(outputStream* st) const {
1766   size_t used = used_words_in_vs();
1767   size_t capacity = capacity_words_in_vs();
1768   VirtualSpace* vs = virtual_space();
1769   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
1770            "[" PTR_FORMAT ", " PTR_FORMAT ", "
1771            PTR_FORMAT ", " PTR_FORMAT ")",
1772            p2i(vs), capacity / K,
1773            capacity == 0 ? 0 : used * 100 / capacity,
1774            p2i(bottom()), p2i(top()), p2i(end()),
1775            p2i(vs->high_boundary()));
1776 }
1777 
1778 #ifdef ASSERT
1779 void VirtualSpaceNode::mangle() {
1780   size_t word_size = capacity_words_in_vs();
1781   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1782 }
1783 #endif // ASSERT
1784 
1785 // VirtualSpaceList methods
1786 // Space allocated from the VirtualSpace
1787 
1788 VirtualSpaceList::~VirtualSpaceList() {
1789   VirtualSpaceListIterator iter(virtual_space_list());
1790   while (iter.repeat()) {
1791     VirtualSpaceNode* vsl = iter.get_next();
1792     delete vsl;
1793   }
1794 }
1795 
1796 void VirtualSpaceList::inc_reserved_words(size_t v) {
1797   assert_lock_strong(MetaspaceExpand_lock);
1798   _reserved_words = _reserved_words + v;
1799 }
1800 void VirtualSpaceList::dec_reserved_words(size_t v) {
1801   assert_lock_strong(MetaspaceExpand_lock);
1802   _reserved_words = _reserved_words - v;
1803 }
1804 
1805 #define assert_committed_below_limit()                        \
1806   assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
1807          "Too much committed memory. Committed: " SIZE_FORMAT \
1808          " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1809           MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);
1810 
1811 void VirtualSpaceList::inc_committed_words(size_t v) {
1812   assert_lock_strong(MetaspaceExpand_lock);
1813   _committed_words = _committed_words + v;
1814 
1815   assert_committed_below_limit();
1816 }
1817 void VirtualSpaceList::dec_committed_words(size_t v) {
1818   assert_lock_strong(MetaspaceExpand_lock);
1819   _committed_words = _committed_words - v;
1820 
1821   assert_committed_below_limit();
1822 }
1823 
1824 void VirtualSpaceList::inc_virtual_space_count() {
1825   assert_lock_strong(MetaspaceExpand_lock);
1826   _virtual_space_count++;
1827 }
1828 void VirtualSpaceList::dec_virtual_space_count() {
1829   assert_lock_strong(MetaspaceExpand_lock);
1830   _virtual_space_count--;
1831 }
1832 
1833 void ChunkManager::remove_chunk(Metachunk* chunk) {
1834   size_t word_size = chunk->word_size();
1835   ChunkIndex index = list_index(word_size);
1836   if (index != HumongousIndex) {
1837     free_chunks(index)->remove_chunk(chunk);
1838   } else {
1839     humongous_dictionary()->remove_chunk(chunk);
1840   }
1841 
1842   // Chunk has been removed from the chunks free list, update counters.
1843   account_for_removed_chunk(chunk);
1844 }
1845 
1846 bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
1847   assert_lock_strong(MetaspaceExpand_lock);
1848   assert(chunk != NULL, "invalid chunk pointer");
1849   // Check for valid merge combinations.
1850   assert((chunk->get_chunk_type() == SpecializedIndex &&
1851           (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) ||
1852          (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex),
1853         "Invalid chunk merge combination.");
1854 
1855   const size_t target_chunk_word_size =
1856     get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class());
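  // Example: to merge around a free specialized chunk into a small chunk, the enclosing
  // small-chunk-aligned region is considered. The merge only succeeds if that region lies
  // completely within the node's committed range, starts and ends on chunk boundaries,
  // and contains no in-use chunks; these are exactly the checks performed below.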
1857 
1858   // [ prospective merge region )
1859   MetaWord* const p_merge_region_start =
1860     (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord));
1861   MetaWord* const p_merge_region_end =
1862     p_merge_region_start + target_chunk_word_size;
1863 
1864   // We need the VirtualSpaceNode containing this chunk and its occupancy map.
1865   VirtualSpaceNode* const vsn = chunk->container();
1866   OccupancyMap* const ocmap = vsn->occupancy_map();
1867 
1868   // The prospective chunk merge range must be completely contained by the
1869   // committed range of the virtual space node.
1870   if (p_merge_region_start < vsn->bottom() || p_merge_region_end > vsn->top()) {
1871     return false;
1872   }
1873 
1874   // Only attempt to merge this range if at its start a chunk starts and at its end
1875   // a chunk ends. If a chunk (can only be humongous) straddles either start or end
1876   // of that range, we cannot merge.
1877   if (!ocmap->chunk_starts_at_address(p_merge_region_start)) {
1878     return false;
1879   }
1880   if (p_merge_region_end < vsn->top() &&
1881       !ocmap->chunk_starts_at_address(p_merge_region_end)) {
1882     return false;
1883   }
1884 
1885   // Now check if the prospective merge area contains live chunks. If it does we cannot merge.
1886   if (ocmap->is_region_in_use(p_merge_region_start, target_chunk_word_size)) {
1887     return false;
1888   }
1889 
1890   // Success! Remove all chunks in this region...
1891   log_trace(gc, metaspace, freelist)("%s: coalescing chunks in area [%p-%p)...",
1892     (is_class() ? "class space" : "metaspace"),
1893     p_merge_region_start, p_merge_region_end);
1894 
1895   const int num_chunks_removed =
1896     remove_chunks_in_area(p_merge_region_start, target_chunk_word_size);
1897 
1898   // ... and create a single new bigger chunk.
1899   Metachunk* const p_new_chunk =
1900       ::new (p_merge_region_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn);
1901   assert(p_new_chunk == (Metachunk*)p_merge_region_start, "Sanity");
1902   p_new_chunk->set_origin(origin_merge);
1903 
1904   log_trace(gc, metaspace, freelist)("%s: created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".",
1905     (is_class() ? "class space" : "metaspace"),
1906     p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord));
1907 
1908   // Fix occupancy map: remove old start bits of the small chunks and set new start bit.
1909   ocmap->wipe_chunk_start_bits_in_region(p_merge_region_start, target_chunk_word_size);
1910   ocmap->set_chunk_starts_at_address(p_merge_region_start, true);
1911 
1912   // Mark chunk as free. Note: it is not necessary to update the occupancy
1913   // map in-use map, because the old chunks were also free, so nothing
1914   // should have changed.
1915   p_new_chunk->set_is_tagged_free(true);
1916 
1917   // Add new chunk to its freelist.
1918   ChunkList* const list = free_chunks(target_chunk_type);
1919   list->return_chunk_at_head(p_new_chunk);
1920 
1921   // And adjust ChunkManager::_free_chunks_count (_free_chunks_total
1922   // should not have changed, because the size of the space should be the same)
1923   _free_chunks_count -= num_chunks_removed;
1924   _free_chunks_count ++;
1925 
1926   // VirtualSpaceNode::container_count does not have to be modified:
1927   // it means "number of active (non-free) chunks", so merging free chunks
1928   // should not affect that count.
1929 
1930   // At the end of a chunk merge, run verification tests.
1931   if (VerifyMetaspace) {
1932     DEBUG_ONLY(this->locked_verify());
1933     DEBUG_ONLY(vsn->verify());
1934   }
1935 
1936   return true;
1937 }
1938 
1939 // Remove all chunks in the given area - the chunks are supposed to be free -
1940 // from their corresponding freelists. Mark them as invalid.
1941 // - This does not correct the occupancy map.
1942 // - This does not adjust the counters in ChunkManager.
1943 // - Does not adjust container count counter in containing VirtualSpaceNode
1944 // Returns number of chunks removed.
1945 int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
1946   assert(p != NULL && word_size > 0, "Invalid range.");
1947   const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
1948   assert_is_aligned(word_size, smallest_chunk_size);
1949 
1950   Metachunk* const start = (Metachunk*) p;
1951   const Metachunk* const end = (Metachunk*)(p + word_size);
1952   Metachunk* cur = start;
1953   int num_removed = 0;
1954   while (cur < end) {
1955     Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
1956     DEBUG_ONLY(do_verify_chunk(cur));
1957     assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
1958     assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
1959     log_trace(gc, metaspace, freelist)("%s: removing chunk %p, size " SIZE_FORMAT_HEX ".",
1960       (is_class() ? "class space" : "metaspace"),
1961       cur, cur->word_size() * sizeof(MetaWord));
1962     cur->remove_sentinel();
1963     // Note: cannot call ChunkManager::remove_chunk, because that
1964     // modifies the counters in ChunkManager, which we do not want. So
1965     // we call remove_chunk on the freelist directly (see also the
1966     // splitting function which does the same).
1967     ChunkList* const list = free_chunks(list_index(cur->word_size()));
1968     list->remove_chunk(cur);
1969     num_removed ++;
1970     cur = next;
1971   }
1972   return num_removed;
1973 }
1974 
1975 // Walk the list of VirtualSpaceNodes and delete
1976 // nodes with a 0 container_count.  Remove Metachunks in
1977 // the node from their respective freelists.
1978 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1979   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
1980   assert_lock_strong(MetaspaceExpand_lock);
1981   // Don't use a VirtualSpaceListIterator because this
1982   // list is being changed and a straightforward use of an iterator is not safe.
1983   VirtualSpaceNode* purged_vsl = NULL;
1984   VirtualSpaceNode* prev_vsl = virtual_space_list();
1985   VirtualSpaceNode* next_vsl = prev_vsl;
1986   while (next_vsl != NULL) {
1987     VirtualSpaceNode* vsl = next_vsl;
1988     DEBUG_ONLY(vsl->verify_container_count();)
1989     next_vsl = vsl->next();
1990     // Don't free the current virtual space since it will likely
1991     // be needed soon.
1992     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1993       log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
1994                                          ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
1995       // Unlink it from the list
1996       if (prev_vsl == vsl) {
1997         // This is the case of the current node being the first node.
1998         assert(vsl == virtual_space_list(), "Expected to be the first node");
1999         set_virtual_space_list(vsl->next());
2000       } else {
2001         prev_vsl->set_next(vsl->next());
2002       }
2003 
2004       vsl->purge(chunk_manager);
2005       dec_reserved_words(vsl->reserved_words());
2006       dec_committed_words(vsl->committed_words());
2007       dec_virtual_space_count();
2008       purged_vsl = vsl;
2009       delete vsl;
2010     } else {
2011       prev_vsl = vsl;
2012     }
2013   }
2014 #ifdef ASSERT
2015   if (purged_vsl != NULL) {
2016     // List should be stable enough to use an iterator here.
2017     VirtualSpaceListIterator iter(virtual_space_list());
2018     while (iter.repeat()) {
2019       VirtualSpaceNode* vsl = iter.get_next();
2020       assert(vsl != purged_vsl, "Purge of vsl failed");
2021     }
2022   }
2023 #endif
2024 }
2025 
2026 
2027 // This function looks at the mmap regions in the metaspace without locking.
2028 // The chunks are added with store ordering and not deleted except for at
2029 // unloading time during a safepoint.
2030 bool VirtualSpaceList::contains(const void* ptr) {
2031   // List should be stable enough to use an iterator here because removing virtual
2032   // space nodes is only allowed at a safepoint.
2033   VirtualSpaceListIterator iter(virtual_space_list());
2034   while (iter.repeat()) {
2035     VirtualSpaceNode* vsn = iter.get_next();
2036     if (vsn->contains(ptr)) {
2037       return true;
2038     }
2039   }
2040   return false;
2041 }
2042 
2043 void VirtualSpaceList::retire_current_virtual_space() {
2044   assert_lock_strong(MetaspaceExpand_lock);
2045 
2046   VirtualSpaceNode* vsn = current_virtual_space();
2047 
2048   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
2049                                   Metaspace::chunk_manager_metadata();
2050 
2051   vsn->retire(cm);
2052 }
2053 
2054 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
2055   DEBUG_ONLY(verify_container_count();)
2056   assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
2057   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
2058     ChunkIndex index = (ChunkIndex)i;
2059     size_t chunk_size = chunk_manager->size_by_index(index);
2060 
2061     while (free_words_in_vs() >= chunk_size) {
2062       Metachunk* chunk = get_chunk_vs(chunk_size);
2063       // Chunk will be allocated aligned, so allocation may require
2064       // additional padding chunks. That may cause above allocation to
2065       // fail. Just ignore the failed allocation and continue with the
2066       // next smaller chunk size. As the VirtualSpaceNode committed
2067       // size should be a multiple of the smallest chunk size, we
2068       // should always be able to fill the VirtualSpace completely.
2069       if (chunk == NULL) {
2070         break;
2071       }
2072       chunk_manager->return_single_chunk(index, chunk);
2073     }
2074     DEBUG_ONLY(verify_container_count();)
2075   }
2076   assert(free_words_in_vs() == 0, "should be empty now");
2077 }
2078 
2079 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
2080                                    _is_class(false),
2081                                    _virtual_space_list(NULL),
2082                                    _current_virtual_space(NULL),
2083                                    _reserved_words(0),
2084                                    _committed_words(0),
2085                                    _virtual_space_count(0) {
2086   MutexLockerEx cl(MetaspaceExpand_lock,
2087                    Mutex::_no_safepoint_check_flag);
2088   create_new_virtual_space(word_size);
2089 }
2090 
2091 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
2092                                    _is_class(true),
2093                                    _virtual_space_list(NULL),
2094                                    _current_virtual_space(NULL),
2095                                    _reserved_words(0),
2096                                    _committed_words(0),
2097                                    _virtual_space_count(0) {
2098   MutexLockerEx cl(MetaspaceExpand_lock,
2099                    Mutex::_no_safepoint_check_flag);
2100   VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
2101   bool succeeded = class_entry->initialize();
2102   if (succeeded) {
2103     link_vs(class_entry);
2104   }
2105 }
2106 
2107 size_t VirtualSpaceList::free_bytes() {
2108   return current_virtual_space()->free_words_in_vs() * BytesPerWord;
2109 }
2110 
2111 // Allocate another meta virtual space and add it to the list.
2112 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
2113   assert_lock_strong(MetaspaceExpand_lock);
2114 
2115   if (is_class()) {
2116     assert(false, "We currently don't support more than one VirtualSpace for"
2117                   " the compressed class space. The initialization of the"
2118                   " CCS uses another code path and should not hit this path.");
2119     return false;
2120   }
2121 
2122   if (vs_word_size == 0) {
2123     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
2124     return false;
2125   }
2126 
2127   // Reserve the space
2128   size_t vs_byte_size = vs_word_size * BytesPerWord;
2129   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
2130 
2131   // Allocate the meta virtual space and initialize it.
2132   VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
2133   if (!new_entry->initialize()) {
2134     delete new_entry;
2135     return false;
2136   } else {
2137     assert(new_entry->reserved_words() == vs_word_size,
2138         "Reserved memory size differs from requested memory size");
2139     // ensure lock-free iteration sees fully initialized node
2140     OrderAccess::storestore();
2141     link_vs(new_entry);
2142     return true;
2143   }
2144 }
2145 
2146 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
2147   if (virtual_space_list() == NULL) {
2148       set_virtual_space_list(new_entry);
2149   } else {
2150     current_virtual_space()->set_next(new_entry);
2151   }
2152   set_current_virtual_space(new_entry);
2153   inc_reserved_words(new_entry->reserved_words());
2154   inc_committed_words(new_entry->committed_words());
2155   inc_virtual_space_count();
2156 #ifdef ASSERT
2157   new_entry->mangle();
2158 #endif
2159   LogTarget(Trace, gc, metaspace) lt;
2160   if (lt.is_enabled()) {
2161     LogStream ls(lt);
2162     VirtualSpaceNode* vsl = current_virtual_space();
2163     ResourceMark rm;
2164     vsl->print_on(&ls);
2165   }
2166 }
2167 
2168 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
2169                                       size_t min_words,
2170                                       size_t preferred_words) {
2171   size_t before = node->committed_words();
2172 
2173   bool result = node->expand_by(min_words, preferred_words);
2174 
2175   size_t after = node->committed_words();
2176 
2177   // after and before can be the same if the memory was pre-committed.
2178   assert(after >= before, "Inconsistency");
2179   inc_committed_words(after - before);
2180 
2181   return result;
2182 }
2183 
2184 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
2185   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
2186   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
2187   assert(min_words <= preferred_words, "Invalid arguments");
2188 
2189   const char* const class_or_not = (is_class() ? "class" : "non-class");
2190 
2191   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
2192     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
2193               class_or_not);
2194     return  false;
2195   }
2196 
2197   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
2198   if (allowed_expansion_words < min_words) {
2199     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
2200               class_or_not);
2201     return false;
2202   }
2203 
2204   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
2205 
2206   // Commit more memory from the current virtual space.
2207   bool vs_expanded = expand_node_by(current_virtual_space(),
2208                                     min_words,
2209                                     max_expansion_words);
2210   if (vs_expanded) {
2211      log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
2212                class_or_not);
2213      return true;
2214   }
2215   log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
2216             class_or_not);
2217   retire_current_virtual_space();
2218 
2219   // Get another virtual space.
2220   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
2221   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
2222 
2223   if (create_new_virtual_space(grow_vs_words)) {
2224     if (current_virtual_space()->is_pre_committed()) {
2225       // The memory was pre-committed, so we are done here.
2226       assert(min_words <= current_virtual_space()->committed_words(),
2227           "The new VirtualSpace was pre-committed, so it"
2228           "should be large enough to fit the alloc request.");
2229       return true;
2230     }
2231 
2232     return expand_node_by(current_virtual_space(),
2233                           min_words,
2234                           max_expansion_words);
2235   }
2236 
2237   return false;
2238 }
2239 
2240 // Given a chunk, calculate the largest possible padding space which
2241 // could be required when allocating it.
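// For example, a non-humongous medium chunk may, in the worst case, need padding of the
// medium chunk size minus one specialized chunk, since top() could sit just one
// specialized chunk past a medium-chunk-aligned boundary.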
2242 static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
2243   const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
2244   if (chunk_type != HumongousIndex) {
2245     // Normal, non-humongous chunks are allocated at chunk size
2246     // boundaries, so the largest padding space required would be that
2247     // minus the smallest chunk size.
2248     const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
2249     return chunk_word_size - smallest_chunk_size;
2250   } else {
2251     // Humongous chunks are allocated at smallest-chunksize
2252     // boundaries, so there is no padding required.
2253     return 0;
2254   }
2255 }
2256 
2257 
2258 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
2259 
2260   // Allocate a chunk out of the current virtual space.
2261   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2262 
2263   if (next != NULL) {
2264     return next;
2265   }
2266 
2267   // The expand amount is currently only determined by the requested sizes
2268   // and not how much committed memory is left in the current virtual space.
2269 
2270   // We must have enough space for the requested size and any
2271   // additional required padding chunks.
2272   const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
2273 
2274   size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
2275   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
2276   if (min_word_size >= preferred_word_size) {
2277     // Can happen when humongous chunks are allocated.
2278     preferred_word_size = min_word_size;
2279   }
2280 
2281   bool expanded = expand_by(min_word_size, preferred_word_size);
2282   if (expanded) {
2283     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2284     assert(next != NULL, "The allocation was expected to succeed after the expansion");
2285   }
2286 
2287   return next;
2288 }
2289 
2290 void VirtualSpaceList::print_on(outputStream* st) const {
2291   VirtualSpaceListIterator iter(virtual_space_list());
2292   while (iter.repeat()) {
2293     VirtualSpaceNode* node = iter.get_next();
2294     node->print_on(st);
2295   }
2296 }
2297 
2298 void VirtualSpaceList::print_map(outputStream* st) const {
2299   VirtualSpaceNode* list = virtual_space_list();
2300   VirtualSpaceListIterator iter(list);
2301   unsigned i = 0;
2302   while (iter.repeat()) {
2303     st->print_cr("Node %u:", i);
2304     VirtualSpaceNode* node = iter.get_next();
2305     node->print_map(st, this->is_class());
2306     i ++;
2307   }
2308 }
2309 
2310 // MetaspaceGC methods
2311 
2312 // VM_CollectForMetadataAllocation is the vm operation used to GC.
2313 // Within the VM operation after the GC the attempt to allocate the metadata
2314 // should succeed.  If the GC did not free enough space for the metaspace
2315 // allocation, the HWM is increased so that another virtualspace will be
2316 // allocated for the metadata.  With perm gen the increase in the perm
2317 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
2318 // metaspace policy uses those as the small and large steps for the HWM.
2319 //
2320 // After the GC the compute_new_size() for MetaspaceGC is called to
2321 // resize the capacity of the metaspaces.  The current implementation
2322 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
2323 // to resize the Java heap by some GC's.  New flags can be implemented
2324 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
2325 // free space is desirable in the metaspace capacity to decide how much
2326 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
2327 // free space is desirable in the metaspace capacity before decreasing
2328 // the HWM.
2329 
2330 // Calculate the amount to increase the high water mark (HWM).
2331 // Increase by a minimum amount (MinMetaspaceExpansion) so that
2332 // another expansion is not requested too soon.  If that is not
2333 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
2334 // If that is still not enough, expand by the size of the allocation
2335 // plus some.
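// Illustrative values only: with MinMetaspaceExpansion = 256K and MaxMetaspaceExpansion = 4M,
// a commit-aligned request of 64K raises the HWM by 256K, a 1M request raises it by 4M, and
// a 6M request raises it by 6M + 256K.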
2336 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
2337   size_t min_delta = MinMetaspaceExpansion;
2338   size_t max_delta = MaxMetaspaceExpansion;
2339   size_t delta = align_up(bytes, Metaspace::commit_alignment());
2340 
2341   if (delta <= min_delta) {
2342     delta = min_delta;
2343   } else if (delta <= max_delta) {
2344     // Don't want to hit the high water mark on the next
2345     // allocation so make the delta greater than just enough
2346     // for this allocation.
2347     delta = max_delta;
2348   } else {
2349     // This allocation is large but the next ones are probably not
2350     // so increase by the minimum.
2351     delta = delta + min_delta;
2352   }
2353 
2354   assert_is_aligned(delta, Metaspace::commit_alignment());
2355 
2356   return delta;
2357 }
2358 
2359 size_t MetaspaceGC::capacity_until_GC() {
2360   size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
2361   assert(value >= MetaspaceSize, "Not initialized properly?");
2362   return value;
2363 }
2364 
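// Attempt to raise _capacity_until_GC by v with a single compare-and-exchange. Returns false
// if another thread updated the value concurrently, leaving it to the caller to re-read the
// capacity and retry if desired; on success the old and new values are optionally reported
// through the out parameters.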
2365 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
2366   assert_is_aligned(v, Metaspace::commit_alignment());
2367 
2368   intptr_t capacity_until_GC = _capacity_until_GC;
2369   intptr_t new_value = capacity_until_GC + v;
2370 
2371   if (new_value < capacity_until_GC) {
2372     // The addition wrapped around, set new_value to aligned max value.
2373     new_value = align_down(max_uintx, Metaspace::commit_alignment());
2374   }
2375 
2376   intptr_t expected = _capacity_until_GC;
2377   intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);
2378 
2379   if (expected != actual) {
2380     return false;
2381   }
2382 
2383   if (new_cap_until_GC != NULL) {
2384     *new_cap_until_GC = new_value;
2385   }
2386   if (old_cap_until_GC != NULL) {
2387     *old_cap_until_GC = capacity_until_GC;
2388   }
2389   return true;
2390 }
2391 
2392 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
2393   assert_is_aligned(v, Metaspace::commit_alignment());
2394 
2395   return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
2396 }
2397 
2398 void MetaspaceGC::initialize() {
2399   // Set the high-water mark to MaxMetaspaceSize during VM initialization since
2400   // we can't do a GC during initialization.
2401   _capacity_until_GC = MaxMetaspaceSize;
2402 }
2403 
2404 void MetaspaceGC::post_initialize() {
2405   // Reset the high-water mark once the VM initialization is done.
2406   _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
2407 }
2408 
2409 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
2410   // Check if the compressed class space is full.
2411   if (is_class && Metaspace::using_class_space()) {
2412     size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
2413     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
2414       log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
2415                 (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
2416       return false;
2417     }
2418   }
2419 
2420   // Check if the user has imposed a limit on the metaspace memory.
2421   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2422   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
2423     log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
2424               (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
2425     return false;
2426   }
2427 
2428   return true;
2429 }
2430 
2431 size_t MetaspaceGC::allowed_expansion() {
2432   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2433   size_t capacity_until_gc = capacity_until_GC();
2434 
2435   assert(capacity_until_gc >= committed_bytes,
2436          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
2437          capacity_until_gc, committed_bytes);
2438 
2439   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
2440   size_t left_until_GC = capacity_until_gc - committed_bytes;
2441   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
2442   log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
2443             " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ".",
2444             left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);
2445 
2446   return left_to_commit / BytesPerWord;
2447 }
2448 
2449 void MetaspaceGC::compute_new_size() {
2450   assert(_shrink_factor <= 100, "invalid shrink factor");
2451   uint current_shrink_factor = _shrink_factor;
2452   _shrink_factor = 0;
2453 
2454   // Using committed_bytes() for used_after_gc is an overestimation, since the
2455   // chunk free lists are included in committed_bytes() and the memory in an
2456   // un-fragmented chunk free list is available for future allocations.
2457   // However, if the chunk free lists become fragmented, then the memory may
2458   // not be available for future allocations and the memory is therefore "in use".
2459   // Including the chunk free lists in the definition of "in use" is therefore
2460   // necessary. Not including the chunk free lists can cause capacity_until_GC to
2461   // shrink below committed_bytes() and this has caused serious bugs in the past.
2462   const size_t used_after_gc = MetaspaceUtils::committed_bytes();
2463   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
2464 
2465   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
2466   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
2467 
2468   const double min_tmp = used_after_gc / maximum_used_percentage;
2469   size_t minimum_desired_capacity =
2470     (size_t)MIN2(min_tmp, double(max_uintx));
2471   // Don't shrink below the initial metaspace size (MetaspaceSize)
2472   minimum_desired_capacity = MAX2(minimum_desired_capacity,
2473                                   MetaspaceSize);
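  // Worked example (illustrative values): with MinMetaspaceFreeRatio = 40 and
  // used_after_gc = 60M, maximum_used_percentage is 0.6, so minimum_desired_capacity
  // is 60M / 0.6 = 100M (but never less than MetaspaceSize).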
2474 
2475   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
2476   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
2477                            minimum_free_percentage, maximum_used_percentage);
2478   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
2479 
2480 
2481   size_t shrink_bytes = 0;
2482   if (capacity_until_GC < minimum_desired_capacity) {
2483     // If we have less capacity below the metaspace HWM, then
2484     // increment the HWM.
2485     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
2486     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
2487     // Don't expand unless it's significant
2488     if (expand_bytes >= MinMetaspaceExpansion) {
2489       size_t new_capacity_until_GC = 0;
2490       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
2491       assert(succeeded, "Should always successfully increment HWM when at safepoint");
2492 
2493       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2494                                                new_capacity_until_GC,
2495                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
2496       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
2497                                minimum_desired_capacity / (double) K,
2498                                expand_bytes / (double) K,
2499                                MinMetaspaceExpansion / (double) K,
2500                                new_capacity_until_GC / (double) K);
2501     }
2502     return;
2503   }
2504 
2505   // No expansion, now see if we want to shrink
2506   // We would never want to shrink more than this
2507   assert(capacity_until_GC >= minimum_desired_capacity,
2508          SIZE_FORMAT " >= " SIZE_FORMAT,
2509          capacity_until_GC, minimum_desired_capacity);
2510   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
2511 
2512   // Should shrinking be considered?
2513   if (MaxMetaspaceFreeRatio < 100) {
2514     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
2515     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
2516     const double max_tmp = used_after_gc / minimum_used_percentage;
2517     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
2518     maximum_desired_capacity = MAX2(maximum_desired_capacity,
2519                                     MetaspaceSize);
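    // Worked example (illustrative values): with MaxMetaspaceFreeRatio = 70 and
    // used_after_gc = 60M, minimum_used_percentage is 0.3, so maximum_desired_capacity
    // is 60M / 0.3 = 200M; capacity above that amount is a candidate for shrinking.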
2520     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
2521                              maximum_free_percentage, minimum_used_percentage);
2522     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
2523                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
2524 
2525     assert(minimum_desired_capacity <= maximum_desired_capacity,
2526            "sanity check");
2527 
2528     if (capacity_until_GC > maximum_desired_capacity) {
2529       // Capacity too large, compute shrinking size
2530       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
2531       // We don't want to shrink all the way back to initSize if people call
2532       // System.gc(), because some programs do that between "phases" and then
2533       // we'd just have to grow the metaspace up again for the next phase.  So we
2534       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
2535       // on the third call, and 100% by the fourth call.  But if we recompute
2536       // size without shrinking, it goes back to 0%.
2537       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
2538 
2539       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
2540 
2541       assert(shrink_bytes <= max_shrink_bytes,
2542              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
2543              shrink_bytes, max_shrink_bytes);
2544       if (current_shrink_factor == 0) {
2545         _shrink_factor = 10;
2546       } else {
2547         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
2548       }
2549       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
2550                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
2551       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
2552                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
2553     }
2554   }
2555 
2556   // Don't shrink unless it's significant
2557   if (shrink_bytes >= MinMetaspaceExpansion &&
2558       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
2559     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
2560     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2561                                              new_capacity_until_GC,
2562                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
2563   }
2564 }
2565 
2566 // Metadebug methods
2567 
2568 void Metadebug::init_allocation_fail_alot_count() {
2569   if (MetadataAllocationFailALot) {
2570     _allocation_fail_alot_count =
2571       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
2572   }
2573 }
2574 
2575 #ifdef ASSERT
2576 bool Metadebug::test_metadata_failure() {
2577   if (MetadataAllocationFailALot &&
2578       Threads::is_vm_complete()) {
2579     if (_allocation_fail_alot_count > 0) {
2580       _allocation_fail_alot_count--;
2581     } else {
2582       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
2583       init_allocation_fail_alot_count();
2584       return true;
2585     }
2586   }
2587   return false;
2588 }
2589 #endif
2590 
2591 // ChunkManager methods
2592 size_t ChunkManager::free_chunks_total_words() {
2593   return _free_chunks_total;
2594 }
2595 
2596 size_t ChunkManager::free_chunks_total_bytes() {
2597   return free_chunks_total_words() * BytesPerWord;
2598 }
2599 
2600 // Update internal accounting after a chunk was added
2601 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
2602   assert_lock_strong(MetaspaceExpand_lock);
2603   _free_chunks_count ++;
2604   _free_chunks_total += c->word_size();
2605 }
2606 
2607 // Update internal accounting after a chunk was removed
2608 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
2609   assert_lock_strong(MetaspaceExpand_lock);
2610   assert(_free_chunks_count >= 1,
2611     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
2612   assert(_free_chunks_total >= c->word_size(),
2613     "ChunkManager::_free_chunks_total: about to go negative"
2614      "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
2615   _free_chunks_count --;
2616   _free_chunks_total -= c->word_size();
2617 }
2618 
2619 size_t ChunkManager::free_chunks_count() {
2620 #ifdef ASSERT
2621   if (!UseConcMarkSweepGC && !MetaspaceExpand_lock->is_locked()) {
2622     MutexLockerEx cl(MetaspaceExpand_lock,
2623                      Mutex::_no_safepoint_check_flag);
2624     // This lock is only needed in debug because the verification
2625     // of _free_chunks_count walks the list of free chunks
2626     slow_locked_verify_free_chunks_count();
2627   }
2628 #endif
2629   return _free_chunks_count;
2630 }
2631 
2632 ChunkIndex ChunkManager::list_index(size_t size) {
2633   return get_chunk_type_by_size(size, is_class());
2634 }
2635 
2636 size_t ChunkManager::size_by_index(ChunkIndex index) const {
2637   index_bounds_check(index);
2638   assert(index != HumongousIndex, "Do not call for humongous chunks.");
2639   return get_size_for_nonhumongous_chunktype(index, is_class());
2640 }
2641 
2642 void ChunkManager::locked_verify_free_chunks_total() {
2643   assert_lock_strong(MetaspaceExpand_lock);
2644   assert(sum_free_chunks() == _free_chunks_total,
2645          "_free_chunks_total " SIZE_FORMAT " is not the"
2646          " same as sum " SIZE_FORMAT, _free_chunks_total,
2647          sum_free_chunks());
2648 }
2649 
2650 void ChunkManager::verify_free_chunks_total() {
2651   MutexLockerEx cl(MetaspaceExpand_lock,
2652                      Mutex::_no_safepoint_check_flag);
2653   locked_verify_free_chunks_total();
2654 }
2655 
2656 void ChunkManager::locked_verify_free_chunks_count() {
2657   assert_lock_strong(MetaspaceExpand_lock);
2658   assert(sum_free_chunks_count() == _free_chunks_count,
2659          "_free_chunks_count " SIZE_FORMAT " is not the"
2660          " same as sum " SIZE_FORMAT, _free_chunks_count,
2661          sum_free_chunks_count());
2662 }
2663 
2664 void ChunkManager::verify_free_chunks_count() {
2665 #ifdef ASSERT
2666   MutexLockerEx cl(MetaspaceExpand_lock,
2667                      Mutex::_no_safepoint_check_flag);
2668   locked_verify_free_chunks_count();
2669 #endif
2670 }
2671 
2672 void ChunkManager::verify() {
2673   MutexLockerEx cl(MetaspaceExpand_lock,
2674                      Mutex::_no_safepoint_check_flag);
2675   locked_verify();
2676 }
2677 
2678 void ChunkManager::locked_verify() {
2679   locked_verify_free_chunks_count();
2680   locked_verify_free_chunks_total();
2681   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2682     ChunkList* list = free_chunks(i);
2683     if (list != NULL) {
2684       Metachunk* chunk = list->head();
2685       while (chunk) {
2686         DEBUG_ONLY(do_verify_chunk(chunk);)
2687         assert(chunk->is_tagged_free(), "Chunk should be tagged as free.");
2688         chunk = chunk->next();
2689       }
2690     }
2691   }
2692 }
2693 
2694 void ChunkManager::locked_print_free_chunks(outputStream* st) {
2695   assert_lock_strong(MetaspaceExpand_lock);
2696   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2697                 _free_chunks_total, _free_chunks_count);
2698 }
2699 
2700 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
2701   assert_lock_strong(MetaspaceExpand_lock);
2702   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2703                 sum_free_chunks(), sum_free_chunks_count());
2704 }
2705 
2706 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
2707   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
2708          "Bad index: %d", (int)index);
2709 
2710   return &_free_chunks[index];
2711 }
2712 
2713 // These methods, which sum the free chunk lists, are used by printing
2714 // methods that run in product builds.
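     // They recompute the totals by walking the lists under the MetaspaceExpand_lock,
     // unlike free_chunks_total_words() and free_chunks_count(), which return the
     // cached counters; the locked_verify_* methods assert that both agree.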
2715 size_t ChunkManager::sum_free_chunks() {
2716   assert_lock_strong(MetaspaceExpand_lock);
2717   size_t result = 0;
2718   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2719     ChunkList* list = free_chunks(i);
2720 
2721     if (list == NULL) {
2722       continue;
2723     }
2724 
2725     result = result + list->count() * list->size();
2726   }
2727   result = result + humongous_dictionary()->total_size();
2728   return result;
2729 }
2730 
2731 size_t ChunkManager::sum_free_chunks_count() {
2732   assert_lock_strong(MetaspaceExpand_lock);
2733   size_t count = 0;
2734   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2735     ChunkList* list = free_chunks(i);
2736     if (list == NULL) {
2737       continue;
2738     }
2739     count = count + list->count();
2740   }
2741   count = count + humongous_dictionary()->total_free_blocks();
2742   return count;
2743 }
2744 
2745 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
2746   ChunkIndex index = list_index(word_size);
2747   assert(index < HumongousIndex, "No humongous list");
2748   return free_chunks(index);
2749 }
2750 
2751 // Helper for chunk splitting: given a target chunk size and a larger free chunk,
2752 // split the larger chunk into n smaller chunks, at least one of which has the
2753 // target chunk size. The smaller chunks, including the target chunk, are returned
2754 // to the freelist. A pointer to the target chunk is returned; note that the caller
2755 // is expected to remove that chunk from the freelist right away.
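     // Illustrative example (class space, word sizes from the ChunkSizes enum):
     // splitting a 4K-word medium chunk to obtain a 128-word specialized chunk
     // yields the target 128-word chunk, one more 128-word chunk (to restore
     // 256-word alignment), and 256-word small chunks for the rest of the region,
     // since every remainder chunk is the largest type below the split chunk's
     // type whose alignment fits at the current position.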
2756 Metachunk* ChunkManager::split_chunk(size_t target_chunk_word_size, Metachunk* larger_chunk) {
2757   assert(larger_chunk->word_size() > target_chunk_word_size, "Sanity");
2758 
2759   const ChunkIndex larger_chunk_index = larger_chunk->get_chunk_type();
2760   const ChunkIndex target_chunk_index = get_chunk_type_by_size(target_chunk_word_size, is_class());
2761 
2762   MetaWord* const region_start = (MetaWord*)larger_chunk;
2763   const size_t region_word_len = larger_chunk->word_size();
2764   MetaWord* const region_end = region_start + region_word_len;
2765   VirtualSpaceNode* const vsn = larger_chunk->container();
2766   OccupancyMap* const ocmap = vsn->occupancy_map();
2767 
2768   // Any larger non-humongous chunk size is a multiple of any smaller chunk size.
2769   // Since non-humongous chunks are aligned to their chunk size, the larger chunk should start
2770   // at an address suitable to place the smaller target chunk.
2771   assert_is_aligned(region_start, target_chunk_word_size);
2772 
2773   // Remove old chunk.
2774   free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
2775   larger_chunk->remove_sentinel();
2776 
2777   // Prevent access to the old chunk from here on.
2778   larger_chunk = NULL;
2779   // ... and wipe it.
2780   DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));
2781 
2782   // In its place create first the target chunk...
2783   MetaWord* p = region_start;
2784   Metachunk* target_chunk = ::new (p) Metachunk(target_chunk_index, is_class(), target_chunk_word_size, vsn);
2785   assert(target_chunk == (Metachunk*)p, "Sanity");
2786   target_chunk->set_origin(origin_split);
2787 
2788   // Note: we do not need to mark its start in the occupancy map
2789   // because it coincides with the old chunk start.
2790 
2791   // Mark chunk as free and return to the freelist.
2792   do_update_in_use_info_for_chunk(target_chunk, false);
2793   free_chunks(target_chunk_index)->return_chunk_at_head(target_chunk);
2794 
2795   // This chunk should now be valid and can be verified.
2796   DEBUG_ONLY(do_verify_chunk(target_chunk));
2797 
2798   // In the remaining space create the remainder chunks.
2799   p += target_chunk->word_size();
2800   assert(p < region_end, "Sanity");
2801 
2802   while (p < region_end) {
2803 
2804     // Find the largest chunk size which fits the alignment requirements at address p.
2805     ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index);
2806     size_t this_chunk_word_size = 0;
2807     for(;;) {
2808       this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class());
2809       if (is_aligned(p, this_chunk_word_size * BytesPerWord)) {
2810         break;
2811       } else {
2812         this_chunk_index = prev_chunk_index(this_chunk_index);
2813         assert(this_chunk_index >= target_chunk_index, "Sanity");
2814       }
2815     }
2816 
2817     assert(this_chunk_word_size >= target_chunk_word_size, "Sanity");
2818     assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
2819     assert(p + this_chunk_word_size <= region_end, "Sanity");
2820 
2821     // Create splitting chunk.
2822     Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
2823     assert(this_chunk == (Metachunk*)p, "Sanity");
2824     this_chunk->set_origin(origin_split);
2825     ocmap->set_chunk_starts_at_address(p, true);
2826     do_update_in_use_info_for_chunk(this_chunk, false);
2827 
2828     // This chunk should be valid and can be verified.
2829     DEBUG_ONLY(do_verify_chunk(this_chunk));
2830 
2831     // Return this chunk to freelist and correct counter.
2832     free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
2833     _free_chunks_count ++;
2834 
2835     log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
2836       SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
2837       p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
2838       p2i(region_start), p2i(region_end));
2839 
2840     p += this_chunk_word_size;
2841 
2842   }
2843 
2844   return target_chunk;
2845 }
2846 
2847 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
2848   assert_lock_strong(MetaspaceExpand_lock);
2849 
2850   slow_locked_verify();
2851 
2852   Metachunk* chunk = NULL;
2853   bool we_did_split_a_chunk = false;
2854 
2855   if (list_index(word_size) != HumongousIndex) {
2856 
2857     ChunkList* free_list = find_free_chunks_list(word_size);
2858     assert(free_list != NULL, "Sanity check");
2859 
2860     chunk = free_list->head();
2861 
2862     if (chunk == NULL) {
2863       // If there is no free chunk of the requested size, split a larger free chunk
2864       // into smaller chunks. This is the counterpart of the coalescing-upon-chunk-return.
2865 
2866       ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());
2867 
2868       // Is there a larger chunk we could split?
2869       Metachunk* larger_chunk = NULL;
2870       ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
2871       while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
2872         larger_chunk = free_chunks(larger_chunk_index)->head();
2873         if (larger_chunk == NULL) {
2874           larger_chunk_index = next_chunk_index(larger_chunk_index);
2875         }
2876       }
2877 
2878       if (larger_chunk != NULL) {
2879         assert(larger_chunk->word_size() > word_size, "Sanity");
2880         assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");
2881 
2882         // We found a larger chunk. Let's split it up:
2883         // - remove old chunk
2884         // - in its place, create new smaller chunks, with at least one chunk
2885         //   being of target size, the others sized as large as possible. This
2886         //   is to make sure the resulting chunks are "as coalesced as possible"
2887         //   (similar to VirtualSpaceNode::retire()).
2888         // Note: during this operation both ChunkManager and VirtualSpaceNode
2889         //  are temporarily invalid, so be careful with asserts.
2890 
2891         log_trace(gc, metaspace, freelist)("%s: splitting chunk " PTR_FORMAT
2892            ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
2893           (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
2894           chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));
2895 
2896         chunk = split_chunk(word_size, larger_chunk);
2897 
2898         // This should have worked.
2899         assert(chunk != NULL, "Sanity");
2900         assert(chunk->word_size() == word_size, "Sanity");
2901         assert(chunk->is_tagged_free(), "Sanity");
2902 
2903         we_did_split_a_chunk = true;
2904 
2905       }
2906     }
2907 
2908     if (chunk == NULL) {
2909       return NULL;
2910     }
2911 
2912     // Remove the chunk as the head of the list.
2913     free_list->remove_chunk(chunk);
2914 
2915     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list: " PTR_FORMAT " chunks left: " SSIZE_FORMAT ".",
2916                                        p2i(free_list), free_list->count());
2917 
2918   } else {
2919     chunk = humongous_dictionary()->get_chunk(word_size);
2920 
2921     if (chunk == NULL) {
2922       return NULL;
2923     }
2924 
2925     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
2926                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
2927   }
2928 
2929   // Chunk has been removed from the chunk manager; update counters.
2930   account_for_removed_chunk(chunk);
2931   do_update_in_use_info_for_chunk(chunk, true);
2932   chunk->container()->inc_container_count();
2933   chunk->inc_use_count();
2934 
2935   // Remove it from the links to this freelist
2936   chunk->set_next(NULL);
2937   chunk->set_prev(NULL);
2938 
2939   // Run some verifications (some more if we did a chunk split)
2940 #ifdef ASSERT
2941   if (VerifyMetaspace) {
2942     locked_verify();
2943     VirtualSpaceNode* const vsn = chunk->container();
2944     vsn->verify();
2945     if (we_did_split_a_chunk) {
2946       vsn->verify_free_chunks_are_ideally_merged();
2947     }
2948   }
2949 #endif
2950 
2951   return chunk;
2952 }
2953 
2954 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
2955   assert_lock_strong(MetaspaceExpand_lock);
2956   slow_locked_verify();
2957 
2958   // Take from the beginning of the list
2959   Metachunk* chunk = free_chunks_get(word_size);
2960   if (chunk == NULL) {
2961     return NULL;
2962   }
2963 
2964   assert((word_size <= chunk->word_size()) ||
2965          (list_index(chunk->word_size()) == HumongousIndex),
2966          "Non-humongous variable sized chunk");
2967   LogTarget(Debug, gc, metaspace, freelist) lt;
2968   if (lt.is_enabled()) {
2969     size_t list_count;
2970     if (list_index(word_size) < HumongousIndex) {
2971       ChunkList* list = find_free_chunks_list(word_size);
2972       list_count = list->count();
2973     } else {
2974       list_count = humongous_dictionary()->total_count();
2975     }
2976     LogStream ls(lt);
2977     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
2978              p2i(this), p2i(chunk), chunk->word_size(), list_count);
2979     ResourceMark rm;
2980     locked_print_free_chunks(&ls);
2981   }
2982 
2983   return chunk;
2984 }
2985 
2986 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
2987   assert_lock_strong(MetaspaceExpand_lock);
2988   assert(chunk != NULL, "Expected chunk.");
2989   DEBUG_ONLY(do_verify_chunk(chunk);)
2990   assert(chunk->get_chunk_type() == index, "Chunk does not match expected index.");
2991   assert(chunk->container() != NULL, "Container should have been set.");
2992   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
2993   index_bounds_check(index);
2994 
2995   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
2996   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
2997   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
2998   DEBUG_ONLY(chunk->mangle(badMetaWordVal);)
2999 
3000   if (index != HumongousIndex) {
3001     // Return non-humongous chunk to freelist.
3002     ChunkList* list = free_chunks(index);
3003     assert(list->size() == chunk->word_size(), "Wrong chunk type.");
3004     list->return_chunk_at_head(chunk);
3005     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
3006         chunk_size_name(index), p2i(chunk));
3007   } else {
3008     // Return humongous chunk to dictionary.
3009     assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
3010     assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
3011            "Humongous chunk has wrong alignment.");
3012     _humongous_dictionary.return_chunk(chunk);
3013     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
3014         chunk_size_name(index), p2i(chunk), chunk->word_size());
3015   }
3016   chunk->container()->dec_container_count();
3017   do_update_in_use_info_for_chunk(chunk, false);
3018 
3019   // Chunk has been added; update counters.
3020   account_for_added_chunk(chunk);
3021 
3022   // Attempt to coalesce the returned chunk with its neighboring chunks:
3023   // if this chunk is small or specialized, attempt to coalesce into a medium chunk.
3024   if (index == SmallIndex || index == SpecializedIndex) {
3025     if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
3026       // This did not work. But if this chunk is specialized, we may still be able to form a small chunk.
3027       if (index == SpecializedIndex) {
3028         if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
3029           // give up.
3030         }
3031       }
3032     }
3033   }
3034 
3035 }
3036 
3037 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
3038   index_bounds_check(index);
3039   if (chunks == NULL) {
3040     return;
3041   }
3042   LogTarget(Trace, gc, metaspace, freelist) log;
3043   if (log.is_enabled()) { // tracing
3044     log.print("returning list of %s chunks...", chunk_size_name(index));
3045   }
3046   unsigned num_chunks_returned = 0;
3047   size_t size_chunks_returned = 0;
3048   Metachunk* cur = chunks;
3049   while (cur != NULL) {
3050     // Capture the next link before it is changed
3051     // by the call to return_chunk_at_head();
3052     Metachunk* next = cur->next();
3053     if (log.is_enabled()) { // tracing
3054       num_chunks_returned ++;
3055       size_chunks_returned += cur->word_size();
3056     }
3057     return_single_chunk(index, cur);
3058     cur = next;
3059   }
3060   if (log.is_enabled()) { // tracing
3061     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
3062         num_chunks_returned, chunk_size_name(index), size_chunks_returned);
3063     if (index != HumongousIndex) {
3064       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
3065     } else {
3066       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
3067     }
3068   }
3069 }
3070 
3071 void ChunkManager::print_on(outputStream* out) const {
3072   _humongous_dictionary.report_statistics(out);
3073 }
3074 
3075 void ChunkManager::locked_get_statistics(ChunkManagerStatistics* stat) const {
3076   assert_lock_strong(MetaspaceExpand_lock);
3077   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3078     stat->num_by_type[i] = num_free_chunks(i);
3079     stat->single_size_by_type[i] = size_by_index(i);
3080     stat->total_size_by_type[i] = size_free_chunks_in_bytes(i);
3081   }
3082   stat->num_humongous_chunks = num_free_chunks(HumongousIndex);
3083   stat->total_size_humongous_chunks = size_free_chunks_in_bytes(HumongousIndex);
3084 }
3085 
3086 void ChunkManager::get_statistics(ChunkManagerStatistics* stat) const {
3087   MutexLockerEx cl(MetaspaceExpand_lock,
3088                    Mutex::_no_safepoint_check_flag);
3089   locked_get_statistics(stat);
3090 }
3091 
3092 void ChunkManager::print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale) {
3093   size_t total = 0;
3094   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3095 
3096   const char* unit = scale_unit(scale);
3097   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3098     out->print("  " SIZE_FORMAT " %s (" SIZE_FORMAT " bytes) chunks, total ",
3099                    stat->num_by_type[i], chunk_size_name(i),
3100                    stat->single_size_by_type[i]);
3101     if (scale == 1) {
3102       out->print_cr(SIZE_FORMAT " bytes", stat->total_size_by_type[i]);
3103     } else {
3104       out->print_cr("%.2f%s", (float)stat->total_size_by_type[i] / scale, unit);
3105     }
3106 
3107     total += stat->total_size_by_type[i];
3108   }
3109 
3110 
3111   total += stat->total_size_humongous_chunks;
3112 
3113   if (scale == 1) {
3114     out->print_cr("  " SIZE_FORMAT " humongous chunks, total " SIZE_FORMAT " bytes",
3115     stat->num_humongous_chunks, stat->total_size_humongous_chunks);
3116 
3117     out->print_cr("  total size: " SIZE_FORMAT " bytes.", total);
3118   } else {
3119     out->print_cr("  " SIZE_FORMAT " humongous chunks, total %.2f%s",
3120     stat->num_humongous_chunks,
3121     (float)stat->total_size_humongous_chunks / scale, unit);
3122 
3123     out->print_cr("  total size: %.2f%s.", (float)total / scale, unit);
3124   }
3125 
3126 }
3127 
3128 void ChunkManager::print_all_chunkmanagers(outputStream* out, size_t scale) {
3129   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3130 
3131   // Note: hold the lock only while retrieving the statistics; do the
3132   // printing outside of the lock protection.
3133   ChunkManagerStatistics stat;
3134   out->print_cr("Chunkmanager (non-class):");
3135   const ChunkManager* const non_class_cm = Metaspace::chunk_manager_metadata();
3136   if (non_class_cm != NULL) {
3137     non_class_cm->get_statistics(&stat);
3138     ChunkManager::print_statistics(&stat, out, scale);
3139   } else {
3140     out->print_cr("unavailable.");
3141   }
3142   out->print_cr("Chunkmanager (class):");
3143   const ChunkManager* const class_cm = Metaspace::chunk_manager_class();
3144   if (class_cm != NULL) {
3145     class_cm->get_statistics(&stat);
3146     ChunkManager::print_statistics(&stat, out, scale);
3147   } else {
3148     out->print_cr("unavailable.");
3149   }
3150 }
3151 
3152 // SpaceManager methods
3153 
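     // Rounds a requested initial chunk size up to the next fixed chunk size
     // (specialized, small or medium); larger requests are returned unchanged and
     // end up as humongous chunks. For example, in non-class space a 300-word
     // request is adjusted up to SmallChunk (512 words).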
3154 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
3155   size_t chunk_sizes[] = {
3156       specialized_chunk_size(is_class_space),
3157       small_chunk_size(is_class_space),
3158       medium_chunk_size(is_class_space)
3159   };
3160 
3161   // Adjust up to one of the fixed chunk sizes ...
3162   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
3163     if (requested <= chunk_sizes[i]) {
3164       return chunk_sizes[i];
3165     }
3166   }
3167 
3168   // ... or return the size as a humongous chunk.
3169   return requested;
3170 }
3171 
3172 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
3173   return adjust_initial_chunk_size(requested, is_class());
3174 }
3175 
3176 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
3177   size_t requested;
3178 
3179   if (is_class()) {
3180     switch (type) {
3181     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
3182     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
3183     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
3184     default:                                 requested = ClassSmallChunk; break;
3185     }
3186   } else {
3187     switch (type) {
3188     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
3189     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
3190     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
3191     default:                                 requested = SmallChunk; break;
3192     }
3193   }
3194 
3195   // Adjust to one of the fixed chunk sizes (unless humongous)
3196   const size_t adjusted = adjust_initial_chunk_size(requested);
3197 
3198   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
3199          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
3200 
3201   return adjusted;
3202 }
3203 
3204 size_t SpaceManager::sum_free_in_chunks_in_use() const {
3205   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3206   size_t free = 0;
3207   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3208     Metachunk* chunk = chunks_in_use(i);
3209     while (chunk != NULL) {
3210       free += chunk->free_word_size();
3211       chunk = chunk->next();
3212     }
3213   }
3214   return free;
3215 }
3216 
3217 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
3218   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3219   size_t result = 0;
3220   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3221    result += sum_waste_in_chunks_in_use(i);
3222   }
3223 
3224   return result;
3225 }
3226 
3227 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
3228   size_t result = 0;
3229   Metachunk* chunk = chunks_in_use(index);
3230   // Count the free space in all the chunks except the
3231   // current chunk, from which allocations are still being done.
3232   while (chunk != NULL) {
3233     if (chunk != current_chunk()) {
3234       result += chunk->free_word_size();
3235     }
3236     chunk = chunk->next();
3237   }
3238   return result;
3239 }
3240 
3241 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
3242   // For CMS, use allocated_chunks_words(), which does not need the
3243   // Metaspace lock.  For the other collectors, sum over the in-use
3244   // lists.  Use both methods as a check that allocated_chunks_words()
3245   // is correct: sum_capacity_in_chunks_in_use() is too expensive
3246   // to use in product builds, so allocated_chunks_words() should be used,
3247   // but this allows checking that allocated_chunks_words() returns the same
3248   // value as sum_capacity_in_chunks_in_use(), which is the definitive
3249   // answer.
3250   if (UseConcMarkSweepGC) {
3251     return allocated_chunks_words();
3252   } else {
3253     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3254     size_t sum = 0;
3255     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3256       Metachunk* chunk = chunks_in_use(i);
3257       while (chunk != NULL) {
3258         sum += chunk->word_size();
3259         chunk = chunk->next();
3260       }
3261     }
3262   return sum;
3263   }
3264 }
3265 
3266 size_t SpaceManager::sum_count_in_chunks_in_use() {
3267   size_t count = 0;
3268   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3269     count = count + sum_count_in_chunks_in_use(i);
3270   }
3271 
3272   return count;
3273 }
3274 
3275 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
3276   size_t count = 0;
3277   Metachunk* chunk = chunks_in_use(i);
3278   while (chunk != NULL) {
3279     count++;
3280     chunk = chunk->next();
3281   }
3282   return count;
3283 }
3284 
3285 
3286 size_t SpaceManager::sum_used_in_chunks_in_use() const {
3287   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3288   size_t used = 0;
3289   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3290     Metachunk* chunk = chunks_in_use(i);
3291     while (chunk != NULL) {
3292       used += chunk->used_word_size();
3293       chunk = chunk->next();
3294     }
3295   }
3296   return used;
3297 }
3298 
3299 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
3300 
3301   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3302     Metachunk* chunk = chunks_in_use(i);
3303     st->print("SpaceManager: %s " PTR_FORMAT,
3304                  chunk_size_name(i), p2i(chunk));
3305     if (chunk != NULL) {
3306       st->print_cr(" free " SIZE_FORMAT,
3307                    chunk->free_word_size());
3308     } else {
3309       st->cr();
3310     }
3311   }
3312 
3313   chunk_manager()->locked_print_free_chunks(st);
3314   chunk_manager()->locked_print_sum_free_chunks(st);
3315 }
3316 
3317 size_t SpaceManager::calc_chunk_size(size_t word_size) {
3318 
3319   // Decide between a small chunk and a medium chunk.  Up to
3320   // _small_chunk_limit small chunks can be allocated.
3321   // After that a medium chunk is preferred.
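       // (E.g. for a non-class SpaceManager: requests are served from 512-word small
       // chunks until _small_chunk_limit of them exist, then from 8K-word medium
       // chunks; requests that do not fit a medium chunk get a humongous chunk.)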
3322   size_t chunk_word_size;
3323 
3324   // Special case for anonymous metadata space.
3325   // Anonymous metadata space is usually small, with the majority within the 1K - 2K range,
3326   // and rarely above 4K (64-bit JVM).
3327   // Instead of jumping to SmallChunk once the initial chunk is exhausted, keeping the allocation
3328   // in SpecializedChunks up to _anon_and_delegating_metadata_specialize_chunk_limit (4)
3329   // reduces space waste from 60+% to around 30%.
3330   if ((_space_type == Metaspace::AnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
3331       _mdtype == Metaspace::NonClassType &&
3332       sum_count_in_chunks_in_use(SpecializedIndex) < _anon_and_delegating_metadata_specialize_chunk_limit &&
3333       word_size + Metachunk::overhead() <= SpecializedChunk) {
3334     return SpecializedChunk;
3335   }
3336 
3337   if (chunks_in_use(MediumIndex) == NULL &&
3338       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
3339     chunk_word_size = (size_t) small_chunk_size();
3340     if (word_size + Metachunk::overhead() > small_chunk_size()) {
3341       chunk_word_size = medium_chunk_size();
3342     }
3343   } else {
3344     chunk_word_size = medium_chunk_size();
3345   }
3346 
3347   // Might still need a humongous chunk.  Enforce
3348   // humongous allocation sizes to be aligned up to
3349   // the smallest chunk size.
3350   size_t if_humongous_sized_chunk =
3351     align_up(word_size + Metachunk::overhead(),
3352                   smallest_chunk_size());
3353   chunk_word_size =
3354     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
3355 
3356   assert(!SpaceManager::is_humongous(word_size) ||
3357          chunk_word_size == if_humongous_sized_chunk,
3358          "Size calculation is wrong, word_size " SIZE_FORMAT
3359          " chunk_word_size " SIZE_FORMAT,
3360          word_size, chunk_word_size);
3361   Log(gc, metaspace, alloc) log;
3362   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
3363     log.debug("Metadata humongous allocation:");
3364     log.debug("  word_size " PTR_FORMAT, word_size);
3365     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
3366     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
3367   }
3368   return chunk_word_size;
3369 }
3370 
3371 void SpaceManager::track_metaspace_memory_usage() {
3372   if (is_init_completed()) {
3373     if (is_class()) {
3374       MemoryService::track_compressed_class_memory_usage();
3375     }
3376     MemoryService::track_metaspace_memory_usage();
3377   }
3378 }
3379 
3380 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
3381   assert(vs_list()->current_virtual_space() != NULL,
3382          "Should have been set");
3383   assert(current_chunk() == NULL ||
3384          current_chunk()->allocate(word_size) == NULL,
3385          "Don't need to expand");
3386   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
3387 
3388   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3389     size_t words_left = 0;
3390     size_t words_used = 0;
3391     if (current_chunk() != NULL) {
3392       words_left = current_chunk()->free_word_size();
3393       words_used = current_chunk()->used_word_size();
3394     }
3395     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
3396                                        word_size, words_used, words_left);
3397   }
3398 
3399   // Get another chunk
3400   size_t chunk_word_size = calc_chunk_size(word_size);
3401   Metachunk* next = get_new_chunk(chunk_word_size);
3402 
3403   MetaWord* mem = NULL;
3404 
3405   // If a chunk was available, add it to the in-use chunk list
3406   // and do an allocation from it.
3407   if (next != NULL) {
3408     // Add to this manager's list of chunks in use.
3409     // If the new chunk is humongous, it was created to serve a single large allocation. In that
3410     // case it usually makes no sense to make it the current chunk, since the next allocation would
3411     // need to allocate a new chunk anyway, while we would now prematurely retire a perfectly
3412     // good chunk which could be used for more normal allocations.
3413     bool make_current_chunk = true;
3414     if (next->get_chunk_type() == HumongousIndex &&
3415         current_chunk() != NULL) {
3416       make_current_chunk = false;
3417     }
3418     add_chunk(next, make_current_chunk);
3419     mem = next->allocate(word_size);
3420   }
3421 
3422   // Track metaspace memory usage statistic.
3423   track_metaspace_memory_usage();
3424 
3425   return mem;
3426 }
3427 
3428 void SpaceManager::print_on(outputStream* st) const {
3429 
3430   for (ChunkIndex i = ZeroIndex;
3431        i < NumberOfInUseLists ;
3432        i = next_chunk_index(i) ) {
3433     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
3434                  p2i(chunks_in_use(i)),
3435                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
3436   }
3437   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
3438                " Humongous " SIZE_FORMAT,
3439                sum_waste_in_chunks_in_use(SmallIndex),
3440                sum_waste_in_chunks_in_use(MediumIndex),
3441                sum_waste_in_chunks_in_use(HumongousIndex));
3442   // block free lists
3443   if (block_freelists() != NULL) {
3444     st->print_cr("total in block free lists " SIZE_FORMAT,
3445       block_freelists()->total_size());
3446   }
3447 }
3448 
3449 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
3450                            Metaspace::MetaspaceType space_type,
3451                            Mutex* lock) :
3452   _mdtype(mdtype),
3453   _space_type(space_type),
3454   _allocated_blocks_words(0),
3455   _allocated_chunks_words(0),
3456   _allocated_chunks_count(0),
3457   _block_freelists(NULL),
3458   _lock(lock)
3459 {
3460   initialize();
3461 }
3462 
3463 void SpaceManager::inc_size_metrics(size_t words) {
3464   assert_lock_strong(MetaspaceExpand_lock);
3465   // Running total of words in allocated Metachunks and the count of
3466   // allocated Metachunks for this SpaceManager.
3467   _allocated_chunks_words = _allocated_chunks_words + words;
3468   _allocated_chunks_count++;
3469   // Global total of capacity in allocated Metachunks
3470   MetaspaceUtils::inc_capacity(mdtype(), words);
3471   // Global total of allocated Metablocks.
3472   // used_words_slow() includes the overhead in each
3473   // Metachunk so include it in the used when the
3474   // Metachunk is first added (so only added once per
3475   // Metachunk).
3476   MetaspaceUtils::inc_used(mdtype(), Metachunk::overhead());
3477 }
3478 
3479 void SpaceManager::inc_used_metrics(size_t words) {
3480   // Add to the per SpaceManager total
3481   Atomic::add(words, &_allocated_blocks_words);
3482   // Add to the global total
3483   MetaspaceUtils::inc_used(mdtype(), words);
3484 }
3485 
3486 void SpaceManager::dec_total_from_size_metrics() {
3487   MetaspaceUtils::dec_capacity(mdtype(), allocated_chunks_words());
3488   MetaspaceUtils::dec_used(mdtype(), allocated_blocks_words());
3489   // Also deduct the overhead per Metachunk
3490   MetaspaceUtils::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
3491 }
3492 
3493 void SpaceManager::initialize() {
3494   Metadebug::init_allocation_fail_alot_count();
3495   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3496     _chunks_in_use[i] = NULL;
3497   }
3498   _current_chunk = NULL;
3499   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
3500 }
3501 
3502 SpaceManager::~SpaceManager() {
3503   // This call takes this->_lock, which cannot be done while holding the MetaspaceExpand_lock.
3504   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
3505          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
3506          " allocated_chunks_words() " SIZE_FORMAT,
3507          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
3508 
3509   MutexLockerEx fcl(MetaspaceExpand_lock,
3510                     Mutex::_no_safepoint_check_flag);
3511 
3512   assert(sum_count_in_chunks_in_use() == allocated_chunks_count(),
3513          "sum_count_in_chunks_in_use() " SIZE_FORMAT
3514          " allocated_chunks_count() " SIZE_FORMAT,
3515          sum_count_in_chunks_in_use(), allocated_chunks_count());
3516 
3517   chunk_manager()->slow_locked_verify();
3518 
3519   dec_total_from_size_metrics();
3520 
3521   Log(gc, metaspace, freelist) log;
3522   if (log.is_trace()) {
3523     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
3524     ResourceMark rm;
3525     LogStream ls(log.trace());
3526     locked_print_chunks_in_use_on(&ls);
3527     if (block_freelists() != NULL) {
3528       block_freelists()->print_on(&ls);
3529     }
3530   }
3531 
3532   // Add all the chunks in use by this space manager
3533   // to the global list of free chunks.
3534 
3535   // Follow each list of chunks-in-use and add them to the
3536   // free lists.  Each list is NULL terminated.
3537 
3538   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
3539     Metachunk* chunks = chunks_in_use(i);
3540     chunk_manager()->return_chunk_list(i, chunks);
3541     set_chunks_in_use(i, NULL);
3542   }
3543 
3544   chunk_manager()->slow_locked_verify();
3545 
3546   if (_block_freelists != NULL) {
3547     delete _block_freelists;
3548   }
3549 }
3550 
3551 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
3552   assert_lock_strong(_lock);
3553   // Allocations and deallocations are in raw_word_size
3554   size_t raw_word_size = get_allocation_word_size(word_size);
3555   // Lazily create a block_freelist
3556   if (block_freelists() == NULL) {
3557     _block_freelists = new BlockFreelist();
3558   }
3559   block_freelists()->return_block(p, raw_word_size);
3560 }
3561 
3562 // Adds a chunk to the list of chunks in use.
3563 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
3564 
3565   assert(new_chunk != NULL, "Should not be NULL");
3566   assert(new_chunk->next() == NULL, "Should not be on a list");
3567 
3568   new_chunk->reset_empty();
3569 
3570   // Find the correct list and set the current
3571   // chunk for that list.
3572   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
3573 
3574   if (make_current) {
3575     // If we are to make the chunk current, retire the old current chunk and replace
3576     // it with the new chunk.
3577     retire_current_chunk();
3578     set_current_chunk(new_chunk);
3579   }
3580 
3581   // Add the new chunk at the head of its respective chunk list.
3582   new_chunk->set_next(chunks_in_use(index));
3583   set_chunks_in_use(index, new_chunk);
3584 
3585   // Add to the running sum of capacity
3586   inc_size_metrics(new_chunk->word_size());
3587 
3588   assert(new_chunk->is_empty(), "Not ready for reuse");
3589   Log(gc, metaspace, freelist) log;
3590   if (log.is_trace()) {
3591     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT ") ", sum_count_in_chunks_in_use());
3592     ResourceMark rm;
3593     LogStream ls(log.trace());
3594     new_chunk->print_on(&ls);
3595     chunk_manager()->locked_print_free_chunks(&ls);
3596   }
3597 }
3598 
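     // Retires the current chunk by handing its remaining free space to the block
     // freelist (if the leftover is large enough to be tracked there), so that
     // later small allocations can reuse it instead of the space being wasted.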
3599 void SpaceManager::retire_current_chunk() {
3600   if (current_chunk() != NULL) {
3601     size_t remaining_words = current_chunk()->free_word_size();
3602     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
3603       MetaWord* ptr = current_chunk()->allocate(remaining_words);
3604       deallocate(ptr, remaining_words);
3605       inc_used_metrics(remaining_words);
3606     }
3607   }
3608 }
3609 
3610 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
3611   // Get a chunk from the chunk freelist
3612   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
3613 
3614   if (next == NULL) {
3615     next = vs_list()->get_new_chunk(chunk_word_size,
3616                                     medium_chunk_bunch());
3617   }
3618 
3619   Log(gc, metaspace, alloc) log;
3620   if (log.is_debug() && next != NULL &&
3621       SpaceManager::is_humongous(next->word_size())) {
3622     log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());
3623   }
3624 
3625   return next;
3626 }
3627 
3628 MetaWord* SpaceManager::allocate(size_t word_size) {
3629   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3630   size_t raw_word_size = get_allocation_word_size(word_size);
3631   BlockFreelist* fl =  block_freelists();
3632   MetaWord* p = NULL;
3633   // Allocation from the dictionary is expensive in the sense that
3634   // the dictionary has to be searched for a size.  Don't allocate
3635   // from the dictionary until it starts to get fat.  Is this
3636   // a reasonable policy?  Maybe a skinny dictionary is fast enough
3637   // for allocations.  Do some profiling.  JJJ
3638   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
3639     p = fl->get_block(raw_word_size);
3640   }
3641   if (p == NULL) {
3642     p = allocate_work(raw_word_size);
3643   }
3644 
3645   return p;
3646 }
3647 
3648 // Returns the address of the space allocated for "word_size".
3649 // This method does not know about blocks (Metablocks).
3650 MetaWord* SpaceManager::allocate_work(size_t word_size) {
3651   assert_lock_strong(_lock);
3652 #ifdef ASSERT
3653   if (Metadebug::test_metadata_failure()) {
3654     return NULL;
3655   }
3656 #endif
3657   // Is there space in the current chunk?
3658   MetaWord* result = NULL;
3659 
3660   if (current_chunk() != NULL) {
3661     result = current_chunk()->allocate(word_size);
3662   }
3663 
3664   if (result == NULL) {
3665     result = grow_and_allocate(word_size);
3666   }
3667 
3668   if (result != NULL) {
3669     inc_used_metrics(word_size);
3670     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
3671            "Head of the list is being allocated");
3672   }
3673 
3674   return result;
3675 }
3676 
3677 void SpaceManager::verify() {
3678   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3679     Metachunk* curr = chunks_in_use(i);
3680     while (curr != NULL) {
3681       DEBUG_ONLY(do_verify_chunk(curr);)
3682       assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
3683       curr = curr->next();
3684     }
3685   }
3686 }
3687 
3688 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
3689   assert(is_humongous(chunk->word_size()) ||
3690          chunk->word_size() == medium_chunk_size() ||
3691          chunk->word_size() == small_chunk_size() ||
3692          chunk->word_size() == specialized_chunk_size(),
3693          "Chunk size is wrong");
3694   return;
3695 }
3696 
3697 #ifdef ASSERT
3698 void SpaceManager::verify_allocated_blocks_words() {
3699   // Verification is only guaranteed at a safepoint.
3700   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
3701     "Verification can fail if the application is running");
3702   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
3703          "allocation total is not consistent " SIZE_FORMAT
3704          " vs " SIZE_FORMAT,
3705          allocated_blocks_words(), sum_used_in_chunks_in_use());
3706 }
3707 
3708 #endif
3709 
3710 void SpaceManager::dump(outputStream* const out) const {
3711   size_t curr_total = 0;
3712   size_t waste = 0;
3713   uint i = 0;
3714   size_t used = 0;
3715   size_t capacity = 0;
3716 
3717   // Add up statistics for all chunks in this SpaceManager.
3718   for (ChunkIndex index = ZeroIndex;
3719        index < NumberOfInUseLists;
3720        index = next_chunk_index(index)) {
3721     for (Metachunk* curr = chunks_in_use(index);
3722          curr != NULL;
3723          curr = curr->next()) {
3724       out->print("%d) ", i++);
3725       curr->print_on(out);
3726       curr_total += curr->word_size();
3727       used += curr->used_word_size();
3728       capacity += curr->word_size();
3729       waste += curr->free_word_size() + curr->overhead();
3730     }
3731   }
3732 
3733   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3734     if (block_freelists() != NULL) block_freelists()->print_on(out);
3735   }
3736 
3737   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
3738   // Free space isn't wasted.
3739   waste -= free;
3740 
3741   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
3742                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
3743                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
3744 }
3745 
3746 // MetaspaceUtils
3747 
3748 
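     // Running totals, indexed by Metaspace::MetadataType. _capacity_words is
     // only updated under the MetaspaceExpand_lock, while _used_words is updated
     // atomically because allocations can happen outside that lock.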
3749 size_t MetaspaceUtils::_capacity_words[] = {0, 0};
3750 volatile size_t MetaspaceUtils::_used_words[] = {0, 0};
3751 
3752 size_t MetaspaceUtils::free_bytes(Metaspace::MetadataType mdtype) {
3753   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3754   return list == NULL ? 0 : list->free_bytes();
3755 }
3756 
3757 size_t MetaspaceUtils::free_bytes() {
3758   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
3759 }
3760 
3761 void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
3762   assert_lock_strong(MetaspaceExpand_lock);
3763   assert(words <= capacity_words(mdtype),
3764          "About to decrement below 0: words " SIZE_FORMAT
3765          " is greater than _capacity_words[%u] " SIZE_FORMAT,
3766          words, mdtype, capacity_words(mdtype));
3767   _capacity_words[mdtype] -= words;
3768 }
3769 
3770 void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
3771   assert_lock_strong(MetaspaceExpand_lock);
3772   // Needs to be atomic
3773   _capacity_words[mdtype] += words;
3774 }
3775 
3776 void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
3777   assert(words <= used_words(mdtype),
3778          "About to decrement below 0: words " SIZE_FORMAT
3779          " is greater than _used_words[%u] " SIZE_FORMAT,
3780          words, mdtype, used_words(mdtype));
3781   // For CMS deallocation of the Metaspaces occurs during the
3782   // sweep which is a concurrent phase.  Protection by the MetaspaceExpand_lock
3783   // is not enough since allocation is on a per Metaspace basis
3784   // and protected by the Metaspace lock.
3785   Atomic::sub(words, &_used_words[mdtype]);
3786 }
3787 
3788 void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
3789   // _used_words tracks allocations for
3790   // each piece of metadata.  Those allocations are
3791   // generally done concurrently by different application
3792   // threads, so the updates must be done atomically.
3793   Atomic::add(words, &_used_words[mdtype]);
3794 }
3795 
3796 size_t MetaspaceUtils::used_bytes_slow(Metaspace::MetadataType mdtype) {
3797   size_t used = 0;
3798   ClassLoaderDataGraphMetaspaceIterator iter;
3799   while (iter.repeat()) {
3800     ClassLoaderMetaspace* msp = iter.get_next();
3801     // Sum allocated_blocks_words for each metaspace
3802     if (msp != NULL) {
3803       used += msp->used_words_slow(mdtype);
3804     }
3805   }
3806   return used * BytesPerWord;
3807 }
3808 
3809 size_t MetaspaceUtils::free_bytes_slow(Metaspace::MetadataType mdtype) {
3810   size_t free = 0;
3811   ClassLoaderDataGraphMetaspaceIterator iter;
3812   while (iter.repeat()) {
3813     ClassLoaderMetaspace* msp = iter.get_next();
3814     if (msp != NULL) {
3815       free += msp->free_words_slow(mdtype);
3816     }
3817   }
3818   return free * BytesPerWord;
3819 }
3820 
3821 size_t MetaspaceUtils::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
3822   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
3823     return 0;
3824   }
3825   // Don't count the space in the freelists.  That space will be
3826   // added to the capacity calculation as needed.
3827   size_t capacity = 0;
3828   ClassLoaderDataGraphMetaspaceIterator iter;
3829   while (iter.repeat()) {
3830     ClassLoaderMetaspace* msp = iter.get_next();
3831     if (msp != NULL) {
3832       capacity += msp->capacity_words_slow(mdtype);
3833     }
3834   }
3835   return capacity * BytesPerWord;
3836 }
3837 
3838 size_t MetaspaceUtils::capacity_bytes_slow() {
3839 #ifdef PRODUCT
3840   // Use capacity_bytes() in PRODUCT instead of this function.
3841   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
3842 #endif
3843   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
3844   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
3845   assert(capacity_bytes() == class_capacity + non_class_capacity,
3846          "bad accounting: capacity_bytes() " SIZE_FORMAT
3847          " class_capacity + non_class_capacity " SIZE_FORMAT
3848          " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
3849          capacity_bytes(), class_capacity + non_class_capacity,
3850          class_capacity, non_class_capacity);
3851 
3852   return class_capacity + non_class_capacity;
3853 }
3854 
3855 size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
3856   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3857   return list == NULL ? 0 : list->reserved_bytes();
3858 }
3859 
3860 size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
3861   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3862   return list == NULL ? 0 : list->committed_bytes();
3863 }
3864 
3865 size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
3866 
3867 size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
3868   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
3869   if (chunk_manager == NULL) {
3870     return 0;
3871   }
3872   chunk_manager->slow_verify();
3873   return chunk_manager->free_chunks_total_words();
3874 }
3875 
3876 size_t MetaspaceUtils::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
3877   return free_chunks_total_words(mdtype) * BytesPerWord;
3878 }
3879 
3880 size_t MetaspaceUtils::free_chunks_total_words() {
3881   return free_chunks_total_words(Metaspace::ClassType) +
3882          free_chunks_total_words(Metaspace::NonClassType);
3883 }
3884 
3885 size_t MetaspaceUtils::free_chunks_total_bytes() {
3886   return free_chunks_total_words() * BytesPerWord;
3887 }
3888 
3889 bool MetaspaceUtils::has_chunk_free_list(Metaspace::MetadataType mdtype) {
3890   return Metaspace::get_chunk_manager(mdtype) != NULL;
3891 }
3892 
3893 MetaspaceChunkFreeListSummary MetaspaceUtils::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
3894   if (!has_chunk_free_list(mdtype)) {
3895     return MetaspaceChunkFreeListSummary();
3896   }
3897 
3898   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
3899   return cm->chunk_free_list_summary();
3900 }
3901 
3902 void MetaspaceUtils::print_metaspace_change(size_t prev_metadata_used) {
3903   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
3904                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
3905 }
3906 
3907 void MetaspaceUtils::print_on(outputStream* out) {
3908   Metaspace::MetadataType nct = Metaspace::NonClassType;
3909 
3910   out->print_cr(" Metaspace       "
3911                 "used "      SIZE_FORMAT "K, "
3912                 "capacity "  SIZE_FORMAT "K, "
3913                 "committed " SIZE_FORMAT "K, "
3914                 "reserved "  SIZE_FORMAT "K",
3915                 used_bytes()/K,
3916                 capacity_bytes()/K,
3917                 committed_bytes()/K,
3918                 reserved_bytes()/K);
3919 
3920   if (Metaspace::using_class_space()) {
3921     Metaspace::MetadataType ct = Metaspace::ClassType;
3922     out->print_cr("  class space    "
3923                   "used "      SIZE_FORMAT "K, "
3924                   "capacity "  SIZE_FORMAT "K, "
3925                   "committed " SIZE_FORMAT "K, "
3926                   "reserved "  SIZE_FORMAT "K",
3927                   used_bytes(ct)/K,
3928                   capacity_bytes(ct)/K,
3929                   committed_bytes(ct)/K,
3930                   reserved_bytes(ct)/K);
3931   }
3932 }
3933 
3934 // Print information for class space and data space separately.
3935 // This is almost the same as above.
3936 void MetaspaceUtils::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
3937   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
3938   size_t capacity_bytes = capacity_bytes_slow(mdtype);
3939   size_t used_bytes = used_bytes_slow(mdtype);
3940   size_t free_bytes = free_bytes_slow(mdtype);
3941   size_t used_and_free = used_bytes + free_bytes +
3942                            free_chunks_capacity_bytes;
3943   out->print_cr("  Chunk accounting: (used in chunks " SIZE_FORMAT
3944              "K + unused in chunks " SIZE_FORMAT "K  + "
3945              " capacity in free chunks " SIZE_FORMAT "K) = " SIZE_FORMAT
3946              "K  capacity in allocated chunks " SIZE_FORMAT "K",
3947              used_bytes / K,
3948              free_bytes / K,
3949              free_chunks_capacity_bytes / K,
3950              used_and_free / K,
3951              capacity_bytes / K);
3952   // Accounting can only be correct if we got the values during a safepoint
3953   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
3954 }
3955 
3956 // Print total fragmentation for class metaspaces
3957 void MetaspaceUtils::print_class_waste(outputStream* out) {
3958   assert(Metaspace::using_class_space(), "class metaspace not used");
3959   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
3960   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
3961   ClassLoaderDataGraphMetaspaceIterator iter;
3962   while (iter.repeat()) {
3963     ClassLoaderMetaspace* msp = iter.get_next();
3964     if (msp != NULL) {
3965       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
3966       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
3967       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
3968       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
3969       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
3970       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
3971       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
3972     }
3973   }
3974   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
3975                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
3976                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
3977                 "large count " SIZE_FORMAT,
3978                 cls_specialized_count, cls_specialized_waste,
3979                 cls_small_count, cls_small_waste,
3980                 cls_medium_count, cls_medium_waste, cls_humongous_count);
3981 }
3982 
3983 // Print total fragmentation for data and class metaspaces separately
3984 void MetaspaceUtils::print_waste(outputStream* out) {
3985   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
3986   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
3987 
3988   ClassLoaderDataGraphMetaspaceIterator iter;
3989   while (iter.repeat()) {
3990     ClassLoaderMetaspace* msp = iter.get_next();
3991     if (msp != NULL) {
3992       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
3993       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
3994       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
3995       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
3996       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
3997       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
3998       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
3999     }
4000   }
4001   out->print_cr("Total fragmentation waste (words) doesn't count free space");
4002   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
4003                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
4004                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
4005                         "large count " SIZE_FORMAT,
4006              specialized_count, specialized_waste, small_count,
4007              small_waste, medium_count, medium_waste, humongous_count);
4008   if (Metaspace::using_class_space()) {
4009     print_class_waste(out);
4010   }
4011 }
4012 
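// MetadataStats is a simple value holder for byte counts of capacity, used,
// free and waste. PrintCLDMetaspaceInfoClosure (below) uses it to accumulate
// per-class-loader numbers as well as overall totals.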
4013 class MetadataStats {
4014 private:
4015   size_t _capacity;
4016   size_t _used;
4017   size_t _free;
4018   size_t _waste;
4019 
4020 public:
4021   MetadataStats() : _capacity(0), _used(0), _free(0), _waste(0) { }
4022   MetadataStats(size_t capacity, size_t used, size_t free, size_t waste)
4023   : _capacity(capacity), _used(used), _free(free), _waste(waste) { }
4024 
4025   void add(const MetadataStats& stats) {
4026     _capacity += stats.capacity();
4027     _used += stats.used();
4028     _free += stats.free();
4029     _waste += stats.waste();
4030   }
4031 
4032   size_t capacity() const { return _capacity; }
4033   size_t used() const     { return _used; }
4034   size_t free() const     { return _free; }
4035   size_t waste() const    { return _waste; }
4036 
4037   void print_on(outputStream* out, size_t scale) const;
4038 };
4039 
4040 
4041 void MetadataStats::print_on(outputStream* out, size_t scale) const {
4042   const char* unit = scale_unit(scale);
4043   out->print_cr("capacity=%10.2f%s used=%10.2f%s free=%10.2f%s waste=%10.2f%s",
4044     (float)capacity() / scale, unit,
4045     (float)used() / scale, unit,
4046     (float)free() / scale, unit,
4047     (float)waste() / scale, unit);
4048 }
4049 
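// A CLDClosure which, when walked over the ClassLoaderDataGraph at a safepoint,
// prints the metaspace (and, if in use, class space) statistics of each live
// class loader. Totals are accumulated during the walk and printed from the
// destructor via print_summary().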
4050 class PrintCLDMetaspaceInfoClosure : public CLDClosure {
4051 private:
4052   outputStream*  _out;
4053   size_t         _scale;
4054 
4055   size_t         _total_count;
4056   MetadataStats  _total_metadata;
4057   MetadataStats  _total_class;
4058 
4059   size_t         _total_anon_count;
4060   MetadataStats  _total_anon_metadata;
4061   MetadataStats  _total_anon_class;
4062 
4063 public:
4064   PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale = K)
4065   : _out(out), _scale(scale), _total_count(0), _total_anon_count(0) { }
4066 
4067   ~PrintCLDMetaspaceInfoClosure() {
4068     print_summary();
4069   }
4070 
4071   void do_cld(ClassLoaderData* cld) {
4072     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
4073 
4074     if (cld->is_unloading()) return;
4075     ClassLoaderMetaspace* msp = cld->metaspace_or_null();
4076     if (msp == NULL) {
4077       return;
4078     }
4079 
4080     bool anonymous = false;
4081     if (cld->is_anonymous()) {
4082       _out->print_cr("ClassLoader: for anonymous class");
4083       anonymous = true;
4084     } else {
4085       ResourceMark rm;
4086       _out->print_cr("ClassLoader: %s", cld->loader_name());
4087     }
4088 
4089     print_metaspace(msp, anonymous);
4090     _out->cr();
4091   }
4092 
4093 private:
4094   void print_metaspace(ClassLoaderMetaspace* msp, bool anonymous);
4095   void print_summary() const;
4096 };
4097 
4098 void PrintCLDMetaspaceInfoClosure::print_metaspace(ClassLoaderMetaspace* msp, bool anonymous) {
4099   assert(msp != NULL, "Sanity");
4100   SpaceManager* vsm = msp->vsm();
4101   const char* unit = scale_unit(_scale);
4102 
4103   size_t capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4104   size_t used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4105   size_t free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4106   size_t waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4107 
4108   _total_count ++;
4109   MetadataStats metadata_stats(capacity, used, free, waste);
4110   _total_metadata.add(metadata_stats);
4111 
4112   if (anonymous) {
4113     _total_anon_count ++;
4114     _total_anon_metadata.add(metadata_stats);
4115   }
4116 
4117   _out->print("  Metadata   ");
4118   metadata_stats.print_on(_out, _scale);
4119 
4120   if (Metaspace::using_class_space()) {
4121     vsm = msp->class_vsm();
4122 
4123     capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4124     used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4125     free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4126     waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4127 
4128     MetadataStats class_stats(capacity, used, free, waste);
4129     _total_class.add(class_stats);
4130 
4131     if (anonymous) {
4132       _total_anon_class.add(class_stats);
4133     }
4134 
4135     _out->print("  Class data ");
4136     class_stats.print_on(_out, _scale);
4137   }
4138 }
4139 
4140 void PrintCLDMetaspaceInfoClosure::print_summary() const {
4141   const char* unit = scale_unit(_scale);
4142   _out->cr();
4143   _out->print_cr("Summary:");
4144 
4145   MetadataStats total;
4146   total.add(_total_metadata);
4147   total.add(_total_class);
4148 
4149   _out->print("  Total class loaders=" SIZE_FORMAT_W(6) " ", _total_count);
4150   total.print_on(_out, _scale);
4151 
4152   _out->print("                    Metadata ");
4153   _total_metadata.print_on(_out, _scale);
4154 
4155   if (Metaspace::using_class_space()) {
4156     _out->print("                  Class data ");
4157     _total_class.print_on(_out, _scale);
4158   }
4159   _out->cr();
4160 
4161   MetadataStats total_anon;
4162   total_anon.add(_total_anon_metadata);
4163   total_anon.add(_total_anon_class);
4164 
4165   _out->print("For anonymous classes=" SIZE_FORMAT_W(6) " ", _total_anon_count);
4166   total_anon.print_on(_out, _scale);
4167 
4168   _out->print("                    Metadata ");
4169   _total_anon_metadata.print_on(_out, _scale);
4170 
4171   if (Metaspace::using_class_space()) {
4172     _out->print("                  Class data ");
4173     _total_anon_class.print_on(_out, _scale);
4174   }
4175 }
4176 
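// Print a metaspace overview intended for NMT (Native Memory Tracking) detail
// output: reserved and committed sizes per metadata type, the state of the
// chunk managers, and per-class-loader metadata usage.
// Minimal usage sketch (illustrative only; assumes an initialized VM and a
// valid outputStream* st):
//
//   MetaspaceUtils::print_metadata_for_nmt(st, K);  // sizes scaled to KB
//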
4177 void MetaspaceUtils::print_metadata_for_nmt(outputStream* out, size_t scale) {
4178   const char* unit = scale_unit(scale);
4179   out->print_cr("Metaspaces:");
4180   out->print_cr("  Metadata space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
4181     reserved_bytes(Metaspace::NonClassType) / scale, unit,
4182     committed_bytes(Metaspace::NonClassType) / scale, unit);
4183   if (Metaspace::using_class_space()) {
4184     out->print_cr("  Class    space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
4185     reserved_bytes(Metaspace::ClassType) / scale, unit,
4186     committed_bytes(Metaspace::ClassType) / scale, unit);
4187   }
4188 
4189   out->cr();
4190   ChunkManager::print_all_chunkmanagers(out, scale);
4191 
4192   out->cr();
4193   out->print_cr("Per-classloader metadata:");
4194   out->cr();
4195 
4196   PrintCLDMetaspaceInfoClosure cl(out, scale);
4197   ClassLoaderDataGraph::cld_do(&cl);
4198 }
4199 
4200 
4201 // Dump global metaspace things from the end of ClassLoaderDataGraph
4202 void MetaspaceUtils::dump(outputStream* out) {
4203   out->print_cr("All Metaspace:");
4204   out->print("data space: "); print_on(out, Metaspace::NonClassType);
4205   out->print("class space: "); print_on(out, Metaspace::ClassType);
4206   print_waste(out);
4207 }
4208 
4209 // Prints an ASCII representation of the given space.
4210 void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
4211   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
4212   const bool for_class = (mdtype == Metaspace::ClassType);
4213   VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
4214   if (vsl != NULL) {
4215     if (for_class) {
4216       if (!Metaspace::using_class_space()) {
4217         out->print_cr("No Class Space.");
4218         return;
4219       }
4220       out->print_raw("---- Metaspace Map (Class Space) ----");
4221     } else {
4222       out->print_raw("---- Metaspace Map (Non-Class Space) ----");
4223     }
4224     // Print legend:
4225     out->cr();
4226     out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
4227     out->cr();
4229     vsl->print_map(out);
4230     out->cr();
4231   }
4232 }
4233 
4234 void MetaspaceUtils::verify_free_chunks() {
4235   Metaspace::chunk_manager_metadata()->verify();
4236   if (Metaspace::using_class_space()) {
4237     Metaspace::chunk_manager_class()->verify();
4238   }
4239 }
4240 
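// Debug-only consistency check: the incrementally maintained capacity counters
// (capacity_bytes()) must agree with a slow recomputation (capacity_bytes_slow()),
// both in total and per metadata type.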
4241 void MetaspaceUtils::verify_capacity() {
4242 #ifdef ASSERT
4243   size_t running_sum_capacity_bytes = capacity_bytes();
4244   // For purposes of the running sum of capacity, verify against capacity
4245   size_t capacity_in_use_bytes = capacity_bytes_slow();
4246   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
4247          "capacity_bytes() " SIZE_FORMAT
4248          " capacity_bytes_slow() " SIZE_FORMAT,
4249          running_sum_capacity_bytes, capacity_in_use_bytes);
4250   for (Metaspace::MetadataType i = Metaspace::ClassType;
4251        i < Metaspace::MetadataTypeCount;
4252        i = (Metaspace::MetadataType)(i + 1)) {
4253     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
4254     assert(capacity_bytes(i) == capacity_in_use_bytes,
4255            "capacity_bytes(%u) " SIZE_FORMAT
4256            " capacity_bytes_slow(%u) " SIZE_FORMAT,
4257            i, capacity_bytes(i), i, capacity_in_use_bytes);
4258   }
4259 #endif
4260 }
4261 
4262 void MetaspaceUtils::verify_used() {
4263 #ifdef ASSERT
4264   size_t running_sum_used_bytes = used_bytes();
4265   // For purposes of the running sum of used, verify against used
4266   size_t used_in_use_bytes = used_bytes_slow();
4267   assert(used_bytes() == used_in_use_bytes,
4268          "used_bytes() " SIZE_FORMAT
4269          " used_bytes_slow() " SIZE_FORMAT,
4270          used_bytes(), used_in_use_bytes);
4271   for (Metaspace::MetadataType i = Metaspace::ClassType;
4272        i < Metaspace::MetadataTypeCount;
4273        i = (Metaspace::MetadataType)(i + 1)) {
4274     size_t used_in_use_bytes = used_bytes_slow(i);
4275     assert(used_bytes(i) == used_in_use_bytes,
4276            "used_bytes(%u) " SIZE_FORMAT
4277            " used_bytes_slow(%u) " SIZE_FORMAT,
4278            i, used_bytes(i), i, used_in_use_bytes);
4279   }
4280 #endif
4281 }
4282 
4283 void MetaspaceUtils::verify_metrics() {
4284   verify_capacity();
4285   verify_used();
4286 }
4287 
4288 
4289 // Metaspace methods
4290 
4291 size_t Metaspace::_first_chunk_word_size = 0;
4292 size_t Metaspace::_first_class_chunk_word_size = 0;
4293 
4294 size_t Metaspace::_commit_alignment = 0;
4295 size_t Metaspace::_reserve_alignment = 0;
4296 
4297 VirtualSpaceList* Metaspace::_space_list = NULL;
4298 VirtualSpaceList* Metaspace::_class_space_list = NULL;
4299 
4300 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
4301 ChunkManager* Metaspace::_chunk_manager_class = NULL;
4302 
4303 #define VIRTUALSPACEMULTIPLIER 2
4304 
4305 #ifdef _LP64
4306 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
4307 
4308 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
4309   assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
4310   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
4311   // narrow_klass_base is the lower of the metaspace base and the cds base
4312   // (if cds is enabled).  The narrow_klass_shift depends on the distance
4313   // between the lower base and higher address.
4314   address lower_base;
4315   address higher_address;
4316 #if INCLUDE_CDS
4317   if (UseSharedSpaces) {
4318     higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4319                           (address)(metaspace_base + compressed_class_space_size()));
4320     lower_base = MIN2(metaspace_base, cds_base);
4321   } else
4322 #endif
4323   {
4324     higher_address = metaspace_base + compressed_class_space_size();
4325     lower_base = metaspace_base;
4326 
4327     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
4328     // If compressed class space fits in lower 32G, we don't need a base.
4329     if (higher_address <= (address)klass_encoding_max) {
4330       lower_base = 0; // Effectively lower base is zero.
4331     }
4332   }
4333 
4334   Universe::set_narrow_klass_base(lower_base);
4335 
4336   // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
4337   // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
4338   // how the dump-time narrow_klass_shift is set. Although CDS can also work
4339   // in zero-shift mode, it uses LogKlassAlignmentInBytes for the klass shift
4340   // to stay consistent with AOT, so archived java heap objects can be used
4341   // at the same time as AOT code.
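  // For illustration: with the usual LogKlassAlignmentInBytes of 3 (Klass
  // structures are 8-byte aligned), an unshifted encoding covers 4G above the
  // base, while a shifted encoding covers 32G (matching the klass_encoding_max
  // computation above).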
4342   if (!UseSharedSpaces
4343       && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
4344     Universe::set_narrow_klass_shift(0);
4345   } else {
4346     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
4347   }
4348   AOTLoader::set_narrow_klass_shift();
4349 }
4350 
4351 #if INCLUDE_CDS
4352 // Return TRUE if the specified metaspace_base and cds_base are close enough
4353 // to work with compressed klass pointers.
4354 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
4355   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
4356   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4357   address lower_base = MIN2((address)metaspace_base, cds_base);
4358   address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4359                                 (address)(metaspace_base + compressed_class_space_size()));
4360   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
4361 }
4362 #endif
4363 
4364 // Try to allocate the metaspace at the requested addr.
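// If the reservation at that address fails, the code below falls back to probing
// other candidate addresses (with platform-specific preferences on AArch64/AIX)
// and finally to an unpositioned reservation; only if that also fails is VM
// initialization aborted.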
4365 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
4366   assert(!DumpSharedSpaces, "compressed class space is allocated by MetaspaceShared class.");
4367   assert(using_class_space(), "called improperly");
4368   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4369   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
4370          "Metaspace size is too big");
4371   assert_is_aligned(requested_addr, _reserve_alignment);
4372   assert_is_aligned(cds_base, _reserve_alignment);
4373   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
4374 
4375   // Don't use large pages for the class space.
4376   bool large_pages = false;
4377 
4378 #if !(defined(AARCH64) || defined(AIX))
4379   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
4380                                              _reserve_alignment,
4381                                              large_pages,
4382                                              requested_addr);
4383 #else // AARCH64 || AIX
4384   ReservedSpace metaspace_rs;
4385 
4386   // Our compressed klass pointers may fit nicely into the lower 32
4387   // bits.
4388   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
4389     metaspace_rs = ReservedSpace(compressed_class_space_size(),
4390                                  _reserve_alignment,
4391                                  large_pages,
4392                                  requested_addr);
4393   }
4394 
4395   if (! metaspace_rs.is_reserved()) {
4396     // Aarch64: Try to align metaspace so that we can decode a compressed
4397     // klass with a single MOVK instruction.  We can do this iff the
4398     // compressed class base is a multiple of 4G.
4399     // Aix: Search for a place where we can find memory. If we need to load
4400     // the base, 4G alignment is helpful, too.
4401     size_t increment = AARCH64_ONLY(4*)G;
4402     for (char *a = align_up(requested_addr, increment);
4403          a < (char*)(1024*G);
4404          a += increment) {
4405       if (a == (char *)(32*G)) {
4406         // Go faster from here on. Zero-based is no longer possible.
4407         increment = 4*G;
4408       }
4409 
4410 #if INCLUDE_CDS
4411       if (UseSharedSpaces
4412           && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
4413         // We failed to find an aligned base that will reach.  Fall
4414         // back to using our requested addr.
4415         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4416                                      _reserve_alignment,
4417                                      large_pages,
4418                                      requested_addr);
4419         break;
4420       }
4421 #endif
4422 
4423       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4424                                    _reserve_alignment,
4425                                    large_pages,
4426                                    a);
4427       if (metaspace_rs.is_reserved())
4428         break;
4429     }
4430   }
4431 
4432 #endif // AARCH64 || AIX
4433 
4434   if (!metaspace_rs.is_reserved()) {
4435 #if INCLUDE_CDS
4436     if (UseSharedSpaces) {
4437       size_t increment = align_up(1*G, _reserve_alignment);
4438 
4439       // Keep trying to allocate the metaspace, increasing the requested_addr
4440       // by 1GB each time, until we reach an address that will no longer allow
4441       // use of CDS with compressed klass pointers.
4442       char *addr = requested_addr;
4443       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
4444              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
4445         addr = addr + increment;
4446         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4447                                      _reserve_alignment, large_pages, addr);
4448       }
4449     }
4450 #endif
4451     // If no successful allocation then try to allocate the space anywhere.  If
4452     // that fails then OOM doom.  At this point we cannot try allocating the
4453     // metaspace as if UseCompressedClassPointers is off because too much
4454     // initialization has happened that depends on UseCompressedClassPointers.
4455     // So, UseCompressedClassPointers cannot be turned off at this point.
4456     if (!metaspace_rs.is_reserved()) {
4457       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4458                                    _reserve_alignment, large_pages);
4459       if (!metaspace_rs.is_reserved()) {
4460         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
4461                                               compressed_class_space_size()));
4462       }
4463     }
4464   }
4465 
4466   // If we got here then the metaspace got allocated.
4467   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
4468 
4469 #if INCLUDE_CDS
4470   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
4471   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
4472     FileMapInfo::stop_sharing_and_unmap(
4473         "Could not allocate metaspace at a compatible address");
4474   }
4475 #endif
4476   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
4477                                   UseSharedSpaces ? (address)cds_base : 0);
4478 
4479   initialize_class_space(metaspace_rs);
4480 
4481   LogTarget(Trace, gc, metaspace) lt;
4482   if (lt.is_enabled()) {
4483     ResourceMark rm;
4484     LogStream ls(lt);
4485     print_compressed_class_space(&ls, requested_addr);
4486   }
4487 }
4488 
4489 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
4490   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
4491                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
4492   if (_class_space_list != NULL) {
4493     address base = (address)_class_space_list->current_virtual_space()->bottom();
4494     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
4495                  compressed_class_space_size(), p2i(base));
4496     if (requested_addr != 0) {
4497       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
4498     }
4499     st->cr();
4500   }
4501 }
4502 
4503 // For UseCompressedClassPointers the class space is reserved above the top of
4504 // the Java heap.  The argument passed in is at the base of the compressed space.
4505 void Metaspace::initialize_class_space(ReservedSpace rs) {
4506   // The reserved space size may be bigger because of alignment, esp with UseLargePages
4507   assert(rs.size() >= CompressedClassSpaceSize,
4508          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
4509   assert(using_class_space(), "Must be using class space");
4510   _class_space_list = new VirtualSpaceList(rs);
4511   _chunk_manager_class = new ChunkManager(true/*is_class*/);
4512 
4513   if (!_class_space_list->initialization_succeeded()) {
4514     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
4515   }
4516 }
4517 
4518 #endif
4519 
4520 void Metaspace::ergo_initialize() {
4521   if (DumpSharedSpaces) {
4522     // Using large pages when dumping the shared archive is currently not implemented.
4523     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
4524   }
4525 
4526   size_t page_size = os::vm_page_size();
4527   if (UseLargePages && UseLargePagesInMetaspace) {
4528     page_size = os::large_page_size();
4529   }
4530 
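  // Committing is done with page granularity; reservations must additionally
  // honor the OS allocation granularity (e.g. 64K on Windows).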
4531   _commit_alignment  = page_size;
4532   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
4533 
4534   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since doing so would
4535   // lose the information of whether MaxMetaspaceSize was set on the command line.
4536   // This information is needed later to conform to the specification of the
4537   // java.lang.management.MemoryUsage API.
4538   //
4539   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
4540   // globals.hpp to the aligned value, but this is not possible, since the
4541   // alignment depends on other flags being parsed.
4542   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
4543 
4544   if (MetaspaceSize > MaxMetaspaceSize) {
4545     MetaspaceSize = MaxMetaspaceSize;
4546   }
4547 
4548   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
4549 
4550   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
4551 
4552   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
4553   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
4554 
4555   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
4556 
4557   // Initial virtual space size will be calculated at global_initialize()
4558   size_t min_metaspace_sz =
4559       VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
4560   if (UseCompressedClassPointers) {
4561     if ((min_metaspace_sz + CompressedClassSpaceSize) >  MaxMetaspaceSize) {
4562       if (min_metaspace_sz >= MaxMetaspaceSize) {
4563         vm_exit_during_initialization("MaxMetaspaceSize is too small.");
4564       } else {
4565         FLAG_SET_ERGO(size_t, CompressedClassSpaceSize,
4566                       MaxMetaspaceSize - min_metaspace_sz);
4567       }
4568     }
4569   } else if (min_metaspace_sz >= MaxMetaspaceSize) {
4570     FLAG_SET_ERGO(size_t, InitialBootClassLoaderMetaspaceSize,
4571                   min_metaspace_sz);
4572   }
4573 
4574   set_compressed_class_space_size(CompressedClassSpaceSize);
4575 }
4576 
4577 void Metaspace::global_initialize() {
4578   MetaspaceGC::initialize();
4579 
4580 #if INCLUDE_CDS
4581   if (DumpSharedSpaces) {
4582     MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
4583   } else if (UseSharedSpaces) {
4584     // If any of the archived space fails to map, UseSharedSpaces
4585     // is reset to false. Fall through to the
4586     // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
4587     // metaspace.
4588     MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
4589   }
4590 
4591   if (!DumpSharedSpaces && !UseSharedSpaces)
4592 #endif // INCLUDE_CDS
4593   {
4594 #ifdef _LP64
4595     if (using_class_space()) {
4596       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
4597       allocate_metaspace_compressed_klass_ptrs(base, 0);
4598     }
4599 #endif // _LP64
4600   }
4601 
4602   // Initialize these before initializing the VirtualSpaceList
4603   _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
4604   _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
4605   // Make the first class chunk bigger than a medium chunk so it's not put
4606   // on the medium chunk list.  The next chunk will be small and progress
4607   // from there.  This size was determined by measuring a run of -version.
4608   _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
4609                                      (CompressedClassSpaceSize/BytesPerWord)*2);
4610   _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
4611   // Arbitrarily set the initial virtual space to a multiple
4612   // of the boot class loader size.
4613   size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
4614   word_size = align_up(word_size, Metaspace::reserve_alignment_words());
4615 
4616   // Initialize the list of virtual spaces.
4617   _space_list = new VirtualSpaceList(word_size);
4618   _chunk_manager_metadata = new ChunkManager(false/*is_class*/);
4619 
4620   if (!_space_list->initialization_succeeded()) {
4621     vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
4622   }
4623 
4624   _tracer = new MetaspaceTracer();
4625 }
4626 
4627 void Metaspace::post_initialize() {
4628   MetaspaceGC::post_initialize();
4629 }
4630 
4631 void Metaspace::verify_global_initialization() {
4632   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
4633   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
4634 
4635   if (using_class_space()) {
4636     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
4637     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
4638   }
4639 }
4640 
4641 size_t Metaspace::align_word_size_up(size_t word_size) {
4642   size_t byte_size = word_size * wordSize;
4643   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
4644 }
4645 
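// Allocate word_size words of metadata of the given type on behalf of loader_data.
// On failure a GC may be triggered (once bootstrapping has completed) and the
// allocation is retried; if it still fails, a metaspace out-of-memory condition
// is reported (see report_metadata_oome below).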
4646 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
4647                               MetaspaceObj::Type type, TRAPS) {
4648   assert(!_frozen, "sanity");
4649   if (HAS_PENDING_EXCEPTION) {
4650     assert(false, "Should not allocate with exception pending");
4651     return NULL;  // caller does a CHECK_NULL too
4652   }
4653 
4654   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
4655         "ClassLoaderData::the_null_class_loader_data() should have been used.");
4656 
4657   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
4658 
4659   // Try to allocate metadata.
4660   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
4661 
4662   if (result == NULL) {
4663     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
4664 
4665     // Allocation failed.
4666     if (is_init_completed() && !(DumpSharedSpaces && THREAD->is_VM_thread())) {
4667       // Only start a GC if the bootstrapping has completed.
4668       // Also, we cannot GC if we are at the end of the CDS dumping stage which runs inside
4669       // the VM thread.
4670 
4671       // Try to clean out some memory and retry.
4672       result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
4673     }
4674   }
4675 
4676   if (result == NULL) {
4677     if (DumpSharedSpaces) {
4678       // CDS dumping keeps loading classes, so if we hit an OOM we probably will keep hitting OOM.
4679       // We should abort to avoid generating a potentially bad archive.
4680       tty->print_cr("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
4681           MetaspaceObj::type_name(type), word_size * BytesPerWord);
4682       tty->print_cr("Please increase MaxMetaspaceSize (currently " SIZE_FORMAT " bytes).", MaxMetaspaceSize);
4683       vm_exit(1);
4684     }
4685     report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
4686   }
4687 
4688   // Zero initialize.
4689   Copy::fill_to_words((HeapWord*)result, word_size, 0);
4690 
4691   return result;
4692 }
4693 
4694 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
4695   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
4696 
4697   // If result is still null, we are out of memory.
4698   Log(gc, metaspace, freelist) log;
4699   if (log.is_info()) {
4700     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
4701              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
4702     ResourceMark rm;
4703     if (log.is_debug()) {
4704       if (loader_data->metaspace_or_null() != NULL) {
4705         LogStream ls(log.debug());
4706         loader_data->print_value_on(&ls);
4707       }
4708     }
4709     LogStream ls(log.info());
4710     MetaspaceUtils::dump(&ls);
4711     MetaspaceUtils::print_metaspace_map(&ls, mdtype);
4712     ChunkManager::print_all_chunkmanagers(&ls);
4713   }
4714 
4715   bool out_of_compressed_class_space = false;
4716   if (is_class_space_allocation(mdtype)) {
4717     ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
4718     out_of_compressed_class_space =
4719       MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
4720       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
4721       CompressedClassSpaceSize;
4722   }
4723 
4724   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
4725   const char* space_string = out_of_compressed_class_space ?
4726     "Compressed class space" : "Metaspace";
4727 
4728   report_java_out_of_memory(space_string);
4729 
4730   if (JvmtiExport::should_post_resource_exhausted()) {
4731     JvmtiExport::post_resource_exhausted(
4732         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
4733         space_string);
4734   }
4735 
4736   if (!is_init_completed()) {
4737     vm_exit_during_initialization("OutOfMemoryError", space_string);
4738   }
4739 
4740   if (out_of_compressed_class_space) {
4741     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
4742   } else {
4743     THROW_OOP(Universe::out_of_memory_error_metaspace());
4744   }
4745 }
4746 
4747 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
4748   switch (mdtype) {
4749     case Metaspace::ClassType: return "Class";
4750     case Metaspace::NonClassType: return "Metadata";
4751     default:
4752       assert(false, "Got bad mdtype: %d", (int) mdtype);
4753       return NULL;
4754   }
4755 }
4756 
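// Purge the virtual space list of the given metadata type, releasing the memory
// of completely empty virtual space nodes back to the OS. Expected to be called
// with the MetaspaceExpand_lock held (see purge() below).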
4757 void Metaspace::purge(MetadataType mdtype) {
4758   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
4759 }
4760 
4761 void Metaspace::purge() {
4762   MutexLockerEx cl(MetaspaceExpand_lock,
4763                    Mutex::_no_safepoint_check_flag);
4764   purge(NonClassType);
4765   if (using_class_space()) {
4766     purge(ClassType);
4767   }
4768 }
4769 
4770 bool Metaspace::contains(const void* ptr) {
4771   if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
4772     return true;
4773   }
4774   return contains_non_shared(ptr);
4775 }
4776 
4777 bool Metaspace::contains_non_shared(const void* ptr) {
4778   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
4779      return true;
4780   }
4781 
4782   return get_space_list(NonClassType)->contains(ptr);
4783 }
4784 
4785 // ClassLoaderMetaspace
4786 
4787 ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type) {
4788   initialize(lock, type);
4789 }
4790 
4791 ClassLoaderMetaspace::~ClassLoaderMetaspace() {
4792   delete _vsm;
4793   if (Metaspace::using_class_space()) {
4794     delete _class_vsm;
4795   }
4796 }
4797 void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4798   Metachunk* chunk = get_initialization_chunk(type, mdtype);
4799   if (chunk != NULL) {
4800     // Add to this manager's list of chunks in use and make it the current_chunk().
4801     get_space_manager(mdtype)->add_chunk(chunk, true);
4802   }
4803 }
4804 
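// Get the chunk used to seed the space manager of the given metadata type:
// first try the global chunk free list, and only if that yields nothing carve
// a new chunk out of the virtual space list.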
4805 Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4806   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
4807 
4808   // Get a chunk from the chunk freelist
4809   Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
4810 
4811   if (chunk == NULL) {
4812     chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
4813                                                   get_space_manager(mdtype)->medium_chunk_bunch());
4814   }
4815 
4816   return chunk;
4817 }
4818 
4819 void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
4820   Metaspace::verify_global_initialization();
4821 
4822   // Allocate SpaceManager for metadata objects.
4823   _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);
4824 
4825   if (Metaspace::using_class_space()) {
4826     // Allocate SpaceManager for classes.
4827     _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
4828   }
4829 
4830   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
4831 
4832   // Allocate chunk for metadata objects
4833   initialize_first_chunk(type, Metaspace::NonClassType);
4834 
4835   // Allocate chunk for class metadata objects
4836   if (Metaspace::using_class_space()) {
4837     initialize_first_chunk(type, Metaspace::ClassType);
4838   }
4839 }
4840 
4841 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4842   Metaspace::assert_not_frozen();
4843   // Don't use class_vsm() unless UseCompressedClassPointers is true.
4844   if (Metaspace::is_class_space_allocation(mdtype)) {
4845     return  class_vsm()->allocate(word_size);
4846   } else {
4847     return  vsm()->allocate(word_size);
4848   }
4849 }
4850 
4851 MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4852   Metaspace::assert_not_frozen();
4853   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
4854   assert(delta_bytes > 0, "Must be");
4855 
4856   size_t before = 0;
4857   size_t after = 0;
4858   MetaWord* res;
4859   bool incremented;
4860 
4861   // Each thread increments the HWM at most once. Even if the thread fails to increment
4862   // the HWM, an allocation is still attempted. This is because another thread must then
4863   // have incremented the HWM and therefore the allocation might still succeed.
4864   do {
4865     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
4866     res = allocate(word_size, mdtype);
4867   } while (!incremented && res == NULL);
4868 
4869   if (incremented) {
4870     Metaspace::tracer()->report_gc_threshold(before, after,
4871                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
4872     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
4873   }
4874 
4875   return res;
4876 }
4877 
4878 size_t ClassLoaderMetaspace::used_words_slow(Metaspace::MetadataType mdtype) const {
4879   if (mdtype == Metaspace::ClassType) {
4880     return Metaspace::using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
4881   } else {
4882     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
4883   }
4884 }
4885 
4886 size_t ClassLoaderMetaspace::free_words_slow(Metaspace::MetadataType mdtype) const {
4887   Metaspace::assert_not_frozen();
4888   if (mdtype == Metaspace::ClassType) {
4889     return Metaspace::using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
4890   } else {
4891     return vsm()->sum_free_in_chunks_in_use();
4892   }
4893 }
4894 
4895 // Space capacity in the Metaspace.  It includes
4896 // space in the list of chunks from which allocations
4897 // have been made.  It does not include space in the global freelist,
4898 // nor the space available in the deallocated-block dictionary, since
4899 // that space is already counted in some chunk.
4900 size_t ClassLoaderMetaspace::capacity_words_slow(Metaspace::MetadataType mdtype) const {
4901   if (mdtype == Metaspace::ClassType) {
4902     return Metaspace::using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
4903   } else {
4904     return vsm()->sum_capacity_in_chunks_in_use();
4905   }
4906 }
4907 
4908 size_t ClassLoaderMetaspace::used_bytes_slow(Metaspace::MetadataType mdtype) const {
4909   return used_words_slow(mdtype) * BytesPerWord;
4910 }
4911 
4912 size_t ClassLoaderMetaspace::capacity_bytes_slow(Metaspace::MetadataType mdtype) const {
4913   return capacity_words_slow(mdtype) * BytesPerWord;
4914 }
4915 
4916 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
4917   return vsm()->allocated_blocks_bytes() +
4918       (Metaspace::using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
4919 }
4920 
4921 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
4922   return vsm()->allocated_chunks_bytes() +
4923       (Metaspace::using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
4924 }
4925 
4926 void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
4927   Metaspace::assert_not_frozen();
4928   assert(!SafepointSynchronize::is_at_safepoint()
4929          || Thread::current()->is_VM_thread(), "should be the VM thread");
4930 
4931   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
4932 
4933   if (is_class && Metaspace::using_class_space()) {
4934     class_vsm()->deallocate(ptr, word_size);
4935   } else {
4936     vsm()->deallocate(ptr, word_size);
4937   }
4938 }
4939 
4940 size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
4941   assert(Metaspace::using_class_space(), "Has to use class space");
4942   return class_vsm()->calc_chunk_size(word_size);
4943 }
4944 
4945 void ClassLoaderMetaspace::print_on(outputStream* out) const {
4946   // Print both class virtual space counts and metaspace.
4947   if (Verbose) {
4948     vsm()->print_on(out);
4949     if (Metaspace::using_class_space()) {
4950       class_vsm()->print_on(out);
4951     }
4952   }
4953 }
4954 
4955 void ClassLoaderMetaspace::verify() {
4956   vsm()->verify();
4957   if (Metaspace::using_class_space()) {
4958     class_vsm()->verify();
4959   }
4960 }
4961 
4962 void ClassLoaderMetaspace::dump(outputStream* const out) const {
4963   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
4964   vsm()->dump(out);
4965   if (Metaspace::using_class_space()) {
4966     out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
4967     class_vsm()->dump(out);
4968   }
4969 }
4970 
4971 
4972 
4973 #ifdef ASSERT
4974 static void do_verify_chunk(Metachunk* chunk) {
4975   guarantee(chunk != NULL, "Sanity");
4976   // Verify the chunk itself; then verify that it is consistent with the
4977   // occupancy map of its containing node.
4978   chunk->verify();
4979   VirtualSpaceNode* const vsn = chunk->container();
4980   OccupancyMap* const ocmap = vsn->occupancy_map();
4981   ocmap->verify_for_chunk(chunk);
4982 }
4983 #endif
4984 
4985 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
4986   chunk->set_is_tagged_free(!inuse);
4987   OccupancyMap* const ocmap = chunk->container()->occupancy_map();
4988   ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
4989 }
4990 
4991 /////////////// Unit tests ///////////////
4992 
4993 #ifndef PRODUCT
4994 
4995 class TestMetaspaceUtilsTest : AllStatic {
4996  public:
4997   static void test_reserved() {
4998     size_t reserved = MetaspaceUtils::reserved_bytes();
4999 
5000     assert(reserved > 0, "assert");
5001 
5002     size_t committed  = MetaspaceUtils::committed_bytes();
5003     assert(committed <= reserved, "assert");
5004 
5005     size_t reserved_metadata = MetaspaceUtils::reserved_bytes(Metaspace::NonClassType);
5006     assert(reserved_metadata > 0, "assert");
5007     assert(reserved_metadata <= reserved, "assert");
5008 
5009     if (UseCompressedClassPointers) {
5010       size_t reserved_class    = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
5011       assert(reserved_class > 0, "assert");
5012       assert(reserved_class < reserved, "assert");
5013     }
5014   }
5015 
5016   static void test_committed() {
5017     size_t committed = MetaspaceUtils::committed_bytes();
5018 
5019     assert(committed > 0, "assert");
5020 
5021     size_t reserved  = MetaspaceUtils::reserved_bytes();
5022     assert(committed <= reserved, "assert");
5023 
5024     size_t committed_metadata = MetaspaceUtils::committed_bytes(Metaspace::NonClassType);
5025     assert(committed_metadata > 0, "assert");
5026     assert(committed_metadata <= committed, "assert");
5027 
5028     if (UseCompressedClassPointers) {
5029       size_t committed_class    = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
5030       assert(committed_class > 0, "assert");
5031       assert(committed_class < committed, "assert");
5032     }
5033   }
5034 
5035   static void test_virtual_space_list_large_chunk() {
5036     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
5037     MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
5038     // Use a size larger than VirtualSpaceSize (256k), plus one page, so that the
5039     // request is _not_ vm_allocation_granularity aligned on Windows.
5040     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
5041     large_size += (os::vm_page_size()/BytesPerWord);
5042     vs_list->get_new_chunk(large_size, 0);
5043   }
5044 
5045   static void test() {
5046     test_reserved();
5047     test_committed();
5048     test_virtual_space_list_large_chunk();
5049   }
5050 };
5051 
5052 void TestMetaspaceUtils_test() {
5053   TestMetaspaceUtilsTest::test();
5054 }
5055 
5056 class TestVirtualSpaceNodeTest {
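  // Helper: greedily decompose words_left into medium, small and specialized
  // chunks (the chunk sizes divide each other, see the STATIC_ASSERTs in test()).
  // For example, words_left == MediumChunk + SmallChunk + SpecializedChunk
  // yields exactly one chunk of each size.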
5057   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
5058                                           size_t& num_small_chunks,
5059                                           size_t& num_specialized_chunks) {
5060     num_medium_chunks = words_left / MediumChunk;
5061     words_left = words_left % MediumChunk;
5062 
5063     num_small_chunks = words_left / SmallChunk;
5064     words_left = words_left % SmallChunk;
5065     // how many specialized chunks can we get?
5066     num_specialized_chunks = words_left / SpecializedChunk;
5067     assert(words_left % SpecializedChunk == 0, "should be nothing left");
5068   }
5069 
5070  public:
5071   static void test() {
5072     MutexLockerEx ml(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
5073     const size_t vsn_test_size_words = MediumChunk  * 4;
5074     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
5075 
5076     // The chunk sizes must be multiples of each other, or this will fail
5077     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
5078     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
5079 
5080     { // No committed memory in VSN
5081       ChunkManager cm(false);
5082       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5083       vsn.initialize();
5084       vsn.retire(&cm);
5085       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
5086     }
5087 
5088     { // All of VSN is committed, half is used by chunks
5089       ChunkManager cm(false);
5090       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5091       vsn.initialize();
5092       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
5093       vsn.get_chunk_vs(MediumChunk);
5094       vsn.get_chunk_vs(MediumChunk);
5095       vsn.retire(&cm);
5096       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
5097       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
5098     }
5099 
5100     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
5101     // This doesn't work for systems with vm_page_size >= 16K.
5102     if (page_chunks < MediumChunk) {
5103       // 4 pages of VSN is committed, some is used by chunks
5104       ChunkManager cm(false);
5105       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5106 
5107       vsn.initialize();
5108       vsn.expand_by(page_chunks, page_chunks);
5109       vsn.get_chunk_vs(SmallChunk);
5110       vsn.get_chunk_vs(SpecializedChunk);
5111       vsn.retire(&cm);
5112 
5113       // committed - used = words left to retire
5114       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
5115 
5116       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5117       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5118 
5119       assert(num_medium_chunks == 0, "should not get any medium chunks");
5120       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5121       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5122     }
5123 
5124     { // Half of VSN is committed, a humongous chunk is used
5125       ChunkManager cm(false);
5126       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5127       vsn.initialize();
5128       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
5129       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
5130       vsn.retire(&cm);
5131 
5132       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
5133       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5134       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5135 
5136       assert(num_medium_chunks == 0, "should not get any medium chunks");
5137       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5138       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5139     }
5140 
5141   }
5142 
5143 #define assert_is_available_positive(word_size) \
5144   assert(vsn.is_available(word_size), \
5145          #word_size ": " PTR_FORMAT " bytes were not available in " \
5146          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5147          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5148 
5149 #define assert_is_available_negative(word_size) \
5150   assert(!vsn.is_available(word_size), \
5151          #word_size ": " PTR_FORMAT " bytes should not be available in " \
5152          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5153          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5154 
5155   static void test_is_available_positive() {
5156     // Reserve some memory.
5157     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5158     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5159 
5160     // Commit some memory.
5161     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5162     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5163     assert(expanded, "Failed to commit");
5164 
5165     // Check that is_available accepts the committed size.
5166     assert_is_available_positive(commit_word_size);
5167 
5168     // Check that is_available accepts half the committed size.
5169     size_t expand_word_size = commit_word_size / 2;
5170     assert_is_available_positive(expand_word_size);
5171   }
5172 
5173   static void test_is_available_negative() {
5174     // Reserve some memory.
5175     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5176     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5177 
5178     // Commit some memory.
5179     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5180     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5181     assert(expanded, "Failed to commit");
5182 
5183     // Check that is_available doesn't accept a too large size.
5184     size_t two_times_commit_word_size = commit_word_size * 2;
5185     assert_is_available_negative(two_times_commit_word_size);
5186   }
5187 
5188   static void test_is_available_overflow() {
5189     // Reserve some memory.
5190     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5191     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5192 
5193     // Commit some memory.
5194     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5195     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5196     assert(expanded, "Failed to commit");
5197 
5198     // Calculate a size that will overflow the virtual space size.
5199     void* virtual_space_max = (void*)(uintptr_t)-1;
5200     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
5201     size_t overflow_size = bottom_to_max + BytesPerWord;
5202     size_t overflow_word_size = overflow_size / BytesPerWord;
5203 
5204     // Check that is_available can handle the overflow.
5205     assert_is_available_negative(overflow_word_size);
5206   }
5207 
5208   static void test_is_available() {
5209     TestVirtualSpaceNodeTest::test_is_available_positive();
5210     TestVirtualSpaceNodeTest::test_is_available_negative();
5211     TestVirtualSpaceNodeTest::test_is_available_overflow();
5212   }
5213 };
5214 
5215 // The following test is placed here instead of a gtest / unittest file
5216 // because the ChunkManager class is only available in this file.
5217 void ChunkManager_test_list_index() {
5218   {
5219     // Test a previous bug where a query for a humongous class metachunk
5220     // incorrectly matched the non-class medium metachunk size.
5221     {
5222       ChunkManager manager(true);
5223 
5224       assert(MediumChunk > ClassMediumChunk, "Precondition for test");
5225 
5226       ChunkIndex index = manager.list_index(MediumChunk);
5227 
5228       assert(index == HumongousIndex,
5229           "Requested size is larger than ClassMediumChunk,"
5230           " so should return HumongousIndex. Got index: %d", (int)index);
5231     }
5232 
5233     // Check the specified sizes as well.
5234     {
5235       ChunkManager manager(true);
5236       assert(manager.list_index(ClassSpecializedChunk) == SpecializedIndex, "sanity");
5237       assert(manager.list_index(ClassSmallChunk) == SmallIndex, "sanity");
5238       assert(manager.list_index(ClassMediumChunk) == MediumIndex, "sanity");
5239       assert(manager.list_index(ClassMediumChunk + ClassSpecializedChunk) == HumongousIndex, "sanity");
5240     }
5241     {
5242       ChunkManager manager(false);
5243       assert(manager.list_index(SpecializedChunk) == SpecializedIndex, "sanity");
5244       assert(manager.list_index(SmallChunk) == SmallIndex, "sanity");
5245       assert(manager.list_index(MediumChunk) == MediumIndex, "sanity");
5246       assert(manager.list_index(MediumChunk + SpecializedChunk) == HumongousIndex, "sanity");
5247     }
5248 
5249   }
5250 
5251 }
5252 
5253 #endif // !PRODUCT
5254 
5255 #ifdef ASSERT
5256 
5257 // The following test is placed here instead of a gtest / unittest file
5258 // because the SpaceManager class is only available in this file.
5259 class SpaceManagerTest : AllStatic {
5260   friend void SpaceManager_test_adjust_initial_chunk_size();
5261 
5262   static void test_adjust_initial_chunk_size(bool is_class) {
5263     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
5264     const size_t normal   = SpaceManager::small_chunk_size(is_class);
5265     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
5266 
5267 #define test_adjust_initial_chunk_size(value, expected, is_class_value)          \
5268     do {                                                                         \
5269       size_t v = value;                                                          \
5270       size_t e = expected;                                                       \
5271       assert(SpaceManager::adjust_initial_chunk_size(v, (is_class_value)) == e,  \
5272              "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, v);               \
5273     } while (0)
5274 
5275     // Smallest (specialized)
5276     test_adjust_initial_chunk_size(1,            smallest, is_class);
5277     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
5278     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
5279 
5280     // Small
5281     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
5282     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
5283     test_adjust_initial_chunk_size(normal,       normal, is_class);
5284 
5285     // Medium
5286     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
5287     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
5288     test_adjust_initial_chunk_size(medium,     medium, is_class);
5289 
5290     // Humongous
5291     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
5292 
5293 #undef test_adjust_initial_chunk_size
5294   }
5295 
5296   static void test_adjust_initial_chunk_size() {
5297     test_adjust_initial_chunk_size(false);
5298     test_adjust_initial_chunk_size(true);
5299   }
5300 };
5301 
5302 void SpaceManager_test_adjust_initial_chunk_size() {
5303   SpaceManagerTest::test_adjust_initial_chunk_size();
5304 }
5305 
5306 #endif // ASSERT
5307 
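// The two extern functions below exist so that test code outside this file,
// which cannot see ChunkManager or the chunk size constants, can query chunk
// manager statistics and the chunk geometry.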
5308 struct chunkmanager_statistics_t {
5309   int num_specialized_chunks;
5310   int num_small_chunks;
5311   int num_medium_chunks;
5312   int num_humongous_chunks;
5313 };
5314 
5315 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
5316   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
5317   ChunkManager::ChunkManagerStatistics stat;
5318   chunk_manager->get_statistics(&stat);
5319   out->num_specialized_chunks = (int)stat.num_by_type[SpecializedIndex];
5320   out->num_small_chunks = (int)stat.num_by_type[SmallIndex];
5321   out->num_medium_chunks = (int)stat.num_by_type[MediumIndex];
5322   out->num_humongous_chunks = (int)stat.num_humongous_chunks;
5323 }
5324 
5325 struct chunk_geometry_t {
5326   size_t specialized_chunk_word_size;
5327   size_t small_chunk_word_size;
5328   size_t medium_chunk_word_size;
5329 };
5330 
5331 extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
5332   if (mdType == Metaspace::NonClassType) {
5333     out->specialized_chunk_word_size = SpecializedChunk;
5334     out->small_chunk_word_size = SmallChunk;
5335     out->medium_chunk_word_size = MediumChunk;
5336   } else {
5337     out->specialized_chunk_word_size = ClassSpecializedChunk;
5338     out->small_chunk_word_size = ClassSmallChunk;
5339     out->medium_chunk_word_size = ClassMediumChunk;
5340   }
5341 }