1 /*
   2  * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "logging/log.hpp"
  29 #include "logging/logStream.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/binaryTreeDictionary.inline.hpp"
  32 #include "memory/filemap.hpp"
  33 #include "memory/freeList.inline.hpp"
  34 #include "memory/metachunk.hpp"
  35 #include "memory/metaspace.hpp"
  36 #include "memory/metaspaceGCThresholdUpdater.hpp"
  37 #include "memory/metaspaceShared.hpp"
  38 #include "memory/metaspaceTracer.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "memory/universe.hpp"
  41 #include "runtime/atomic.hpp"
  42 #include "runtime/globals.hpp"
  43 #include "runtime/init.hpp"
  44 #include "runtime/java.hpp"
  45 #include "runtime/mutex.hpp"
  46 #include "runtime/orderAccess.inline.hpp"
  47 #include "services/memTracker.hpp"
  48 #include "services/memoryService.hpp"
  49 #include "utilities/align.hpp"
  50 #include "utilities/copy.hpp"
  51 #include "utilities/debug.hpp"
  52 #include "utilities/macros.hpp"
  53 
  54 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  55 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
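// BlockTreeDictionary is used by BlockFreelist for returned metadata blocks;
// ChunkTreeDictionary is used by ChunkManager to hold free humongous chunks
// (see both classes below).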
  56 
  57 // Helper function that does a bunch of checks for a chunk.
  58 DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
  59 
  60 // Given a Metachunk, update its in-use information (both in the
  61 // chunk and the occupancy map).
  62 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
  63 
  64 size_t const allocation_from_dictionary_limit = 4 * K;
  65 
  66 MetaWord* last_allocated = 0;
  67 
  68 size_t Metaspace::_compressed_class_space_size;
  69 const MetaspaceTracer* Metaspace::_tracer = NULL;
  70 
  71 DEBUG_ONLY(bool Metaspace::_frozen = false;)
  72 
  73 enum ChunkSizes {    // in words.
  74   ClassSpecializedChunk = 128,
  75   SpecializedChunk = 128,
  76   ClassSmallChunk = 256,
  77   SmallChunk = 512,
  78   ClassMediumChunk = 4 * K,
  79   MediumChunk = 8 * K
  80 };
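// For orientation, assuming 8-byte MetaWords (64-bit), these word counts
// correspond to: ClassSpecializedChunk/SpecializedChunk = 1 KB,
// ClassSmallChunk = 2 KB, SmallChunk = 4 KB, ClassMediumChunk = 32 KB,
// MediumChunk = 64 KB.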
  81 
  82 // Returns size of this chunk type.
  83 size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
  84   assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
  85   size_t size = 0;
  86   if (is_class) {
  87     switch(chunktype) {
  88       case SpecializedIndex: size = ClassSpecializedChunk; break;
  89       case SmallIndex: size = ClassSmallChunk; break;
  90       case MediumIndex: size = ClassMediumChunk; break;
  91       default:
  92         ShouldNotReachHere();
  93     }
  94   } else {
  95     switch(chunktype) {
  96       case SpecializedIndex: size = SpecializedChunk; break;
  97       case SmallIndex: size = SmallChunk; break;
  98       case MediumIndex: size = MediumChunk; break;
  99       default:
 100         ShouldNotReachHere();
 101     }
 102   }
 103   return size;
 104 }
 105 
 106 ChunkIndex get_chunk_type_by_size(size_t size, bool is_class) {
 107   if (is_class) {
 108     if (size == ClassSpecializedChunk) {
 109       return SpecializedIndex;
 110     } else if (size == ClassSmallChunk) {
 111       return SmallIndex;
 112     } else if (size == ClassMediumChunk) {
 113       return MediumIndex;
 114     } else if (size > ClassMediumChunk) {
 115       // A valid humongous chunk size is a multiple of the smallest chunk size.
 116       assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
 117       return HumongousIndex;
 118     }
 119   } else {
 120     if (size == SpecializedChunk) {
 121       return SpecializedIndex;
 122     } else if (size == SmallChunk) {
 123       return SmallIndex;
 124     } else if (size == MediumChunk) {
 125       return MediumIndex;
 126     } else if (size > MediumChunk) {
 127       // A valid humongous chunk size is a multiple of the smallest chunk size.
 128       assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
 129       return HumongousIndex;
 130     }
 131   }
 132   ShouldNotReachHere();
 133   return (ChunkIndex)-1;
 134 }
 135 
 136 
 137 static ChunkIndex next_chunk_index(ChunkIndex i) {
 138   assert(i < NumberOfInUseLists, "Out of bound");
 139   return (ChunkIndex) (i+1);
 140 }
 141 
 142 static ChunkIndex prev_chunk_index(ChunkIndex i) {
 143   assert(i > ZeroIndex, "Out of bound");
 144   return (ChunkIndex) (i-1);
 145 }
 146 
 147 static const char* scale_unit(size_t scale) {
 148   switch(scale) {
 149     case 1: return "BYTES";
 150     case K: return "KB";
 151     case M: return "MB";
 152     case G: return "GB";
 153     default:
 154       ShouldNotReachHere();
 155       return NULL;
 156   }
 157 }
 158 
 159 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 160 uint MetaspaceGC::_shrink_factor = 0;
 161 bool MetaspaceGC::_should_concurrent_collect = false;
 162 
 163 typedef class FreeList<Metachunk> ChunkList;
 164 
 165 // Manages the global free lists of chunks.
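// Typically there is one instance for non-class metaspace and, if a compressed
// class space is used, a second one for class space (distinguished by _is_class).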
 166 class ChunkManager : public CHeapObj<mtInternal> {
 167   friend class TestVirtualSpaceNodeTest;
 168 
 169   // Free list of chunks of different sizes.
 170   //   SpecializedChunk
 171   //   SmallChunk
 172   //   MediumChunk
 173   ChunkList _free_chunks[NumberOfFreeLists];
 174 
 175   // Whether or not this is the class chunkmanager.
 176   const bool _is_class;
 177 
 178   // Return non-humongous chunk list by its index.
 179   ChunkList* free_chunks(ChunkIndex index);
 180 
 181   // Returns non-humongous chunk list for the given chunk word size.
 182   ChunkList* find_free_chunks_list(size_t word_size);
 183 
 184   //   HumongousChunk
 185   ChunkTreeDictionary _humongous_dictionary;
 186 
 187   // Returns the humongous chunk dictionary.
 188   ChunkTreeDictionary* humongous_dictionary() {
 189     return &_humongous_dictionary;
 190   }
 191 
 192   // Size, in metaspace words, of all chunks managed by this ChunkManager
 193   size_t _free_chunks_total;
 194   // Number of chunks in this ChunkManager
 195   size_t _free_chunks_count;
 196 
  // Update counters after a chunk has been added or removed.
 198   void account_for_added_chunk(const Metachunk* c);
 199   void account_for_removed_chunk(const Metachunk* c);
 200 
 201   // Debug support
 202 
 203   size_t sum_free_chunks();
 204   size_t sum_free_chunks_count();
 205 
 206   void locked_verify_free_chunks_total();
 207   void slow_locked_verify_free_chunks_total() {
 208     if (VerifyMetaspace) {
 209       locked_verify_free_chunks_total();
 210     }
 211   }
 212   void locked_verify_free_chunks_count();
 213   void slow_locked_verify_free_chunks_count() {
 214     if (VerifyMetaspace) {
 215       locked_verify_free_chunks_count();
 216     }
 217   }
 218   void verify_free_chunks_count();
 219 
 220   // Given a pointer to a chunk, attempts to merge it with neighboring
 221   // free chunks to form a bigger chunk. Returns true if successful.
 222   bool attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type);
 223 
 224   // Helper for chunk merging:
 225   //  Given an address range with 1-n chunks which are all supposed to be
 226   //  free and hence currently managed by this ChunkManager, remove them
 227   //  from this ChunkManager and mark them as invalid.
 228   // - This does not correct the occupancy map.
 229   // - This does not adjust the counters in ChunkManager.
 230   // - Does not adjust container count counter in containing VirtualSpaceNode.
 231   // Returns number of chunks removed.
 232   int remove_chunks_in_area(MetaWord* p, size_t word_size);
 233 
 234   // Helper for chunk splitting: given a target chunk size and a larger free chunk,
 235   // split up the larger chunk into n smaller chunks, at least one of which should be
 236   // the target chunk of target chunk size. The smaller chunks, including the target
 237   // chunk, are returned to the freelist. The pointer to the target chunk is returned.
 238   // Note that this chunk is supposed to be removed from the freelist right away.
 239   Metachunk* split_chunk(size_t target_chunk_word_size, Metachunk* chunk);
 240 
 241  public:
 242 
 243   struct ChunkManagerStatistics {
 244     size_t num_by_type[NumberOfFreeLists];
 245     size_t single_size_by_type[NumberOfFreeLists];
 246     size_t total_size_by_type[NumberOfFreeLists];
 247     size_t num_humongous_chunks;
 248     size_t total_size_humongous_chunks;
 249   };
 250 
 251   void locked_get_statistics(ChunkManagerStatistics* stat) const;
 252   void get_statistics(ChunkManagerStatistics* stat) const;
 253   static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale);
 254 
 255 
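  // Creates a ChunkManager for class or non-class metaspace; the three
  // non-humongous freelists are given their fixed chunk sizes, while humongous
  // chunks live in the dictionary, keyed by size.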
 256   ChunkManager(bool is_class)
 257       : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
 258     _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
 259     _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
 260     _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
 261   }
 262 
  // Allocate (remove) a chunk of the given word size from the global freelist.
 264   Metachunk* chunk_freelist_allocate(size_t word_size);
 265 
 266   // Map a size to a list index assuming that there are lists
 267   // for special, small, medium, and humongous chunks.
 268   ChunkIndex list_index(size_t size);
 269 
 270   // Map a given index to the chunk size.
 271   size_t size_by_index(ChunkIndex index) const;
 272 
 273   bool is_class() const { return _is_class; }
 274 
 275   // Convenience accessors.
 276   size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); }
 277   size_t small_chunk_word_size() const { return size_by_index(SmallIndex); }
 278   size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); }
 279 
 280   // Take a chunk from the ChunkManager. The chunk is expected to be in
 281   // the chunk manager (the freelist if non-humongous, the dictionary if
 282   // humongous).
 283   void remove_chunk(Metachunk* chunk);
 284 
 285   // Return a single chunk of type index to the ChunkManager.
 286   void return_single_chunk(ChunkIndex index, Metachunk* chunk);
 287 
 288   // Add the simple linked list of chunks to the freelist of chunks
 289   // of type index.
 290   void return_chunk_list(ChunkIndex index, Metachunk* chunk);
 291 
 292   // Total of the space in the free chunks list
 293   size_t free_chunks_total_words();
 294   size_t free_chunks_total_bytes();
 295 
 296   // Number of chunks in the free chunks list
 297   size_t free_chunks_count();
 298 
 299   // Remove from a list by size.  Selects list based on size of chunk.
 300   Metachunk* free_chunks_get(size_t chunk_word_size);
 301 
 302 #define index_bounds_check(index)                                         \
 303   assert(is_valid_chunktype(index), "Bad index: %d", (int) index)
 304 
 305   size_t num_free_chunks(ChunkIndex index) const {
 306     index_bounds_check(index);
 307 
 308     if (index == HumongousIndex) {
 309       return _humongous_dictionary.total_free_blocks();
 310     }
 311 
 312     ssize_t count = _free_chunks[index].count();
 313     return count == -1 ? 0 : (size_t) count;
 314   }
 315 
 316   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 317     index_bounds_check(index);
 318 
 319     size_t word_size = 0;
 320     if (index == HumongousIndex) {
 321       word_size = _humongous_dictionary.total_size();
 322     } else {
 323       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 324       word_size = size_per_chunk_in_words * num_free_chunks(index);
 325     }
 326 
 327     return word_size * BytesPerWord;
 328   }
 329 
 330   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 331     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 332                                          num_free_chunks(SmallIndex),
 333                                          num_free_chunks(MediumIndex),
 334                                          num_free_chunks(HumongousIndex),
 335                                          size_free_chunks_in_bytes(SpecializedIndex),
 336                                          size_free_chunks_in_bytes(SmallIndex),
 337                                          size_free_chunks_in_bytes(MediumIndex),
 338                                          size_free_chunks_in_bytes(HumongousIndex));
 339   }
 340 
 341   // Debug support
 342   void verify();
 343   void slow_verify() {
 344     if (VerifyMetaspace) {
 345       verify();
 346     }
 347   }
 348   void locked_verify();
 349   void slow_locked_verify() {
 350     if (VerifyMetaspace) {
 351       locked_verify();
 352     }
 353   }
 354   void verify_free_chunks_total();
 355 
 356   void locked_print_free_chunks(outputStream* st);
 357   void locked_print_sum_free_chunks(outputStream* st);
 358 
 359   void print_on(outputStream* st) const;
 360 
 361   // Prints composition for both non-class and (if available)
 362   // class chunk manager.
 363   static void print_all_chunkmanagers(outputStream* out, size_t scale = 1);
 364 };
 365 
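// SmallBlocks maintains an array of freelists for very small deallocated
// metadata blocks, one list per exact word size in the range
// [_small_block_min_size, _small_block_max_size).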
 366 class SmallBlocks : public CHeapObj<mtClass> {
 367   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 368   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 369 
 370  private:
 371   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 372 
 373   FreeList<Metablock>& list_at(size_t word_size) {
 374     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 375     return _small_lists[word_size - _small_block_min_size];
 376   }
 377 
 378  public:
 379   SmallBlocks() {
 380     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 381       uint k = i - _small_block_min_size;
 382       _small_lists[k].set_size(i);
 383     }
 384   }
 385 
 386   size_t total_size() const {
 387     size_t result = 0;
 388     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 389       uint k = i - _small_block_min_size;
 390       result = result + _small_lists[k].count() * _small_lists[k].size();
 391     }
 392     return result;
 393   }
 394 
 395   static uint small_block_max_size() { return _small_block_max_size; }
 396   static uint small_block_min_size() { return _small_block_min_size; }
 397 
 398   MetaWord* get_block(size_t word_size) {
 399     if (list_at(word_size).count() > 0) {
 400       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 401       return new_block;
 402     } else {
 403       return NULL;
 404     }
 405   }
 406   void return_block(Metablock* free_chunk, size_t word_size) {
 407     list_at(word_size).return_chunk_at_head(free_chunk, false);
 408     assert(list_at(word_size).count() > 0, "Should have a chunk");
 409   }
 410 
 411   void print_on(outputStream* st) const {
 412     st->print_cr("SmallBlocks:");
 413     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 414       uint k = i - _small_block_min_size;
 415       st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
 416     }
 417   }
 418 };
 419 
 420 // Used to manage the free list of Metablocks (a block corresponds
 421 // to the allocation of a quantum of metadata).
 422 class BlockFreelist : public CHeapObj<mtClass> {
 423   BlockTreeDictionary* const _dictionary;
 424   SmallBlocks* _small_blocks;
 425 
 426   // Only allocate and split from freelist if the size of the allocation
 427   // is at least 1/4th the size of the available block.
 428   const static int WasteMultiplier = 4;
 429 
 430   // Accessors
 431   BlockTreeDictionary* dictionary() const { return _dictionary; }
 432   SmallBlocks* small_blocks() {
 433     if (_small_blocks == NULL) {
 434       _small_blocks = new SmallBlocks();
 435     }
 436     return _small_blocks;
 437   }
 438 
 439  public:
 440   BlockFreelist();
 441   ~BlockFreelist();
 442 
 443   // Get and return a block to the free list
 444   MetaWord* get_block(size_t word_size);
 445   void return_block(MetaWord* p, size_t word_size);
 446 
 447   size_t total_size() const  {
 448     size_t result = dictionary()->total_size();
 449     if (_small_blocks != NULL) {
 450       result = result + _small_blocks->total_size();
 451     }
 452     return result;
 453   }
 454 
 455   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 456   void print_on(outputStream* st) const;
 457 };
 458 
// Helper for the occupancy bitmap: a type trait providing an all-bits-set constant of the given unsigned type.
 460 template <typename T> struct all_ones  { static const T value; };
 461 template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
 462 template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
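// (Used by OccupancyMap::set_bits_of_region_T to fill a whole 32- or 64-bit
// map word at once.)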
 463 
 464 // The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
 465 // keeps information about
 466 // - where a chunk starts
 467 // - whether a chunk is in-use or free
 468 // A bit in this bitmap represents one range of memory in the smallest
 469 // chunk size (SpecializedChunk or ClassSpecializedChunk).
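// For a rough sense of scale (assuming a class-space node with
// ClassSpecializedChunk == 128 words): a node of 512 K words is covered by
// 512 K / 128 = 4096 bits, i.e. 512 bytes per bit layer.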
 470 class OccupancyMap : public CHeapObj<mtInternal> {
 471 
 472   // The address range this map covers.
 473   const MetaWord* const _reference_address;
 474   const size_t _word_size;
 475 
 476   // The word size of a specialized chunk, aka the number of words one
 477   // bit in this map represents.
 478   const size_t _smallest_chunk_word_size;
 479 
 480   // map data
 481   // Data are organized in two bit layers:
 482   // The first layer is the chunk-start-map. Here, a bit is set to mark
 483   // the corresponding region as the head of a chunk.
 484   // The second layer is the in-use-map. Here, a set bit indicates that
  // the corresponding region belongs to a chunk which is in use.
 486   uint8_t* _map[2];
 487 
 488   enum { layer_chunk_start_map = 0, layer_in_use_map = 1 };
 489 
 490   // length, in bytes, of bitmap data
 491   size_t _map_size;
 492 
 493   // Returns true if bit at position pos at bit-layer layer is set.
 494   bool get_bit_at_position(unsigned pos, unsigned layer) const {
 495     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 496     const unsigned byteoffset = pos / 8;
 497     assert(byteoffset < _map_size,
 498            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 499     const unsigned mask = 1 << (pos % 8);
 500     return (_map[layer][byteoffset] & mask) > 0;
 501   }
 502 
 503   // Changes bit at position pos at bit-layer layer to value v.
 504   void set_bit_at_position(unsigned pos, unsigned layer, bool v) {
 505     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 506     const unsigned byteoffset = pos / 8;
 507     assert(byteoffset < _map_size,
 508            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 509     const unsigned mask = 1 << (pos % 8);
 510     if (v) {
 511       _map[layer][byteoffset] |= mask;
 512     } else {
 513       _map[layer][byteoffset] &= ~mask;
 514     }
 515   }
 516 
  // Optimized case of is_any_bit_set_in_region for 32/64-bit aligned access:
  // pos is 32/64-bit aligned and num_bits is 32/64.
  // This is the typical case when coalescing to medium chunks, whose size is
  // 32 or 64 times the specialized chunk size (depending on class or non-class
  // case), so they occupy 32 or 64 bits which are correspondingly aligned,
  // because chunks are chunk-size aligned.
 523   template <typename T>
 524   bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const {
 525     assert(_map_size > 0, "not initialized");
 526     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 527     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos);
 528     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits);
 529     const size_t byteoffset = pos / 8;
 530     assert(byteoffset <= (_map_size - sizeof(T)),
 531            "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 532     const T w = *(T*)(_map[layer] + byteoffset);
    return w != 0;
 534   }
 535 
 536   // Returns true if any bit in region [pos1, pos1 + num_bits) is set in bit-layer layer.
 537   bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const {
 538     if (pos % 32 == 0 && num_bits == 32) {
 539       return is_any_bit_set_in_region_3264<uint32_t>(pos, num_bits, layer);
 540     } else if (pos % 64 == 0 && num_bits == 64) {
 541       return is_any_bit_set_in_region_3264<uint64_t>(pos, num_bits, layer);
 542     } else {
 543       for (unsigned n = 0; n < num_bits; n ++) {
 544         if (get_bit_at_position(pos + n, layer)) {
 545           return true;
 546         }
 547       }
 548     }
 549     return false;
 550   }
 551 
 552   // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer.
 553   bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const {
 554     assert(word_size % _smallest_chunk_word_size == 0,
 555         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 556     const unsigned pos = get_bitpos_for_address(p);
 557     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 558     return is_any_bit_set_in_region(pos, num_bits, layer);
 559   }
 560 
  // Optimized case of set_bits_of_region for 32/64-bit aligned access:
  // pos is 32/64-bit aligned and num_bits is 32/64.
  // This is the typical case when coalescing to medium chunks, whose size
  // is 32 or 64 times the specialized chunk size (depending on class or
  // non-class case), so they occupy 32 or 64 bits which are correspondingly
  // aligned, because chunks are chunk-size aligned.
 567   template <typename T>
 568   void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 569     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).",
 570            (unsigned)(sizeof(T) * 8), pos);
 571     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.",
 572            num_bits, (unsigned)(sizeof(T) * 8));
 573     const size_t byteoffset = pos / 8;
 574     assert(byteoffset <= (_map_size - sizeof(T)),
 575            "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 576     T* const pw = (T*)(_map[layer] + byteoffset);
 577     *pw = v ? all_ones<T>::value : (T) 0;
 578   }
 579 
 580   // Set all bits in a region starting at pos to a value.
 581   void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 582     assert(_map_size > 0, "not initialized");
 583     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 584     if (pos % 32 == 0 && num_bits == 32) {
 585       set_bits_of_region_T<uint32_t>(pos, num_bits, layer, v);
 586     } else if (pos % 64 == 0 && num_bits == 64) {
 587       set_bits_of_region_T<uint64_t>(pos, num_bits, layer, v);
 588     } else {
 589       for (unsigned n = 0; n < num_bits; n ++) {
 590         set_bit_at_position(pos + n, layer, v);
 591       }
 592     }
 593   }
 594 
 595   // Helper: sets all bits in a region [p, p+word_size).
 596   void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) {
 597     assert(word_size % _smallest_chunk_word_size == 0,
 598         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 599     const unsigned pos = get_bitpos_for_address(p);
 600     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 601     set_bits_of_region(pos, num_bits, layer, v);
 602   }
 603 
 604   // Helper: given an address, return the bit position representing that address.
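  // For example, with _smallest_chunk_word_size == 128, an address 256 words
  // past _reference_address maps to bit position 2.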
 605   unsigned get_bitpos_for_address(const MetaWord* p) const {
 606     assert(_reference_address != NULL, "not initialized");
 607     assert(p >= _reference_address && p < _reference_address + _word_size,
 608            "Address %p out of range for occupancy map [%p..%p).",
 609             p, _reference_address, _reference_address + _word_size);
 610     assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)),
 611            "Address not aligned (%p).", p);
 612     const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size;
 613     assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity.");
 614     return (unsigned) d;
 615   }
 616 
 617  public:
 618 
 619   OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) :
 620     _reference_address(reference_address), _word_size(word_size),
 621     _smallest_chunk_word_size(smallest_chunk_word_size) {
 622     assert(reference_address != NULL, "invalid reference address");
 623     assert(is_aligned(reference_address, smallest_chunk_word_size),
 624            "Reference address not aligned to smallest chunk size.");
 625     assert(is_aligned(word_size, smallest_chunk_word_size),
 626            "Word_size shall be a multiple of the smallest chunk size.");
 627     // Calculate bitmap size: one bit per smallest_chunk_word_size'd area.
 628     size_t num_bits = word_size / smallest_chunk_word_size;
 629     _map_size = (num_bits + 7) / 8;
 630     assert(_map_size * 8 >= num_bits, "sanity");
 631     _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal);
 632     _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal);
 633     assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed.");
 634     memset(_map[1], 0, _map_size);
 635     memset(_map[0], 0, _map_size);
    // Sanity test: the first and last possible chunk start addresses in the
    // covered range shall map to the first and last bit in the bitmap, respectively.
    assert(get_bitpos_for_address(reference_address) == 0,
      "First chunk address in range must map to first bit in bitmap.");
 640     assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1,
 641       "Last chunk address in range must map to last bit in bitmap.");
 642   }
 643 
 644   ~OccupancyMap() {
 645     os::free(_map[0]);
 646     os::free(_map[1]);
 647   }
 648 
  // Returns true if a chunk starts at address p.
 650   bool chunk_starts_at_address(MetaWord* p) const {
 651     const unsigned pos = get_bitpos_for_address(p);
 652     return get_bit_at_position(pos, layer_chunk_start_map);
 653   }
 654 
 655   void set_chunk_starts_at_address(MetaWord* p, bool v) {
 656     const unsigned pos = get_bitpos_for_address(p);
 657     set_bit_at_position(pos, layer_chunk_start_map, v);
 658   }
 659 
 660   // Removes all chunk-start-bits inside a region, typically as a
 661   // result of a chunk merge.
 662   void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) {
 663     set_bits_of_region(p, word_size, layer_chunk_start_map, false);
 664   }
 665 
  // Returns true if there are live (in use) chunks in the region limited
 667   // by [p, p+word_size).
 668   bool is_region_in_use(MetaWord* p, size_t word_size) const {
 669     return is_any_bit_set_in_region(p, word_size, layer_in_use_map);
 670   }
 671 
 672   // Marks the region starting at p with the size word_size as in use
 673   // or free, depending on v.
 674   void set_region_in_use(MetaWord* p, size_t word_size, bool v) {
 675     set_bits_of_region(p, word_size, layer_in_use_map, v);
 676   }
 677 
 678 #ifdef ASSERT
 679   // Verify occupancy map for the address range [from, to).
 680   // We need to tell it the address range, because the memory the
  // occupancy map is covering may not be fully committed yet.
 682   void verify(MetaWord* from, MetaWord* to) {
 683     Metachunk* chunk = NULL;
 684     int nth_bit_for_chunk = 0;
 685     MetaWord* chunk_end = NULL;
 686     for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) {
 687       const unsigned pos = get_bitpos_for_address(p);
 688       // Check the chunk-starts-info:
 689       if (get_bit_at_position(pos, layer_chunk_start_map)) {
 690         // Chunk start marked in bitmap.
 691         chunk = (Metachunk*) p;
 692         if (chunk_end != NULL) {
 693           assert(chunk_end == p, "Unexpected chunk start found at %p (expected "
 694                  "the next chunk to start at %p).", p, chunk_end);
 695         }
 696         assert(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p);
 697         if (chunk->get_chunk_type() != HumongousIndex) {
 698           guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p);
 699         }
 700         chunk_end = p + chunk->word_size();
 701         nth_bit_for_chunk = 0;
 702         assert(chunk_end <= to, "Chunk end overlaps test address range.");
 703       } else {
 704         // No chunk start marked in bitmap.
 705         assert(chunk != NULL, "Chunk should start at start of address range.");
 706         assert(p < chunk_end, "Did not find expected chunk start at %p.", p);
 707         nth_bit_for_chunk ++;
 708       }
 709       // Check the in-use-info:
 710       const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map);
 711       if (in_use_bit) {
 712         assert(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u).",
 713                chunk, nth_bit_for_chunk);
 714       } else {
 715         assert(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u).",
 716                chunk, nth_bit_for_chunk);
 717       }
 718     }
 719   }
 720 
 721   // Verify that a given chunk is correctly accounted for in the bitmap.
 722   void verify_for_chunk(Metachunk* chunk) {
 723     assert(chunk_starts_at_address((MetaWord*) chunk),
 724            "No chunk start marked in map for chunk %p.", chunk);
 725     // For chunks larger than the minimal chunk size, no other chunk
 726     // must start in its area.
 727     if (chunk->word_size() > _smallest_chunk_word_size) {
 728       assert(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size,
 729                                        chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map),
 730              "No chunk must start within another chunk.");
 731     }
 732     if (!chunk->is_tagged_free()) {
 733       assert(is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 734              "Chunk %p is in use but marked as free in map (%d %d).",
 735              chunk, chunk->get_chunk_type(), chunk->get_origin());
 736     } else {
 737       assert(!is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 738              "Chunk %p is free but marked as in-use in map (%d %d).",
 739              chunk, chunk->get_chunk_type(), chunk->get_origin());
 740     }
 741   }
 742 
 743 #endif // ASSERT
 744 
 745 };
 746 
 747 // A VirtualSpaceList node.
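// Each node wraps one contiguous reserved memory region (its VirtualSpace),
// which is carved into Metachunks from bottom() upward; an OccupancyMap tracks
// chunk starts and in-use state within the node.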
 748 class VirtualSpaceNode : public CHeapObj<mtClass> {
 749   friend class VirtualSpaceList;
 750 
 751   // Link to next VirtualSpaceNode
 752   VirtualSpaceNode* _next;
 753 
 754   // Whether this node is contained in class or metaspace.
 755   const bool _is_class;
 756 
  // The entire memory region reserved for this node.
 758   MemRegion _reserved;
 759   ReservedSpace _rs;
 760   VirtualSpace _virtual_space;
 761   MetaWord* _top;
 762   // count of chunks contained in this VirtualSpace
 763   uintx _container_count;
 764 
 765   OccupancyMap* _occupancy_map;
 766 
 767   // Convenience functions to access the _virtual_space
 768   char* low()  const { return virtual_space()->low(); }
 769   char* high() const { return virtual_space()->high(); }
 770 
 771   // The first Metachunk will be allocated at the bottom of the
 772   // VirtualSpace
 773   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 774 
 775   // Committed but unused space in the virtual space
 776   size_t free_words_in_vs() const;
 777 
 778   // True if this node belongs to class metaspace.
 779   bool is_class() const { return _is_class; }
 780 
 781   // Helper function for take_from_committed: allocate padding chunks
 782   // until top is at the given address.
 783   void allocate_padding_chunks_until_top_is_at(MetaWord* target_top);
 784 
 785  public:
 786 
 787   VirtualSpaceNode(bool is_class, size_t byte_size);
  VirtualSpaceNode(bool is_class, ReservedSpace rs) :
    _next(NULL), _is_class(is_class), _rs(rs), _top(NULL), _container_count(0), _occupancy_map(NULL) {}
 790   ~VirtualSpaceNode();
 791 
 792   // Convenience functions for logical bottom and end
 793   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 794   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 795 
 796   const OccupancyMap* occupancy_map() const { return _occupancy_map; }
 797   OccupancyMap* occupancy_map() { return _occupancy_map; }
 798 
 799   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 800 
 801   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 802   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 803 
 804   bool is_pre_committed() const { return _virtual_space.special(); }
 805 
  // Accessors
 808   VirtualSpaceNode* next() { return _next; }
 809   void set_next(VirtualSpaceNode* v) { _next = v; }
 810 
 811   void set_reserved(MemRegion const v) { _reserved = v; }
 812   void set_top(MetaWord* v) { _top = v; }
 813 
 814   // Accessors
 815   MemRegion* reserved() { return &_reserved; }
 816   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 817 
 818   // Returns true if "word_size" is available in the VirtualSpace
 819   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 820 
  // Address of next available space in _virtual_space.
  MetaWord* top() const { return _top; }
 822   void inc_top(size_t word_size) { _top += word_size; }
 823 
 824   uintx container_count() { return _container_count; }
 825   void inc_container_count();
 826   void dec_container_count();
 827 #ifdef ASSERT
 828   uintx container_count_slow();
 829   void verify_container_count();
 830 #endif
 831 
 832   // used and capacity in this single entry in the list
 833   size_t used_words_in_vs() const;
 834   size_t capacity_words_in_vs() const;
 835 
 836   bool initialize();
 837 
 838   // get space from the virtual space
 839   Metachunk* take_from_committed(size_t chunk_word_size);
 840 
 841   // Allocate a chunk from the virtual space and return it.
 842   Metachunk* get_chunk_vs(size_t chunk_word_size);
 843 
  // Expands or shrinks the committed space in a virtual space.  Delegates
  // to VirtualSpace.
 846   bool expand_by(size_t min_words, size_t preferred_words);
 847 
 848   // In preparation for deleting this node, remove all the chunks
 849   // in the node from any freelist.
 850   void purge(ChunkManager* chunk_manager);
 851 
  // If an allocation doesn't fit in the current node a new node is created.
  // Before that happens, allocate chunks out of the remaining committed space
  // in this node to avoid wasting that memory.
  // This always works out evenly because all chunk sizes are multiples of
  // the smallest chunk size.
 857   void retire(ChunkManager* chunk_manager);
 858 
 859 
 860   void print_on(outputStream* st) const;
 861   void print_map(outputStream* st, bool is_class) const;
 862 
 863   // Debug support
 864   DEBUG_ONLY(void mangle();)
 865   // Verify counters, all chunks in this list node and the occupancy map.
 866   DEBUG_ONLY(void verify();)
 867   // Verify that all free chunks in this node are ideally merged
  // (there should not be multiple small chunks where a large chunk could exist.)
 869   DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
 870 
 871 };
 872 
 873 #define assert_is_aligned(value, alignment)                  \
 874   assert(is_aligned((value), (alignment)),                   \
 875          SIZE_FORMAT_HEX " is not aligned to "               \
 876          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 877 
 878 // Decide if large pages should be committed when the memory is reserved.
 879 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 880   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 881     size_t words = bytes / BytesPerWord;
 882     bool is_class = false; // We never reserve large pages for the class space.
 883     if (MetaspaceGC::can_expand(words, is_class) &&
 884         MetaspaceGC::allowed_expansion() >= words) {
 885       return true;
 886     }
 887   }
 888 
 889   return false;
 890 }
 891 
// 'bytes' is the size in bytes of the associated virtual space.
VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
  _next(NULL), _is_class(is_class), _rs(), _top(NULL), _container_count(0), _occupancy_map(NULL) {
 895   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 896   bool large_pages = should_commit_large_pages_when_reserving(bytes);
 897   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 898 
 899   if (_rs.is_reserved()) {
 900     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 901     assert(_rs.size() != 0, "Catch if we get a 0 size");
 902     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
 903     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
 904 
 905     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 906   }
 907 }
 908 
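// Expects every chunk in this node to be free; removes each of them from the
// given ChunkManager's free lists and clears its sentinel.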
 909 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 910   DEBUG_ONLY(this->verify();)
 911   Metachunk* chunk = first_chunk();
 912   Metachunk* invalid_chunk = (Metachunk*) top();
 913   while (chunk < invalid_chunk ) {
 914     assert(chunk->is_tagged_free(), "Should be tagged free");
 915     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 916     chunk_manager->remove_chunk(chunk);
 917     chunk->remove_sentinel();
 918     assert(chunk->next() == NULL &&
 919            chunk->prev() == NULL,
 920            "Was not removed from its list");
 921     chunk = (Metachunk*) next;
 922   }
 923 }
 924 
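// Prints an ASCII occupancy map of this node: one character per
// smallest-chunk-sized area, rendered as four stacked lines per row
// (chunk starts, chunk type/in-use state, chunk origin, virgin-chunk marker).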
 925 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
 926 
 927   if (bottom() == top()) {
 928     return;
 929   }
 930 
 931   const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
 932   const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
 933   const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
 934 
 935   int line_len = 100;
 936   const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
 937   line_len = (int)(section_len / spec_chunk_size);
 938 
 939   static const int NUM_LINES = 4;
 940 
 941   char* lines[NUM_LINES];
 942   for (int i = 0; i < NUM_LINES; i ++) {
 943     lines[i] = (char*)os::malloc(line_len, mtInternal);
 944   }
 945   int pos = 0;
 946   const MetaWord* p = bottom();
 947   const Metachunk* chunk = (const Metachunk*)p;
 948   const MetaWord* chunk_end = p + chunk->word_size();
 949   while (p < top()) {
 950     if (pos == line_len) {
 951       pos = 0;
 952       for (int i = 0; i < NUM_LINES; i ++) {
 953         st->fill_to(22);
 954         st->print_raw(lines[i], line_len);
 955         st->cr();
 956       }
 957     }
 958     if (pos == 0) {
 959       st->print(PTR_FORMAT ":", p2i(p));
 960     }
 961     if (p == chunk_end) {
 962       chunk = (Metachunk*)p;
 963       chunk_end = p + chunk->word_size();
 964     }
 965     // line 1: chunk starting points (a dot if that area is a chunk start).
 966     lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
 967 
 968     // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
 969     // chunk is in use.
 970     const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
 971     if (chunk->word_size() == spec_chunk_size) {
 972       lines[1][pos] = chunk_is_free ? 'x' : 'X';
 973     } else if (chunk->word_size() == small_chunk_size) {
 974       lines[1][pos] = chunk_is_free ? 's' : 'S';
 975     } else if (chunk->word_size() == med_chunk_size) {
 976       lines[1][pos] = chunk_is_free ? 'm' : 'M';
 977     } else if (chunk->word_size() > med_chunk_size) {
 978       lines[1][pos] = chunk_is_free ? 'h' : 'H';
 979     } else {
 980       ShouldNotReachHere();
 981     }
 982 
 983     // Line 3: chunk origin
 984     const ChunkOrigin origin = chunk->get_origin();
 985     lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
 986 
 987     // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
 988     //         but were never used.
 989     lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
 990 
 991     p += spec_chunk_size;
 992     pos ++;
 993   }
 994   if (pos > 0) {
 995     for (int i = 0; i < NUM_LINES; i ++) {
 996       st->fill_to(22);
      st->print_raw(lines[i], pos); // Print only the filled part of the last row.
 998       st->cr();
 999     }
1000   }
1001   for (int i = 0; i < NUM_LINES; i ++) {
1002     os::free(lines[i]);
1003   }
1004 }
1005 
1006 
1007 #ifdef ASSERT
1008 uintx VirtualSpaceNode::container_count_slow() {
1009   uintx count = 0;
1010   Metachunk* chunk = first_chunk();
1011   Metachunk* invalid_chunk = (Metachunk*) top();
1012   while (chunk < invalid_chunk ) {
1013     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1014     do_verify_chunk(chunk);
1015     // Don't count the chunks on the free lists.  Those are
1016     // still part of the VirtualSpaceNode but not currently
1017     // counted.
1018     if (!chunk->is_tagged_free()) {
1019       count++;
1020     }
1021     chunk = (Metachunk*) next;
1022   }
1023   return count;
1024 }
1025 #endif
1026 
1027 #ifdef ASSERT
1028 // Verify counters, all chunks in this list node and the occupancy map.
1029 void VirtualSpaceNode::verify() {
1030   uintx num_in_use_chunks = 0;
1031   Metachunk* chunk = first_chunk();
1032   Metachunk* invalid_chunk = (Metachunk*) top();
1033 
1034   // Iterate the chunks in this node and verify each chunk.
1035   while (chunk < invalid_chunk ) {
1036     DEBUG_ONLY(do_verify_chunk(chunk);)
1037     if (!chunk->is_tagged_free()) {
1038       num_in_use_chunks ++;
1039     }
1040     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1041     chunk = (Metachunk*) next;
1042   }
  assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
         ", counter: " UINTX_FORMAT ").", num_in_use_chunks, _container_count);
1045   // Also verify the occupancy map.
1046   occupancy_map()->verify(this->bottom(), this->top());
1047 }
1048 #endif // ASSERT
1049 
1050 #ifdef ASSERT
1051 // Verify that all free chunks in this node are ideally merged
// (there should not be multiple small chunks where a large chunk could exist.)
1053 void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() {
1054   Metachunk* chunk = first_chunk();
1055   Metachunk* invalid_chunk = (Metachunk*) top();
1056   // Shorthands.
1057   const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
1058   const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
1059   int num_free_chunks_since_last_med_boundary = -1;
1060   int num_free_chunks_since_last_small_boundary = -1;
1061   while (chunk < invalid_chunk ) {
1062     // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary.
1063     // Reset the counter when encountering a non-free chunk.
1064     if (chunk->get_chunk_type() != HumongousIndex) {
1065       if (chunk->is_tagged_free()) {
1066         // Count successive free, non-humongous chunks.
1067         if (is_aligned(chunk, size_small)) {
1068           assert(num_free_chunks_since_last_small_boundary <= 1,
1069                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_small, size_small);
1070           num_free_chunks_since_last_small_boundary = 0;
1071         } else if (num_free_chunks_since_last_small_boundary != -1) {
1072           num_free_chunks_since_last_small_boundary ++;
1073         }
1074         if (is_aligned(chunk, size_med)) {
1075           assert(num_free_chunks_since_last_med_boundary <= 1,
1076                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_med, size_med);
1077           num_free_chunks_since_last_med_boundary = 0;
1078         } else if (num_free_chunks_since_last_med_boundary != -1) {
1079           num_free_chunks_since_last_med_boundary ++;
1080         }
1081       } else {
1082         // Encountering a non-free chunk, reset counters.
1083         num_free_chunks_since_last_med_boundary = -1;
1084         num_free_chunks_since_last_small_boundary = -1;
1085       }
1086     } else {
1087       // One cannot merge areas with a humongous chunk in the middle. Reset counters.
1088       num_free_chunks_since_last_med_boundary = -1;
1089       num_free_chunks_since_last_small_boundary = -1;
1090     }
1091 
1092     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1093     chunk = (Metachunk*) next;
1094   }
1095 }
1096 #endif // ASSERT
1097 
1098 // List of VirtualSpaces for metadata allocation.
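// A new node is reserved and appended when the current node cannot satisfy a
// chunk allocation; the list also tracks the total reserved and committed
// words across all of its nodes.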
1099 class VirtualSpaceList : public CHeapObj<mtClass> {
1100   friend class VirtualSpaceNode;
1101 
1102   enum VirtualSpaceSizes {
1103     VirtualSpaceSize = 256 * K
1104   };
1105 
1106   // Head of the list
1107   VirtualSpaceNode* _virtual_space_list;
1108   // virtual space currently being used for allocations
1109   VirtualSpaceNode* _current_virtual_space;
1110 
1111   // Is this VirtualSpaceList used for the compressed class space
1112   bool _is_class;
1113 
1114   // Sum of reserved and committed memory in the virtual spaces
1115   size_t _reserved_words;
1116   size_t _committed_words;
1117 
1118   // Number of virtual spaces
1119   size_t _virtual_space_count;
1120 
1121   ~VirtualSpaceList();
1122 
1123   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
1124 
1125   void set_virtual_space_list(VirtualSpaceNode* v) {
1126     _virtual_space_list = v;
1127   }
1128   void set_current_virtual_space(VirtualSpaceNode* v) {
1129     _current_virtual_space = v;
1130   }
1131 
1132   void link_vs(VirtualSpaceNode* new_entry);
1133 
1134   // Get another virtual space and add it to the list.  This
1135   // is typically prompted by a failed attempt to allocate a chunk
1136   // and is typically followed by the allocation of a chunk.
1137   bool create_new_virtual_space(size_t vs_word_size);
1138 
1139   // Chunk up the unused committed space in the current
1140   // virtual space and add the chunks to the free list.
1141   void retire_current_virtual_space();
1142 
1143  public:
1144   VirtualSpaceList(size_t word_size);
1145   VirtualSpaceList(ReservedSpace rs);
1146 
1147   size_t free_bytes();
1148 
1149   Metachunk* get_new_chunk(size_t chunk_word_size,
1150                            size_t suggested_commit_granularity);
1151 
1152   bool expand_node_by(VirtualSpaceNode* node,
1153                       size_t min_words,
1154                       size_t preferred_words);
1155 
1156   bool expand_by(size_t min_words,
1157                  size_t preferred_words);
1158 
1159   VirtualSpaceNode* current_virtual_space() {
1160     return _current_virtual_space;
1161   }
1162 
1163   bool is_class() const { return _is_class; }
1164 
1165   bool initialization_succeeded() { return _virtual_space_list != NULL; }
1166 
1167   size_t reserved_words()  { return _reserved_words; }
1168   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
1169   size_t committed_words() { return _committed_words; }
1170   size_t committed_bytes() { return committed_words() * BytesPerWord; }
1171 
1172   void inc_reserved_words(size_t v);
1173   void dec_reserved_words(size_t v);
1174   void inc_committed_words(size_t v);
1175   void dec_committed_words(size_t v);
1176   void inc_virtual_space_count();
1177   void dec_virtual_space_count();
1178 
1179   bool contains(const void* ptr);
1180 
  // Unlink empty VirtualSpaceNodes and free them.
1182   void purge(ChunkManager* chunk_manager);
1183 
1184   void print_on(outputStream* st) const;
1185   void print_map(outputStream* st) const;
1186 
1187   class VirtualSpaceListIterator : public StackObj {
1188     VirtualSpaceNode* _virtual_spaces;
1189    public:
1190     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
1191       _virtual_spaces(virtual_spaces) {}
1192 
1193     bool repeat() {
1194       return _virtual_spaces != NULL;
1195     }
1196 
1197     VirtualSpaceNode* get_next() {
1198       VirtualSpaceNode* result = _virtual_spaces;
1199       if (_virtual_spaces != NULL) {
1200         _virtual_spaces = _virtual_spaces->next();
1201       }
1202       return result;
1203     }
1204   };
1205 };
1206 
1207 class Metadebug : AllStatic {
1208   // Debugging support for Metaspaces
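  // In debug builds, metadata allocations can be made to fail artificially
  // every so often in order to exercise the out-of-memory handling paths
  // (see test_metadata_failure()).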
1209   static int _allocation_fail_alot_count;
1210 
1211  public:
1212 
1213   static void init_allocation_fail_alot_count();
1214 #ifdef ASSERT
1215   static bool test_metadata_failure();
1216 #endif
1217 };
1218 
1219 int Metadebug::_allocation_fail_alot_count = 0;
1220 
1221 //  SpaceManager - used by Metaspace to handle allocations
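// A SpaceManager serves the metadata allocations of one metadata type (class
// or non-class) for a class loader: it keeps the list of chunks in use, the
// current allocation chunk, and a freelist of deallocated blocks; new chunks
// are requested from the ChunkManager/VirtualSpaceList as needed.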
1222 class SpaceManager : public CHeapObj<mtClass> {
1223   friend class ClassLoaderMetaspace;
1224   friend class Metadebug;
1225 
1226  private:
1227 
1228   // protects allocations
1229   Mutex* const _lock;
1230 
1231   // Type of metadata allocated.
1232   const Metaspace::MetadataType   _mdtype;
1233 
1234   // Type of metaspace
1235   const Metaspace::MetaspaceType  _space_type;
1236 
1237   // List of chunks in use by this SpaceManager.  Allocations
1238   // are done from the current chunk.  The list is used for deallocating
1239   // chunks when the SpaceManager is freed.
1240   Metachunk* _chunks_in_use[NumberOfInUseLists];
1241   Metachunk* _current_chunk;
1242 
1243   // Maximum number of small chunks to allocate to a SpaceManager
1244   static uint const _small_chunk_limit;
1245 
  // Maximum number of specialized chunks to allocate for anonymous and delegating
  // metadata space to a SpaceManager
1248   static uint const _anon_and_delegating_metadata_specialize_chunk_limit;
1249 
  // Sum of all space allocated in Metablocks (used words)
1251   size_t _allocated_blocks_words;
1252 
  // Total size (in words) and number of all allocated chunks
1254   size_t _allocated_chunks_words;
1255   size_t _allocated_chunks_count;
1256 
1257   // Free lists of blocks are per SpaceManager since they
1258   // are assumed to be in chunks in use by the SpaceManager
1259   // and all chunks in use by a SpaceManager are freed when
1260   // the class loader using the SpaceManager is collected.
1261   BlockFreelist* _block_freelists;
1262 
1263  private:
1264   // Accessors
1265   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
1266   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
1267     _chunks_in_use[index] = v;
1268   }
1269 
1270   BlockFreelist* block_freelists() const { return _block_freelists; }
1271 
1272   Metaspace::MetadataType mdtype() { return _mdtype; }
1273 
1274   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
1275   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
1276 
1277   Metachunk* current_chunk() const { return _current_chunk; }
1278   void set_current_chunk(Metachunk* v) {
1279     _current_chunk = v;
1280   }
1281 
1282   Metachunk* find_current_chunk(size_t word_size);
1283 
1284   // Add chunk to the list of chunks in use
1285   void add_chunk(Metachunk* v, bool make_current);
1286   void retire_current_chunk();
1287 
1288   Mutex* lock() const { return _lock; }
1289 
1290  protected:
1291   void initialize();
1292 
1293  public:
1294   SpaceManager(Metaspace::MetadataType mdtype,
1295                Metaspace::MetaspaceType space_type,
1296                Mutex* lock);
1297   ~SpaceManager();
1298 
1299   enum ChunkMultiples {
1300     MediumChunkMultiple = 4
1301   };
1302 
1303   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
1304   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
1305   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
1306 
1307   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
1308 
1309   // Accessors
1310   bool is_class() const { return _mdtype == Metaspace::ClassType; }
1311 
1312   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
1313   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
1314   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
1315 
1316   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
1317 
1318   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
1319 
1320   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
1321   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
1322   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
1323   size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
1324   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
1325 
1326   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
1327 
1328   // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
1330   // the in-use list.
1331   void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks by the given
1333   // size.  This is used when a Metablock is allocated.
1334   void inc_used_metrics(size_t words);
1335   // Delete the portion of the running sums for this SpaceManager. That is,
  // the global running sums for the Metachunks and Metablocks are
1337   // decremented for all the Metachunks in-use by this SpaceManager.
1338   void dec_total_from_size_metrics();
1339 
1340   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
1341   // or return the unadjusted size if the requested size is humongous.
1342   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
1343   size_t adjust_initial_chunk_size(size_t requested) const;
1344 
1345   // Get the initial chunk size for this metaspace type.
1346   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
1347 
1348   size_t sum_capacity_in_chunks_in_use() const;
1349   size_t sum_used_in_chunks_in_use() const;
1350   size_t sum_free_in_chunks_in_use() const;
1351   size_t sum_waste_in_chunks_in_use() const;
1352   size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;
1353 
1354   size_t sum_count_in_chunks_in_use();
1355   size_t sum_count_in_chunks_in_use(ChunkIndex i);
1356 
1357   Metachunk* get_new_chunk(size_t chunk_word_size);
1358 
1359   // Block allocation and deallocation.
1360   // Allocates a block from the current chunk
1361   MetaWord* allocate(size_t word_size);
1362 
1363   // Helper for allocations
1364   MetaWord* allocate_work(size_t word_size);
1365 
1366   // Returns a block to the per manager freelist
1367   void deallocate(MetaWord* p, size_t word_size);
1368 
1369   // Based on the allocation size and a minimum chunk size,
1370   // returns the chunk size to use (for expanding space for chunk allocation).
1371   size_t calc_chunk_size(size_t allocation_word_size);
1372 
1373   // Called when an allocation from the current chunk fails.
1374   // Gets a new chunk (may require getting a new virtual space),
1375   // and allocates from that chunk.
1376   MetaWord* grow_and_allocate(size_t word_size);
1377 
1378   // Notify memory usage to MemoryService.
1379   void track_metaspace_memory_usage();
1380 
1381   // debugging support.
1382 
1383   void dump(outputStream* const out) const;
1384   void print_on(outputStream* st) const;
1385   void locked_print_chunks_in_use_on(outputStream* st) const;
1386 
1387   void verify();
1388   void verify_chunk_size(Metachunk* chunk);
1389 #ifdef ASSERT
1390   void verify_allocated_blocks_words();
1391 #endif
1392 
1393   // This adjusts the given size to be at least the minimum allocation size in
1394   // words for data in metaspace.  Essentially, the minimum size is currently 3 words.
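       // Worked example (a sketch, assuming a 64-bit VM where sizeof(Metablock) is 24 bytes
       // and Metachunk::object_alignment() is 8 bytes): a request for 1 word (8 bytes) is
       // raised to MAX2(8, 24) = 24 bytes, which is already 8-byte aligned, i.e. 3 words.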
1395   size_t get_allocation_word_size(size_t word_size) {
1396     size_t byte_size = word_size * BytesPerWord;
1397 
1398     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
1399     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
1400 
1401     size_t raw_word_size = raw_bytes_size / BytesPerWord;
1402     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
1403 
1404     return raw_word_size;
1405   }
1406 };
1407 
1408 uint const SpaceManager::_small_chunk_limit = 4;
1409 uint const SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit = 4;
1410 
1411 void VirtualSpaceNode::inc_container_count() {
1412   assert_lock_strong(MetaspaceExpand_lock);
1413   _container_count++;
1414 }
1415 
1416 void VirtualSpaceNode::dec_container_count() {
1417   assert_lock_strong(MetaspaceExpand_lock);
1418   _container_count--;
1419 }
1420 
1421 #ifdef ASSERT
1422 void VirtualSpaceNode::verify_container_count() {
1423   assert(_container_count == container_count_slow(),
1424          "Inconsistency in container_count _container_count " UINTX_FORMAT
1425          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
1426 }
1427 #endif
1428 
1429 // BlockFreelist methods
1430 
1431 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
1432 
1433 BlockFreelist::~BlockFreelist() {
1434   delete _dictionary;
1435   if (_small_blocks != NULL) {
1436     delete _small_blocks;
1437   }
1438 }
1439 
1440 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
1441   assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
1442 
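       // Note that the freed memory itself is reused to carry the freelist node: a Metablock
       // header is placement-new'ed over the returned block before it is handed to the
       // small-blocks list or the dictionary.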
1443   Metablock* free_chunk = ::new (p) Metablock(word_size);
1444   if (word_size < SmallBlocks::small_block_max_size()) {
1445     small_blocks()->return_block(free_chunk, word_size);
1446   } else {
1447     dictionary()->return_chunk(free_chunk);
1448   }
1449   log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
1450             SIZE_FORMAT, p2i(free_chunk), word_size);
1451 }
1452 
1453 MetaWord* BlockFreelist::get_block(size_t word_size) {
1454   assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
1455 
1456   // Try small_blocks first.
1457   if (word_size < SmallBlocks::small_block_max_size()) {
1458     // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
1459     // this space manager.
1460     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
1461     if (new_block != NULL) {
1462       log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1463               p2i(new_block), word_size);
1464       return new_block;
1465     }
1466   }
1467 
1468   if (word_size < BlockFreelist::min_dictionary_size()) {
1469     // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
1470     return NULL;
1471   }
1472 
1473   Metablock* free_block = dictionary()->get_chunk(word_size);
1474   if (free_block == NULL) {
1475     return NULL;
1476   }
1477 
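       // A block found in the dictionary may be much larger than requested. To limit
       // internal waste, oversized blocks (more than WasteMultiplier times the request)
       // are given back to the freelist and the lookup reports a miss instead.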
1478   const size_t block_size = free_block->size();
1479   if (block_size > WasteMultiplier * word_size) {
1480     return_block((MetaWord*)free_block, block_size);
1481     return NULL;
1482   }
1483 
1484   MetaWord* new_block = (MetaWord*)free_block;
1485   assert(block_size >= word_size, "Incorrect size of block from freelist");
1486   const size_t unused = block_size - word_size;
1487   if (unused >= SmallBlocks::small_block_min_size()) {
1488     return_block(new_block + word_size, unused);
1489   }
1490 
1491   log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1492             p2i(new_block), word_size);
1493   return new_block;
1494 }
1495 
1496 void BlockFreelist::print_on(outputStream* st) const {
1497   dictionary()->print_free_lists(st);
1498   if (_small_blocks != NULL) {
1499     _small_blocks->print_on(st);
1500   }
1501 }
1502 
1503 // VirtualSpaceNode methods
1504 
1505 VirtualSpaceNode::~VirtualSpaceNode() {
1506   _rs.release();
1507   if (_occupancy_map != NULL) {
1508     delete _occupancy_map;
1509   }
1510 #ifdef ASSERT
1511   size_t word_size = sizeof(*this) / BytesPerWord;
1512   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
1513 #endif
1514 }
1515 
1516 size_t VirtualSpaceNode::used_words_in_vs() const {
1517   return pointer_delta(top(), bottom(), sizeof(MetaWord));
1518 }
1519 
1520 // Space committed in the VirtualSpace
1521 size_t VirtualSpaceNode::capacity_words_in_vs() const {
1522   return pointer_delta(end(), bottom(), sizeof(MetaWord));
1523 }
1524 
1525 size_t VirtualSpaceNode::free_words_in_vs() const {
1526   return pointer_delta(end(), top(), sizeof(MetaWord));
1527 }
1528 
1529 // Given an address larger than top(), allocate padding chunks until top is at the given address.
1530 void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) {
1531 
1532   assert(target_top > top(), "Sanity");
1533 
1534   // Padding chunks are added to the freelist.
1535   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1536 
1537   // shorthands
1538   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1539   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1540   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1541 
1542   while (top() < target_top) {
1543 
1544     // We could make this code more generic, but right now we only deal with two possible chunk sizes
1545     // for padding chunks, so it is not worth it.
1546     size_t padding_chunk_word_size = small_word_size;
1547     if (is_aligned(top(), small_word_size * sizeof(MetaWord)) == false) {
1548       assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
1549       padding_chunk_word_size = spec_word_size;
1550     }
1551     MetaWord* here = top();
1552     assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
1553     inc_top(padding_chunk_word_size);
1554 
1555     // Create new padding chunk.
1556     ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
1557     assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
1558 
1559     Metachunk* const padding_chunk =
1560       ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
1561     assert(padding_chunk == (Metachunk*)here, "Sanity");
1562     DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
1563     log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
1564                                        PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
1565                                        (is_class() ? "class space " : "metaspace"),
1566                                        p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
1567 
1568     // Mark chunk start in occupancy map.
1569     occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
1570 
1571     // Chunks are born as in-use (see Metachunk ctor). So, before returning
1572     // the padding chunk to its chunk manager, mark it as in use (ChunkManager
1573     // will assert that).
1574     do_update_in_use_info_for_chunk(padding_chunk, true);
1575 
1576     // Return Chunk to freelist.
1577     inc_container_count();
1578     chunk_manager->return_single_chunk(padding_chunk_type, padding_chunk);
1579     // Please note: at this point, ChunkManager::return_single_chunk()
1580     // may already have merged the padding chunk with neighboring chunks, so
1581     // it may have vanished at this point. Do not reference the padding
1582     // chunk beyond this point.
1583   }
1584 
1585   assert(top() == target_top, "Sanity");
1586 
1587 } // allocate_padding_chunks_until_top_is_at()
1588 
1589 // Allocates the chunk from the virtual space only.
1590 // This interface is also used internally for debugging.  Not all
1591 // chunks removed here are necessarily used for allocation.
1592 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1593   // Non-humongous chunks are to be allocated aligned to their chunk
1594   // size. So, start addresses of medium chunks are aligned to medium
1595   // chunk size, those of small chunks to small chunk size and so
1596   // forth. This facilitates merging of free chunks and reduces
1597   // fragmentation. Chunk sizes are spec < small < medium, with each
1598   // larger chunk size being a multiple of the next smaller chunk
1599   // size.
1600   // Because of this alignment, we may need to create a number of padding
1601   // chunks. These chunks are created and added to the freelist.
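       // A sketch of a hypothetical case: with specialized/small/medium chunk sizes of
       // 128/512/8K words, a medium chunk request arriving while top() sits 512 words past
       // an 8K boundary would first fill the gap up to the next 8K boundary with padding
       // chunks, then place the medium chunk at that boundary.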
1602 
1603   // The chunk manager to which we will give our padding chunks.
1604   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1605 
1606   // shorthands
1607   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1608   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1609   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1610 
1611   assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
1612          chunk_word_size >= med_word_size, "Invalid chunk size requested.");
1613 
1614   // Chunk alignment (in bytes) == chunk size unless humongous.
1615   // Humongous chunks are aligned to the smallest chunk size (spec).
1616   const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
1617                                            spec_word_size : chunk_word_size) * sizeof(MetaWord);
1618 
1619   // Do we have enough space to create the requested chunk plus
1620   // any padding chunks needed?
1621   MetaWord* const next_aligned =
1622     static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
1623   if (!is_available((next_aligned - top()) + chunk_word_size)) {
1624     return NULL;
1625   }
1626 
1627   // Before allocating the requested chunk, allocate padding chunks if necessary.
1628   // We only need to do this for small or medium chunks: specialized chunks are the
1629   // smallest size, hence always aligned. Humongous chunks are allocated unaligned
1630   // (implicitly, also aligned to smallest chunk size).
1631   if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top())  {
1632     log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
1633         (is_class() ? "class space " : "metaspace"),
1634         top(), next_aligned);
1635     allocate_padding_chunks_until_top_is_at(next_aligned);
1636     // Now, top should be aligned correctly.
1637     assert_is_aligned(top(), required_chunk_alignment);
1638   }
1639 
1640   // Now, top should be aligned correctly.
1641   assert_is_aligned(top(), required_chunk_alignment);
1642 
1643   // Bottom of the new chunk
1644   MetaWord* chunk_limit = top();
1645   assert(chunk_limit != NULL, "Not safe to call this method");
1646 
1647   // The virtual spaces are always expanded by the
1648   // commit granularity to enforce the following condition.
1649   // Without this the is_available check will not work correctly.
1650   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1651       "The committed memory doesn't match the expanded memory.");
1652 
1653   if (!is_available(chunk_word_size)) {
1654     LogTarget(Debug, gc, metaspace, freelist) lt;
1655     if (lt.is_enabled()) {
1656       LogStream ls(lt);
1657       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1658       // Dump some information about the virtual space that is nearly full
1659       print_on(&ls);
1660     }
1661     return NULL;
1662   }
1663 
1664   // Take the space (bump top on the current virtual space).
1665   inc_top(chunk_word_size);
1666 
1667   // Initialize the chunk
1668   ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
1669   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
1670   assert(result == (Metachunk*)chunk_limit, "Sanity");
1671   occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
1672   do_update_in_use_info_for_chunk(result, true);
1673 
1674   inc_container_count();
1675 
1676   if (VerifyMetaspace) {
1677     DEBUG_ONLY(chunk_manager->locked_verify());
1678     DEBUG_ONLY(this->verify());
1679   }
1680 
1681   DEBUG_ONLY(do_verify_chunk(result));
1682 
1683   result->inc_use_count();
1684 
1685   return result;
1686 }
1687 
1688 
1689 // Expand the virtual space (commit more of the reserved space)
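     // min_words is the amount the caller needs at a minimum, preferred_words the amount it
     // would like to have; we commit MIN2(preferred, uncommitted) provided at least min_words
     // of the reservation are still uncommitted.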
1690 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1691   size_t min_bytes = min_words * BytesPerWord;
1692   size_t preferred_bytes = preferred_words * BytesPerWord;
1693 
1694   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1695 
1696   if (uncommitted < min_bytes) {
1697     return false;
1698   }
1699 
1700   size_t commit = MIN2(preferred_bytes, uncommitted);
1701   bool result = virtual_space()->expand_by(commit, false);
1702 
1703   if (result) {
1704     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
1705               (is_class() ? "class" : "non-class"), commit);
1706   } else {
1707     log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
1708               (is_class() ? "class" : "non-class"), commit);
1709   }
1710 
1711   assert(result, "Failed to commit memory");
1712 
1713   return result;
1714 }
1715 
1716 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1717   assert_lock_strong(MetaspaceExpand_lock);
1718   Metachunk* result = take_from_committed(chunk_word_size);
1719   return result;
1720 }
1721 
1722 bool VirtualSpaceNode::initialize() {
1723 
1724   if (!_rs.is_reserved()) {
1725     return false;
1726   }
1727 
1728   // These are necessary restrictions to make sure that the virtual space always
1729   // grows in steps of Metaspace::commit_alignment(). If both base and size are
1730   // aligned only the middle alignment of the VirtualSpace is used.
1731   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
1732   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
1733 
1734   // ReservedSpaces marked as special will have the entire memory
1735   // pre-committed. Setting a committed size will make sure that
1736   // committed_size and actual_committed_size agree.
1737   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1738 
1739   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1740                                             Metaspace::commit_alignment());
1741   if (result) {
1742     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1743         "Checking that the pre-committed memory was registered by the VirtualSpace");
1744 
1745     set_top((MetaWord*)virtual_space()->low());
1746     set_reserved(MemRegion((HeapWord*)_rs.base(),
1747                  (HeapWord*)(_rs.base() + _rs.size())));
1748 
1749     assert(reserved()->start() == (HeapWord*) _rs.base(),
1750            "Reserved start was not set properly " PTR_FORMAT
1751            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1752     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1753            "Reserved size was not set properly " SIZE_FORMAT
1754            " != " SIZE_FORMAT, reserved()->word_size(),
1755            _rs.size() / BytesPerWord);
1756   }
1757 
1758   // Initialize Occupancy Map.
1759   const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
1760   _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
1761 
1762   return result;
1763 }
1764 
1765 void VirtualSpaceNode::print_on(outputStream* st) const {
1766   size_t used = used_words_in_vs();
1767   size_t capacity = capacity_words_in_vs();
1768   VirtualSpace* vs = virtual_space();
1769   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
1770            "[" PTR_FORMAT ", " PTR_FORMAT ", "
1771            PTR_FORMAT ", " PTR_FORMAT ")",
1772            p2i(vs), capacity / K,
1773            capacity == 0 ? 0 : used * 100 / capacity,
1774            p2i(bottom()), p2i(top()), p2i(end()),
1775            p2i(vs->high_boundary()));
1776 }
1777 
1778 #ifdef ASSERT
1779 void VirtualSpaceNode::mangle() {
1780   size_t word_size = capacity_words_in_vs();
1781   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1782 }
1783 #endif // ASSERT
1784 
1785 // VirtualSpaceList methods
1786 // Space allocated from the VirtualSpace
1787 
1788 VirtualSpaceList::~VirtualSpaceList() {
1789   VirtualSpaceListIterator iter(virtual_space_list());
1790   while (iter.repeat()) {
1791     VirtualSpaceNode* vsl = iter.get_next();
1792     delete vsl;
1793   }
1794 }
1795 
1796 void VirtualSpaceList::inc_reserved_words(size_t v) {
1797   assert_lock_strong(MetaspaceExpand_lock);
1798   _reserved_words = _reserved_words + v;
1799 }
1800 void VirtualSpaceList::dec_reserved_words(size_t v) {
1801   assert_lock_strong(MetaspaceExpand_lock);
1802   _reserved_words = _reserved_words - v;
1803 }
1804 
1805 #define assert_committed_below_limit()                        \
1806   assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
1807          "Too much committed memory. Committed: " SIZE_FORMAT \
1808          " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1809           MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);
1810 
1811 void VirtualSpaceList::inc_committed_words(size_t v) {
1812   assert_lock_strong(MetaspaceExpand_lock);
1813   _committed_words = _committed_words + v;
1814 
1815   assert_committed_below_limit();
1816 }
1817 void VirtualSpaceList::dec_committed_words(size_t v) {
1818   assert_lock_strong(MetaspaceExpand_lock);
1819   _committed_words = _committed_words - v;
1820 
1821   assert_committed_below_limit();
1822 }
1823 
1824 void VirtualSpaceList::inc_virtual_space_count() {
1825   assert_lock_strong(MetaspaceExpand_lock);
1826   _virtual_space_count++;
1827 }
1828 void VirtualSpaceList::dec_virtual_space_count() {
1829   assert_lock_strong(MetaspaceExpand_lock);
1830   _virtual_space_count--;
1831 }
1832 
1833 void ChunkManager::remove_chunk(Metachunk* chunk) {
1834   size_t word_size = chunk->word_size();
1835   ChunkIndex index = list_index(word_size);
1836   if (index != HumongousIndex) {
1837     free_chunks(index)->remove_chunk(chunk);
1838   } else {
1839     humongous_dictionary()->remove_chunk(chunk);
1840   }
1841 
1842   // Chunk has been removed from the chunks free list, update counters.
1843   account_for_removed_chunk(chunk);
1844 }
1845 
1846 bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
1847   assert_lock_strong(MetaspaceExpand_lock);
1848   assert(chunk != NULL, "invalid chunk pointer");
1849   // Check for valid merge combinations.
1850   assert((chunk->get_chunk_type() == SpecializedIndex &&
1851           (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) ||
1852          (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex),
1853         "Invalid chunk merge combination.");
1854 
1855   const size_t target_chunk_word_size =
1856     get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class());
1857 
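       // For example, when merging up to a small chunk, the prospective merge region is the
       // small-chunk-sized, small-chunk-aligned area containing the given chunk; all the free
       // chunks inside that region will be folded into one new small chunk.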
1858   // [ prospective merge region )
1859   MetaWord* const p_merge_region_start =
1860     (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord));
1861   MetaWord* const p_merge_region_end =
1862     p_merge_region_start + target_chunk_word_size;
1863 
1864   // We need the VirtualSpaceNode containing this chunk and its occupancy map.
1865   VirtualSpaceNode* const vsn = chunk->container();
1866   OccupancyMap* const ocmap = vsn->occupancy_map();
1867 
1868   // The prospective chunk merge range must be completely contained by the
1869   // committed range of the virtual space node.
1870   if (p_merge_region_start < vsn->bottom() || p_merge_region_end > vsn->top()) {
1871     return false;
1872   }
1873 
1874   // Only attempt to merge this range if at its start a chunk starts and at its end
1875   // a chunk ends. If a chunk (can only be humongous) straddles either start or end
1876   // of that range, we cannot merge.
1877   if (!ocmap->chunk_starts_at_address(p_merge_region_start)) {
1878     return false;
1879   }
1880   if (p_merge_region_end < vsn->top() &&
1881       !ocmap->chunk_starts_at_address(p_merge_region_end)) {
1882     return false;
1883   }
1884 
1885   // Now check if the prospective merge area contains live chunks. If it does we cannot merge.
1886   if (ocmap->is_region_in_use(p_merge_region_start, target_chunk_word_size)) {
1887     return false;
1888   }
1889 
1890   // Success! Remove all chunks in this region...
1891   log_trace(gc, metaspace, freelist)("%s: coalescing chunks in area [%p-%p)...",
1892     (is_class() ? "class space" : "metaspace"),
1893     p_merge_region_start, p_merge_region_end);
1894 
1895   const int num_chunks_removed =
1896     remove_chunks_in_area(p_merge_region_start, target_chunk_word_size);
1897 
1898   // ... and create a single new bigger chunk.
1899   Metachunk* const p_new_chunk =
1900       ::new (p_merge_region_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn);
1901   assert(p_new_chunk == (Metachunk*)p_merge_region_start, "Sanity");
1902   p_new_chunk->set_origin(origin_merge);
1903 
1904   log_trace(gc, metaspace, freelist)("%s: created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".",
1905     (is_class() ? "class space" : "metaspace"),
1906     p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord));
1907 
1908   // Fix occupancy map: remove old start bits of the small chunks and set new start bit.
1909   ocmap->wipe_chunk_start_bits_in_region(p_merge_region_start, target_chunk_word_size);
1910   ocmap->set_chunk_starts_at_address(p_merge_region_start, true);
1911 
1912   // Mark chunk as free. Note: it is not necessary to update the in-use
1913   // information in the occupancy map, because the old chunks were also free,
1914   // so nothing should have changed.
1915   p_new_chunk->set_is_tagged_free(true);
1916 
1917   // Add new chunk to its freelist.
1918   ChunkList* const list = free_chunks(target_chunk_type);
1919   list->return_chunk_at_head(p_new_chunk);
1920 
1921   // And adjust ChunkManager::_free_chunks_count (_free_chunks_total
1922   // should not have changed, because the size of the space should be the same)
1923   _free_chunks_count -= num_chunks_removed;
1924   _free_chunks_count ++;
1925 
1926   // VirtualSpaceNode::container_count does not have to be modified:
1927   // it means "number of active (non-free) chunks", so merging free chunks
1928   // should not affect that count.
1929 
1930   // At the end of a chunk merge, run verification tests.
1931   if (VerifyMetaspace) {
1932     DEBUG_ONLY(this->locked_verify());
1933     DEBUG_ONLY(vsn->verify());
1934   }
1935 
1936   return true;
1937 }
1938 
1939 // Remove all chunks in the given area - the chunks are supposed to be free -
1940 // from their corresponding freelists. Mark them as invalid.
1941 // - This does not correct the occupancy map.
1942 // - This does not adjust the counters in ChunkManager.
1943 // - Does not adjust container count counter in containing VirtualSpaceNode
1944 // Returns number of chunks removed.
1945 int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
1946   assert(p != NULL && word_size > 0, "Invalid range.");
1947   const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
1948   assert_is_aligned(word_size, smallest_chunk_size);
1949 
1950   Metachunk* const start = (Metachunk*) p;
1951   const Metachunk* const end = (Metachunk*)(p + word_size);
1952   Metachunk* cur = start;
1953   int num_removed = 0;
1954   while (cur < end) {
1955     Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
1956     DEBUG_ONLY(do_verify_chunk(cur));
1957     assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
1958     assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
1959     log_trace(gc, metaspace, freelist)("%s: removing chunk %p, size " SIZE_FORMAT_HEX ".",
1960       (is_class() ? "class space" : "metaspace"),
1961       cur, cur->word_size() * sizeof(MetaWord));
1962     cur->remove_sentinel();
1963     // Note: cannot call ChunkManager::remove_chunk, because that
1964     // modifies the counters in ChunkManager, which we do not want. So
1965     // we call remove_chunk on the freelist directly (see also the
1966     // splitting function which does the same).
1967     ChunkList* const list = free_chunks(list_index(cur->word_size()));
1968     list->remove_chunk(cur);
1969     num_removed ++;
1970     cur = next;
1971   }
1972   return num_removed;
1973 }
1974 
1975 // Walk the list of VirtualSpaceNodes and delete
1976 // nodes with a 0 container_count.  Remove Metachunks in
1977 // the node from their respective freelists.
1978 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1979   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
1980   assert_lock_strong(MetaspaceExpand_lock);
1981   // Don't use a VirtualSpaceListIterator because this
1982   // list is being changed and a straightforward use of an iterator is not safe.
1983   VirtualSpaceNode* purged_vsl = NULL;
1984   VirtualSpaceNode* prev_vsl = virtual_space_list();
1985   VirtualSpaceNode* next_vsl = prev_vsl;
1986   while (next_vsl != NULL) {
1987     VirtualSpaceNode* vsl = next_vsl;
1988     DEBUG_ONLY(vsl->verify_container_count();)
1989     next_vsl = vsl->next();
1990     // Don't free the current virtual space since it will likely
1991     // be needed soon.
1992     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1993       log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
1994                                          ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
1995       // Unlink it from the list
1996       if (prev_vsl == vsl) {
1997         // This is the case of the current node being the first node.
1998         assert(vsl == virtual_space_list(), "Expected to be the first node");
1999         set_virtual_space_list(vsl->next());
2000       } else {
2001         prev_vsl->set_next(vsl->next());
2002       }
2003 
2004       vsl->purge(chunk_manager);
2005       dec_reserved_words(vsl->reserved_words());
2006       dec_committed_words(vsl->committed_words());
2007       dec_virtual_space_count();
2008       purged_vsl = vsl;
2009       delete vsl;
2010     } else {
2011       prev_vsl = vsl;
2012     }
2013   }
2014 #ifdef ASSERT
2015   if (purged_vsl != NULL) {
2016     // List should be stable enough to use an iterator here.
2017     VirtualSpaceListIterator iter(virtual_space_list());
2018     while (iter.repeat()) {
2019       VirtualSpaceNode* vsl = iter.get_next();
2020       assert(vsl != purged_vsl, "Purge of vsl failed");
2021     }
2022   }
2023 #endif
2024 }
2025 
2026 
2027 // This function looks at the mmap regions in the metaspace without locking.
2028 // The chunks are added with store ordering and not deleted except at
2029 // unloading time during a safepoint.
2030 bool VirtualSpaceList::contains(const void* ptr) {
2031   // List should be stable enough to use an iterator here because removing virtual
2032   // space nodes is only allowed at a safepoint.
2033   VirtualSpaceListIterator iter(virtual_space_list());
2034   while (iter.repeat()) {
2035     VirtualSpaceNode* vsn = iter.get_next();
2036     if (vsn->contains(ptr)) {
2037       return true;
2038     }
2039   }
2040   return false;
2041 }
2042 
2043 void VirtualSpaceList::retire_current_virtual_space() {
2044   assert_lock_strong(MetaspaceExpand_lock);
2045 
2046   VirtualSpaceNode* vsn = current_virtual_space();
2047 
2048   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
2049                                   Metaspace::chunk_manager_metadata();
2050 
2051   vsn->retire(cm);
2052 }
2053 
2054 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
2055   DEBUG_ONLY(verify_container_count();)
2056   assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
2057   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
2058     ChunkIndex index = (ChunkIndex)i;
2059     size_t chunk_size = chunk_manager->size_by_index(index);
2060 
2061     while (free_words_in_vs() >= chunk_size) {
2062       Metachunk* chunk = get_chunk_vs(chunk_size);
2063       // Chunk will be allocated aligned, so allocation may require
2064       // additional padding chunks. That may cause the above allocation to
2065       // fail. Just ignore the failed allocation and continue with the
2066       // next smaller chunk size. As the VirtualSpaceNode committed
2067       // size should be a multiple of the smallest chunk size, we
2068       // should always be able to fill the VirtualSpace completely.
2069       if (chunk == NULL) {
2070         break;
2071       }
2072       chunk_manager->return_single_chunk(index, chunk);
2073     }
2074     DEBUG_ONLY(verify_container_count();)
2075   }
2076   assert(free_words_in_vs() == 0, "should be empty now");
2077 }
2078 
2079 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
2080                                    _is_class(false),
2081                                    _virtual_space_list(NULL),
2082                                    _current_virtual_space(NULL),
2083                                    _reserved_words(0),
2084                                    _committed_words(0),
2085                                    _virtual_space_count(0) {
2086   MutexLockerEx cl(MetaspaceExpand_lock,
2087                    Mutex::_no_safepoint_check_flag);
2088   create_new_virtual_space(word_size);
2089 }
2090 
2091 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
2092                                    _is_class(true),
2093                                    _virtual_space_list(NULL),
2094                                    _current_virtual_space(NULL),
2095                                    _reserved_words(0),
2096                                    _committed_words(0),
2097                                    _virtual_space_count(0) {
2098   MutexLockerEx cl(MetaspaceExpand_lock,
2099                    Mutex::_no_safepoint_check_flag);
2100   VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
2101   bool succeeded = class_entry->initialize();
2102   if (succeeded) {
2103     link_vs(class_entry);
2104   }
2105 }
2106 
2107 size_t VirtualSpaceList::free_bytes() {
2108   return current_virtual_space()->free_words_in_vs() * BytesPerWord;
2109 }
2110 
2111 // Allocate another meta virtual space and add it to the list.
2112 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
2113   assert_lock_strong(MetaspaceExpand_lock);
2114 
2115   if (is_class()) {
2116     assert(false, "We currently don't support more than one VirtualSpace for"
2117                   " the compressed class space. The initialization of the"
2118                   " CCS uses another code path and should not hit this path.");
2119     return false;
2120   }
2121 
2122   if (vs_word_size == 0) {
2123     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
2124     return false;
2125   }
2126 
2127   // Reserve the space
2128   size_t vs_byte_size = vs_word_size * BytesPerWord;
2129   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
2130 
2131   // Allocate the meta virtual space and initialize it.
2132   VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
2133   if (!new_entry->initialize()) {
2134     delete new_entry;
2135     return false;
2136   } else {
2137     assert(new_entry->reserved_words() == vs_word_size,
2138         "Reserved memory size differs from requested memory size");
2139     // Ensure that lock-free iteration sees a fully initialized node.
2140     OrderAccess::storestore();
2141     link_vs(new_entry);
2142     return true;
2143   }
2144 }
2145 
2146 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
2147   if (virtual_space_list() == NULL) {
2148       set_virtual_space_list(new_entry);
2149   } else {
2150     current_virtual_space()->set_next(new_entry);
2151   }
2152   set_current_virtual_space(new_entry);
2153   inc_reserved_words(new_entry->reserved_words());
2154   inc_committed_words(new_entry->committed_words());
2155   inc_virtual_space_count();
2156 #ifdef ASSERT
2157   new_entry->mangle();
2158 #endif
2159   LogTarget(Trace, gc, metaspace) lt;
2160   if (lt.is_enabled()) {
2161     LogStream ls(lt);
2162     VirtualSpaceNode* vsl = current_virtual_space();
2163     ResourceMark rm;
2164     vsl->print_on(&ls);
2165   }
2166 }
2167 
2168 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
2169                                       size_t min_words,
2170                                       size_t preferred_words) {
2171   size_t before = node->committed_words();
2172 
2173   bool result = node->expand_by(min_words, preferred_words);
2174 
2175   size_t after = node->committed_words();
2176 
2177   // after and before can be the same if the memory was pre-committed.
2178   assert(after >= before, "Inconsistency");
2179   inc_committed_words(after - before);
2180 
2181   return result;
2182 }
2183 
2184 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
2185   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
2186   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
2187   assert(min_words <= preferred_words, "Invalid arguments");
2188 
2189   const char* const class_or_not = (is_class() ? "class" : "non-class");
2190 
2191   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
2192     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
2193               class_or_not);
2194     return  false;
2195   }
2196 
2197   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
2198   if (allowed_expansion_words < min_words) {
2199     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
2200               class_or_not);
2201     return false;
2202   }
2203 
2204   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
2205 
2206   // Commit more memory from the current virtual space.
2207   bool vs_expanded = expand_node_by(current_virtual_space(),
2208                                     min_words,
2209                                     max_expansion_words);
2210   if (vs_expanded) {
2211      log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
2212                class_or_not);
2213      return true;
2214   }
2215   log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
2216             class_or_not);
2217   retire_current_virtual_space();
2218 
2219   // Get another virtual space.
2220   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
2221   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
2222 
2223   if (create_new_virtual_space(grow_vs_words)) {
2224     if (current_virtual_space()->is_pre_committed()) {
2225       // The memory was pre-committed, so we are done here.
2226       assert(min_words <= current_virtual_space()->committed_words(),
2227           "The new VirtualSpace was pre-committed, so it"
2228           " should be large enough to fit the alloc request.");
2229       return true;
2230     }
2231 
2232     return expand_node_by(current_virtual_space(),
2233                           min_words,
2234                           max_expansion_words);
2235   }
2236 
2237   return false;
2238 }
2239 
2240 // Given a chunk, calculate the largest possible padding space which
2241 // could be required when allocating it.
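     // For a non-humongous chunk the allocation must start at a boundary of the chunk's own
     // size, and since top() is always aligned to at least the specialized chunk size, the
     // worst-case gap to fill with padding is (chunk size - specialized chunk size) words.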
2242 static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
2243   const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
2244   if (chunk_type != HumongousIndex) {
2245     // Normal, non-humongous chunks are allocated at chunk size
2246     // boundaries, so the largest padding space required would be that
2247     // minus the smallest chunk size.
2248     const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
2249     return chunk_word_size - smallest_chunk_size;
2250   } else {
2251     // Humongous chunks are allocated at smallest-chunksize
2252     // boundaries, so there is no padding required.
2253     return 0;
2254   }
2255 }
2256 
2257 
2258 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
2259 
2260   // Allocate a chunk out of the current virtual space.
2261   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2262 
2263   if (next != NULL) {
2264     return next;
2265   }
2266 
2267   // The expand amount is currently only determined by the requested sizes
2268   // and not how much committed memory is left in the current virtual space.
2269 
2270   // We must have enough space for the requested size and any
2271   // additional required padding chunks.
2272   const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
2273 
2274   size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
2275   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
2276   if (min_word_size >= preferred_word_size) {
2277     // Can happen when humongous chunks are allocated.
2278     preferred_word_size = min_word_size;
2279   }
2280 
2281   bool expanded = expand_by(min_word_size, preferred_word_size);
2282   if (expanded) {
2283     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2284     assert(next != NULL, "The allocation was expected to succeed after the expansion");
2285   }
2286 
2287   return next;
2288 }
2289 
2290 void VirtualSpaceList::print_on(outputStream* st) const {
2291   VirtualSpaceListIterator iter(virtual_space_list());
2292   while (iter.repeat()) {
2293     VirtualSpaceNode* node = iter.get_next();
2294     node->print_on(st);
2295   }
2296 }
2297 
2298 void VirtualSpaceList::print_map(outputStream* st) const {
2299   VirtualSpaceNode* list = virtual_space_list();
2300   VirtualSpaceListIterator iter(list);
2301   unsigned i = 0;
2302   while (iter.repeat()) {
2303     st->print_cr("Node %u:", i);
2304     VirtualSpaceNode* node = iter.get_next();
2305     node->print_map(st, this->is_class());
2306     i ++;
2307   }
2308 }
2309 
2310 // MetaspaceGC methods
2311 
2312 // VM_CollectForMetadataAllocation is the VM operation used to trigger a GC.
2313 // Within the VM operation after the GC the attempt to allocate the metadata
2314 // should succeed.  If the GC did not free enough space for the metaspace
2315 // allocation, the HWM is increased so that another virtualspace will be
2316 // allocated for the metadata.  With perm gen the increase in the perm
2317 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
2318 // metaspace policy uses those as the small and large steps for the HWM.
2319 //
2320 // After the GC the compute_new_size() for MetaspaceGC is called to
2321 // resize the capacity of the metaspaces.  The current implementation
2322 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio, analogous to
2323 // the ratios some GCs use to resize the Java heap.  New flags can be implemented
2324 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
2325 // free space is desirable in the metaspace capacity to decide how much
2326 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
2327 // free space is desirable in the metaspace capacity before decreasing
2328 // the HWM.
2329 
2330 // Calculate the amount to increase the high water mark (HWM).
2331 // Increase by a minimum amount (MinMetaspaceExpansion) so that
2332 // another expansion is not requested too soon.  If that is not
2333 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
2334 // If that is still not enough, expand by the size of the allocation
2335 // plus some.
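     // Illustrative numbers (purely for the example, assuming MinMetaspaceExpansion = 256K
     // and MaxMetaspaceExpansion = 4M, with the request first rounded up to the commit
     // alignment): a 100K allocation raises the HWM by 256K, a 1M allocation by 4M, and a
     // 6M allocation by roughly 6M + 256K.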
2336 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
2337   size_t min_delta = MinMetaspaceExpansion;
2338   size_t max_delta = MaxMetaspaceExpansion;
2339   size_t delta = align_up(bytes, Metaspace::commit_alignment());
2340 
2341   if (delta <= min_delta) {
2342     delta = min_delta;
2343   } else if (delta <= max_delta) {
2344     // Don't want to hit the high water mark on the next
2345     // allocation so make the delta greater than just enough
2346     // for this allocation.
2347     delta = max_delta;
2348   } else {
2349     // This allocation is large but the next ones are probably not
2350     // so increase by the minimum.
2351     delta = delta + min_delta;
2352   }
2353 
2354   assert_is_aligned(delta, Metaspace::commit_alignment());
2355 
2356   return delta;
2357 }
2358 
2359 size_t MetaspaceGC::capacity_until_GC() {
2360   size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
2361   assert(value >= MetaspaceSize, "Not initialized properly?");
2362   return value;
2363 }
2364 
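     // Note that this makes a single compare-and-swap attempt. If another thread changed
     // _capacity_until_GC concurrently, the call returns false, leaving it to the caller to
     // re-read the capacity and retry if the expansion is still needed.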
2365 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
2366   assert_is_aligned(v, Metaspace::commit_alignment());
2367 
2368   intptr_t capacity_until_GC = _capacity_until_GC;
2369   intptr_t new_value = capacity_until_GC + v;
2370 
2371   if (new_value < capacity_until_GC) {
2372     // The addition wrapped around, set new_value to aligned max value.
2373     new_value = align_down(max_uintx, Metaspace::commit_alignment());
2374   }
2375 
2376   intptr_t expected = _capacity_until_GC;
2377   intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);
2378 
2379   if (expected != actual) {
2380     return false;
2381   }
2382 
2383   if (new_cap_until_GC != NULL) {
2384     *new_cap_until_GC = new_value;
2385   }
2386   if (old_cap_until_GC != NULL) {
2387     *old_cap_until_GC = capacity_until_GC;
2388   }
2389   return true;
2390 }
2391 
2392 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
2393   assert_is_aligned(v, Metaspace::commit_alignment());
2394 
2395   return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
2396 }
2397 
2398 void MetaspaceGC::initialize() {
2399   // Set the high-water mark to MaxMetaspaceSize during VM initialization since
2400   // we can't do a GC during initialization.
2401   _capacity_until_GC = MaxMetaspaceSize;
2402 }
2403 
2404 void MetaspaceGC::post_initialize() {
2405   // Reset the high-water mark once the VM initialization is done.
2406   _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
2407 }
2408 
2409 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
2410   // Check if the compressed class space is full.
2411   if (is_class && Metaspace::using_class_space()) {
2412     size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
2413     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
2414       log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
2415                 (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
2416       return false;
2417     }
2418   }
2419 
2420   // Check if the user has imposed a limit on the metaspace memory.
2421   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2422   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
2423     log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
2424               (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
2425     return false;
2426   }
2427 
2428   return true;
2429 }
2430 
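     // Returns the number of words that may still be committed without hitting a limit:
     // the smaller of the headroom below capacity_until_GC and the headroom below
     // MaxMetaspaceSize.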
2431 size_t MetaspaceGC::allowed_expansion() {
2432   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2433   size_t capacity_until_gc = capacity_until_GC();
2434 
2435   assert(capacity_until_gc >= committed_bytes,
2436          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
2437          capacity_until_gc, committed_bytes);
2438 
2439   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
2440   size_t left_until_GC = capacity_until_gc - committed_bytes;
2441   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
2442   log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
2443             " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ").",
2444             left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);
2445 
2446   return left_to_commit / BytesPerWord;
2447 }
2448 
2449 void MetaspaceGC::compute_new_size() {
2450   assert(_shrink_factor <= 100, "invalid shrink factor");
2451   uint current_shrink_factor = _shrink_factor;
2452   _shrink_factor = 0;
2453 
2454   // Using committed_bytes() for used_after_gc is an overestimation, since the
2455   // chunk free lists are included in committed_bytes() and the memory in an
2456   // un-fragmented chunk free list is available for future allocations.
2457   // However, if the chunk free lists become fragmented, then the memory may
2458   // not be available for future allocations and the memory is therefore "in use".
2459   // Including the chunk free lists in the definition of "in use" is therefore
2460   // necessary. Not including the chunk free lists can cause capacity_until_GC to
2461   // shrink below committed_bytes() and this has caused serious bugs in the past.
2462   const size_t used_after_gc = MetaspaceUtils::committed_bytes();
2463   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
2464 
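       // Illustrative numbers (assuming MinMetaspaceFreeRatio = 40): with 60MB committed
       // after the GC, at most 60% of the capacity may be in use, so minimum_desired_capacity
       // becomes 60MB / 0.6 = 100MB (but never less than MetaspaceSize).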
2465   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
2466   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
2467 
2468   const double min_tmp = used_after_gc / maximum_used_percentage;
2469   size_t minimum_desired_capacity =
2470     (size_t)MIN2(min_tmp, double(max_uintx));
2471   // Don't shrink less than the initial generation size
2472   minimum_desired_capacity = MAX2(minimum_desired_capacity,
2473                                   MetaspaceSize);
2474 
2475   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
2476   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
2477                            minimum_free_percentage, maximum_used_percentage);
2478   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
2479 
2480 
2481   size_t shrink_bytes = 0;
2482   if (capacity_until_GC < minimum_desired_capacity) {
2483     // If we have less capacity below the metaspace HWM, then
2484     // increment the HWM.
2485     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
2486     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
2487     // Don't expand unless it's significant
2488     if (expand_bytes >= MinMetaspaceExpansion) {
2489       size_t new_capacity_until_GC = 0;
2490       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
2491       assert(succeeded, "Should always successfully increment HWM when at safepoint");
2492 
2493       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2494                                                new_capacity_until_GC,
2495                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
2496       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
2497                                minimum_desired_capacity / (double) K,
2498                                expand_bytes / (double) K,
2499                                MinMetaspaceExpansion / (double) K,
2500                                new_capacity_until_GC / (double) K);
2501     }
2502     return;
2503   }
2504 
2505   // No expansion, now see if we want to shrink
2506   // We would never want to shrink more than this
2507   assert(capacity_until_GC >= minimum_desired_capacity,
2508          SIZE_FORMAT " >= " SIZE_FORMAT,
2509          capacity_until_GC, minimum_desired_capacity);
2510   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
2511 
2512   // Should shrinking be considered?
2513   if (MaxMetaspaceFreeRatio < 100) {
2514     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
2515     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
2516     const double max_tmp = used_after_gc / minimum_used_percentage;
2517     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
2518     maximum_desired_capacity = MAX2(maximum_desired_capacity,
2519                                     MetaspaceSize);
2520     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
2521                              maximum_free_percentage, minimum_used_percentage);
2522     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
2523                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
2524 
2525     assert(minimum_desired_capacity <= maximum_desired_capacity,
2526            "sanity check");
2527 
2528     if (capacity_until_GC > maximum_desired_capacity) {
2529       // Capacity too large, compute shrinking size
2530       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
2531       // We don't want to shrink all the way back to initSize if people call
2532       // System.gc(), because some programs do that between "phases" and then
2533       // we'd just have to grow the heap up again for the next phase.  So we
2534       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
2535       // on the third call, and 100% by the fourth call.  But if we recompute
2536       // size without shrinking, it goes back to 0%.
2537       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
2538 
2539       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
2540 
2541       assert(shrink_bytes <= max_shrink_bytes,
2542              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
2543              shrink_bytes, max_shrink_bytes);
2544       if (current_shrink_factor == 0) {
2545         _shrink_factor = 10;
2546       } else {
2547         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
2548       }
2549       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
2550                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
2551       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
2552                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
2553     }
2554   }
2555 
2556   // Don't shrink unless it's significant
2557   if (shrink_bytes >= MinMetaspaceExpansion &&
2558       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
2559     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
2560     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2561                                              new_capacity_until_GC,
2562                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
2563   }
2564 }
2565 
2566 // Metadebug methods
2567 
2568 void Metadebug::init_allocation_fail_alot_count() {
2569   if (MetadataAllocationFailALot) {
2570     _allocation_fail_alot_count =
2571       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
2572   }
2573 }
2574 
2575 #ifdef ASSERT
2576 bool Metadebug::test_metadata_failure() {
2577   if (MetadataAllocationFailALot &&
2578       Threads::is_vm_complete()) {
2579     if (_allocation_fail_alot_count > 0) {
2580       _allocation_fail_alot_count--;
2581     } else {
2582       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
2583       init_allocation_fail_alot_count();
2584       return true;
2585     }
2586   }
2587   return false;
2588 }
2589 #endif
2590 
2591 // ChunkManager methods
2592 size_t ChunkManager::free_chunks_total_words() {
2593   return _free_chunks_total;
2594 }
2595 
2596 size_t ChunkManager::free_chunks_total_bytes() {
2597   return free_chunks_total_words() * BytesPerWord;
2598 }
2599 
2600 // Update internal accounting after a chunk was added
2601 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
2602   assert_lock_strong(MetaspaceExpand_lock);
2603   _free_chunks_count ++;
2604   _free_chunks_total += c->word_size();
2605 }
2606 
2607 // Update internal accounting after a chunk was removed
2608 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
2609   assert_lock_strong(MetaspaceExpand_lock);
2610   assert(_free_chunks_count >= 1,
2611     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
2612   assert(_free_chunks_total >= c->word_size(),
2613     "ChunkManager::_free_chunks_total: about to go negative"
2614      "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
2615   _free_chunks_count --;
2616   _free_chunks_total -= c->word_size();
2617 }
2618 
2619 size_t ChunkManager::free_chunks_count() {
2620 #ifdef ASSERT
2621   if (!UseConcMarkSweepGC && !MetaspaceExpand_lock->is_locked()) {
2622     MutexLockerEx cl(MetaspaceExpand_lock,
2623                      Mutex::_no_safepoint_check_flag);
2624     // This lock is only needed in debug because the verification
2625     // of the free chunk counts walks the lists of free chunks.
2626     slow_locked_verify_free_chunks_count();
2627   }
2628 #endif
2629   return _free_chunks_count;
2630 }
2631 
2632 ChunkIndex ChunkManager::list_index(size_t size) {
2633   return get_chunk_type_by_size(size, is_class());
2634 }
2635 
2636 size_t ChunkManager::size_by_index(ChunkIndex index) const {
2637   index_bounds_check(index);
2638   assert(index != HumongousIndex, "Do not call for humongous chunks.");
2639   return get_size_for_nonhumongous_chunktype(index, is_class());
2640 }
2641 
2642 void ChunkManager::locked_verify_free_chunks_total() {
2643   assert_lock_strong(MetaspaceExpand_lock);
2644   assert(sum_free_chunks() == _free_chunks_total,
2645          "_free_chunks_total " SIZE_FORMAT " is not the"
2646          " same as sum " SIZE_FORMAT, _free_chunks_total,
2647          sum_free_chunks());
2648 }
2649 
2650 void ChunkManager::verify_free_chunks_total() {
2651   MutexLockerEx cl(MetaspaceExpand_lock,
2652                      Mutex::_no_safepoint_check_flag);
2653   locked_verify_free_chunks_total();
2654 }
2655 
2656 void ChunkManager::locked_verify_free_chunks_count() {
2657   assert_lock_strong(MetaspaceExpand_lock);
2658   assert(sum_free_chunks_count() == _free_chunks_count,
2659          "_free_chunks_count " SIZE_FORMAT " is not the"
2660          " same as sum " SIZE_FORMAT, _free_chunks_count,
2661          sum_free_chunks_count());
2662 }
2663 
2664 void ChunkManager::verify_free_chunks_count() {
2665 #ifdef ASSERT
2666   MutexLockerEx cl(MetaspaceExpand_lock,
2667                      Mutex::_no_safepoint_check_flag);
2668   locked_verify_free_chunks_count();
2669 #endif
2670 }
2671 
2672 void ChunkManager::verify() {
2673   MutexLockerEx cl(MetaspaceExpand_lock,
2674                      Mutex::_no_safepoint_check_flag);
2675   locked_verify();
2676 }
2677 
2678 void ChunkManager::locked_verify() {
2679   locked_verify_free_chunks_count();
2680   locked_verify_free_chunks_total();
2681   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2682     ChunkList* list = free_chunks(i);
2683     if (list != NULL) {
2684       Metachunk* chunk = list->head();
2685       while (chunk) {
2686         DEBUG_ONLY(do_verify_chunk(chunk);)
2687         assert(chunk->is_tagged_free(), "Chunk should be tagged as free.");
2688         chunk = chunk->next();
2689       }
2690     }
2691   }
2692 }
2693 
2694 void ChunkManager::locked_print_free_chunks(outputStream* st) {
2695   assert_lock_strong(MetaspaceExpand_lock);
2696   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2697                 _free_chunks_total, _free_chunks_count);
2698 }
2699 
2700 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
2701   assert_lock_strong(MetaspaceExpand_lock);
2702   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2703                 sum_free_chunks(), sum_free_chunks_count());
2704 }
2705 
2706 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
2707   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
2708          "Bad index: %d", (int)index);
2709 
2710   return &_free_chunks[index];
2711 }
2712 
2713 // These methods, which sum over the free chunk lists, are used by printing
2714 // methods that run in product builds.
2715 size_t ChunkManager::sum_free_chunks() {
2716   assert_lock_strong(MetaspaceExpand_lock);
2717   size_t result = 0;
2718   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2719     ChunkList* list = free_chunks(i);
2720 
2721     if (list == NULL) {
2722       continue;
2723     }
2724 
2725     result = result + list->count() * list->size();
2726   }
2727   result = result + humongous_dictionary()->total_size();
2728   return result;
2729 }
2730 
2731 size_t ChunkManager::sum_free_chunks_count() {
2732   assert_lock_strong(MetaspaceExpand_lock);
2733   size_t count = 0;
2734   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2735     ChunkList* list = free_chunks(i);
2736     if (list == NULL) {
2737       continue;
2738     }
2739     count = count + list->count();
2740   }
2741   count = count + humongous_dictionary()->total_free_blocks();
2742   return count;
2743 }
2744 
2745 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
2746   ChunkIndex index = list_index(word_size);
2747   assert(index < HumongousIndex, "No humongous list");
2748   return free_chunks(index);
2749 }
2750 
2751 // Helper for chunk splitting: given a target chunk word size and a larger free chunk,
2752 // split the larger chunk into n smaller chunks, at least one of which has the target
2753 // chunk size. All of the smaller chunks, including the target chunk, are returned to
2754 // the freelist. A pointer to the target chunk is returned; the caller is expected to
2755 // remove it from the freelist right away.
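     // For illustration: splitting a free medium chunk to obtain a specialized chunk
     // typically yields the specialized target chunk at the start of the region, more
     // specialized chunks up to the next small-chunk-aligned boundary, and small chunks
     // covering the rest; each remainder chunk is made as large as the alignment at its
     // position allows.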
2756 Metachunk* ChunkManager::split_chunk(size_t target_chunk_word_size, Metachunk* larger_chunk) {
2757   assert(larger_chunk->word_size() > target_chunk_word_size, "Sanity");
2758 
2759   const ChunkIndex larger_chunk_index = larger_chunk->get_chunk_type();
2760   const ChunkIndex target_chunk_index = get_chunk_type_by_size(target_chunk_word_size, is_class());
2761 
2762   MetaWord* const region_start = (MetaWord*)larger_chunk;
2763   const size_t region_word_len = larger_chunk->word_size();
2764   MetaWord* const region_end = region_start + region_word_len;
2765   VirtualSpaceNode* const vsn = larger_chunk->container();
2766   OccupancyMap* const ocmap = vsn->occupancy_map();
2767 
2768   // Any larger non-humongous chunk size is a multiple of any smaller chunk size.
2769   // Since non-humongous chunks are aligned to their chunk size, the larger chunk should start
2770   // at an address suitable to place the smaller target chunk.
2771   assert_is_aligned(region_start, target_chunk_word_size);
2772 
2773   // Remove old chunk.
2774   free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
2775   larger_chunk->remove_sentinel();
2776 
2777   // Prevent access to the old chunk from here on.
2778   larger_chunk = NULL;
2779   // ... and wipe it.
2780   DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));
2781 
2782   // In its place create first the target chunk...
2783   MetaWord* p = region_start;
2784   Metachunk* target_chunk = ::new (p) Metachunk(target_chunk_index, is_class(), target_chunk_word_size, vsn);
2785   assert(target_chunk == (Metachunk*)p, "Sanity");
2786   target_chunk->set_origin(origin_split);
2787 
2788   // Note: we do not need to mark its start in the occupancy map
2789   // because it coincides with the old chunk start.
2790 
2791   // Mark chunk as free and return to the freelist.
2792   do_update_in_use_info_for_chunk(target_chunk, false);
2793   free_chunks(target_chunk_index)->return_chunk_at_head(target_chunk);
2794 
2795   // This chunk should now be valid and can be verified.
2796   DEBUG_ONLY(do_verify_chunk(target_chunk));
2797 
2798   // In the remaining space create the remainder chunks.
2799   p += target_chunk->word_size();
2800   assert(p < region_end, "Sanity");
2801 
2802   while (p < region_end) {
2803 
2804     // Find the largest chunk size which fits the alignment requirements at address p.
2805     ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index);
2806     size_t this_chunk_word_size = 0;
2807     for(;;) {
2808       this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class());
2809       if (is_aligned(p, this_chunk_word_size * BytesPerWord)) {
2810         break;
2811       } else {
2812         this_chunk_index = prev_chunk_index(this_chunk_index);
2813         assert(this_chunk_index >= target_chunk_index, "Sanity");
2814       }
2815     }
2816 
2817     assert(this_chunk_word_size >= target_chunk_word_size, "Sanity");
2818     assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
2819     assert(p + this_chunk_word_size <= region_end, "Sanity");
2820 
2821     // Create splitting chunk.
2822     Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
2823     assert(this_chunk == (Metachunk*)p, "Sanity");
2824     this_chunk->set_origin(origin_split);
2825     ocmap->set_chunk_starts_at_address(p, true);
2826     do_update_in_use_info_for_chunk(this_chunk, false);
2827 
2828     // This chunk should be valid and can be verified.
2829     DEBUG_ONLY(do_verify_chunk(this_chunk));
2830 
2831     // Return this chunk to freelist and correct counter.
2832     free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
2833     _free_chunks_count ++;
2834 
2835     log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
2836       SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
2837       p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
2838       p2i(region_start), p2i(region_end));
2839 
2840     p += this_chunk_word_size;
2841 
2842   }
2843 
2844   return target_chunk;
2845 }
2846 
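     // Gets a free chunk of the given word size: for non-humongous sizes, take the head
     // of the matching freelist, splitting a larger free chunk first if that list is
     // empty; for humongous sizes, take a chunk from the humongous dictionary.
     // Returns NULL if nothing suitable is free. The returned chunk is accounted for as
     // removed and marked as in use.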
2847 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
2848   assert_lock_strong(MetaspaceExpand_lock);
2849 
2850   slow_locked_verify();
2851 
2852   Metachunk* chunk = NULL;
2853   bool we_did_split_a_chunk = false;
2854 
2855   if (list_index(word_size) != HumongousIndex) {
2856 
2857     ChunkList* free_list = find_free_chunks_list(word_size);
2858     assert(free_list != NULL, "Sanity check");
2859 
2860     chunk = free_list->head();
2861 
2862     if (chunk == NULL) {
2863       // No free chunk of the requested size exists; try to split a larger free chunk instead.
2864       // This is the counterpart of the coalescing-upon-chunk-return.
2865 
2866       ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());
2867 
2868       // Is there a larger chunk we could split?
2869       Metachunk* larger_chunk = NULL;
2870       ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
2871       while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
2872         larger_chunk = free_chunks(larger_chunk_index)->head();
2873         if (larger_chunk == NULL) {
2874           larger_chunk_index = next_chunk_index(larger_chunk_index);
2875         }
2876       }
2877 
2878       if (larger_chunk != NULL) {
2879         assert(larger_chunk->word_size() > word_size, "Sanity");
2880         assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");
2881 
2882         // We found a larger chunk. Let's split it up:
2883         // - remove old chunk
2884         // - in its place, create new smaller chunks, with at least one chunk
2885         //   being of target size, the others sized as large as possible. This
2886         //   is to make sure the resulting chunks are "as coalesced as possible"
2887         //   (similar to VirtualSpaceNode::retire()).
2888         // Note: during this operation both ChunkManager and VirtualSpaceNode
2889         //  are temporarily invalid, so be careful with asserts.
2890 
2891         log_trace(gc, metaspace, freelist)("%s: splitting chunk " PTR_FORMAT
2892            ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
2893           (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
2894           chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));
2895 
2896         chunk = split_chunk(word_size, larger_chunk);
2897 
2898         // This should have worked.
2899         assert(chunk != NULL, "Sanity");
2900         assert(chunk->word_size() == word_size, "Sanity");
2901         assert(chunk->is_tagged_free(), "Sanity");
2902 
2903         we_did_split_a_chunk = true;
2904 
2905       }
2906     }
2907 
2908     if (chunk == NULL) {
2909       return NULL;
2910     }
2911 
2912     // Remove the chunk as the head of the list.
2913     free_list->remove_chunk(chunk);
2914 
2915     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list: " PTR_FORMAT " chunks left: " SSIZE_FORMAT ".",
2916                                        p2i(free_list), free_list->count());
2917 
2918   } else {
2919     chunk = humongous_dictionary()->get_chunk(word_size);
2920 
2921     if (chunk == NULL) {
2922       return NULL;
2923     }
2924 
2925     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
2926                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
2927   }
2928 
2929   // Chunk has been removed from the chunk manager; update counters.
2930   account_for_removed_chunk(chunk);
2931   do_update_in_use_info_for_chunk(chunk, true);
2932   chunk->container()->inc_container_count();
2933   chunk->inc_use_count();
2934 
2935   // Clear the chunk's links into this freelist.
2936   chunk->set_next(NULL);
2937   chunk->set_prev(NULL);
2938 
2939   // Run some verifications (some more if we did a chunk split)
2940 #ifdef ASSERT
2941   if (VerifyMetaspace) {
2942     locked_verify();
2943     VirtualSpaceNode* const vsn = chunk->container();
2944     vsn->verify();
2945     if (we_did_split_a_chunk) {
2946       vsn->verify_free_chunks_are_ideally_merged();
2947     }
2948   }
2949 #endif
2950 
2951   return chunk;
2952 }
2953 
2954 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
2955   assert_lock_strong(MetaspaceExpand_lock);
2956   slow_locked_verify();
2957 
2958   // Take from the beginning of the list
2959   Metachunk* chunk = free_chunks_get(word_size);
2960   if (chunk == NULL) {
2961     return NULL;
2962   }
2963 
2964   assert((word_size <= chunk->word_size()) ||
2965          (list_index(chunk->word_size()) == HumongousIndex),
2966          "Non-humongous variable sized chunk");
2967   LogTarget(Debug, gc, metaspace, freelist) lt;
2968   if (lt.is_enabled()) {
2969     size_t list_count;
2970     if (list_index(word_size) < HumongousIndex) {
2971       ChunkList* list = find_free_chunks_list(word_size);
2972       list_count = list->count();
2973     } else {
2974       list_count = humongous_dictionary()->total_count();
2975     }
2976     LogStream ls(lt);
2977     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
2978              p2i(this), p2i(chunk), chunk->word_size(), list_count);
2979     ResourceMark rm;
2980     locked_print_free_chunks(&ls);
2981   }
2982 
2983   return chunk;
2984 }
2985 
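     // Returns a single chunk to the ChunkManager: non-humongous chunks go back onto
     // their freelist, humongous chunks into the dictionary. Afterwards, small and
     // specialized chunks are opportunistically coalesced with free neighbors into
     // larger chunks.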
2986 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
2987   assert_lock_strong(MetaspaceExpand_lock);
2988   assert(chunk != NULL, "Expected chunk.");
2989   DEBUG_ONLY(do_verify_chunk(chunk);)
2990   assert(chunk->get_chunk_type() == index, "Chunk does not match expected index.");
2991   assert(chunk->container() != NULL, "Container should have been set.");
2992   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
2993   index_bounds_check(index);
2994 
2995   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
2996   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
2997   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
2998   DEBUG_ONLY(chunk->mangle(badMetaWordVal);)
2999 
3000   if (index != HumongousIndex) {
3001     // Return non-humongous chunk to freelist.
3002     ChunkList* list = free_chunks(index);
3003     assert(list->size() == chunk->word_size(), "Wrong chunk type.");
3004     list->return_chunk_at_head(chunk);
3005     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
3006         chunk_size_name(index), p2i(chunk));
3007   } else {
3008     // Return humongous chunk to dictionary.
3009     assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
3010     assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
3011            "Humongous chunk has wrong alignment.");
3012     _humongous_dictionary.return_chunk(chunk);
3013     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
3014         chunk_size_name(index), p2i(chunk), chunk->word_size());
3015   }
3016   chunk->container()->dec_container_count();
3017   do_update_in_use_info_for_chunk(chunk, false);
3018 
3019   // Chunk has been added; update counters.
3020   account_for_added_chunk(chunk);
3021 
3022   // Attempt to coalesce the returned chunk with its neighboring chunks:
3023   // if this chunk is small or specialized, attempt to coalesce into a medium chunk.
3024   if (index == SmallIndex || index == SpecializedIndex) {
3025     if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
3026       // That did not work. But if this chunk is specialized, we may still be able to form a small chunk.
3027       if (index == SpecializedIndex) {
3028         if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
3029           // give up.
3030         }
3031       }
3032     }
3033   }
3034 
3035 }
3036 
3037 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
3038   index_bounds_check(index);
3039   if (chunks == NULL) {
3040     return;
3041   }
3042   LogTarget(Trace, gc, metaspace, freelist) log;
3043   if (log.is_enabled()) { // tracing
3044     log.print("returning list of %s chunks...", chunk_size_name(index));
3045   }
3046   unsigned num_chunks_returned = 0;
3047   size_t size_chunks_returned = 0;
3048   Metachunk* cur = chunks;
3049   while (cur != NULL) {
3050     // Capture the next link before it is changed
3051     // by the call to return_chunk_at_head();
3052     Metachunk* next = cur->next();
3053     if (log.is_enabled()) { // tracing
3054       num_chunks_returned ++;
3055       size_chunks_returned += cur->word_size();
3056     }
3057     return_single_chunk(index, cur);
3058     cur = next;
3059   }
3060   if (log.is_enabled()) { // tracing
3061     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
3062         num_chunks_returned, chunk_size_name(index), size_chunks_returned);
3063     if (index != HumongousIndex) {
3064       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
3065     } else {
3066       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
3067     }
3068   }
3069 }
3070 
3071 void ChunkManager::print_on(outputStream* out) const {
3072   _humongous_dictionary.report_statistics(out);
3073 }
3074 
3075 void ChunkManager::locked_get_statistics(ChunkManagerStatistics* stat) const {
3076   assert_lock_strong(MetaspaceExpand_lock);
3077   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3078     stat->num_by_type[i] = num_free_chunks(i);
3079     stat->single_size_by_type[i] = size_by_index(i);
3080     stat->total_size_by_type[i] = size_free_chunks_in_bytes(i);
3081   }
3082   stat->num_humongous_chunks = num_free_chunks(HumongousIndex);
3083   stat->total_size_humongous_chunks = size_free_chunks_in_bytes(HumongousIndex);
3084 }
3085 
3086 void ChunkManager::get_statistics(ChunkManagerStatistics* stat) const {
3087   MutexLockerEx cl(MetaspaceExpand_lock,
3088                    Mutex::_no_safepoint_check_flag);
3089   locked_get_statistics(stat);
3090 }
3091 
3092 void ChunkManager::print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale) {
3093   size_t total = 0;
3094   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3095 
3096   const char* unit = scale_unit(scale);
3097   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3098     out->print("  " SIZE_FORMAT " %s (" SIZE_FORMAT " bytes) chunks, total ",
3099                    stat->num_by_type[i], chunk_size_name(i),
3100                    stat->single_size_by_type[i]);
3101     if (scale == 1) {
3102       out->print_cr(SIZE_FORMAT " bytes", stat->total_size_by_type[i]);
3103     } else {
3104       out->print_cr("%.2f%s", (float)stat->total_size_by_type[i] / scale, unit);
3105     }
3106 
3107     total += stat->total_size_by_type[i];
3108   }
3109 
3110 
3111   total += stat->total_size_humongous_chunks;
3112 
3113   if (scale == 1) {
3114     out->print_cr("  " SIZE_FORMAT " humongous chunks, total " SIZE_FORMAT " bytes",
3115                   stat->num_humongous_chunks, stat->total_size_humongous_chunks);
3116 
3117     out->print_cr("  total size: " SIZE_FORMAT " bytes.", total);
3118   } else {
3119     out->print_cr("  " SIZE_FORMAT " humongous chunks, total %.2f%s",
3120                   stat->num_humongous_chunks,
3121                   (float)stat->total_size_humongous_chunks / scale, unit);
3122 
3123     out->print_cr("  total size: %.2f%s.", (float)total / scale, unit);
3124   }
3125 
3126 }
3127 
3128 void ChunkManager::print_all_chunkmanagers(outputStream* out, size_t scale) {
3129   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3130 
3131   // Note: limit lock protection to retrieving the statistics; do the printing
3132   // outside of the lock.
3133   ChunkManagerStatistics stat;
3134   out->print_cr("Chunkmanager (non-class):");
3135   const ChunkManager* const non_class_cm = Metaspace::chunk_manager_metadata();
3136   if (non_class_cm != NULL) {
3137     non_class_cm->get_statistics(&stat);
3138     ChunkManager::print_statistics(&stat, out, scale);
3139   } else {
3140     out->print_cr("unavailable.");
3141   }
3142   out->print_cr("Chunkmanager (class):");
3143   const ChunkManager* const class_cm = Metaspace::chunk_manager_class();
3144   if (class_cm != NULL) {
3145     class_cm->get_statistics(&stat);
3146     ChunkManager::print_statistics(&stat, out, scale);
3147   } else {
3148     out->print_cr("unavailable.");
3149   }
3150 }
3151 
3152 // SpaceManager methods
3153 
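     // Rounds a requested initial chunk size up to the smallest fixed chunk size
     // (specialized, small or medium) that can hold it; anything larger than a medium
     // chunk is returned unchanged and will be treated as humongous. For example, a
     // request slightly above the specialized size is rounded up to a small chunk.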
3154 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
3155   size_t chunk_sizes[] = {
3156       specialized_chunk_size(is_class_space),
3157       small_chunk_size(is_class_space),
3158       medium_chunk_size(is_class_space)
3159   };
3160 
3161   // Adjust up to one of the fixed chunk sizes ...
3162   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
3163     if (requested <= chunk_sizes[i]) {
3164       return chunk_sizes[i];
3165     }
3166   }
3167 
3168   // ... or return the size as a humongous chunk.
3169   return requested;
3170 }
3171 
3172 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
3173   return adjust_initial_chunk_size(requested, is_class());
3174 }
3175 
3176 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
3177   size_t requested;
3178 
3179   if (is_class()) {
3180     switch (type) {
3181     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
3182     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
3183     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
3184     default:                                 requested = ClassSmallChunk; break;
3185     }
3186   } else {
3187     switch (type) {
3188     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
3189     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
3190     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
3191     default:                                 requested = SmallChunk; break;
3192     }
3193   }
3194 
3195   // Adjust to one of the fixed chunk sizes (unless humongous)
3196   const size_t adjusted = adjust_initial_chunk_size(requested);
3197 
3198   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
3199          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
3200 
3201   return adjusted;
3202 }
3203 
3204 size_t SpaceManager::sum_free_in_chunks_in_use() const {
3205   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3206   size_t free = 0;
3207   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3208     Metachunk* chunk = chunks_in_use(i);
3209     while (chunk != NULL) {
3210       free += chunk->free_word_size();
3211       chunk = chunk->next();
3212     }
3213   }
3214   return free;
3215 }
3216 
3217 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
3218   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3219   size_t result = 0;
3220   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3221    result += sum_waste_in_chunks_in_use(i);
3222   }
3223 
3224   return result;
3225 }
3226 
3227 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
3228   size_t result = 0;
3229   Metachunk* chunk = chunks_in_use(index);
3230   // Count the free space in all the chunks except the
3231   // current chunk, from which allocations are still being done.
3232   while (chunk != NULL) {
3233     if (chunk != current_chunk()) {
3234       result += chunk->free_word_size();
3235     }
3236     chunk = chunk->next();
3237   }
3238   return result;
3239 }
3240 
3241 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
3242   // For CMS use "allocated_chunks_words()", which does not need the
3243   // Metaspace lock.  For the other collectors, sum over the
3244   // lists.  Use both methods as a check that "allocated_chunks_words()"
3245   // is correct.  That is, sum_capacity_in_chunks_in_use() is too expensive
3246   // to use in the product and allocated_chunks_words() should be used,
3247   // but allow for checking that allocated_chunks_words() returns the same
3248   // value as sum_capacity_in_chunks_in_use(), which is the definitive
3249   // answer.
3250   if (UseConcMarkSweepGC) {
3251     return allocated_chunks_words();
3252   } else {
3253     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3254     size_t sum = 0;
3255     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3256       Metachunk* chunk = chunks_in_use(i);
3257       while (chunk != NULL) {
3258         sum += chunk->word_size();
3259         chunk = chunk->next();
3260       }
3261     }
3262     return sum;
3263   }
3264 }
3265 
3266 size_t SpaceManager::sum_count_in_chunks_in_use() {
3267   size_t count = 0;
3268   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3269     count = count + sum_count_in_chunks_in_use(i);
3270   }
3271 
3272   return count;
3273 }
3274 
3275 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
3276   size_t count = 0;
3277   Metachunk* chunk = chunks_in_use(i);
3278   while (chunk != NULL) {
3279     count++;
3280     chunk = chunk->next();
3281   }
3282   return count;
3283 }
3284 
3285 
3286 size_t SpaceManager::sum_used_in_chunks_in_use() const {
3287   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3288   size_t used = 0;
3289   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3290     Metachunk* chunk = chunks_in_use(i);
3291     while (chunk != NULL) {
3292       used += chunk->used_word_size();
3293       chunk = chunk->next();
3294     }
3295   }
3296   return used;
3297 }
3298 
3299 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
3300 
3301   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3302     Metachunk* chunk = chunks_in_use(i);
3303     st->print("SpaceManager: %s " PTR_FORMAT,
3304                  chunk_size_name(i), p2i(chunk));
3305     if (chunk != NULL) {
3306       st->print_cr(" free " SIZE_FORMAT,
3307                    chunk->free_word_size());
3308     } else {
3309       st->cr();
3310     }
3311   }
3312 
3313   chunk_manager()->locked_print_free_chunks(st);
3314   chunk_manager()->locked_print_sum_free_chunks(st);
3315 }
3316 
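     // Decides the word size of the next chunk to request for this SpaceManager, given
     // an allocation of word_size. Roughly: specialized chunks for small anonymous or
     // reflection metaspaces, small chunks while no medium chunk is in use and fewer
     // than _small_chunk_limit small chunks exist, medium chunks after that, and a
     // humongous size (aligned up to the smallest chunk size) when the request does not
     // fit a fixed chunk.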
3317 size_t SpaceManager::calc_chunk_size(size_t word_size) {
3318 
3319   // Decide between a small chunk and a medium chunk.  Up to
3320   // _small_chunk_limit small chunks can be allocated.
3321   // After that a medium chunk is preferred.
3322   size_t chunk_word_size;
3323 
3324   // Special case for anonymous metadata space.
3325   // Anonymous metadata space is usually small, with the majority within the 1K - 2K range and
3326   // rarely above 4K (64-bit JVM).
3327   // Instead of jumping to SmallChunk after the initial chunk is exhausted, keeping allocations
3328   // at SpecializedChunk size up to _anon_and_delegating_metadata_specialize_chunk_limit (4)
3329   // reduces space waste from 60+% to around 30%.
3330   if ((_space_type == Metaspace::AnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
3331       _mdtype == Metaspace::NonClassType &&
3332       sum_count_in_chunks_in_use(SpecializedIndex) < _anon_and_delegating_metadata_specialize_chunk_limit &&
3333       word_size + Metachunk::overhead() <= SpecializedChunk) {
3334     return SpecializedChunk;
3335   }
3336 
3337   if (chunks_in_use(MediumIndex) == NULL &&
3338       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
3339     chunk_word_size = (size_t) small_chunk_size();
3340     if (word_size + Metachunk::overhead() > small_chunk_size()) {
3341       chunk_word_size = medium_chunk_size();
3342     }
3343   } else {
3344     chunk_word_size = medium_chunk_size();
3345   }
3346 
3347   // Might still need a humongous chunk.  Enforce
3348   // humongous allocation sizes to be aligned up to
3349   // the smallest chunk size.
3350   size_t if_humongous_sized_chunk =
3351     align_up(word_size + Metachunk::overhead(),
3352                   smallest_chunk_size());
3353   chunk_word_size =
3354     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
3355 
3356   assert(!SpaceManager::is_humongous(word_size) ||
3357          chunk_word_size == if_humongous_sized_chunk,
3358          "Size calculation is wrong, word_size " SIZE_FORMAT
3359          " chunk_word_size " SIZE_FORMAT,
3360          word_size, chunk_word_size);
3361   Log(gc, metaspace, alloc) log;
3362   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
3363     log.debug("Metadata humongous allocation:");
3364     log.debug("  word_size " PTR_FORMAT, word_size);
3365     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
3366     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
3367   }
3368   return chunk_word_size;
3369 }
3370 
3371 void SpaceManager::track_metaspace_memory_usage() {
3372   if (is_init_completed()) {
3373     if (is_class()) {
3374       MemoryService::track_compressed_class_memory_usage();
3375     }
3376     MemoryService::track_metaspace_memory_usage();
3377   }
3378 }
3379 
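     // Slow path used when the current chunk cannot satisfy an allocation: computes a
     // new chunk size via calc_chunk_size(), obtains a chunk (from the chunk manager's
     // freelist or by expanding the virtual space list), adds it to the in-use lists
     // and allocates from it. Returns NULL if no new chunk could be obtained.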
3380 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
3381   assert_lock_strong(_lock);
3382   assert(vs_list()->current_virtual_space() != NULL,
3383          "Should have been set");
3384   assert(current_chunk() == NULL ||
3385          current_chunk()->allocate(word_size) == NULL,
3386          "Don't need to expand");
3387   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
3388 
3389   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3390     size_t words_left = 0;
3391     size_t words_used = 0;
3392     if (current_chunk() != NULL) {
3393       words_left = current_chunk()->free_word_size();
3394       words_used = current_chunk()->used_word_size();
3395     }
3396     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
3397                                        word_size, words_used, words_left);
3398   }
3399 
3400   // Get another chunk
3401   size_t chunk_word_size = calc_chunk_size(word_size);
3402   Metachunk* next = get_new_chunk(chunk_word_size);
3403 
3404   MetaWord* mem = NULL;
3405 
3406   // If a chunk was available, add it to the in-use chunk list
3407   // and do an allocation from it.
3408   if (next != NULL) {
3409     // Add to this manager's list of chunks in use.
3410     // If the new chunk is humongous, it was created to serve a single large allocation. In that
3411     // case it usually makes no sense to make it the current chunk, since the next allocation would
3412     // need to allocate a new chunk anyway, while we would now prematurely retire a perfectly
3413     // good chunk which could be used for more normal allocations.
3414     bool make_current = true;
3415     if (next->get_chunk_type() == HumongousIndex &&
3416         current_chunk() != NULL) {
3417       make_current = false;
3418     }
3419     add_chunk(next, make_current);
3420     mem = next->allocate(word_size);
3421   }
3422 
3423   // Track metaspace memory usage statistic.
3424   track_metaspace_memory_usage();
3425 
3426   return mem;
3427 }
3428 
3429 void SpaceManager::print_on(outputStream* st) const {
3430 
3431   for (ChunkIndex i = ZeroIndex;
3432        i < NumberOfInUseLists ;
3433        i = next_chunk_index(i) ) {
3434     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
3435                  p2i(chunks_in_use(i)),
3436                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
3437   }
3438   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
3439                " Humongous " SIZE_FORMAT,
3440                sum_waste_in_chunks_in_use(SmallIndex),
3441                sum_waste_in_chunks_in_use(MediumIndex),
3442                sum_waste_in_chunks_in_use(HumongousIndex));
3443   // block free lists
3444   if (block_freelists() != NULL) {
3445     st->print_cr("total in block free lists " SIZE_FORMAT,
3446       block_freelists()->total_size());
3447   }
3448 }
3449 
3450 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
3451                            Metaspace::MetaspaceType space_type,
3452                            Mutex* lock) :
3453   _mdtype(mdtype),
3454   _space_type(space_type),
3455   _allocated_blocks_words(0),
3456   _allocated_chunks_words(0),
3457   _allocated_chunks_count(0),
3458   _block_freelists(NULL),
3459   _lock(lock)
3460 {
3461   initialize();
3462 }
3463 
3464 void SpaceManager::inc_size_metrics(size_t words) {
3465   assert_lock_strong(MetaspaceExpand_lock);
3466   // Total word size of allocated Metachunks and count of allocated
3467   // Metachunks for this SpaceManager.
3468   _allocated_chunks_words = _allocated_chunks_words + words;
3469   _allocated_chunks_count++;
3470   // Global total of capacity in allocated Metachunks
3471   MetaspaceUtils::inc_capacity(mdtype(), words);
3472   // Global total of allocated Metablocks.
3473   // used_words_slow() includes the overhead in each
3474   // Metachunk so include it in the used when the
3475   // Metachunk is first added (so only added once per
3476   // Metachunk).
3477   MetaspaceUtils::inc_used(mdtype(), Metachunk::overhead());
3478 }
3479 
3480 void SpaceManager::inc_used_metrics(size_t words) {
3481   // Add to the per SpaceManager total
3482   Atomic::add(words, &_allocated_blocks_words);
3483   // Add to the global total
3484   MetaspaceUtils::inc_used(mdtype(), words);
3485 }
3486 
3487 void SpaceManager::dec_total_from_size_metrics() {
3488   MetaspaceUtils::dec_capacity(mdtype(), allocated_chunks_words());
3489   MetaspaceUtils::dec_used(mdtype(), allocated_blocks_words());
3490   // Also deduct the overhead per Metachunk
3491   MetaspaceUtils::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
3492 }
3493 
3494 void SpaceManager::initialize() {
3495   Metadebug::init_allocation_fail_alot_count();
3496   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3497     _chunks_in_use[i] = NULL;
3498   }
3499   _current_chunk = NULL;
3500   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
3501 }
3502 
3503 SpaceManager::~SpaceManager() {
3504   // This call takes this->_lock, which can't be done while holding MetaspaceExpand_lock.
3505   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
3506          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
3507          " allocated_chunks_words() " SIZE_FORMAT,
3508          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
3509 
3510   MutexLockerEx fcl(MetaspaceExpand_lock,
3511                     Mutex::_no_safepoint_check_flag);
3512 
3513   assert(sum_count_in_chunks_in_use() == allocated_chunks_count(),
3514          "sum_count_in_chunks_in_use() " SIZE_FORMAT
3515          " allocated_chunks_count() " SIZE_FORMAT,
3516          sum_count_in_chunks_in_use(), allocated_chunks_count());
3517 
3518   chunk_manager()->slow_locked_verify();
3519 
3520   dec_total_from_size_metrics();
3521 
3522   Log(gc, metaspace, freelist) log;
3523   if (log.is_trace()) {
3524     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
3525     ResourceMark rm;
3526     LogStream ls(log.trace());
3527     locked_print_chunks_in_use_on(&ls);
3528     if (block_freelists() != NULL) {
3529       block_freelists()->print_on(&ls);
3530     }
3531   }
3532 
3533   // Add all the chunks in use by this space manager
3534   // to the global list of free chunks.
3535 
3536   // Follow each list of chunks-in-use and add them to the
3537   // free lists.  Each list is NULL terminated.
3538 
3539   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
3540     Metachunk* chunks = chunks_in_use(i);
3541     chunk_manager()->return_chunk_list(i, chunks);
3542     set_chunks_in_use(i, NULL);
3543   }
3544 
3545   chunk_manager()->slow_locked_verify();
3546 
3547   if (_block_freelists != NULL) {
3548     delete _block_freelists;
3549   }
3550 }
3551 
3552 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
3553   assert_lock_strong(_lock);
3554   // Allocations and deallocations are in raw_word_size
3555   size_t raw_word_size = get_allocation_word_size(word_size);
3556   // Lazily create a block_freelist
3557   if (block_freelists() == NULL) {
3558     _block_freelists = new BlockFreelist();
3559   }
3560   block_freelists()->return_block(p, raw_word_size);
3561 }
3562 
3563 // Adds a chunk to the list of chunks in use.
3564 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
3565 
3566   assert_lock_strong(_lock);
3567   assert(new_chunk != NULL, "Should not be NULL");
3568   assert(new_chunk->next() == NULL, "Should not be on a list");
3569 
3570   new_chunk->reset_empty();
3571 
3572   // Find the correct list and set the current
3573   // chunk for that list.
3574   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
3575 
3576   if (make_current) {
3577     // If we are to make the chunk current, retire the old current chunk and replace
3578     // it with the new chunk.
3579     retire_current_chunk();
3580     set_current_chunk(new_chunk);
3581   }
3582 
3583   // Add the new chunk at the head of its respective chunk list.
3584   new_chunk->set_next(chunks_in_use(index));
3585   set_chunks_in_use(index, new_chunk);
3586 
3587   // Add to the running sum of capacity
3588   inc_size_metrics(new_chunk->word_size());
3589 
3590   assert(new_chunk->is_empty(), "Not ready for reuse");
3591   Log(gc, metaspace, freelist) log;
3592   if (log.is_trace()) {
3593     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT, sum_count_in_chunks_in_use());
3594     ResourceMark rm;
3595     LogStream ls(log.trace());
3596     new_chunk->print_on(&ls);
3597     chunk_manager()->locked_print_free_chunks(&ls);
3598   }
3599 }
3600 
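     // Retires the current chunk by handing its remaining free space to the block
     // freelist (via deallocate()), provided the remainder is at least
     // BlockFreelist::min_dictionary_size(); smaller remainders are left in the chunk
     // as waste.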
3601 void SpaceManager::retire_current_chunk() {
3602   if (current_chunk() != NULL) {
3603     size_t remaining_words = current_chunk()->free_word_size();
3604     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
3605       MetaWord* ptr = current_chunk()->allocate(remaining_words);
3606       deallocate(ptr, remaining_words);
3607       inc_used_metrics(remaining_words);
3608     }
3609   }
3610 }
3611 
3612 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
3613   // Get a chunk from the chunk freelist
3614   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
3615 
3616   if (next == NULL) {
3617     next = vs_list()->get_new_chunk(chunk_word_size,
3618                                     medium_chunk_bunch());
3619   }
3620 
3621   Log(gc, metaspace, alloc) log;
3622   if (log.is_debug() && next != NULL &&
3623       SpaceManager::is_humongous(next->word_size())) {
3624     log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());
3625   }
3626 
3627   return next;
3628 }
3629 
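     // External allocation entry point: first tries to reuse a previously deallocated
     // block from the block freelist (once that freelist has grown beyond
     // allocation_from_dictionary_limit), otherwise allocates fresh space from the
     // current chunk, growing by a new chunk if necessary.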
3630 MetaWord* SpaceManager::allocate(size_t word_size) {
3631   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3632   size_t raw_word_size = get_allocation_word_size(word_size);
3633   BlockFreelist* fl =  block_freelists();
3634   MetaWord* p = NULL;
3635   // Allocation from the dictionary is expensive in the sense that
3636   // the dictionary has to be searched for a size.  Don't allocate
3637   // from the dictionary until it starts to get fat.  Is this
3638   // a reasonable policy?  Maybe a skinny dictionary is fast enough
3639   // for allocations.  Do some profiling.  JJJ
3640   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
3641     p = fl->get_block(raw_word_size);
3642   }
3643   if (p == NULL) {
3644     p = allocate_work(raw_word_size);
3645   }
3646 
3647   return p;
3648 }
3649 
3650 // Returns the address of space allocated for "word_size".
3651 // This method does not know about blocks (Metablocks).
3652 MetaWord* SpaceManager::allocate_work(size_t word_size) {
3653   assert_lock_strong(_lock);
3654 #ifdef ASSERT
3655   if (Metadebug::test_metadata_failure()) {
3656     return NULL;
3657   }
3658 #endif
3659   // Is there space in the current chunk?
3660   MetaWord* result = NULL;
3661 
3662   if (current_chunk() != NULL) {
3663     result = current_chunk()->allocate(word_size);
3664   }
3665 
3666   if (result == NULL) {
3667     result = grow_and_allocate(word_size);
3668   }
3669 
3670   if (result != NULL) {
3671     inc_used_metrics(word_size);
3672     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
3673            "Head of the list is being allocated");
3674   }
3675 
3676   return result;
3677 }
3678 
3679 void SpaceManager::verify() {
3680   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3681     Metachunk* curr = chunks_in_use(i);
3682     while (curr != NULL) {
3683       DEBUG_ONLY(do_verify_chunk(curr);)
3684       assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
3685       curr = curr->next();
3686     }
3687   }
3688 }
3689 
3690 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
3691   assert(is_humongous(chunk->word_size()) ||
3692          chunk->word_size() == medium_chunk_size() ||
3693          chunk->word_size() == small_chunk_size() ||
3694          chunk->word_size() == specialized_chunk_size(),
3695          "Chunk size is wrong");
3696   return;
3697 }
3698 
3699 #ifdef ASSERT
3700 void SpaceManager::verify_allocated_blocks_words() {
3701   // Verification is only guaranteed at a safepoint.
3702   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
3703     "Verification can fail if the application is running");
3704   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
3705          "allocation total is not consistent " SIZE_FORMAT
3706          " vs " SIZE_FORMAT,
3707          allocated_blocks_words(), sum_used_in_chunks_in_use());
3708 }
3709 
3710 #endif
3711 
3712 void SpaceManager::dump(outputStream* const out) const {
3713   size_t curr_total = 0;
3714   size_t waste = 0;
3715   uint i = 0;
3716   size_t used = 0;
3717   size_t capacity = 0;
3718 
3719   // Add up statistics for all chunks in this SpaceManager.
3720   for (ChunkIndex index = ZeroIndex;
3721        index < NumberOfInUseLists;
3722        index = next_chunk_index(index)) {
3723     for (Metachunk* curr = chunks_in_use(index);
3724          curr != NULL;
3725          curr = curr->next()) {
3726       out->print("%d) ", i++);
3727       curr->print_on(out);
3728       curr_total += curr->word_size();
3729       used += curr->used_word_size();
3730       capacity += curr->word_size();
3731       waste += curr->free_word_size() + curr->overhead();
3732     }
3733   }
3734 
3735   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3736     if (block_freelists() != NULL) block_freelists()->print_on(out);
3737   }
3738 
3739   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
3740   // Free space isn't wasted.
3741   waste -= free;
3742 
3743   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
3744                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
3745                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
3746 }
3747 
3748 // MetaspaceUtils
3749 
3750 
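     // _capacity_words is only changed under the MetaspaceExpand_lock; _used_words may
     // be updated concurrently (e.g. by a concurrent sweep deallocating metadata) and
     // is therefore maintained with atomic operations.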
3751 size_t MetaspaceUtils::_capacity_words[] = {0, 0};
3752 volatile size_t MetaspaceUtils::_used_words[] = {0, 0};
3753 
3754 size_t MetaspaceUtils::free_bytes(Metaspace::MetadataType mdtype) {
3755   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3756   return list == NULL ? 0 : list->free_bytes();
3757 }
3758 
3759 size_t MetaspaceUtils::free_bytes() {
3760   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
3761 }
3762 
3763 void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
3764   assert_lock_strong(MetaspaceExpand_lock);
3765   assert(words <= capacity_words(mdtype),
3766          "About to decrement below 0: words " SIZE_FORMAT
3767          " is greater than _capacity_words[%u] " SIZE_FORMAT,
3768          words, mdtype, capacity_words(mdtype));
3769   _capacity_words[mdtype] -= words;
3770 }
3771 
3772 void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
3773   assert_lock_strong(MetaspaceExpand_lock);
3774   // Needs to be atomic
3775   _capacity_words[mdtype] += words;
3776 }
3777 
3778 void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
3779   assert(words <= used_words(mdtype),
3780          "About to decrement below 0: words " SIZE_FORMAT
3781          " is greater than _used_words[%u] " SIZE_FORMAT,
3782          words, mdtype, used_words(mdtype));
3783   // For CMS, deallocation of Metaspaces occurs during the
3784   // sweep, which is a concurrent phase.  Protection by the MetaspaceExpand_lock
3785   // is not enough, since allocation is done on a per-Metaspace basis
3786   // and is protected by the Metaspace lock.
3787   Atomic::sub(words, &_used_words[mdtype]);
3788 }
3789 
3790 void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
3791   // _used_words tracks allocations for
3792   // each piece of metadata.  Those allocations are
3793   // generally done concurrently by different application
3794   // threads so must be done atomically.
3795   Atomic::add(words, &_used_words[mdtype]);
3796 }
3797 
3798 size_t MetaspaceUtils::used_bytes_slow(Metaspace::MetadataType mdtype) {
3799   size_t used = 0;
3800   ClassLoaderDataGraphMetaspaceIterator iter;
3801   while (iter.repeat()) {
3802     ClassLoaderMetaspace* msp = iter.get_next();
3803     // Sum allocated_blocks_words for each metaspace
3804     if (msp != NULL) {
3805       used += msp->used_words_slow(mdtype);
3806     }
3807   }
3808   return used * BytesPerWord;
3809 }
3810 
3811 size_t MetaspaceUtils::free_bytes_slow(Metaspace::MetadataType mdtype) {
3812   size_t free = 0;
3813   ClassLoaderDataGraphMetaspaceIterator iter;
3814   while (iter.repeat()) {
3815     ClassLoaderMetaspace* msp = iter.get_next();
3816     if (msp != NULL) {
3817       free += msp->free_words_slow(mdtype);
3818     }
3819   }
3820   return free * BytesPerWord;
3821 }
3822 
3823 size_t MetaspaceUtils::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
3824   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
3825     return 0;
3826   }
3827   // Don't count the space in the freelists.  That space will be
3828   // added to the capacity calculation as needed.
3829   size_t capacity = 0;
3830   ClassLoaderDataGraphMetaspaceIterator iter;
3831   while (iter.repeat()) {
3832     ClassLoaderMetaspace* msp = iter.get_next();
3833     if (msp != NULL) {
3834       capacity += msp->capacity_words_slow(mdtype);
3835     }
3836   }
3837   return capacity * BytesPerWord;
3838 }
3839 
3840 size_t MetaspaceUtils::capacity_bytes_slow() {
3841 #ifdef PRODUCT
3842   // Use capacity_bytes() in PRODUCT instead of this function.
3843   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
3844 #endif
3845   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
3846   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
3847   assert(capacity_bytes() == class_capacity + non_class_capacity,
3848          "bad accounting: capacity_bytes() " SIZE_FORMAT
3849          " class_capacity + non_class_capacity " SIZE_FORMAT
3850          " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
3851          capacity_bytes(), class_capacity + non_class_capacity,
3852          class_capacity, non_class_capacity);
3853 
3854   return class_capacity + non_class_capacity;
3855 }
3856 
3857 size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
3858   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3859   return list == NULL ? 0 : list->reserved_bytes();
3860 }
3861 
3862 size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
3863   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3864   return list == NULL ? 0 : list->committed_bytes();
3865 }
3866 
3867 size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
3868 
3869 size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
3870   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
3871   if (chunk_manager == NULL) {
3872     return 0;
3873   }
3874   chunk_manager->slow_verify();
3875   return chunk_manager->free_chunks_total_words();
3876 }
3877 
3878 size_t MetaspaceUtils::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
3879   return free_chunks_total_words(mdtype) * BytesPerWord;
3880 }
3881 
3882 size_t MetaspaceUtils::free_chunks_total_words() {
3883   return free_chunks_total_words(Metaspace::ClassType) +
3884          free_chunks_total_words(Metaspace::NonClassType);
3885 }
3886 
3887 size_t MetaspaceUtils::free_chunks_total_bytes() {
3888   return free_chunks_total_words() * BytesPerWord;
3889 }
3890 
3891 bool MetaspaceUtils::has_chunk_free_list(Metaspace::MetadataType mdtype) {
3892   return Metaspace::get_chunk_manager(mdtype) != NULL;
3893 }
3894 
3895 MetaspaceChunkFreeListSummary MetaspaceUtils::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
3896   if (!has_chunk_free_list(mdtype)) {
3897     return MetaspaceChunkFreeListSummary();
3898   }
3899 
3900   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
3901   return cm->chunk_free_list_summary();
3902 }
3903 
3904 void MetaspaceUtils::print_metaspace_change(size_t prev_metadata_used) {
3905   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
3906                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
3907 }
3908 
3909 void MetaspaceUtils::print_on(outputStream* out) {
3910   Metaspace::MetadataType nct = Metaspace::NonClassType;
3911 
3912   out->print_cr(" Metaspace       "
3913                 "used "      SIZE_FORMAT "K, "
3914                 "capacity "  SIZE_FORMAT "K, "
3915                 "committed " SIZE_FORMAT "K, "
3916                 "reserved "  SIZE_FORMAT "K",
3917                 used_bytes()/K,
3918                 capacity_bytes()/K,
3919                 committed_bytes()/K,
3920                 reserved_bytes()/K);
3921 
3922   if (Metaspace::using_class_space()) {
3923     Metaspace::MetadataType ct = Metaspace::ClassType;
3924     out->print_cr("  class space    "
3925                   "used "      SIZE_FORMAT "K, "
3926                   "capacity "  SIZE_FORMAT "K, "
3927                   "committed " SIZE_FORMAT "K, "
3928                   "reserved "  SIZE_FORMAT "K",
3929                   used_bytes(ct)/K,
3930                   capacity_bytes(ct)/K,
3931                   committed_bytes(ct)/K,
3932                   reserved_bytes(ct)/K);
3933   }
3934 }
3935 
3936 // Print information for class space and data space separately.
3937 // This is almost the same as above.
3938 void MetaspaceUtils::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
3939   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
3940   size_t capacity_bytes = capacity_bytes_slow(mdtype);
3941   size_t used_bytes = used_bytes_slow(mdtype);
3942   size_t free_bytes = free_bytes_slow(mdtype);
3943   size_t used_and_free = used_bytes + free_bytes +
3944                            free_chunks_capacity_bytes;
3945   out->print_cr("  Chunk accounting: (used in chunks " SIZE_FORMAT
3946              "K + unused in chunks " SIZE_FORMAT "K  + "
3947              " capacity in free chunks " SIZE_FORMAT "K) = " SIZE_FORMAT
3948              "K  capacity in allocated chunks " SIZE_FORMAT "K",
3949              used_bytes / K,
3950              free_bytes / K,
3951              free_chunks_capacity_bytes / K,
3952              used_and_free / K,
3953              capacity_bytes / K);
3954   // Accounting can only be correct if we got the values during a safepoint
3955   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
3956 }
3957 
3958 // Print total fragmentation for class metaspaces
3959 void MetaspaceUtils::print_class_waste(outputStream* out) {
3960   assert(Metaspace::using_class_space(), "class metaspace not used");
3961   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
3962   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
3963   ClassLoaderDataGraphMetaspaceIterator iter;
3964   while (iter.repeat()) {
3965     ClassLoaderMetaspace* msp = iter.get_next();
3966     if (msp != NULL) {
3967       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
3968       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
3969       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
3970       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
3971       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
3972       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
3973       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
3974     }
3975   }
3976   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
3977                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
3978                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
3979                 "large count " SIZE_FORMAT,
3980                 cls_specialized_count, cls_specialized_waste,
3981                 cls_small_count, cls_small_waste,
3982                 cls_medium_count, cls_medium_waste, cls_humongous_count);
3983 }
3984 
3985 // Print total fragmentation for data and class metaspaces separately
3986 void MetaspaceUtils::print_waste(outputStream* out) {
3987   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
3988   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
3989 
3990   ClassLoaderDataGraphMetaspaceIterator iter;
3991   while (iter.repeat()) {
3992     ClassLoaderMetaspace* msp = iter.get_next();
3993     if (msp != NULL) {
3994       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
3995       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
3996       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
3997       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
3998       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
3999       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
4000       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
4001     }
4002   }
4003   out->print_cr("Total fragmentation waste (words) doesn't count free space");
4004   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
4005                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
4006                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
4007                         "large count " SIZE_FORMAT,
4008              specialized_count, specialized_waste, small_count,
4009              small_waste, medium_count, medium_waste, humongous_count);
4010   if (Metaspace::using_class_space()) {
4011     print_class_waste(out);
4012   }
4013 }
4014 
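     // Simple accumulator for metaspace statistics (capacity, used, free and
     // waste, in bytes). Used by PrintCLDMetaspaceInfoClosure below to sum up
     // and print per-class-loader totals.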
4015 class MetadataStats {
4016 private:
4017   size_t _capacity;
4018   size_t _used;
4019   size_t _free;
4020   size_t _waste;
4021 
4022 public:
4023   MetadataStats() : _capacity(0), _used(0), _free(0), _waste(0) { }
4024   MetadataStats(size_t capacity, size_t used, size_t free, size_t waste)
4025   : _capacity(capacity), _used(used), _free(free), _waste(waste) { }
4026 
4027   void add(const MetadataStats& stats) {
4028     _capacity += stats.capacity();
4029     _used += stats.used();
4030     _free += stats.free();
4031     _waste += stats.waste();
4032   }
4033 
4034   size_t capacity() const { return _capacity; }
4035   size_t used() const     { return _used; }
4036   size_t free() const     { return _free; }
4037   size_t waste() const    { return _waste; }
4038 
4039   void print_on(outputStream* out, size_t scale) const;
4040 };
4041 
4042 
4043 void MetadataStats::print_on(outputStream* out, size_t scale) const {
4044   const char* unit = scale_unit(scale);
4045   out->print_cr("capacity=%10.2f%s used=%10.2f%s free=%10.2f%s waste=%10.2f%s",
4046     (float)capacity() / scale, unit,
4047     (float)used() / scale, unit,
4048     (float)free() / scale, unit,
4049     (float)waste() / scale, unit);
4050 }
4051 
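     // CLDClosure which prints metaspace usage for each ClassLoaderData and
     // accumulates totals, both overall and for anonymous classes. The summary
     // is printed from the destructor, after all CLDs have been visited.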
4052 class PrintCLDMetaspaceInfoClosure : public CLDClosure {
4053 private:
4054   outputStream*  _out;
4055   size_t         _scale;
4056 
4057   size_t         _total_count;
4058   MetadataStats  _total_metadata;
4059   MetadataStats  _total_class;
4060 
4061   size_t         _total_anon_count;
4062   MetadataStats  _total_anon_metadata;
4063   MetadataStats  _total_anon_class;
4064 
4065 public:
4066   PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale = K)
4067   : _out(out), _scale(scale), _total_count(0), _total_anon_count(0) { }
4068 
4069   ~PrintCLDMetaspaceInfoClosure() {
4070     print_summary();
4071   }
4072 
4073   void do_cld(ClassLoaderData* cld) {
4074     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
4075 
4076     if (cld->is_unloading()) return;
4077     ClassLoaderMetaspace* msp = cld->metaspace_or_null();
4078     if (msp == NULL) {
4079       return;
4080     }
4081 
4082     bool anonymous = false;
4083     if (cld->is_anonymous()) {
4084       _out->print_cr("ClassLoader: for anonymous class");
4085       anonymous = true;
4086     } else {
4087       ResourceMark rm;
4088       _out->print_cr("ClassLoader: %s", cld->loader_name());
4089     }
4090 
4091     print_metaspace(msp, anonymous);
4092     _out->cr();
4093   }
4094 
4095 private:
4096   void print_metaspace(ClassLoaderMetaspace* msp, bool anonymous);
4097   void print_summary() const;
4098 };
4099 
4100 void PrintCLDMetaspaceInfoClosure::print_metaspace(ClassLoaderMetaspace* msp, bool anonymous){
4101   assert(msp != NULL, "Sanity");
4102   SpaceManager* vsm = msp->vsm();
4103   const char* unit = scale_unit(_scale);
4104 
4105   size_t capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4106   size_t used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4107   size_t free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4108   size_t waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4109 
4110   _total_count ++;
4111   MetadataStats metadata_stats(capacity, used, free, waste);
4112   _total_metadata.add(metadata_stats);
4113 
4114   if (anonymous) {
4115     _total_anon_count ++;
4116     _total_anon_metadata.add(metadata_stats);
4117   }
4118 
4119   _out->print("  Metadata   ");
4120   metadata_stats.print_on(_out, _scale);
4121 
4122   if (Metaspace::using_class_space()) {
4123     vsm = msp->class_vsm();
4124 
4125     capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4126     used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4127     free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4128     waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4129 
4130     MetadataStats class_stats(capacity, used, free, waste);
4131     _total_class.add(class_stats);
4132 
4133     if (anonymous) {
4134       _total_anon_class.add(class_stats);
4135     }
4136 
4137     _out->print("  Class data ");
4138     class_stats.print_on(_out, _scale);
4139   }
4140 }
4141 
4142 void PrintCLDMetaspaceInfoClosure::print_summary() const {
4143   const char* unit = scale_unit(_scale);
4144   _out->cr();
4145   _out->print_cr("Summary:");
4146 
4147   MetadataStats total;
4148   total.add(_total_metadata);
4149   total.add(_total_class);
4150 
4151   _out->print("  Total class loaders=" SIZE_FORMAT_W(6) " ", _total_count);
4152   total.print_on(_out, _scale);
4153 
4154   _out->print("                    Metadata ");
4155   _total_metadata.print_on(_out, _scale);
4156 
4157   if (Metaspace::using_class_space()) {
4158     _out->print("                  Class data ");
4159     _total_class.print_on(_out, _scale);
4160   }
4161   _out->cr();
4162 
4163   MetadataStats total_anon;
4164   total_anon.add(_total_anon_metadata);
4165   total_anon.add(_total_anon_class);
4166 
4167   _out->print("For anonymous classes=" SIZE_FORMAT_W(6) " ", _total_anon_count);
4168   total_anon.print_on(_out, _scale);
4169 
4170   _out->print("                    Metadata ");
4171   _total_anon_metadata.print_on(_out, _scale);
4172 
4173   if (Metaspace::using_class_space()) {
4174     _out->print("                  Class data ");
4175     _total_anon_class.print_on(_out, _scale);
4176   }
4177 }
4178 
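     // Detailed metaspace report, e.g. for NMT output: overall reserved and
     // committed sizes, chunk manager contents, and per-class-loader usage.
     // The CLD walk below requires a safepoint.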
4179 void MetaspaceUtils::print_metadata_for_nmt(outputStream* out, size_t scale) {
4180   const char* unit = scale_unit(scale);
4181   out->print_cr("Metaspaces:");
4182   out->print_cr("  Metadata space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
4183     reserved_bytes(Metaspace::NonClassType) / scale, unit,
4184     committed_bytes(Metaspace::NonClassType) / scale, unit);
4185   if (Metaspace::using_class_space()) {
4186     out->print_cr("  Class    space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
4187     reserved_bytes(Metaspace::ClassType) / scale, unit,
4188     committed_bytes(Metaspace::ClassType) / scale, unit);
4189   }
4190 
4191   out->cr();
4192   ChunkManager::print_all_chunkmanagers(out, scale);
4193 
4194   out->cr();
4195   out->print_cr("Per-classloader metadata:");
4196   out->cr();
4197 
4198   PrintCLDMetaspaceInfoClosure cl(out, scale);
4199   ClassLoaderDataGraph::cld_do(&cl);
4200 }
4201 
4202 
4203 // Dump global metaspace things from the end of ClassLoaderDataGraph
4204 void MetaspaceUtils::dump(outputStream* out) {
4205   out->print_cr("All Metaspace:");
4206   out->print("data space: "); print_on(out, Metaspace::NonClassType);
4207   out->print("class space: "); print_on(out, Metaspace::ClassType);
4208   print_waste(out);
4209 }
4210 
4211 // Prints an ASCII representation of the given space.
4212 void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
4213   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
4214   const bool for_class = (mdtype == Metaspace::ClassType);
4215   VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
4216   if (vsl != NULL) {
4217     if (for_class) {
4218       if (!Metaspace::using_class_space()) {
4219         out->print_cr("No Class Space.");
4220         return;
4221       }
4222       out->print_raw("---- Metaspace Map (Class Space) ----");
4223     } else {
4224       out->print_raw("---- Metaspace Map (Non-Class Space) ----");
4225     }
4226     // Print legend:
4227     out->cr();
4228     out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
4229     out->cr();
4231     vsl->print_map(out);
4232     out->cr();
4233   }
4234 }
4235 
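     // Verify the free chunk lists of the global chunk managers.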
4236 void MetaspaceUtils::verify_free_chunks() {
4237   Metaspace::chunk_manager_metadata()->verify();
4238   if (Metaspace::using_class_space()) {
4239     Metaspace::chunk_manager_class()->verify();
4240   }
4241 }
4242 
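     // Debug-only check that the running capacity counters agree with the
     // values recomputed by walking all class loader metaspaces (*_slow).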
4243 void MetaspaceUtils::verify_capacity() {
4244 #ifdef ASSERT
4245   size_t running_sum_capacity_bytes = capacity_bytes();
4246   // For purposes of the running sum of capacity, verify against capacity
4247   size_t capacity_in_use_bytes = capacity_bytes_slow();
4248   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
4249          "capacity_words() * BytesPerWord " SIZE_FORMAT
4250          " capacity_bytes_slow() " SIZE_FORMAT,
4251          running_sum_capacity_bytes, capacity_in_use_bytes);
4252   for (Metaspace::MetadataType i = Metaspace::ClassType;
4253        i < Metaspace::MetadataTypeCount;
4254        i = (Metaspace::MetadataType)(i + 1)) {
4255     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
4256     assert(capacity_bytes(i) == capacity_in_use_bytes,
4257            "capacity_bytes(%u) " SIZE_FORMAT
4258            " capacity_bytes_slow(%u) " SIZE_FORMAT,
4259            i, capacity_bytes(i), i, capacity_in_use_bytes);
4260   }
4261 #endif
4262 }
4263 
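     // Debug-only check that the running used-bytes counters agree with the
     // values recomputed by walking all class loader metaspaces (*_slow).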
4264 void MetaspaceUtils::verify_used() {
4265 #ifdef ASSERT
4266   size_t running_sum_used_bytes = used_bytes();
4267   // For purposes of the running sum of used, verify against used
4268   size_t used_in_use_bytes = used_bytes_slow();
4269   assert(running_sum_used_bytes == used_in_use_bytes,
4270          "used_bytes() " SIZE_FORMAT
4271          " used_bytes_slow() " SIZE_FORMAT,
4272          running_sum_used_bytes, used_in_use_bytes);
4273   for (Metaspace::MetadataType i = Metaspace::ClassType;
4274        i < Metaspace::MetadataTypeCount;
4275        i = (Metaspace::MetadataType)(i + 1)) {
4276     size_t used_in_use_bytes = used_bytes_slow(i);
4277     assert(used_bytes(i) == used_in_use_bytes,
4278            "used_bytes(%u) " SIZE_FORMAT
4279            " used_bytes_slow(%u) " SIZE_FORMAT,
4280            i, used_bytes(i), i, used_in_use_bytes);
4281   }
4282 #endif
4283 }
4284 
4285 void MetaspaceUtils::verify_metrics() {
4286   verify_capacity();
4287   verify_used();
4288 }
4289 
4290 
4291 // Metaspace methods
4292 
4293 size_t Metaspace::_first_chunk_word_size = 0;
4294 size_t Metaspace::_first_class_chunk_word_size = 0;
4295 
4296 size_t Metaspace::_commit_alignment = 0;
4297 size_t Metaspace::_reserve_alignment = 0;
4298 
4299 VirtualSpaceList* Metaspace::_space_list = NULL;
4300 VirtualSpaceList* Metaspace::_class_space_list = NULL;
4301 
4302 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
4303 ChunkManager* Metaspace::_chunk_manager_class = NULL;
4304 
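     // Multiplier applied to the boot class loader's initial chunk size when
     // sizing the first virtual space reservation; see ergo_initialize() and
     // global_initialize().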
4305 #define VIRTUALSPACEMULTIPLIER 2
4306 
4307 #ifdef _LP64
4308 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
4309 
4310 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
4311   assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
4312   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
4313   // narrow_klass_base is the lower of the metaspace base and the cds base
4314   // (if cds is enabled).  The narrow_klass_shift depends on the distance
4315   // between the lower base and higher address.
4316   address lower_base;
4317   address higher_address;
4318 #if INCLUDE_CDS
4319   if (UseSharedSpaces) {
4320     higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4321                           (address)(metaspace_base + compressed_class_space_size()));
4322     lower_base = MIN2(metaspace_base, cds_base);
4323   } else
4324 #endif
4325   {
4326     higher_address = metaspace_base + compressed_class_space_size();
4327     lower_base = metaspace_base;
4328 
4329     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
4330     // If compressed class space fits in lower 32G, we don't need a base.
4331     if (higher_address <= (address)klass_encoding_max) {
4332       lower_base = 0; // Effectively lower base is zero.
4333     }
4334   }
4335 
4336   Universe::set_narrow_klass_base(lower_base);
4337 
4338   // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
4339   // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
4340   // how the dump-time narrow_klass_shift is set. Although CDS can also
4341   // work in zero-shift mode, it uses LogKlassAlignmentInBytes for the
4342   // klass shift, to be consistent with AOT, so that archived Java heap
4343   // objects can be used at the same time as AOT code.
4344   if (!UseSharedSpaces
4345       && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
4346     Universe::set_narrow_klass_shift(0);
4347   } else {
4348     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
4349   }
4350   AOTLoader::set_narrow_klass_shift();
4351 }
4352 
4353 #if INCLUDE_CDS
4354 // Return TRUE if the specified metaspace_base and cds_base are close enough
4355 // to work with compressed klass pointers.
4356 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
4357   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
4358   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4359   address lower_base = MIN2((address)metaspace_base, cds_base);
4360   address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4361                                 (address)(metaspace_base + compressed_class_space_size()));
4362   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
4363 }
4364 #endif
4365 
4366 // Try to allocate the metaspace at the requested addr.
4367 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
4368   assert(!DumpSharedSpaces, "compress klass space is allocated by MetaspaceShared class.");
4369   assert(using_class_space(), "called improperly");
4370   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4371   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
4372          "Metaspace size is too big");
4373   assert_is_aligned(requested_addr, _reserve_alignment);
4374   assert_is_aligned(cds_base, _reserve_alignment);
4375   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
4376 
4377   // Don't use large pages for the class space.
4378   bool large_pages = false;
4379 
4380 #if !(defined(AARCH64) || defined(AIX))
4381   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
4382                                              _reserve_alignment,
4383                                              large_pages,
4384                                              requested_addr);
4385 #else // AARCH64 || AIX
4386   ReservedSpace metaspace_rs;
4387 
4388   // Our compressed klass pointers may fit nicely into the lower 32
4389   // bits.
4390   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
4391     metaspace_rs = ReservedSpace(compressed_class_space_size(),
4392                                  _reserve_alignment,
4393                                  large_pages,
4394                                  requested_addr);
4395   }
4396 
4397   if (!metaspace_rs.is_reserved()) {
4398     // Aarch64: Try to align metaspace so that we can decode a compressed
4399     // klass with a single MOVK instruction.  We can do this iff the
4400     // compressed class base is a multiple of 4G.
4401     // Aix: Search for a place where we can find memory. If we need to load
4402     // the base, 4G alignment is helpful, too.
4403     size_t increment = AARCH64_ONLY(4*)G;
4404     for (char *a = align_up(requested_addr, increment);
4405          a < (char*)(1024*G);
4406          a += increment) {
4407       if (a == (char *)(32*G)) {
4408         // Go faster from here on. Zero-based is no longer possible.
4409         increment = 4*G;
4410       }
4411 
4412 #if INCLUDE_CDS
4413       if (UseSharedSpaces
4414           && !can_use_cds_with_metaspace_addr(a, cds_base)) {
4415         // We failed to find an aligned base that will reach.  Fall
4416         // back to using our requested addr.
4417         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4418                                      _reserve_alignment,
4419                                      large_pages,
4420                                      requested_addr);
4421         break;
4422       }
4423 #endif
4424 
4425       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4426                                    _reserve_alignment,
4427                                    large_pages,
4428                                    a);
4429       if (metaspace_rs.is_reserved())
4430         break;
4431     }
4432   }
4433 
4434 #endif // AARCH64 || AIX
4435 
4436   if (!metaspace_rs.is_reserved()) {
4437 #if INCLUDE_CDS
4438     if (UseSharedSpaces) {
4439       size_t increment = align_up(1*G, _reserve_alignment);
4440 
4441       // Keep trying to allocate the metaspace, increasing the requested_addr
4442       // by 1GB each time, until we reach an address that will no longer allow
4443       // use of CDS with compressed klass pointers.
4444       char *addr = requested_addr;
4445       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
4446              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
4447         addr = addr + increment;
4448         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4449                                      _reserve_alignment, large_pages, addr);
4450       }
4451     }
4452 #endif
4453     // If no successful allocation then try to allocate the space anywhere.  If
4454     // that fails then OOM doom.  At this point we cannot try allocating the
4455     // metaspace as if UseCompressedClassPointers is off because too much
4456     // initialization has happened that depends on UseCompressedClassPointers.
4457     // So, UseCompressedClassPointers cannot be turned off at this point.
4458     if (!metaspace_rs.is_reserved()) {
4459       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4460                                    _reserve_alignment, large_pages);
4461       if (!metaspace_rs.is_reserved()) {
4462         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
4463                                               compressed_class_space_size()));
4464       }
4465     }
4466   }
4467 
4468   // If we got here then the metaspace got allocated.
4469   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
4470 
4471 #if INCLUDE_CDS
4472   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
4473   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
4474     FileMapInfo::stop_sharing_and_unmap(
4475         "Could not allocate metaspace at a compatible address");
4476   }
4477 #endif
4478   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
4479                                   UseSharedSpaces ? (address)cds_base : 0);
4480 
4481   initialize_class_space(metaspace_rs);
4482 
4483   LogTarget(Trace, gc, metaspace) lt;
4484   if (lt.is_enabled()) {
4485     ResourceMark rm;
4486     LogStream ls(lt);
4487     print_compressed_class_space(&ls, requested_addr);
4488   }
4489 }
4490 
4491 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
4492   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
4493                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
4494   if (_class_space_list != NULL) {
4495     address base = (address)_class_space_list->current_virtual_space()->bottom();
4496     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
4497                  compressed_class_space_size(), p2i(base));
4498     if (requested_addr != 0) {
4499       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
4500     }
4501     st->cr();
4502   }
4503 }
4504 
4505 // For UseCompressedClassPointers the class space is reserved above the top of
4506 // the Java heap.  The argument passed in is at the base of the compressed space.
4507 void Metaspace::initialize_class_space(ReservedSpace rs) {
4508   // The reserved space size may be bigger because of alignment, esp with UseLargePages
4509   assert(rs.size() >= CompressedClassSpaceSize,
4510          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
4511   assert(using_class_space(), "Must be using class space");
4512   _class_space_list = new VirtualSpaceList(rs);
4513   _chunk_manager_class = new ChunkManager(true/*is_class*/);
4514 
4515   if (!_class_space_list->initialization_succeeded()) {
4516     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
4517   }
4518 }
4519 
4520 #endif
4521 
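     // Ergonomic initialization of the metaspace flags: derive the commit and
     // reserve alignments from the chosen page size and align MaxMetaspaceSize,
     // MetaspaceSize, the expansion flags and CompressedClassSpaceSize to them.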
4522 void Metaspace::ergo_initialize() {
4523   if (DumpSharedSpaces) {
4524     // Using large pages when dumping the shared archive is currently not implemented.
4525     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
4526   }
4527 
4528   size_t page_size = os::vm_page_size();
4529   if (UseLargePages && UseLargePagesInMetaspace) {
4530     page_size = os::large_page_size();
4531   }
4532 
4533   _commit_alignment  = page_size;
4534   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
4535 
4536   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
4537   // override if MaxMetaspaceSize was set on the command line or not.
4538   // This information is needed later to conform to the specification of the
4539   // java.lang.management.MemoryUsage API.
4540   //
4541   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
4542   // globals.hpp to the aligned value, but this is not possible, since the
4543   // alignment depends on other flags being parsed.
4544   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
4545 
4546   if (MetaspaceSize > MaxMetaspaceSize) {
4547     MetaspaceSize = MaxMetaspaceSize;
4548   }
4549 
4550   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
4551 
4552   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
4553 
4554   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
4555   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
4556 
4557   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
4558 
4559   // Initial virtual space size will be calculated at global_initialize()
4560   size_t min_metaspace_sz =
4561       VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
4562   if (UseCompressedClassPointers) {
4563     if ((min_metaspace_sz + CompressedClassSpaceSize) > MaxMetaspaceSize) {
4564       if (min_metaspace_sz >= MaxMetaspaceSize) {
4565         vm_exit_during_initialization("MaxMetaspaceSize is too small.");
4566       } else {
4567         FLAG_SET_ERGO(size_t, CompressedClassSpaceSize,
4568                       MaxMetaspaceSize - min_metaspace_sz);
4569       }
4570     }
4571   } else if (min_metaspace_sz >= MaxMetaspaceSize) {
4572     FLAG_SET_ERGO(size_t, InitialBootClassLoaderMetaspaceSize,
4573                   min_metaspace_sz);
4574   }
4575 
4576   set_compressed_class_space_size(CompressedClassSpaceSize);
4577 }
4578 
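     // Global one-time initialization: set up CDS dump or runtime spaces when
     // requested, reserve the compressed class space (64-bit only), and create
     // the global non-class virtual space list and chunk manager.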
4579 void Metaspace::global_initialize() {
4580   MetaspaceGC::initialize();
4581 
4582 #if INCLUDE_CDS
4583   if (DumpSharedSpaces) {
4584     MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
4585   } else if (UseSharedSpaces) {
4586     // If any of the archived space fails to map, UseSharedSpaces
4587     // is reset to false. Fall through to the
4588     // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
4589     // metaspace.
4590     MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
4591   }
4592 
4593   if (!DumpSharedSpaces && !UseSharedSpaces)
4594 #endif // INCLUDE_CDS
4595   {
4596 #ifdef _LP64
4597     if (using_class_space()) {
4598       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
4599       allocate_metaspace_compressed_klass_ptrs(base, 0);
4600     }
4601 #endif // _LP64
4602   }
4603 
4604   // Initialize these before initializing the VirtualSpaceList
4605   _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
4606   _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
4607   // Make the first class chunk bigger than a medium chunk so it's not put
4608   // on the medium chunk list.  The next chunk will be small and progress
4609   // from there.  This size was calibrated by running -version.
4610   _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
4611                                      (CompressedClassSpaceSize/BytesPerWord)*2);
4612   _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
4613   // Arbitrarily set the initial virtual space to a multiple
4614   // of the boot class loader size.
4615   size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
4616   word_size = align_up(word_size, Metaspace::reserve_alignment_words());
4617 
4618   // Initialize the list of virtual spaces.
4619   _space_list = new VirtualSpaceList(word_size);
4620   _chunk_manager_metadata = new ChunkManager(false/*is_class*/);
4621 
4622   if (!_space_list->initialization_succeeded()) {
4623     vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
4624   }
4625 
4626   _tracer = new MetaspaceTracer();
4627 }
4628 
4629 void Metaspace::post_initialize() {
4630   MetaspaceGC::post_initialize();
4631 }
4632 
4633 void Metaspace::verify_global_initialization() {
4634   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
4635   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
4636 
4637   if (using_class_space()) {
4638     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
4639     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
4640   }
4641 }
4642 
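     // Round a word size up so that the corresponding byte size is aligned to
     // the reserved-space allocation alignment.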
4643 size_t Metaspace::align_word_size_up(size_t word_size) {
4644   size_t byte_size = word_size * wordSize;
4645   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
4646 }
4647 
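     // Central allocation entry point for metadata. If the initial allocation
     // fails, the GC is asked to reclaim metadata and the allocation is retried;
     // if it still fails, a metaspace OOM is reported. The result is zero-filled.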
4648 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
4649                               MetaspaceObj::Type type, TRAPS) {
4650   assert(!_frozen, "sanity");
4651   if (HAS_PENDING_EXCEPTION) {
4652     assert(false, "Should not allocate with exception pending");
4653     return NULL;  // caller does a CHECK_NULL too
4654   }
4655 
4656   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
4657         "ClassLoaderData::the_null_class_loader_data() should have been used.");
4658 
4659   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
4660 
4661   // Try to allocate metadata.
4662   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
4663 
4664   if (result == NULL) {
4665     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
4666 
4667     // Allocation failed.
4668     if (is_init_completed() && !(DumpSharedSpaces && THREAD->is_VM_thread())) {
4669       // Only start a GC if the bootstrapping has completed.
4670       // Also, we cannot GC if we are at the end of the CDS dumping stage which runs inside
4671       // the VM thread.
4672 
4673       // Try to clean out some memory and retry.
4674       result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
4675     }
4676   }
4677 
4678   if (result == NULL) {
4679     if (DumpSharedSpaces) {
4680       // CDS dumping keeps loading classes, so if we hit an OOM we probably will keep hitting OOM.
4681       // We should abort to avoid generating a potentially bad archive.
4682       tty->print_cr("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
4683           MetaspaceObj::type_name(type), word_size * BytesPerWord);
4684       tty->print_cr("Please increase MaxMetaspaceSize (currently " SIZE_FORMAT " bytes).", MaxMetaspaceSize);
4685       vm_exit(1);
4686     }
4687     report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
4688   }
4689 
4690   // Zero initialize.
4691   Copy::fill_to_words((HeapWord*)result, word_size, 0);
4692 
4693   return result;
4694 }
4695 
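     // Out-of-memory handling for metadata allocations: log diagnostics,
     // distinguish "Metaspace" from "Compressed class space" exhaustion, post
     // the JVMTI resource-exhausted event and throw the matching
     // OutOfMemoryError.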
4696 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
4697   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
4698 
4699   // If result is still null, we are out of memory.
4700   Log(gc, metaspace, freelist) log;
4701   if (log.is_info()) {
4702     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
4703              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
4704     ResourceMark rm;
4705     if (log.is_debug()) {
4706       if (loader_data->metaspace_or_null() != NULL) {
4707         LogStream ls(log.debug());
4708         loader_data->print_value_on(&ls);
4709       }
4710     }
4711     LogStream ls(log.info());
4712     MetaspaceUtils::dump(&ls);
4713     MetaspaceUtils::print_metaspace_map(&ls, mdtype);
4714     ChunkManager::print_all_chunkmanagers(&ls);
4715   }
4716 
4717   bool out_of_compressed_class_space = false;
4718   if (is_class_space_allocation(mdtype)) {
4719     ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
4720     out_of_compressed_class_space =
4721       MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
4722       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
4723       CompressedClassSpaceSize;
4724   }
4725 
4726   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
4727   const char* space_string = out_of_compressed_class_space ?
4728     "Compressed class space" : "Metaspace";
4729 
4730   report_java_out_of_memory(space_string);
4731 
4732   if (JvmtiExport::should_post_resource_exhausted()) {
4733     JvmtiExport::post_resource_exhausted(
4734         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
4735         space_string);
4736   }
4737 
4738   if (!is_init_completed()) {
4739     vm_exit_during_initialization("OutOfMemoryError", space_string);
4740   }
4741 
4742   if (out_of_compressed_class_space) {
4743     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
4744   } else {
4745     THROW_OOP(Universe::out_of_memory_error_metaspace());
4746   }
4747 }
4748 
4749 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
4750   switch (mdtype) {
4751     case Metaspace::ClassType: return "Class";
4752     case Metaspace::NonClassType: return "Metadata";
4753     default:
4754       assert(false, "Got bad mdtype: %d", (int) mdtype);
4755       return NULL;
4756   }
4757 }
4758 
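     // Release memory for the given metadata type by purging empty virtual
     // spaces from its space list; their chunks are removed from the chunk
     // manager's free lists.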
4759 void Metaspace::purge(MetadataType mdtype) {
4760   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
4761 }
4762 
4763 void Metaspace::purge() {
4764   MutexLockerEx cl(MetaspaceExpand_lock,
4765                    Mutex::_no_safepoint_check_flag);
4766   purge(NonClassType);
4767   if (using_class_space()) {
4768     purge(ClassType);
4769   }
4770 }
4771 
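     // Returns true if ptr points into any metaspace, including the shared
     // (CDS) metaspace.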
4772 bool Metaspace::contains(const void* ptr) {
4773   if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
4774     return true;
4775   }
4776   return contains_non_shared(ptr);
4777 }
4778 
4779 bool Metaspace::contains_non_shared(const void* ptr) {
4780   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
4781      return true;
4782   }
4783 
4784   return get_space_list(NonClassType)->contains(ptr);
4785 }
4786 
4787 // ClassLoaderMetaspace
4788 
4789 ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type) {
4790   initialize(lock, type);
4791 }
4792 
4793 ClassLoaderMetaspace::~ClassLoaderMetaspace() {
4794   delete _vsm;
4795   if (Metaspace::using_class_space()) {
4796     delete _class_vsm;
4797   }
4798 }
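
     // Allocate the initial chunk for the given metadata type and hand it to
     // the corresponding SpaceManager as its current chunk.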
4799 void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4800   Metachunk* chunk = get_initialization_chunk(type, mdtype);
4801   if (chunk != NULL) {
4802     // Add to this manager's list of chunks in use and make it the current_chunk().
4803     get_space_manager(mdtype)->add_chunk(chunk, true);
4804   }
4805 }
4806 
4807 Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4808   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
4809 
4810   // Get a chunk from the chunk freelist
4811   Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
4812 
4813   if (chunk == NULL) {
4814     chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
4815                                                   get_space_manager(mdtype)->medium_chunk_bunch());
4816   }
4817 
4818   return chunk;
4819 }
4820 
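     // Create the space managers (non-class and, when compressed class pointers
     // are used, class) and allocate their initial chunks under the expand lock.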
4821 void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
4822   Metaspace::verify_global_initialization();
4823 
4824   // Allocate SpaceManager for metadata objects.
4825   _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);
4826 
4827   if (Metaspace::using_class_space()) {
4828     // Allocate SpaceManager for classes.
4829     _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
4830   }
4831 
4832   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
4833 
4834   // Allocate chunk for metadata objects
4835   initialize_first_chunk(type, Metaspace::NonClassType);
4836 
4837   // Allocate chunk for class metadata objects
4838   if (Metaspace::using_class_space()) {
4839     initialize_first_chunk(type, Metaspace::ClassType);
4840   }
4841 }
4842 
4843 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4844   Metaspace::assert_not_frozen();
4845   // Don't use class_vsm() unless UseCompressedClassPointers is true.
4846   if (Metaspace::is_class_space_allocation(mdtype)) {
4847     return  class_vsm()->allocate(word_size);
4848   } else {
4849     return  vsm()->allocate(word_size);
4850   }
4851 }
4852 
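     // Try to raise the GC high-water mark (capacity_until_GC) by at least
     // delta_bytes and then allocate; see the comment on the loop below for
     // how concurrent increments of the high-water mark are handled.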
4853 MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4854   Metaspace::assert_not_frozen();
4855   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
4856   assert(delta_bytes > 0, "Must be");
4857 
4858   size_t before = 0;
4859   size_t after = 0;
4860   MetaWord* res;
4861   bool incremented;
4862 
4863   // Each thread increments the HWM at most once. Even if the thread fails to increment
4864   // the HWM, an allocation is still attempted. This is because another thread must then
4865   // have incremented the HWM and therefore the allocation might still succeed.
4866   do {
4867     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
4868     res = allocate(word_size, mdtype);
4869   } while (!incremented && res == NULL);
4870 
4871   if (incremented) {
4872     Metaspace::tracer()->report_gc_threshold(before, after,
4873                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
4874     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
4875   }
4876 
4877   return res;
4878 }
4879 
4880 size_t ClassLoaderMetaspace::used_words_slow(Metaspace::MetadataType mdtype) const {
4881   if (mdtype == Metaspace::ClassType) {
4882     return Metaspace::using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
4883   } else {
4884     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
4885   }
4886 }
4887 
4888 size_t ClassLoaderMetaspace::free_words_slow(Metaspace::MetadataType mdtype) const {
4889   Metaspace::assert_not_frozen();
4890   if (mdtype == Metaspace::ClassType) {
4891     return Metaspace::using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
4892   } else {
4893     return vsm()->sum_free_in_chunks_in_use();
4894   }
4895 }
4896 
4897 // Space capacity in the Metaspace.  It includes
4898 // space in the list of chunks from which allocations
4899 // have been made. It does not include space in the global freelist,
4900 // nor the free space in each chunk's block dictionary, since that
4901 // space is already counted in some chunk.
4902 size_t ClassLoaderMetaspace::capacity_words_slow(Metaspace::MetadataType mdtype) const {
4903   if (mdtype == Metaspace::ClassType) {
4904     return Metaspace::using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
4905   } else {
4906     return vsm()->sum_capacity_in_chunks_in_use();
4907   }
4908 }
4909 
4910 size_t ClassLoaderMetaspace::used_bytes_slow(Metaspace::MetadataType mdtype) const {
4911   return used_words_slow(mdtype) * BytesPerWord;
4912 }
4913 
4914 size_t ClassLoaderMetaspace::capacity_bytes_slow(Metaspace::MetadataType mdtype) const {
4915   return capacity_words_slow(mdtype) * BytesPerWord;
4916 }
4917 
4918 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
4919   return vsm()->allocated_blocks_bytes() +
4920       (Metaspace::using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
4921 }
4922 
4923 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
4924   return vsm()->allocated_chunks_bytes() +
4925       (Metaspace::using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
4926 }
4927 
4928 void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
4929   Metaspace::assert_not_frozen();
4930   assert(!SafepointSynchronize::is_at_safepoint()
4931          || Thread::current()->is_VM_thread(), "should be the VM thread");
4932 
4933   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
4934 
4935   if (is_class && Metaspace::using_class_space()) {
4936     class_vsm()->deallocate(ptr, word_size);
4937   } else {
4938     vsm()->deallocate(ptr, word_size);
4939   }
4940 }
4941 
4942 size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
4943   assert(Metaspace::using_class_space(), "Has to use class space");
4944   return class_vsm()->calc_chunk_size(word_size);
4945 }
4946 
4947 void ClassLoaderMetaspace::print_on(outputStream* out) const {
4948   // Print both class virtual space counts and metaspace.
4949   if (Verbose) {
4950     vsm()->print_on(out);
4951     if (Metaspace::using_class_space()) {
4952       class_vsm()->print_on(out);
4953     }
4954   }
4955 }
4956 
4957 void ClassLoaderMetaspace::verify() {
4958   vsm()->verify();
4959   if (Metaspace::using_class_space()) {
4960     class_vsm()->verify();
4961   }
4962 }
4963 
4964 void ClassLoaderMetaspace::dump(outputStream* const out) const {
4965   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
4966   vsm()->dump(out);
4967   if (Metaspace::using_class_space()) {
4968     out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
4969     class_vsm()->dump(out);
4970   }
4971 }
4972 
4973 
4974 
4975 #ifdef ASSERT
4976 static void do_verify_chunk(Metachunk* chunk) {
4977   guarantee(chunk != NULL, "Sanity");
4978   // Verify chunk itself; then verify that it is consistent with the
4979   // occupancy map of its containing node.
4980   chunk->verify();
4981   VirtualSpaceNode* const vsn = chunk->container();
4982   OccupancyMap* const ocmap = vsn->occupancy_map();
4983   ocmap->verify_for_chunk(chunk);
4984 }
4985 #endif
4986 
4987 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
4988   chunk->set_is_tagged_free(!inuse);
4989   OccupancyMap* const ocmap = chunk->container()->occupancy_map();
4990   ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
4991 }
4992 
4993 /////////////// Unit tests ///////////////
4994 
4995 #ifndef PRODUCT
4996 
4997 class TestMetaspaceUtilsTest : AllStatic {
4998  public:
4999   static void test_reserved() {
5000     size_t reserved = MetaspaceUtils::reserved_bytes();
5001 
5002     assert(reserved > 0, "assert");
5003 
5004     size_t committed  = MetaspaceUtils::committed_bytes();
5005     assert(committed <= reserved, "assert");
5006 
5007     size_t reserved_metadata = MetaspaceUtils::reserved_bytes(Metaspace::NonClassType);
5008     assert(reserved_metadata > 0, "assert");
5009     assert(reserved_metadata <= reserved, "assert");
5010 
5011     if (UseCompressedClassPointers) {
5012       size_t reserved_class    = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
5013       assert(reserved_class > 0, "assert");
5014       assert(reserved_class < reserved, "assert");
5015     }
5016   }
5017 
5018   static void test_committed() {
5019     size_t committed = MetaspaceUtils::committed_bytes();
5020 
5021     assert(committed > 0, "assert");
5022 
5023     size_t reserved  = MetaspaceUtils::reserved_bytes();
5024     assert(committed <= reserved, "assert");
5025 
5026     size_t committed_metadata = MetaspaceUtils::committed_bytes(Metaspace::NonClassType);
5027     assert(committed_metadata > 0, "assert");
5028     assert(committed_metadata <= committed, "assert");
5029 
5030     if (UseCompressedClassPointers) {
5031       size_t committed_class    = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
5032       assert(committed_class > 0, "assert");
5033       assert(committed_class < committed, "assert");
5034     }
5035   }
5036 
5037   static void test_virtual_space_list_large_chunk() {
5038     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
5039     MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
5040     // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be
5041     // vm_allocation_granularity aligned on Windows.
5042     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
5043     large_size += (os::vm_page_size()/BytesPerWord);
5044     vs_list->get_new_chunk(large_size, 0);
5045   }
5046 
5047   static void test() {
5048     test_reserved();
5049     test_committed();
5050     test_virtual_space_list_large_chunk();
5051   }
5052 };
5053 
5054 void TestMetaspaceUtils_test() {
5055   TestMetaspaceUtilsTest::test();
5056 }
5057 
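     // Exercises VirtualSpaceNode::retire() and is_available() with various
     // commit and usage patterns. Like the ChunkManager test below, it lives
     // here because the tested class is local to this file.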
5058 class TestVirtualSpaceNodeTest {
5059   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
5060                                           size_t& num_small_chunks,
5061                                           size_t& num_specialized_chunks) {
5062     num_medium_chunks = words_left / MediumChunk;
5063     words_left = words_left % MediumChunk;
5064 
5065     num_small_chunks = words_left / SmallChunk;
5066     words_left = words_left % SmallChunk;
5067     // how many specialized chunks can we get?
5068     num_specialized_chunks = words_left / SpecializedChunk;
5069     assert(words_left % SpecializedChunk == 0, "should be nothing left");
5070   }
5071 
5072  public:
5073   static void test() {
5074     MutexLockerEx ml(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
5075     const size_t vsn_test_size_words = MediumChunk  * 4;
5076     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
5077 
5078     // The chunk sizes must be multiples of each other, or this will fail
5079     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
5080     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
5081 
5082     { // No committed memory in VSN
5083       ChunkManager cm(false);
5084       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5085       vsn.initialize();
5086       vsn.retire(&cm);
5087       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
5088     }
5089 
5090     { // All of VSN is committed, half is used by chunks
5091       ChunkManager cm(false);
5092       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5093       vsn.initialize();
5094       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
5095       vsn.get_chunk_vs(MediumChunk);
5096       vsn.get_chunk_vs(MediumChunk);
5097       vsn.retire(&cm);
5098       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
5099       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
5100     }
5101 
5102     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
5103     // This doesn't work for systems with vm_page_size >= 16K.
5104     if (page_chunks < MediumChunk) {
5105       // 4 pages of VSN is committed, some is used by chunks
5106       ChunkManager cm(false);
5107       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5108 
5109       vsn.initialize();
5110       vsn.expand_by(page_chunks, page_chunks);
5111       vsn.get_chunk_vs(SmallChunk);
5112       vsn.get_chunk_vs(SpecializedChunk);
5113       vsn.retire(&cm);
5114 
5115       // committed - used = words left to retire
5116       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
5117 
5118       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5119       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5120 
5121       assert(num_medium_chunks == 0, "should not get any medium chunks");
5122       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5123       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5124     }
5125 
5126     { // Half of VSN is committed, a humongous chunk is used
5127       ChunkManager cm(false);
5128       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5129       vsn.initialize();
5130       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
5131       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
5132       vsn.retire(&cm);
5133 
5134       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
5135       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5136       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5137 
5138       assert(num_medium_chunks == 0, "should not get any medium chunks");
5139       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5140       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5141     }
5142 
5143   }
5144 
5145 #define assert_is_available_positive(word_size) \
5146   assert(vsn.is_available(word_size), \
5147          #word_size ": " PTR_FORMAT " bytes were not available in " \
5148          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5149          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5150 
5151 #define assert_is_available_negative(word_size) \
5152   assert(!vsn.is_available(word_size), \
5153          #word_size ": " PTR_FORMAT " bytes should not be available in " \
5154          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5155          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5156 
5157   static void test_is_available_positive() {
5158     // Reserve some memory.
5159     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5160     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5161 
5162     // Commit some memory.
5163     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5164     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5165     assert(expanded, "Failed to commit");
5166 
5167     // Check that is_available accepts the committed size.
5168     assert_is_available_positive(commit_word_size);
5169 
5170     // Check that is_available accepts half the committed size.
5171     size_t expand_word_size = commit_word_size / 2;
5172     assert_is_available_positive(expand_word_size);
5173   }
5174 
5175   static void test_is_available_negative() {
5176     // Reserve some memory.
5177     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5178     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5179 
5180     // Commit some memory.
5181     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5182     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5183     assert(expanded, "Failed to commit");
5184 
5185     // Check that is_available doesn't accept a too large size.
5186     size_t two_times_commit_word_size = commit_word_size * 2;
5187     assert_is_available_negative(two_times_commit_word_size);
5188   }
5189 
5190   static void test_is_available_overflow() {
5191     // Reserve some memory.
5192     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5193     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5194 
5195     // Commit some memory.
5196     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5197     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5198     assert(expanded, "Failed to commit");
5199 
5200     // Calculate a size that will overflow the virtual space size.
5201     void* virtual_space_max = (void*)(uintptr_t)-1;
5202     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
5203     size_t overflow_size = bottom_to_max + BytesPerWord;
5204     size_t overflow_word_size = overflow_size / BytesPerWord;
5205 
5206     // Check that is_available can handle the overflow.
5207     assert_is_available_negative(overflow_word_size);
5208   }
5209 
5210   static void test_is_available() {
5211     TestVirtualSpaceNodeTest::test_is_available_positive();
5212     TestVirtualSpaceNodeTest::test_is_available_negative();
5213     TestVirtualSpaceNodeTest::test_is_available_overflow();
5214   }
5215 };
5216 
5217 // The following test is placed here instead of a gtest / unittest file
5218 // because the ChunkManager class is only available in this file.
5219 void ChunkManager_test_list_index() {
5220   {
5221     // Test previous bug where a query for a humongous class metachunk,
5222     // incorrectly matched the non-class medium metachunk size.
5223     {
5224       ChunkManager manager(true);
5225 
5226       assert(MediumChunk > ClassMediumChunk, "Precondition for test");
5227 
5228       ChunkIndex index = manager.list_index(MediumChunk);
5229 
5230       assert(index == HumongousIndex,
5231           "Requested size is larger than ClassMediumChunk,"
5232           " so should return HumongousIndex. Got index: %d", (int)index);
5233     }
5234 
5235     // Check the specified sizes as well.
5236     {
5237       ChunkManager manager(true);
5238       assert(manager.list_index(ClassSpecializedChunk) == SpecializedIndex, "sanity");
5239       assert(manager.list_index(ClassSmallChunk) == SmallIndex, "sanity");
5240       assert(manager.list_index(ClassMediumChunk) == MediumIndex, "sanity");
5241       assert(manager.list_index(ClassMediumChunk + ClassSpecializedChunk) == HumongousIndex, "sanity");
5242     }
5243     {
5244       ChunkManager manager(false);
5245       assert(manager.list_index(SpecializedChunk) == SpecializedIndex, "sanity");
5246       assert(manager.list_index(SmallChunk) == SmallIndex, "sanity");
5247       assert(manager.list_index(MediumChunk) == MediumIndex, "sanity");
5248       assert(manager.list_index(MediumChunk + SpecializedChunk) == HumongousIndex, "sanity");
5249     }
5250 
5251   }
5252 
5253 }
5254 
5255 #endif // !PRODUCT
5256 
5257 #ifdef ASSERT
5258 
5259 // The following test is placed here instead of a gtest / unittest file
5260 // because the ChunkManager class is only available in this file.
5261 class SpaceManagerTest : AllStatic {
5262   friend void SpaceManager_test_adjust_initial_chunk_size();
5263 
5264   static void test_adjust_initial_chunk_size(bool is_class) {
5265     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
5266     const size_t normal   = SpaceManager::small_chunk_size(is_class);
5267     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
5268 
5269 #define test_adjust_initial_chunk_size(value, expected, is_class_value)          \
5270     do {                                                                         \
5271       size_t v = value;                                                          \
5272       size_t e = expected;                                                       \
5273       size_t adjusted = SpaceManager::adjust_initial_chunk_size(v, (is_class_value)); \
5274       assert(adjusted == e, "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, adjusted); \
5275     } while (0)
5276 
5277     // Smallest (specialized)
5278     test_adjust_initial_chunk_size(1,            smallest, is_class);
5279     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
5280     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
5281 
5282     // Small
5283     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
5284     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
5285     test_adjust_initial_chunk_size(normal,       normal, is_class);
5286 
5287     // Medium
5288     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
5289     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
5290     test_adjust_initial_chunk_size(medium,     medium, is_class);
5291 
5292     // Humongous
5293     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
5294 
5295 #undef test_adjust_initial_chunk_size
5296   }
5297 
5298   static void test_adjust_initial_chunk_size() {
5299     test_adjust_initial_chunk_size(false);
5300     test_adjust_initial_chunk_size(true);
5301   }
5302 };
5303 
5304 void SpaceManager_test_adjust_initial_chunk_size() {
5305   SpaceManagerTest::test_adjust_initial_chunk_size();
5306 }
5307 
5308 #endif // ASSERT
5309 
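     // The structs and extern functions below expose chunk manager statistics
     // and the chunk size geometry to test code outside of this file.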
5310 struct chunkmanager_statistics_t {
5311   int num_specialized_chunks;
5312   int num_small_chunks;
5313   int num_medium_chunks;
5314   int num_humongous_chunks;
5315 };
5316 
5317 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
5318   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
5319   ChunkManager::ChunkManagerStatistics stat;
5320   chunk_manager->get_statistics(&stat);
5321   out->num_specialized_chunks = (int)stat.num_by_type[SpecializedIndex];
5322   out->num_small_chunks = (int)stat.num_by_type[SmallIndex];
5323   out->num_medium_chunks = (int)stat.num_by_type[MediumIndex];
5324   out->num_humongous_chunks = (int)stat.num_humongous_chunks;
5325 }
5326 
5327 struct chunk_geometry_t {
5328   size_t specialized_chunk_word_size;
5329   size_t small_chunk_word_size;
5330   size_t medium_chunk_word_size;
5331 };
5332 
5333 extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
5334   if (mdType == Metaspace::NonClassType) {
5335     out->specialized_chunk_word_size = SpecializedChunk;
5336     out->small_chunk_word_size = SmallChunk;
5337     out->medium_chunk_word_size = MediumChunk;
5338   } else {
5339     out->specialized_chunk_word_size = ClassSpecializedChunk;
5340     out->small_chunk_word_size = ClassSmallChunk;
5341     out->medium_chunk_word_size = ClassMediumChunk;
5342   }
5343 }