1 /*
   2  * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "gc/shared/gcLocker.hpp"
  29 #include "logging/log.hpp"
  30 #include "logging/logStream.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/binaryTreeDictionary.hpp"
  33 #include "memory/filemap.hpp"
  34 #include "memory/freeList.hpp"
  35 #include "memory/metachunk.hpp"
  36 #include "memory/metaspace.hpp"
  37 #include "memory/metaspaceGCThresholdUpdater.hpp"
  38 #include "memory/metaspaceShared.hpp"
  39 #include "memory/metaspaceTracer.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "memory/universe.hpp"
  42 #include "runtime/atomic.hpp"
  43 #include "runtime/globals.hpp"
  44 #include "runtime/init.hpp"
  45 #include "runtime/java.hpp"
  46 #include "runtime/mutex.hpp"
  47 #include "runtime/orderAccess.inline.hpp"
  48 #include "services/memTracker.hpp"
  49 #include "services/memoryService.hpp"
  50 #include "utilities/align.hpp"
  51 #include "utilities/copy.hpp"
  52 #include "utilities/debug.hpp"
  53 #include "utilities/macros.hpp"
  54 
  55 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  56 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
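     // The BlockTreeDictionary is used by the BlockFreelist (below) to manage returned
     // metadata blocks; the ChunkTreeDictionary is used by the ChunkManager to hold
     // free humongous chunks.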
  57 
  58 // Set this constant to enable slow integrity checking of the free chunk lists
  59 const bool metaspace_slow_verify = DEBUG_ONLY(true) NOT_DEBUG(false);
  60 
  61 // Helper function that does a bunch of checks for a chunk.
  62 DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
  63 
  64 // Given a Metachunk, update its in-use information (both in the
  65 // chunk and the occupancy map).
  66 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
  67 
  68 size_t const allocation_from_dictionary_limit = 4 * K;
  69 
  70 MetaWord* last_allocated = 0;
  71 
  72 size_t Metaspace::_compressed_class_space_size;
  73 const MetaspaceTracer* Metaspace::_tracer = NULL;
  74 
  75 DEBUG_ONLY(bool Metaspace::_frozen = false;)
  76 
  77 enum ChunkSizes {    // in words.
  78   ClassSpecializedChunk = 128,
  79   SpecializedChunk = 128,
  80   ClassSmallChunk = 256,
  81   SmallChunk = 512,
  82   ClassMediumChunk = 4 * K,
  83   MediumChunk = 8 * K
  84 };
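     // Note: the sizes above are in MetaWords; with 8-byte words, e.g., SpecializedChunk
     // (128 words) corresponds to 1 KB and MediumChunk (8 * K words) to 64 KB.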
  85 
  86 // Returns size of this chunk type.
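     // For example, for SmallIndex this returns ClassSmallChunk (256 words) if is_class
     // is true, and SmallChunk (512 words) otherwise.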
  87 size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
  88   assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
  89   size_t size = 0;
  90   if (is_class) {
  91     switch(chunktype) {
  92       case SpecializedIndex: size = ClassSpecializedChunk; break;
  93       case SmallIndex: size = ClassSmallChunk; break;
  94       case MediumIndex: size = ClassMediumChunk; break;
  95       default:
  96         ShouldNotReachHere();
  97     }
  98   } else {
  99     switch(chunktype) {
 100       case SpecializedIndex: size = SpecializedChunk; break;
 101       case SmallIndex: size = SmallChunk; break;
 102       case MediumIndex: size = MediumChunk; break;
 103       default:
 104         ShouldNotReachHere();
 105     }
 106   }
 107   return size;
 108 }
 109 
 110 ChunkIndex get_chunk_type_by_size(size_t size, bool is_class) {
 111   if (is_class) {
 112     if (size == ClassSpecializedChunk) {
 113       return SpecializedIndex;
 114     } else if (size == ClassSmallChunk) {
 115       return SmallIndex;
 116     } else if (size == ClassMediumChunk) {
 117       return MediumIndex;
 118     } else if (size > ClassMediumChunk) {
 119       assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
 120       return HumongousIndex;
 121     }
 122   } else {
 123     if (size == SpecializedChunk) {
 124       return SpecializedIndex;
 125     } else if (size == SmallChunk) {
 126       return SmallIndex;
 127     } else if (size == MediumChunk) {
 128       return MediumIndex;
 129     } else if (size > MediumChunk) {
 130       assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
 131       return HumongousIndex;
 132     }
 133   }
 134   ShouldNotReachHere();
 135   return (ChunkIndex)-1;
 136 }
 137 
 138 
 139 static ChunkIndex next_chunk_index(ChunkIndex i) {
 140   assert(i < NumberOfInUseLists, "Out of bound");
 141   return (ChunkIndex) (i+1);
 142 }
 143 
 144 static ChunkIndex prev_chunk_index(ChunkIndex i) {
 145   assert(i > ZeroIndex, "Out of bound");
 146   return (ChunkIndex) (i-1);
 147 }
 148 
 149 static const char* scale_unit(size_t scale) {
 150   switch(scale) {
 151     case 1: return "BYTES";
 152     case K: return "KB";
 153     case M: return "MB";
 154     case G: return "GB";
 155     default:
 156       ShouldNotReachHere();
 157       return NULL;
 158   }
 159 }
 160 
 161 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 162 uint MetaspaceGC::_shrink_factor = 0;
 163 bool MetaspaceGC::_should_concurrent_collect = false;
 164 
 165 typedef class FreeList<Metachunk> ChunkList;
 166 
 167 // Manages the global free lists of chunks.
 168 class ChunkManager : public CHeapObj<mtInternal> {
 169   friend class TestVirtualSpaceNodeTest;
 170 
 171   // Free list of chunks of different sizes.
 172   //   SpecializedChunk
 173   //   SmallChunk
 174   //   MediumChunk
 175   ChunkList _free_chunks[NumberOfFreeLists];
 176 
 177   // Whether or not this is the class chunkmanager.
 178   const bool _is_class;
 179 
 180   // Return non-humongous chunk list by its index.
 181   ChunkList* free_chunks(ChunkIndex index);
 182 
 183   // Returns non-humongous chunk list for the given chunk word size.
 184   ChunkList* find_free_chunks_list(size_t word_size);
 185 
 186   //   HumongousChunk
 187   ChunkTreeDictionary _humongous_dictionary;
 188 
 189   // Returns the humongous chunk dictionary.
 190   ChunkTreeDictionary* humongous_dictionary() {
 191     return &_humongous_dictionary;
 192   }
 193 
 194   // Size, in metaspace words, of all chunks managed by this ChunkManager
 195   size_t _free_chunks_total;
 196   // Number of chunks in this ChunkManager
 197   size_t _free_chunks_count;
 198 
 199   // Update counters after a chunk was added or removed.
 200   void account_for_added_chunk(const Metachunk* c);
 201   void account_for_removed_chunk(const Metachunk* c);
 202 
 203   // Debug support
 204 
 205   size_t sum_free_chunks();
 206   size_t sum_free_chunks_count();
 207 
 208   void locked_verify_free_chunks_total();
 209   void slow_locked_verify_free_chunks_total() {
 210     if (metaspace_slow_verify) {
 211       locked_verify_free_chunks_total();
 212     }
 213   }
 214   void locked_verify_free_chunks_count();
 215   void slow_locked_verify_free_chunks_count() {
 216     if (metaspace_slow_verify) {
 217       locked_verify_free_chunks_count();
 218     }
 219   }
 220   void verify_free_chunks_count();
 221 
 222   // Given a pointer to a chunk, attempts to merge it with neighboring
 223   // free chunks to form a bigger chunk. Returns true if successful.
 224   bool attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type);
 225 
 226   // Helper for chunk merging:
 227   //  Given an address range with 1-n chunks which are all supposed to be
 228   //  free and hence currently managed by this ChunkManager, remove them
 229   //  from this ChunkManager and mark them as invalid.
 230   // - This does not correct the occupancy map.
 231   // - This does not adjust the counters in ChunkManager.
 232   // - This does not adjust the container count in the containing VirtualSpaceNode.
 233   // Returns number of chunks removed.
 234   int remove_chunks_in_area(MetaWord* p, size_t word_size);
 235 
 236   // Helper for chunk splitting: given a target chunk size and a larger free chunk,
 237   // split up the larger chunk into n smaller chunks, at least one of which should be
 238   // the target chunk of target chunk size. The smaller chunks, including the target
 239   // chunk, are returned to the freelist. The pointer to the target chunk is returned.
 240   // Note that this chunk is supposed to be removed from the freelist right away.
 241   Metachunk* split_chunk(size_t target_chunk_word_size, Metachunk* chunk);
 242 
 243  public:
 244 
 245   struct ChunkManagerStatistics {
 246     size_t num_by_type[NumberOfFreeLists];
 247     size_t single_size_by_type[NumberOfFreeLists];
 248     size_t total_size_by_type[NumberOfFreeLists];
 249     size_t num_humongous_chunks;
 250     size_t total_size_humongous_chunks;
 251   };
 252 
 253   void locked_get_statistics(ChunkManagerStatistics* stat) const;
 254   void get_statistics(ChunkManagerStatistics* stat) const;
 255   static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale);
 256 
 257 
 258   ChunkManager(bool is_class)
 259       : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
 260     _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
 261     _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
 262     _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
 263   }
 264 
 265   // Allocate (remove) a chunk from the global freelist.
 266   Metachunk* chunk_freelist_allocate(size_t word_size);
 267 
 268   // Map a size to a list index assuming that there are lists
 269   // for special, small, medium, and humongous chunks.
 270   ChunkIndex list_index(size_t size);
 271 
 272   // Map a given index to the chunk size.
 273   size_t size_by_index(ChunkIndex index) const;
 274 
 275   bool is_class() const { return _is_class; }
 276 
 277   // Convenience accessors.
 278   size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); }
 279   size_t small_chunk_word_size() const { return size_by_index(SmallIndex); }
 280   size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); }
 281 
 282   // Take a chunk from the ChunkManager. The chunk is expected to be in
 283   // the chunk manager (the freelist if non-humongous, the dictionary if
 284   // humongous).
 285   void remove_chunk(Metachunk* chunk);
 286 
 287   // Return a single chunk of type index to the ChunkManager.
 288   void return_single_chunk(ChunkIndex index, Metachunk* chunk);
 289 
 290   // Add the simple linked list of chunks to the freelist of chunks
 291   // of type index.
 292   void return_chunk_list(ChunkIndex index, Metachunk* chunk);
 293 
 294   // Total of the space in the free chunks list
 295   size_t free_chunks_total_words();
 296   size_t free_chunks_total_bytes();
 297 
 298   // Number of chunks in the free chunks list
 299   size_t free_chunks_count();
 300 
 301   // Remove from a list by size.  Selects list based on size of chunk.
 302   Metachunk* free_chunks_get(size_t chunk_word_size);
 303 
 304 #define index_bounds_check(index)                                         \
 305   assert(index == SpecializedIndex ||                                     \
 306          index == SmallIndex ||                                           \
 307          index == MediumIndex ||                                          \
 308          index == HumongousIndex, "Bad index: %d", (int) index)
 309 
 310   size_t num_free_chunks(ChunkIndex index) const {
 311     index_bounds_check(index);
 312 
 313     if (index == HumongousIndex) {
 314       return _humongous_dictionary.total_free_blocks();
 315     }
 316 
 317     ssize_t count = _free_chunks[index].count();
 318     return count == -1 ? 0 : (size_t) count;
 319   }
 320 
 321   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 322     index_bounds_check(index);
 323 
 324     size_t word_size = 0;
 325     if (index == HumongousIndex) {
 326       word_size = _humongous_dictionary.total_size();
 327     } else {
 328       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 329       word_size = size_per_chunk_in_words * num_free_chunks(index);
 330     }
 331 
 332     return word_size * BytesPerWord;
 333   }
 334 
 335   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 336     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 337                                          num_free_chunks(SmallIndex),
 338                                          num_free_chunks(MediumIndex),
 339                                          num_free_chunks(HumongousIndex),
 340                                          size_free_chunks_in_bytes(SpecializedIndex),
 341                                          size_free_chunks_in_bytes(SmallIndex),
 342                                          size_free_chunks_in_bytes(MediumIndex),
 343                                          size_free_chunks_in_bytes(HumongousIndex));
 344   }
 345 
 346   // Debug support
 347   void verify();
 348   void slow_verify() {
 349     if (metaspace_slow_verify) {
 350       verify();
 351     }
 352   }
 353   void locked_verify();
 354   void slow_locked_verify() {
 355     if (metaspace_slow_verify) {
 356       locked_verify();
 357     }
 358   }
 359   void verify_free_chunks_total();
 360 
 361   void locked_print_free_chunks(outputStream* st);
 362   void locked_print_sum_free_chunks(outputStream* st);
 363 
 364   void print_on(outputStream* st) const;
 365 
 366   // Prints composition for both non-class and (if available)
 367   // class chunk manager.
 368   static void print_all_chunkmanagers(outputStream* out, size_t scale = 1);
 369 };
 370 
 371 class SmallBlocks : public CHeapObj<mtClass> {
 372   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 373   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 374 
 375  private:
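       // One FreeList per block word size in [_small_block_min_size, _small_block_max_size).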
 376   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 377 
 378   FreeList<Metablock>& list_at(size_t word_size) {
 379     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 380     return _small_lists[word_size - _small_block_min_size];
 381   }
 382 
 383  public:
 384   SmallBlocks() {
 385     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 386       uint k = i - _small_block_min_size;
 387       _small_lists[k].set_size(i);
 388     }
 389   }
 390 
 391   size_t total_size() const {
 392     size_t result = 0;
 393     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 394       uint k = i - _small_block_min_size;
 395       result = result + _small_lists[k].count() * _small_lists[k].size();
 396     }
 397     return result;
 398   }
 399 
 400   static uint small_block_max_size() { return _small_block_max_size; }
 401   static uint small_block_min_size() { return _small_block_min_size; }
 402 
 403   MetaWord* get_block(size_t word_size) {
 404     if (list_at(word_size).count() > 0) {
 405       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 406       return new_block;
 407     } else {
 408       return NULL;
 409     }
 410   }
 411   void return_block(Metablock* free_chunk, size_t word_size) {
 412     list_at(word_size).return_chunk_at_head(free_chunk, false);
 413     assert(list_at(word_size).count() > 0, "Should have a chunk");
 414   }
 415 
 416   void print_on(outputStream* st) const {
 417     st->print_cr("SmallBlocks:");
 418     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 419       uint k = i - _small_block_min_size;
 420       st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
 421     }
 422   }
 423 };
 424 
 425 // Used to manage the free list of Metablocks (a block corresponds
 426 // to the allocation of a quantum of metadata).
 427 class BlockFreelist : public CHeapObj<mtClass> {
 428   BlockTreeDictionary* const _dictionary;
 429   SmallBlocks* _small_blocks;
 430 
 431   // Only allocate and split from freelist if the size of the allocation
 432   // is at least 1/4th the size of the available block.
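       // For example, with WasteMultiplier == 4, an allocation of 10 words will only be
       // satisfied from a free block of at most 40 words.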
 433   const static int WasteMultiplier = 4;
 434 
 435   // Accessors
 436   BlockTreeDictionary* dictionary() const { return _dictionary; }
 437   SmallBlocks* small_blocks() {
 438     if (_small_blocks == NULL) {
 439       _small_blocks = new SmallBlocks();
 440     }
 441     return _small_blocks;
 442   }
 443 
 444  public:
 445   BlockFreelist();
 446   ~BlockFreelist();
 447 
 448   // Get a block from, or return a block to, the free list
 449   MetaWord* get_block(size_t word_size);
 450   void return_block(MetaWord* p, size_t word_size);
 451 
 452   size_t total_size() const  {
 453     size_t result = dictionary()->total_size();
 454     if (_small_blocks != NULL) {
 455       result = result + _small_blocks->total_size();
 456     }
 457     return result;
 458   }
 459 
 460   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 461   void print_on(outputStream* st) const;
 462 };
 463 
 464 // Helper for Occupancy Bitmap. A type trait to give an all-bits-are-one unsigned constant.
 465 template <typename T> struct all_ones  { static const T value; };
 466 template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
 467 template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
 468 
 469 // The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
 470 // keeps information about
 471 // - where a chunk starts
 472 // - whether a chunk is in-use or free
 473 // A bit in this bitmap represents one range of memory in the smallest
 474 // chunk size (SpecializedChunk or ClassSpecializedChunk).
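     // For example, a node of 64 K words tracked at ClassSpecializedChunk granularity
     // (128 words per bit) needs 512 bits, i.e. 64 bytes, per bit layer.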
 475 class OccupancyMap : public CHeapObj<mtInternal> {
 476 
 477   // The address range this map covers.
 478   const MetaWord* const _reference_address;
 479   const size_t _word_size;
 480 
 481   // The word size of a specialized chunk, aka the number of words one
 482   // bit in this map represents.
 483   const size_t _smallest_chunk_word_size;
 484 
 485   // map data
 486   // Data are organized in two bit layers:
 487   // The first layer is the chunk-start-map. Here, a bit is set to mark
 488   // the corresponding region as the head of a chunk.
 489   // The second layer is the in-use-map. Here, a set bit indicates that
 490   // the corresponding region belongs to a chunk which is in use.
 491   uint8_t* _map[2];
 492 
 493   enum { layer_chunk_start_map = 0, layer_in_use_map = 1 };
 494 
 495   // length, in bytes, of bitmap data
 496   size_t _map_size;
 497 
 498   // Returns true if bit at position pos at bit-layer layer is set.
 499   bool get_bit_at_position(unsigned pos, unsigned layer) const {
 500     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 501     const unsigned byteoffset = pos / 8;
 502     assert(byteoffset < _map_size,
 503            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 504     const unsigned mask = 1 << (pos % 8);
 505     return (_map[layer][byteoffset] & mask) > 0;
 506   }
 507 
 508   // Changes bit at position pos at bit-layer layer to value v.
 509   void set_bit_at_position(unsigned pos, unsigned layer, bool v) {
 510     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 511     const unsigned byteoffset = pos / 8;
 512     assert(byteoffset < _map_size,
 513            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 514     const unsigned mask = 1 << (pos % 8);
 515     if (v) {
 516       _map[layer][byteoffset] |= mask;
 517     } else {
 518       _map[layer][byteoffset] &= ~mask;
 519     }
 520   }
 521 
 522   // Optimized case of is_any_bit_set_in_region for 32/64bit aligned access:
 523   // pos is 32/64 aligned and num_bits is 32/64.
 524   // This is the typical case when coalescing to medium chunks, whose size is
 525   // 32 or 64 times the specialized chunk size (depending on class or non class
 526   // case), so they occupy 32 or 64 bits which should be 32/64-bit aligned, because
 527   // chunks are chunk-size aligned.
 528   template <typename T>
 529   bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const {
 530     assert(_map_size > 0, "not initialized");
 531     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 532     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos);
 533     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits);
 534     const size_t byteoffset = pos / 8;
 535     assert(byteoffset <= (_map_size - sizeof(T)),
 536            "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 537     const T w = *(T*)(_map[layer] + byteoffset);
 538     return w != 0;
 539   }
 540 
 541   // Returns true if any bit in region [pos, pos + num_bits) is set in bit-layer layer.
 542   bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const {
 543     if (pos % 32 == 0 && num_bits == 32) {
 544       return is_any_bit_set_in_region_3264<uint32_t>(pos, num_bits, layer);
 545     } else if (pos % 64 == 0 && num_bits == 64) {
 546       return is_any_bit_set_in_region_3264<uint64_t>(pos, num_bits, layer);
 547     } else {
 548       for (unsigned n = 0; n < num_bits; n ++) {
 549         if (get_bit_at_position(pos + n, layer)) {
 550           return true;
 551         }
 552       }
 553     }
 554     return false;
 555   }
 556 
 557   // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer.
 558   bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const {
 559     assert(word_size % _smallest_chunk_word_size == 0,
 560         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 561     const unsigned pos = get_bitpos_for_address(p);
 562     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 563     return is_any_bit_set_in_region(pos, num_bits, layer);
 564   }
 565 
 566   // Optimized case of set_bits_of_region for 32/64bit aligned access:
 567   // pos is 32/64 aligned and num_bits is 32/64.
 568   // This is the typical case when coalescing to medium chunks, whose size
 569   // is 32 or 64 times the specialized chunk size (depending on class or non
 570   // class case), so they occupy 32 or 64 bits which should be 32/64-bit aligned,
 571   // because chunks are chunk-size aligned.
 572   template <typename T>
 573   void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 574     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).",
 575            (unsigned)(sizeof(T) * 8), pos);
 576     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.",
 577            num_bits, (unsigned)(sizeof(T) * 8));
 578     const size_t byteoffset = pos / 8;
 579     assert(byteoffset <= (_map_size - sizeof(T)),
 580            "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 581     T* const pw = (T*)(_map[layer] + byteoffset);
 582     *pw = v ? all_ones<T>::value : (T) 0;
 583   }
 584 
 585   // Set all bits in a region starting at pos to a value.
 586   void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 587     assert(_map_size > 0, "not initialized");
 588     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 589     if (pos % 32 == 0 && num_bits == 32) {
 590       set_bits_of_region_T<uint32_t>(pos, num_bits, layer, v);
 591     } else if (pos % 64 == 0 && num_bits == 64) {
 592       set_bits_of_region_T<uint64_t>(pos, num_bits, layer, v);
 593     } else {
 594       for (unsigned n = 0; n < num_bits; n ++) {
 595         set_bit_at_position(pos + n, layer, v);
 596       }
 597     }
 598   }
 599 
 600   // Helper: sets all bits in a region [p, p+word_size).
 601   void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) {
 602     assert(word_size % _smallest_chunk_word_size == 0,
 603         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 604     const unsigned pos = get_bitpos_for_address(p);
 605     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 606     set_bits_of_region(pos, num_bits, layer, v);
 607   }
 608 
 609   // Helper: given an address, return the bit position representing that address.
 610   unsigned get_bitpos_for_address(const MetaWord* p) const {
 611     assert(_reference_address != NULL, "not initialized");
 612     assert(p >= _reference_address && p < _reference_address + _word_size,
 613            "Address %p out of range for occupancy map [%p..%p).",
 614             p, _reference_address, _reference_address + _word_size);
 615     assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)),
 616            "Address not aligned (%p).", p);
 617     const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size;
 618     assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity.");
 619     return (unsigned) d;
 620   }
 621 
 622  public:
 623 
 624   OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) :
 625     _reference_address(reference_address), _word_size(word_size),
 626     _smallest_chunk_word_size(smallest_chunk_word_size) {
 627     assert(reference_address != NULL, "invalid reference address");
 628     assert(is_aligned(reference_address, smallest_chunk_word_size),
 629            "Reference address not aligned to smallest chunk size.");
 630     assert(is_aligned(word_size, smallest_chunk_word_size),
 631            "Word_size shall be a multiple of the smallest chunk size.");
 632     // Calculate bitmap size: one bit per smallest_chunk_word_size'd area.
 633     size_t num_bits = word_size / smallest_chunk_word_size;
 634     _map_size = (num_bits + 7) / 8;
 635     assert(_map_size * 8 >= num_bits, "sanity");
 636     _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal);
 637     _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal);
 638     assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed.");
 639     memset(_map[1], 0, _map_size);
 640     memset(_map[0], 0, _map_size);
 641     // Sanity test: the first and the last possible chunk start addresses in
 642     // the covered range shall map to the first and the last bit in the bitmap, respectively.
 643     assert(get_bitpos_for_address(reference_address) == 0,
 644       "First chunk address in range must map to first bit in bitmap.");
 645     assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1,
 646       "Last chunk address in range must map to last bit in bitmap.");
 647   }
 648 
 649   ~OccupancyMap() {
 650     os::free(_map[0]);
 651     os::free(_map[1]);
 652   }
 653 
 654   // Returns true if a chunk starts at address p.
 655   bool chunk_starts_at_address(MetaWord* p) const {
 656     const unsigned pos = get_bitpos_for_address(p);
 657     return get_bit_at_position(pos, layer_chunk_start_map);
 658   }
 659 
 660   void set_chunk_starts_at_address(MetaWord* p, bool v) {
 661     const unsigned pos = get_bitpos_for_address(p);
 662     set_bit_at_position(pos, layer_chunk_start_map, v);
 663   }
 664 
 665   // Removes all chunk-start-bits inside a region, typically as a
 666   // result of a chunk merge.
 667   void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) {
 668     set_bits_of_region(p, word_size, layer_chunk_start_map, false);
 669   }
 670 
 671   // Returns true if there are live (in use) chunks in the region limited
 672   // by [p, p+word_size).
 673   bool is_region_in_use(MetaWord* p, size_t word_size) const {
 674     return is_any_bit_set_in_region(p, word_size, layer_in_use_map);
 675   }
 676 
 677   // Marks the region starting at p with the size word_size as in use
 678   // or free, depending on v.
 679   void set_region_in_use(MetaWord* p, size_t word_size, bool v) {
 680     set_bits_of_region(p, word_size, layer_in_use_map, v);
 681   }
 682 
 683 #ifdef ASSERT
 684   // Verify occupancy map for the address range [from, to).
 685   // We need to tell it the address range, because the memory the
 686   // occupancy map is covering may not be fully committed yet.
 687   void verify(MetaWord* from, MetaWord* to) {
 688     Metachunk* chunk = NULL;
 689     int nth_bit_for_chunk = 0;
 690     MetaWord* chunk_end = NULL;
 691     for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) {
 692       const unsigned pos = get_bitpos_for_address(p);
 693       // Check the chunk-starts-info:
 694       if (get_bit_at_position(pos, layer_chunk_start_map)) {
 695         // Chunk start marked in bitmap.
 696         chunk = (Metachunk*) p;
 697         if (chunk_end != NULL) {
 698           assert(chunk_end == p, "Unexpected chunk start found at %p (expected "
 699                  "the next chunk to start at %p).", p, chunk_end);
 700         }
 701         assert(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p);
 702         if (chunk->get_chunk_type() != HumongousIndex) {
 703           guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p);
 704         }
 705         chunk_end = p + chunk->word_size();
 706         nth_bit_for_chunk = 0;
 707         assert(chunk_end <= to, "Chunk end overlaps test address range.");
 708       } else {
 709         // No chunk start marked in bitmap.
 710         assert(chunk != NULL, "Chunk should start at start of address range.");
 711         assert(p < chunk_end, "Did not find expected chunk start at %p.", p);
 712         nth_bit_for_chunk ++;
 713       }
 714       // Check the in-use-info:
 715       const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map);
 716       if (in_use_bit) {
 717         assert(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u).",
 718                chunk, nth_bit_for_chunk);
 719       } else {
 720         assert(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u).",
 721                chunk, nth_bit_for_chunk);
 722       }
 723     }
 724   }
 725 
 726   // Verify that a given chunk is correctly accounted for in the bitmap.
 727   void verify_for_chunk(Metachunk* chunk) {
 728     assert(chunk_starts_at_address((MetaWord*) chunk),
 729            "No chunk start marked in map for chunk %p.", chunk);
 730     // For chunks larger than the minimal chunk size, no other chunk
 731     // must start in its area.
 732     if (chunk->word_size() > _smallest_chunk_word_size) {
 733       assert(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size,
 734                                        chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map),
 735              "No chunk must start within another chunk.");
 736     }
 737     if (!chunk->is_tagged_free()) {
 738       assert(is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 739              "Chunk %p is in use but marked as free in map (%d %d).",
 740              chunk, chunk->get_chunk_type(), chunk->get_origin());
 741     } else {
 742       assert(!is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 743              "Chunk %p is free but marked as in-use in map (%d %d).",
 744              chunk, chunk->get_chunk_type(), chunk->get_origin());
 745     }
 746   }
 747 
 748 #endif // ASSERT
 749 
 750 };
 751 
 752 // A VirtualSpaceList node.
 753 class VirtualSpaceNode : public CHeapObj<mtClass> {
 754   friend class VirtualSpaceList;
 755 
 756   // Link to next VirtualSpaceNode
 757   VirtualSpaceNode* _next;
 758 
 759   // Whether this node belongs to class metaspace or to non-class metaspace.
 760   const bool _is_class;
 761 
 762   // Total reserved memory of this VirtualSpace
 763   MemRegion _reserved;
 764   ReservedSpace _rs;
 765   VirtualSpace _virtual_space;
 766   MetaWord* _top;
 767   // count of chunks contained in this VirtualSpace
 768   uintx _container_count;
 769 
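       // Bitmap tracking chunk starts and chunk in-use state within this node
       // (see OccupancyMap above).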
 770   OccupancyMap* _occupancy_map;
 771 
 772   // Convenience functions to access the _virtual_space
 773   char* low()  const { return virtual_space()->low(); }
 774   char* high() const { return virtual_space()->high(); }
 775 
 776   // The first Metachunk will be allocated at the bottom of the
 777   // VirtualSpace
 778   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 779 
 780   // Committed but unused space in the virtual space
 781   size_t free_words_in_vs() const;
 782 
 783   // True if this node belongs to class metaspace.
 784   bool is_class() const { return _is_class; }
 785 
 786   // Helper function for take_from_committed: allocate padding chunks
 787   // until top is at the given address.
 788   void allocate_padding_chunks_until_top_is_at(MetaWord* target_top);
 789 
 790  public:
 791 
 792   VirtualSpaceNode(bool is_class, size_t byte_size);
 793   VirtualSpaceNode(bool is_class, ReservedSpace rs) :
 794     _next(NULL), _is_class(is_class), _rs(rs), _top(NULL), _container_count(0), _occupancy_map(NULL) {}
 795   ~VirtualSpaceNode();
 796 
 797   // Convenience functions for logical bottom and end
 798   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 799   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 800 
 801   const OccupancyMap* occupancy_map() const { return _occupancy_map; }
 802   OccupancyMap* occupancy_map() { return _occupancy_map; }
 803 
 804   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 805 
 806   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 807   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 808 
 809   bool is_pre_committed() const { return _virtual_space.special(); }
 810 
 811   // Accessors
 812   // (top(), defined below, is the address of the next available space in _virtual_space.)
 813   VirtualSpaceNode* next() { return _next; }
 814   void set_next(VirtualSpaceNode* v) { _next = v; }
 815 
 816   void set_reserved(MemRegion const v) { _reserved = v; }
 817   void set_top(MetaWord* v) { _top = v; }
 818 
 819   // Accessors
 820   MemRegion* reserved() { return &_reserved; }
 821   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 822 
 823   // Returns true if "word_size" is available in the VirtualSpace
 824   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 825 
 826   MetaWord* top() const { return _top; }
 827   void inc_top(size_t word_size) { _top += word_size; }
 828 
 829   uintx container_count() { return _container_count; }
 830   void inc_container_count();
 831   void dec_container_count();
 832 #ifdef ASSERT
 833   uintx container_count_slow();
 834   void verify_container_count();
 835 #endif
 836 
 837   // used and capacity in this single entry in the list
 838   size_t used_words_in_vs() const;
 839   size_t capacity_words_in_vs() const;
 840 
 841   bool initialize();
 842 
 843   // get space from the virtual space
 844   Metachunk* take_from_committed(size_t chunk_word_size);
 845 
 846   // Allocate a chunk from the virtual space and return it.
 847   Metachunk* get_chunk_vs(size_t chunk_word_size);
 848 
 849   // Expands/shrinks the committed space in a virtual space.  Delegates
 850   // to VirtualSpace
 851   bool expand_by(size_t min_words, size_t preferred_words);
 852 
 853   // In preparation for deleting this node, remove all the chunks
 854   // in the node from any freelist.
 855   void purge(ChunkManager* chunk_manager);
 856 
 857   // If an allocation doesn't fit in the current node a new node is created.
 858   // Allocate chunks out of the remaining committed space in this node
 859   // to avoid wasting that memory.
 860   // This always adds up because all the chunk sizes are multiples of
 861   // the smallest chunk size.
 862   void retire(ChunkManager* chunk_manager);
 863 
 864 
 865   void print_on(outputStream* st) const;
 866   void print_map(outputStream* st, bool is_class) const;
 867 
 868   // Debug support
 869   DEBUG_ONLY(void mangle();)
 870   // Verify counters, all chunks in this list node and the occupancy map.
 871   DEBUG_ONLY(void verify();)
 872   // Verify that all free chunks in this node are ideally merged
 873   // (there should not be multiple small chunks where a large chunk could exist.)
 874   DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
 875 
 876 };
 877 
 878 #define assert_is_aligned(value, alignment)                  \
 879   assert(is_aligned((value), (alignment)),                   \
 880          SIZE_FORMAT_HEX " is not aligned to "               \
 881          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 882 
 883 // Decide if large pages should be committed when the memory is reserved.
 884 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 885   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 886     size_t words = bytes / BytesPerWord;
 887     bool is_class = false; // We never reserve large pages for the class space.
 888     if (MetaspaceGC::can_expand(words, is_class) &&
 889         MetaspaceGC::allowed_expansion() >= words) {
 890       return true;
 891     }
 892   }
 893 
 894   return false;
 895 }
 896 
 897 // bytes is the size, in bytes, of the associated VirtualSpace.
 898 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
 899   _next(NULL), _is_class(is_class), _rs(), _top(NULL), _container_count(0), _occupancy_map(NULL) {
 900   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 901   bool large_pages = should_commit_large_pages_when_reserving(bytes);
 902   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 903 
 904   if (_rs.is_reserved()) {
 905     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 906     assert(_rs.size() != 0, "Catch if we get a 0 size");
 907     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
 908     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
 909 
 910     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 911   }
 912 }
 913 
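     // In preparation for deleting this node, remove all chunks in this node from
     // their freelists. All chunks are expected to be (tagged) free at this point.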
 914 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 915   DEBUG_ONLY(this->verify();)
 916   Metachunk* chunk = first_chunk();
 917   Metachunk* invalid_chunk = (Metachunk*) top();
 918   while (chunk < invalid_chunk ) {
 919     assert(chunk->is_tagged_free(), "Should be tagged free");
 920     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 921     chunk_manager->remove_chunk(chunk);
 922     chunk->remove_sentinel();
 923     assert(chunk->next() == NULL &&
 924            chunk->prev() == NULL,
 925            "Was not removed from its list");
 926     chunk = (Metachunk*) next;
 927   }
 928 }
 929 
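     // Prints an ASCII map of this node's chunks: one character per smallest-chunk-sized
     // area, on four lines per row (chunk starts, chunk type/in-use, chunk origin, virgin chunks).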
 930 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
 931 
 932   if (bottom() == top()) {
 933     return;
 934   }
 935 
 936   const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
 937   const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
 938   const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
 939 
 940   int line_len = 100;
 941   const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
 942   line_len = (int)(section_len / spec_chunk_size);
 943 
 944   static const int NUM_LINES = 4;
 945 
 946   char* lines[NUM_LINES];
 947   for (int i = 0; i < NUM_LINES; i ++) {
 948     lines[i] = (char*)os::malloc(line_len, mtInternal);
 949   }
 950   int pos = 0;
 951   const MetaWord* p = bottom();
 952   const Metachunk* chunk = (const Metachunk*)p;
 953   const MetaWord* chunk_end = p + chunk->word_size();
 954   while (p < top()) {
 955     if (pos == line_len) {
 956       pos = 0;
 957       for (int i = 0; i < NUM_LINES; i ++) {
 958         st->fill_to(22);
 959         st->print_raw(lines[i], line_len);
 960         st->cr();
 961       }
 962     }
 963     if (pos == 0) {
 964       st->print(PTR_FORMAT ":", p2i(p));
 965     }
 966     if (p == chunk_end) {
 967       chunk = (Metachunk*)p;
 968       chunk_end = p + chunk->word_size();
 969     }
 970     // line 1: chunk starting points (a dot if that area is a chunk start).
 971     lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
 972 
 973     // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
 974     // chunk is in use.
 975     const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
 976     if (chunk->word_size() == spec_chunk_size) {
 977       lines[1][pos] = chunk_is_free ? 'x' : 'X';
 978     } else if (chunk->word_size() == small_chunk_size) {
 979       lines[1][pos] = chunk_is_free ? 's' : 'S';
 980     } else if (chunk->word_size() == med_chunk_size) {
 981       lines[1][pos] = chunk_is_free ? 'm' : 'M';
 982     } else if (chunk->word_size() > med_chunk_size) {
 983       lines[1][pos] = chunk_is_free ? 'h' : 'H';
 984     } else {
 985       ShouldNotReachHere();
 986     }
 987 
 988     // Line 3: chunk origin
 989     const ChunkOrigin origin = chunk->get_origin();
 990     lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
 991 
 992     // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
 993     //         but were never used.
 994     lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
 995 
 996     p += spec_chunk_size;
 997     pos ++;
 998   }
 999   if (pos > 0) {
1000     for (int i = 0; i < NUM_LINES; i ++) {
1001       st->fill_to(22);
1002       st->print_raw(lines[i], line_len);
1003       st->cr();
1004     }
1005   }
1006   for (int i = 0; i < NUM_LINES; i ++) {
1007     os::free(lines[i]);
1008   }
1009 }
1010 
1011 
1012 #ifdef ASSERT
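     // Counts the chunks in this node which are in use; chunks on the free lists are
     // not counted (see comment in the loop below).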
1013 uintx VirtualSpaceNode::container_count_slow() {
1014   uintx count = 0;
1015   Metachunk* chunk = first_chunk();
1016   Metachunk* invalid_chunk = (Metachunk*) top();
1017   while (chunk < invalid_chunk ) {
1018     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1019     do_verify_chunk(chunk);
1020     // Don't count the chunks on the free lists.  Those are
1021     // still part of the VirtualSpaceNode but not currently
1022     // counted.
1023     if (!chunk->is_tagged_free()) {
1024       count++;
1025     }
1026     chunk = (Metachunk*) next;
1027   }
1028   return count;
1029 }
1030 #endif
1031 
1032 #ifdef ASSERT
1033 // Verify counters, all chunks in this list node and the occupancy map.
1034 void VirtualSpaceNode::verify() {
1035   uintx num_in_use_chunks = 0;
1036   Metachunk* chunk = first_chunk();
1037   Metachunk* invalid_chunk = (Metachunk*) top();
1038 
1039   // Iterate the chunks in this node and verify each chunk.
1040   while (chunk < invalid_chunk ) {
1041     DEBUG_ONLY(do_verify_chunk(chunk);)
1042     if (!chunk->is_tagged_free()) {
1043       num_in_use_chunks ++;
1044     }
1045     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1046     chunk = (Metachunk*) next;
1047   }
1048   assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
1049          ", counter: " UINTX_FORMAT ").", num_in_use_chunks, _container_count);
1050   // Also verify the occupancy map.
1051   occupancy_map()->verify(this->bottom(), this->top());
1052 }
1053 #endif // ASSERT
1054 
1055 #ifdef ASSERT
1056 // Verify that all free chunks in this node are ideally merged
1057 // (there should not be multiple small chunks where a large chunk could exist.)
1058 void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() {
1059   Metachunk* chunk = first_chunk();
1060   Metachunk* invalid_chunk = (Metachunk*) top();
1061   // Shorthands.
1062   const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
1063   const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
1064   int num_free_chunks_since_last_med_boundary = -1;
1065   int num_free_chunks_since_last_small_boundary = -1;
1066   while (chunk < invalid_chunk ) {
1067     // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary.
1068     // Reset the counter when encountering a non-free chunk.
1069     if (chunk->get_chunk_type() != HumongousIndex) {
1070       if (chunk->is_tagged_free()) {
1071         // Count successive free, non-humongous chunks.
1072         if (is_aligned(chunk, size_small)) {
1073           assert(num_free_chunks_since_last_small_boundary <= 1,
1074                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_small, size_small);
1075           num_free_chunks_since_last_small_boundary = 0;
1076         } else if (num_free_chunks_since_last_small_boundary != -1) {
1077           num_free_chunks_since_last_small_boundary ++;
1078         }
1079         if (is_aligned(chunk, size_med)) {
1080           assert(num_free_chunks_since_last_med_boundary <= 1,
1081                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_med, size_med);
1082           num_free_chunks_since_last_med_boundary = 0;
1083         } else if (num_free_chunks_since_last_med_boundary != -1) {
1084           num_free_chunks_since_last_med_boundary ++;
1085         }
1086       } else {
1087         // Encountering a non-free chunk, reset counters.
1088         num_free_chunks_since_last_med_boundary = -1;
1089         num_free_chunks_since_last_small_boundary = -1;
1090       }
1091     } else {
1092       // One cannot merge areas with a humongous chunk in the middle. Reset counters.
1093       num_free_chunks_since_last_med_boundary = -1;
1094       num_free_chunks_since_last_small_boundary = -1;
1095     }
1096 
1097     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1098     chunk = (Metachunk*) next;
1099   }
1100 }
1101 #endif // ASSERT
1102 
1103 // List of VirtualSpaces for metadata allocation.
1104 class VirtualSpaceList : public CHeapObj<mtClass> {
1105   friend class VirtualSpaceNode;
1106 
1107   enum VirtualSpaceSizes {
1108     VirtualSpaceSize = 256 * K
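         // = 262144 words, i.e. 2 MB with 8-byte MetaWords.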
1109   };
1110 
1111   // Head of the list
1112   VirtualSpaceNode* _virtual_space_list;
1113   // virtual space currently being used for allocations
1114   VirtualSpaceNode* _current_virtual_space;
1115 
1116   // Is this VirtualSpaceList used for the compressed class space
1117   bool _is_class;
1118 
1119   // Sum of reserved and committed memory in the virtual spaces
1120   size_t _reserved_words;
1121   size_t _committed_words;
1122 
1123   // Number of virtual spaces
1124   size_t _virtual_space_count;
1125 
1126   ~VirtualSpaceList();
1127 
1128   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
1129 
1130   void set_virtual_space_list(VirtualSpaceNode* v) {
1131     _virtual_space_list = v;
1132   }
1133   void set_current_virtual_space(VirtualSpaceNode* v) {
1134     _current_virtual_space = v;
1135   }
1136 
1137   void link_vs(VirtualSpaceNode* new_entry);
1138 
1139   // Get another virtual space and add it to the list.  This
1140   // is typically prompted by a failed attempt to allocate a chunk
1141   // and is typically followed by the allocation of a chunk.
1142   bool create_new_virtual_space(size_t vs_word_size);
1143 
1144   // Chunk up the unused committed space in the current
1145   // virtual space and add the chunks to the free list.
1146   void retire_current_virtual_space();
1147 
1148  public:
1149   VirtualSpaceList(size_t word_size);
1150   VirtualSpaceList(ReservedSpace rs);
1151 
1152   size_t free_bytes();
1153 
1154   Metachunk* get_new_chunk(size_t chunk_word_size,
1155                            size_t suggested_commit_granularity);
1156 
1157   bool expand_node_by(VirtualSpaceNode* node,
1158                       size_t min_words,
1159                       size_t preferred_words);
1160 
1161   bool expand_by(size_t min_words,
1162                  size_t preferred_words);
1163 
1164   VirtualSpaceNode* current_virtual_space() {
1165     return _current_virtual_space;
1166   }
1167 
1168   bool is_class() const { return _is_class; }
1169 
1170   bool initialization_succeeded() { return _virtual_space_list != NULL; }
1171 
1172   size_t reserved_words()  { return _reserved_words; }
1173   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
1174   size_t committed_words() { return _committed_words; }
1175   size_t committed_bytes() { return committed_words() * BytesPerWord; }
1176 
1177   void inc_reserved_words(size_t v);
1178   void dec_reserved_words(size_t v);
1179   void inc_committed_words(size_t v);
1180   void dec_committed_words(size_t v);
1181   void inc_virtual_space_count();
1182   void dec_virtual_space_count();
1183 
1184   bool contains(const void* ptr);
1185 
1186   // Unlink empty VirtualSpaceNodes and free them.
1187   void purge(ChunkManager* chunk_manager);
1188 
1189   void print_on(outputStream* st) const;
1190   void print_map(outputStream* st) const;
1191 
1192   class VirtualSpaceListIterator : public StackObj {
1193     VirtualSpaceNode* _virtual_spaces;
1194    public:
1195     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
1196       _virtual_spaces(virtual_spaces) {}
1197 
1198     bool repeat() {
1199       return _virtual_spaces != NULL;
1200     }
1201 
1202     VirtualSpaceNode* get_next() {
1203       VirtualSpaceNode* result = _virtual_spaces;
1204       if (_virtual_spaces != NULL) {
1205         _virtual_spaces = _virtual_spaces->next();
1206       }
1207       return result;
1208     }
1209   };
1210 };
1211 
1212 class Metadebug : AllStatic {
1213   // Debugging support for Metaspaces
1214   static int _allocation_fail_alot_count;
1215 
1216  public:
1217 
1218   static void init_allocation_fail_alot_count();
1219 #ifdef ASSERT
1220   static bool test_metadata_failure();
1221 #endif
1222 };
1223 
1224 int Metadebug::_allocation_fail_alot_count = 0;
1225 
1226 //  SpaceManager - used by Metaspace to handle allocations
1227 class SpaceManager : public CHeapObj<mtClass> {
1228   friend class Metaspace;
1229   friend class ClassLoaderMetaspace;
1230   friend class Metadebug;
1231 
1232  private:
1233 
1234   // protects allocations
1235   Mutex* const _lock;
1236 
1237   // Type of metadata allocated.
1238   const Metaspace::MetadataType   _mdtype;
1239 
1240   // Type of metaspace
1241   const Metaspace::MetaspaceType  _space_type;
1242 
1243   // List of chunks in use by this SpaceManager.  Allocations
1244   // are done from the current chunk.  The list is used for deallocating
1245   // chunks when the SpaceManager is freed.
1246   Metachunk* _chunks_in_use[NumberOfInUseLists];
1247   Metachunk* _current_chunk;
1248 
1249   // Maximum number of small chunks to allocate to a SpaceManager
1250   static uint const _small_chunk_limit;
1251 
1252   // Maximum number of specialized chunks to allocate for anonymous
1253   // metadata space to a SpaceManager
1254   static uint const _anon_metadata_specialize_chunk_limit;
1255 
1256   // Sum of all space in allocated chunks
1257   size_t _allocated_blocks_words;
1258 
1259   // Sum of the sizes (in words) of all allocated chunks, and their count
1260   size_t _allocated_chunks_words;
1261   size_t _allocated_chunks_count;
1262 
1263   // Free lists of blocks are per SpaceManager since they
1264   // are assumed to be in chunks in use by the SpaceManager
1265   // and all chunks in use by a SpaceManager are freed when
1266   // the class loader using the SpaceManager is collected.
1267   BlockFreelist* _block_freelists;
1268 
1269   // protects virtualspace and chunk expansions
1270   static const char*  _expand_lock_name;
1271   static const int    _expand_lock_rank;
1272   static Mutex* const _expand_lock;
1273 
1274  private:
1275   // Accessors
1276   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
1277   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
1278     _chunks_in_use[index] = v;
1279   }
1280 
1281   BlockFreelist* block_freelists() const { return _block_freelists; }
1282 
1283   Metaspace::MetadataType mdtype() { return _mdtype; }
1284 
1285   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
1286   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
1287 
1288   Metachunk* current_chunk() const { return _current_chunk; }
1289   void set_current_chunk(Metachunk* v) {
1290     _current_chunk = v;
1291   }
1292 
1293   Metachunk* find_current_chunk(size_t word_size);
1294 
1295   // Add chunk to the list of chunks in use
1296   void add_chunk(Metachunk* v, bool make_current);
1297   void retire_current_chunk();
1298 
1299   Mutex* lock() const { return _lock; }
1300 
1301  protected:
1302   void initialize();
1303 
1304  public:
1305   SpaceManager(Metaspace::MetadataType mdtype,
1306                Metaspace::MetaspaceType space_type,
1307                Mutex* lock);
1308   ~SpaceManager();
1309 
1310   enum ChunkMultiples {
1311     MediumChunkMultiple = 4
1312   };
1313 
1314   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
1315   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
1316   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
1317 
1318   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
1319 
1320   // Accessors
1321   bool is_class() const { return _mdtype == Metaspace::ClassType; }
1322 
1323   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
1324   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
1325   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
1326 
1327   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
1328 
1329   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
1330 
1331   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
1332   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
1333   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
1334   size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
1335   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
1336 
1337   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
1338 
1339   static Mutex* expand_lock() { return _expand_lock; }
1340 
1341   // Increment the per Metaspace and global running sums for Metachunks
1342   // by the given size.  This is used when a Metachunk is added to
1343   // the in-use list.
1344   void inc_size_metrics(size_t words);
1345   // Increment the per Metaspace and global running sums for Metablocks by the given
1346   // size.  This is used when a Metablock is allocated.
1347   void inc_used_metrics(size_t words);
1348   // Remove this SpaceManager's portion from the running sums. That is,
1349   // the global running sums for the Metachunks and Metablocks are
1350   // decremented for all the Metachunks in use by this SpaceManager.
1351   void dec_total_from_size_metrics();
1352 
1353   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
1354   // or return the unadjusted size if the requested size is humongous.
1355   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
1356   size_t adjust_initial_chunk_size(size_t requested) const;
1357 
1358   // Get the initial chunk size for this metaspace type.
1359   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
1360 
1361   size_t sum_capacity_in_chunks_in_use() const;
1362   size_t sum_used_in_chunks_in_use() const;
1363   size_t sum_free_in_chunks_in_use() const;
1364   size_t sum_waste_in_chunks_in_use() const;
1365   size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;
1366 
1367   size_t sum_count_in_chunks_in_use();
1368   size_t sum_count_in_chunks_in_use(ChunkIndex i);
1369 
1370   Metachunk* get_new_chunk(size_t chunk_word_size);
1371 
1372   // Block allocation and deallocation.
1373   // Allocates a block from the current chunk
1374   MetaWord* allocate(size_t word_size);
1375 
1376   // Helper for allocations
1377   MetaWord* allocate_work(size_t word_size);
1378 
1379   // Returns a block to the per manager freelist
1380   void deallocate(MetaWord* p, size_t word_size);
1381 
1382   // Based on the allocation size and a minimum chunk size, calculate
1383   // the chunk size to request (for expanding the space available for chunk allocation).
1384   size_t calc_chunk_size(size_t allocation_word_size);
1385 
1386   // Called when an allocation from the current chunk fails.
1387   // Gets a new chunk (may require getting a new virtual space),
1388   // and allocates from that chunk.
1389   MetaWord* grow_and_allocate(size_t word_size);
1390 
1391   // Notify MemoryService of the current metaspace memory usage.
1392   void track_metaspace_memory_usage();
1393 
1394   // debugging support.
1395 
1396   void dump(outputStream* const out) const;
1397   void print_on(outputStream* st) const;
1398   void locked_print_chunks_in_use_on(outputStream* st) const;
1399 
1400   void verify();
1401   void verify_chunk_size(Metachunk* chunk);
1402 #ifdef ASSERT
1403   void verify_allocated_blocks_words();
1404 #endif
1405 
1406   // This adjusts the given size up to at least the minimum allocation size in
1407   // words for data in metaspace.  Essentially, the minimum size is currently 3 words.
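       // For example, a one-word request is first raised to sizeof(Metablock) bytes,
       // then aligned up to Metachunk::object_alignment(); with the current settings
       // this yields the 3-word minimum mentioned above.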
1408   size_t get_allocation_word_size(size_t word_size) {
1409     size_t byte_size = word_size * BytesPerWord;
1410 
1411     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
1412     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
1413 
1414     size_t raw_word_size = raw_bytes_size / BytesPerWord;
1415     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
1416 
1417     return raw_word_size;
1418   }
1419 };
1420 
1421 uint const SpaceManager::_small_chunk_limit = 4;
1422 uint const SpaceManager::_anon_metadata_specialize_chunk_limit = 4;
1423 
1424 const char* SpaceManager::_expand_lock_name =
1425   "SpaceManager chunk allocation lock";
1426 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
1427 Mutex* const SpaceManager::_expand_lock =
1428   new Mutex(SpaceManager::_expand_lock_rank,
1429             SpaceManager::_expand_lock_name,
1430             Mutex::_allow_vm_block_flag,
1431             Monitor::_safepoint_check_never);
1432 
1433 void VirtualSpaceNode::inc_container_count() {
1434   assert_lock_strong(SpaceManager::expand_lock());
1435   _container_count++;
1436 }
1437 
1438 void VirtualSpaceNode::dec_container_count() {
1439   assert_lock_strong(SpaceManager::expand_lock());
1440   _container_count--;
1441 }
1442 
1443 #ifdef ASSERT
1444 void VirtualSpaceNode::verify_container_count() {
1445   assert(_container_count == container_count_slow(),
1446          "Inconsistency in container_count _container_count " UINTX_FORMAT
1447          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
1448 }
1449 #endif
1450 
1451 // BlockFreelist methods
1452 
1453 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
1454 
1455 BlockFreelist::~BlockFreelist() {
1456   delete _dictionary;
1457   if (_small_blocks != NULL) {
1458     delete _small_blocks;
1459   }
1460 }
1461 
1462 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
1463   assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
1464 
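       // Reuse the freed memory itself to hold the freelist node: placement-new a
       // Metablock header into the returned block.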
1465   Metablock* free_chunk = ::new (p) Metablock(word_size);
1466   if (word_size < SmallBlocks::small_block_max_size()) {
1467     small_blocks()->return_block(free_chunk, word_size);
1468   } else {
1469     dictionary()->return_chunk(free_chunk);
1470   }
1471   log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
1472             SIZE_FORMAT, p2i(free_chunk), word_size);
1473 }
1474 
1475 MetaWord* BlockFreelist::get_block(size_t word_size) {
1476   assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
1477 
1478   // Try small_blocks first.
1479   if (word_size < SmallBlocks::small_block_max_size()) {
1480     // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
1481     // this space manager.
1482     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
1483     if (new_block != NULL) {
1484       log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1485               p2i(new_block), word_size);
1486       return new_block;
1487     }
1488   }
1489 
1490   if (word_size < BlockFreelist::min_dictionary_size()) {
1491     // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
1492     return NULL;
1493   }
1494 
1495   Metablock* free_block = dictionary()->get_chunk(word_size);
1496   if (free_block == NULL) {
1497     return NULL;
1498   }
1499 
1500   const size_t block_size = free_block->size();
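       // If the block found is disproportionately larger than the request (more than
       // WasteMultiplier times the requested size), return it to the freelist and
       // report failure instead of using it.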
1501   if (block_size > WasteMultiplier * word_size) {
1502     return_block((MetaWord*)free_block, block_size);
1503     return NULL;
1504   }
1505 
1506   MetaWord* new_block = (MetaWord*)free_block;
1507   assert(block_size >= word_size, "Incorrect size of block from freelist");
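       // Split off the unused tail and return it to the freelist if it is large
       // enough to be tracked; smaller remainders stay as waste at the end of the
       // returned block.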
1508   const size_t unused = block_size - word_size;
1509   if (unused >= SmallBlocks::small_block_min_size()) {
1510     return_block(new_block + word_size, unused);
1511   }
1512 
1513   log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1514             p2i(new_block), word_size);
1515   return new_block;
1516 }
1517 
1518 void BlockFreelist::print_on(outputStream* st) const {
1519   dictionary()->print_free_lists(st);
1520   if (_small_blocks != NULL) {
1521     _small_blocks->print_on(st);
1522   }
1523 }
1524 
1525 // VirtualSpaceNode methods
1526 
1527 VirtualSpaceNode::~VirtualSpaceNode() {
1528   _rs.release();
1529   if (_occupancy_map != NULL) {
1530     delete _occupancy_map;
1531   }
1532 #ifdef ASSERT
1533   size_t word_size = sizeof(*this) / BytesPerWord;
1534   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
1535 #endif
1536 }
1537 
1538 size_t VirtualSpaceNode::used_words_in_vs() const {
1539   return pointer_delta(top(), bottom(), sizeof(MetaWord));
1540 }
1541 
1542 // Space committed in the VirtualSpace
1543 size_t VirtualSpaceNode::capacity_words_in_vs() const {
1544   return pointer_delta(end(), bottom(), sizeof(MetaWord));
1545 }
1546 
1547 size_t VirtualSpaceNode::free_words_in_vs() const {
1548   return pointer_delta(end(), top(), sizeof(MetaWord));
1549 }
1550 
1551 // Given an address larger than top(), allocate padding chunks until top is at the given address.
1552 void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) {
1553 
1554   assert(target_top > top(), "Sanity");
1555 
1556   // Padding chunks are added to the freelist.
1557   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1558 
1559   // shorthands
1560   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1561   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1562   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1563 
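       // Work upward from top(), adding one padding chunk per iteration, until top()
       // reaches target_top. Depending on the alignment of the current top(), each
       // padding chunk is either a small or a specialized chunk.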
1564   while (top() < target_top) {
1565 
1566     // We could make this coding more generic, but right now we only deal with two possible chunk sizes
1567     // for padding chunks, so it is not worth it.
1568     size_t padding_chunk_word_size = small_word_size;
1569     if (is_aligned(top(), small_word_size * sizeof(MetaWord)) == false) {
1570       assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
1571       padding_chunk_word_size = spec_word_size;
1572     }
1573     MetaWord* here = top();
1574     assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
1575     inc_top(padding_chunk_word_size);
1576 
1577     // Create new padding chunk.
1578     ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
1579     assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
1580 
1581     Metachunk* const padding_chunk =
1582       ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
1583     assert(padding_chunk == (Metachunk*)here, "Sanity");
1584     DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
1585     log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
1586                                        PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
1587                                        (is_class() ? "class space " : "metaspace"),
1588                                        p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
1589 
1590     // Mark chunk start in occupancy map.
1591     occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
1592 
1593     // Chunks are born as in-use (see Metachunk ctor). So, before returning
1594     // the padding chunk to its chunk manager, mark it as in use (ChunkManager
1595     // will assert that).
1596     do_update_in_use_info_for_chunk(padding_chunk, true);
1597 
1598     // Return Chunk to freelist.
1599     inc_container_count();
1600     chunk_manager->return_single_chunk(padding_chunk_type, padding_chunk);
1601     // Please note: at this point, ChunkManager::return_single_chunk()
1602     // may already have merged the padding chunk with neighboring chunks, so
1603     // it may have vanished at this point. Do not reference the padding
1604     // chunk beyond this point.
1605   }
1606 
1607   assert(top() == target_top, "Sanity");
1608 
1609 } // allocate_padding_chunks_until_top_is_at()
1610 
1611 // Allocates the chunk from the virtual space only.
1612 // This interface is also used internally for debugging.  Not all
1613 // chunks removed here are necessarily used for allocation.
1614 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1615   // Non-humongous chunks are to be allocated aligned to their chunk
1616   // size. So, start addresses of medium chunks are aligned to medium
1617   // chunk size, those of small chunks to small chunk size and so
1618   // forth. This facilitates merging of free chunks and reduces
1619   // fragmentation. Chunk sizes are spec < small < medium, with each
1620   // larger chunk size being a multiple of the next smaller chunk
1621   // size.
1622   // Because of this alignment, we may need to create a number of padding
1623   // chunks. These chunks are created and added to the freelist.
1624 
1625   // The chunk manager to which we will give our padding chunks.
1626   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1627 
1628   // shorthands
1629   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1630   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1631   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1632 
1633   assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
1634          chunk_word_size >= med_word_size, "Invalid chunk size requested.");
1635 
1636   // Chunk alignment (in bytes) == chunk size unless humongous.
1637   // Humongous chunks are aligned to the smallest chunk size (spec).
1638   const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
1639                                            spec_word_size : chunk_word_size) * sizeof(MetaWord);
1640 
1641   // Do we have enough space to create the requested chunk plus
1642   // any padding chunks needed?
1643   MetaWord* const next_aligned =
1644     static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
1645   if (!is_available((next_aligned - top()) + chunk_word_size)) {
1646     return NULL;
1647   }
1648 
1649   // Before allocating the requested chunk, allocate padding chunks if necessary.
1650   // We only need to do this for small or medium chunks: specialized chunks are the
1651   // smallest size, hence always aligned. Humongous chunks are allocated unaligned
1652   // (implicitly, also aligned to smallest chunk size).
1653   if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top())  {
1654     log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
1655         (is_class() ? "class space " : "metaspace"),
1656         top(), next_aligned);
1657     allocate_padding_chunks_until_top_is_at(next_aligned);
1658     // Now, top should be aligned correctly.
1659     assert_is_aligned(top(), required_chunk_alignment);
1660   }
1661 
1662   // Now, top should be aligned correctly.
1663   assert_is_aligned(top(), required_chunk_alignment);
1664 
1665   // Bottom of the new chunk
1666   MetaWord* chunk_limit = top();
1667   assert(chunk_limit != NULL, "Not safe to call this method");
1668 
1669   // The virtual spaces are always expanded by the
1670   // commit granularity to enforce the following condition.
1671   // Without this the is_available check will not work correctly.
1672   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1673       "The committed memory doesn't match the expanded memory.");
1674 
1675   if (!is_available(chunk_word_size)) {
1676     LogTarget(Debug, gc, metaspace, freelist) lt;
1677     if (lt.is_enabled()) {
1678       LogStream ls(lt);
1679       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1680       // Dump some information about the virtual space that is nearly full
1681       print_on(&ls);
1682     }
1683     return NULL;
1684   }
1685 
1686   // Take the space (bump top on the current virtual space).
1687   inc_top(chunk_word_size);
1688 
1689   // Initialize the chunk
1690   ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
1691   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
1692   assert(result == (Metachunk*)chunk_limit, "Sanity");
1693   occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
1694   do_update_in_use_info_for_chunk(result, true);
1695 
1696   inc_container_count();
1697 
1698   DEBUG_ONLY(chunk_manager->locked_verify());
1699   DEBUG_ONLY(this->verify());
1700   DEBUG_ONLY(do_verify_chunk(result));
1701 
1702   result->inc_use_count();
1703 
1704   return result;
1705 }
1706 
1707 
1708 // Expand the virtual space (commit more of the reserved space)
1709 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1710   size_t min_bytes = min_words * BytesPerWord;
1711   size_t preferred_bytes = preferred_words * BytesPerWord;
1712 
1713   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1714 
1715   if (uncommitted < min_bytes) {
1716     return false;
1717   }
1718 
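       // Commit the preferred amount if the remaining reserved space allows it;
       // otherwise commit whatever is left (which is at least min_bytes, see above).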
1719   size_t commit = MIN2(preferred_bytes, uncommitted);
1720   bool result = virtual_space()->expand_by(commit, false);
1721 
1722   if (result) {
1723     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " bytes.",
1724               (is_class() ? "class" : "non-class"), commit);
1725   } else {
1726     log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " bytes.",
1727               (is_class() ? "class" : "non-class"), commit);
1728   }
1729 
1730   assert(result, "Failed to commit memory");
1731 
1732   return result;
1733 }
1734 
1735 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1736   assert_lock_strong(SpaceManager::expand_lock());
1737   Metachunk* result = take_from_committed(chunk_word_size);
1738   return result;
1739 }
1740 
1741 bool VirtualSpaceNode::initialize() {
1742 
1743   if (!_rs.is_reserved()) {
1744     return false;
1745   }
1746 
1747   // These are necessary restrictions to make sure that the virtual space always
1748   // grows in steps of Metaspace::commit_alignment(). If both base and size are
1749   // aligned, only the middle alignment of the VirtualSpace is used.
1750   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
1751   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
1752 
1753   // ReservedSpaces marked as special will have the entire memory
1754   // pre-committed. Setting a committed size will make sure that
1755   // committed_size and actual_committed_size agree.
1756   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1757 
1758   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1759                                             Metaspace::commit_alignment());
1760   if (result) {
1761     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1762         "Checking that the pre-committed memory was registered by the VirtualSpace");
1763 
1764     set_top((MetaWord*)virtual_space()->low());
1765     set_reserved(MemRegion((HeapWord*)_rs.base(),
1766                  (HeapWord*)(_rs.base() + _rs.size())));
1767 
1768     assert(reserved()->start() == (HeapWord*) _rs.base(),
1769            "Reserved start was not set properly " PTR_FORMAT
1770            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1771     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1772            "Reserved size was not set properly " SIZE_FORMAT
1773            " != " SIZE_FORMAT, reserved()->word_size(),
1774            _rs.size() / BytesPerWord);
1775   }
1776 
1777   // Initialize Occupancy Map.
1778   const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
1779   _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
1780 
1781   return result;
1782 }
1783 
1784 void VirtualSpaceNode::print_on(outputStream* st) const {
1785   size_t used = used_words_in_vs();
1786   size_t capacity = capacity_words_in_vs();
1787   VirtualSpace* vs = virtual_space();
1788   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
1789            "[" PTR_FORMAT ", " PTR_FORMAT ", "
1790            PTR_FORMAT ", " PTR_FORMAT ")",
1791            p2i(vs), capacity / K,
1792            capacity == 0 ? 0 : used * 100 / capacity,
1793            p2i(bottom()), p2i(top()), p2i(end()),
1794            p2i(vs->high_boundary()));
1795 }
1796 
1797 #ifdef ASSERT
1798 void VirtualSpaceNode::mangle() {
1799   size_t word_size = capacity_words_in_vs();
1800   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1801 }
1802 #endif // ASSERT
1803 
1804 // VirtualSpaceList methods
1805 // Space allocated from the VirtualSpace
1806 
1807 VirtualSpaceList::~VirtualSpaceList() {
1808   VirtualSpaceListIterator iter(virtual_space_list());
1809   while (iter.repeat()) {
1810     VirtualSpaceNode* vsl = iter.get_next();
1811     delete vsl;
1812   }
1813 }
1814 
1815 void VirtualSpaceList::inc_reserved_words(size_t v) {
1816   assert_lock_strong(SpaceManager::expand_lock());
1817   _reserved_words = _reserved_words + v;
1818 }
1819 void VirtualSpaceList::dec_reserved_words(size_t v) {
1820   assert_lock_strong(SpaceManager::expand_lock());
1821   _reserved_words = _reserved_words - v;
1822 }
1823 
1824 #define assert_committed_below_limit()                        \
1825   assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
1826          "Too much committed memory. Committed: " SIZE_FORMAT \
1827          " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1828                  MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);
1829 
1830 void VirtualSpaceList::inc_committed_words(size_t v) {
1831   assert_lock_strong(SpaceManager::expand_lock());
1832   _committed_words = _committed_words + v;
1833 
1834   assert_committed_below_limit();
1835 }
1836 void VirtualSpaceList::dec_committed_words(size_t v) {
1837   assert_lock_strong(SpaceManager::expand_lock());
1838   _committed_words = _committed_words - v;
1839 
1840   assert_committed_below_limit();
1841 }
1842 
1843 void VirtualSpaceList::inc_virtual_space_count() {
1844   assert_lock_strong(SpaceManager::expand_lock());
1845   _virtual_space_count++;
1846 }
1847 void VirtualSpaceList::dec_virtual_space_count() {
1848   assert_lock_strong(SpaceManager::expand_lock());
1849   _virtual_space_count--;
1850 }
1851 
1852 void ChunkManager::remove_chunk(Metachunk* chunk) {
1853   size_t word_size = chunk->word_size();
1854   ChunkIndex index = list_index(word_size);
1855   if (index != HumongousIndex) {
1856     free_chunks(index)->remove_chunk(chunk);
1857   } else {
1858     humongous_dictionary()->remove_chunk(chunk);
1859   }
1860 
1861   // Chunk has been removed from the chunks free list, update counters.
1862   account_for_removed_chunk(chunk);
1863 }
1864 
1865 bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
1866   assert_lock_strong(SpaceManager::expand_lock());
1867   assert(chunk != NULL, "invalid chunk pointer");
1868   // Check for valid merge combinations.
1869   assert((chunk->get_chunk_type() == SpecializedIndex &&
1870           (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) ||
1871          (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex),
1872         "Invalid chunk merge combination.");
1873 
1874   const size_t target_chunk_word_size =
1875     get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class());
1876 
1877   // [ prospective merge region )
1878   MetaWord* const p_merge_region_start =
1879     (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord));
1880   MetaWord* const p_merge_region_end =
1881     p_merge_region_start + target_chunk_word_size;
1882 
1883   // We need the VirtualSpaceNode containing this chunk and its occupancy map.
1884   VirtualSpaceNode* const vsn = chunk->container();
1885   OccupancyMap* const ocmap = vsn->occupancy_map();
1886 
1887   // The prospective chunk merge range must be completely contained by the
1888   // committed range of the virtual space node.
1889   if (p_merge_region_start < vsn->bottom() || p_merge_region_end > vsn->top()) {
1890     return false;
1891   }
1892 
1893   // Only attempt to merge this range if at its start a chunk starts and at its end
1894   // a chunk ends. If a chunk (can only be humongous) straddles either start or end
1895   // of that range, we cannot merge.
1896   if (!ocmap->chunk_starts_at_address(p_merge_region_start)) {
1897     return false;
1898   }
1899   if (p_merge_region_end < vsn->top() &&
1900       !ocmap->chunk_starts_at_address(p_merge_region_end)) {
1901     return false;
1902   }
1903 
1904   // Now check if the prospective merge area contains live chunks. If it does we cannot merge.
1905   if (ocmap->is_region_in_use(p_merge_region_start, target_chunk_word_size)) {
1906     return false;
1907   }
1908 
1909   // Success! Remove all chunks in this region...
1910   log_trace(gc, metaspace, freelist)("%s: coalescing chunks in area [%p-%p)...",
1911     (is_class() ? "class space" : "metaspace"),
1912     p_merge_region_start, p_merge_region_end);
1913 
1914   const int num_chunks_removed =
1915     remove_chunks_in_area(p_merge_region_start, target_chunk_word_size);
1916 
1917   // ... and create a single new bigger chunk.
1918   Metachunk* const p_new_chunk =
1919       ::new (p_merge_region_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn);
1920   assert(p_new_chunk == (Metachunk*)p_merge_region_start, "Sanity");
1921   p_new_chunk->set_origin(origin_merge);
1922 
1923   log_trace(gc, metaspace, freelist)("%s: created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".",
1924     (is_class() ? "class space" : "metaspace"),
1925     p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord));
1926 
1927   // Fix occupancy map: remove old start bits of the small chunks and set new start bit.
1928   ocmap->wipe_chunk_start_bits_in_region(p_merge_region_start, target_chunk_word_size);
1929   ocmap->set_chunk_starts_at_address(p_merge_region_start, true);
1930 
1931   // Mark chunk as free. Note: it is not necessary to update the in-use
1932   // info in the occupancy map, because the old chunks were also free, so nothing
1933   // should have changed.
1934   p_new_chunk->set_is_tagged_free(true);
1935 
1936   // Add new chunk to its freelist.
1937   ChunkList* const list = free_chunks(target_chunk_type);
1938   list->return_chunk_at_head(p_new_chunk);
1939 
1940   // And adjust ChunkManager::_free_chunks_count (_free_chunks_total
1941   // should not have changed, because the size of the space should be the same)
1942   _free_chunks_count -= num_chunks_removed;
1943   _free_chunks_count ++;
1944 
1945   // VirtualSpaceNode::container_count does not have to be modified:
1946   // it means "number of active (non-free) chunks", so merging free chunks
1947   // should not affect that count.
1948 
1949   // At the end of a chunk merge, run verification tests.
1950   DEBUG_ONLY(this->locked_verify());
1951   DEBUG_ONLY(vsn->verify());
1952 
1953   return true;
1954 }
1955 
1956 // Remove all chunks in the given area - the chunks are supposed to be free -
1957 // from their corresponding freelists. Mark them as invalid.
1958 // - This does not correct the occupancy map.
1959 // - This does not adjust the counters in ChunkManager.
1960 // - Does not adjust container count counter in containing VirtualSpaceNode
1961 // Returns number of chunks removed.
1962 int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
1963   assert(p != NULL && word_size > 0, "Invalid range.");
1964   const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
1965   assert_is_aligned(word_size, smallest_chunk_size);
1966 
1967   Metachunk* const start = (Metachunk*) p;
1968   const Metachunk* const end = (Metachunk*)(p + word_size);
1969   Metachunk* cur = start;
1970   int num_removed = 0;
1971   while (cur < end) {
1972     Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
1973     DEBUG_ONLY(do_verify_chunk(cur));
1974     assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
1975     assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
1976     log_trace(gc, metaspace, freelist)("%s: removing chunk %p, size " SIZE_FORMAT_HEX ".",
1977       (is_class() ? "class space" : "metaspace"),
1978       cur, cur->word_size() * sizeof(MetaWord));
1979     cur->remove_sentinel();
1980     // Note: cannot call ChunkManager::remove_chunk, because that
1981     // modifies the counters in ChunkManager, which we do not want. So
1982     // we call remove_chunk on the freelist directly (see also the
1983     // splitting function which does the same).
1984     ChunkList* const list = free_chunks(list_index(cur->word_size()));
1985     list->remove_chunk(cur);
1986     num_removed ++;
1987     cur = next;
1988   }
1989   return num_removed;
1990 }
1991 
1992 // Walk the list of VirtualSpaceNodes and delete
1993 // nodes with a 0 container_count.  Remove Metachunks in
1994 // the node from their respective freelists.
1995 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1996   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
1997   assert_lock_strong(SpaceManager::expand_lock());
1998   // Don't use a VirtualSpaceListIterator because this
1999   // list is being changed and a straightforward use of an iterator is not safe.
2000   VirtualSpaceNode* purged_vsl = NULL;
2001   VirtualSpaceNode* prev_vsl = virtual_space_list();
2002   VirtualSpaceNode* next_vsl = prev_vsl;
2003   while (next_vsl != NULL) {
2004     VirtualSpaceNode* vsl = next_vsl;
2005     DEBUG_ONLY(vsl->verify_container_count();)
2006     next_vsl = vsl->next();
2007     // Don't free the current virtual space since it will likely
2008     // be needed soon.
2009     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
2010       log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
2011                                          ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
2012       // Unlink it from the list
2013       if (prev_vsl == vsl) {
2014         // This is the case of the current node being the first node.
2015         assert(vsl == virtual_space_list(), "Expected to be the first node");
2016         set_virtual_space_list(vsl->next());
2017       } else {
2018         prev_vsl->set_next(vsl->next());
2019       }
2020 
2021       vsl->purge(chunk_manager);
2022       dec_reserved_words(vsl->reserved_words());
2023       dec_committed_words(vsl->committed_words());
2024       dec_virtual_space_count();
2025       purged_vsl = vsl;
2026       delete vsl;
2027     } else {
2028       prev_vsl = vsl;
2029     }
2030   }
2031 #ifdef ASSERT
2032   if (purged_vsl != NULL) {
2033     // List should be stable enough to use an iterator here.
2034     VirtualSpaceListIterator iter(virtual_space_list());
2035     while (iter.repeat()) {
2036       VirtualSpaceNode* vsl = iter.get_next();
2037       assert(vsl != purged_vsl, "Purge of vsl failed");
2038     }
2039   }
2040 #endif
2041 }
2042 
2043 
2044 // This function looks at the mmap regions in the metaspace without locking.
2045 // The chunks are added with store ordering and not deleted except at
2046 // unloading time during a safepoint.
2047 bool VirtualSpaceList::contains(const void* ptr) {
2048   // List should be stable enough to use an iterator here because removing virtual
2049   // space nodes is only allowed at a safepoint.
2050   VirtualSpaceListIterator iter(virtual_space_list());
2051   while (iter.repeat()) {
2052     VirtualSpaceNode* vsn = iter.get_next();
2053     if (vsn->contains(ptr)) {
2054       return true;
2055     }
2056   }
2057   return false;
2058 }
2059 
2060 void VirtualSpaceList::retire_current_virtual_space() {
2061   assert_lock_strong(SpaceManager::expand_lock());
2062 
2063   VirtualSpaceNode* vsn = current_virtual_space();
2064 
2065   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
2066                                   Metaspace::chunk_manager_metadata();
2067 
2068   vsn->retire(cm);
2069 }
2070 
2071 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
2072   DEBUG_ONLY(verify_container_count();)
2073   assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
2074   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
2075     ChunkIndex index = (ChunkIndex)i;
2076     size_t chunk_size = chunk_manager->size_by_index(index);
2077 
2078     while (free_words_in_vs() >= chunk_size) {
2079       Metachunk* chunk = get_chunk_vs(chunk_size);
2080       // Chunk will be allocated aligned, so allocation may require
2081       // additional padding chunks. That may cause the above allocation to
2082       // fail. Just ignore the failed allocation and continue with the
2083       // next smaller chunk size. As the VirtualSpaceNode committed
2084       // size should be a multiple of the smallest chunk size, we
2085       // should always be able to fill the VirtualSpace completely.
2086       if (chunk == NULL) {
2087         break;
2088       }
2089       chunk_manager->return_single_chunk(index, chunk);
2090     }
2091     DEBUG_ONLY(verify_container_count();)
2092   }
2093   assert(free_words_in_vs() == 0, "should be empty now");
2094 }
2095 
2096 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
2097                                    _is_class(false),
2098                                    _virtual_space_list(NULL),
2099                                    _current_virtual_space(NULL),
2100                                    _reserved_words(0),
2101                                    _committed_words(0),
2102                                    _virtual_space_count(0) {
2103   MutexLockerEx cl(SpaceManager::expand_lock(),
2104                    Mutex::_no_safepoint_check_flag);
2105   create_new_virtual_space(word_size);
2106 }
2107 
2108 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
2109                                    _is_class(true),
2110                                    _virtual_space_list(NULL),
2111                                    _current_virtual_space(NULL),
2112                                    _reserved_words(0),
2113                                    _committed_words(0),
2114                                    _virtual_space_count(0) {
2115   MutexLockerEx cl(SpaceManager::expand_lock(),
2116                    Mutex::_no_safepoint_check_flag);
2117   VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
2118   bool succeeded = class_entry->initialize();
2119   if (succeeded) {
2120     link_vs(class_entry);
2121   }
2122 }
2123 
2124 size_t VirtualSpaceList::free_bytes() {
2125   return current_virtual_space()->free_words_in_vs() * BytesPerWord;
2126 }
2127 
2128 // Allocate another meta virtual space and add it to the list.
2129 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
2130   assert_lock_strong(SpaceManager::expand_lock());
2131 
2132   if (is_class()) {
2133     assert(false, "We currently don't support more than one VirtualSpace for"
2134                   " the compressed class space. The initialization of the"
2135                   " CCS uses another code path and should not hit this path.");
2136     return false;
2137   }
2138 
2139   if (vs_word_size == 0) {
2140     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
2141     return false;
2142   }
2143 
2144   // Reserve the space
2145   size_t vs_byte_size = vs_word_size * BytesPerWord;
2146   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
2147 
2148   // Allocate the meta virtual space and initialize it.
2149   VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
2150   if (!new_entry->initialize()) {
2151     delete new_entry;
2152     return false;
2153   } else {
2154     assert(new_entry->reserved_words() == vs_word_size,
2155         "Reserved memory size differs from requested memory size");
2156     // ensure lock-free iteration sees fully initialized node
2157     OrderAccess::storestore();
2158     link_vs(new_entry);
2159     return true;
2160   }
2161 }
2162 
2163 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
2164   if (virtual_space_list() == NULL) {
2165       set_virtual_space_list(new_entry);
2166   } else {
2167     current_virtual_space()->set_next(new_entry);
2168   }
2169   set_current_virtual_space(new_entry);
2170   inc_reserved_words(new_entry->reserved_words());
2171   inc_committed_words(new_entry->committed_words());
2172   inc_virtual_space_count();
2173 #ifdef ASSERT
2174   new_entry->mangle();
2175 #endif
2176   LogTarget(Trace, gc, metaspace) lt;
2177   if (lt.is_enabled()) {
2178     LogStream ls(lt);
2179     VirtualSpaceNode* vsl = current_virtual_space();
2180     ResourceMark rm;
2181     vsl->print_on(&ls);
2182   }
2183 }
2184 
2185 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
2186                                       size_t min_words,
2187                                       size_t preferred_words) {
2188   size_t before = node->committed_words();
2189 
2190   bool result = node->expand_by(min_words, preferred_words);
2191 
2192   size_t after = node->committed_words();
2193 
2194   // after and before can be the same if the memory was pre-committed.
2195   assert(after >= before, "Inconsistency");
2196   inc_committed_words(after - before);
2197 
2198   return result;
2199 }
2200 
2201 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
2202   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
2203   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
2204   assert(min_words <= preferred_words, "Invalid arguments");
2205 
2206   const char* const class_or_not = (is_class() ? "class" : "non-class");
2207 
2208   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
2209     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
2210               class_or_not);
2211     return  false;
2212   }
2213 
2214   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
2215   if (allowed_expansion_words < min_words) {
2216     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
2217               class_or_not);
2218     return false;
2219   }
2220 
2221   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
2222 
2223   // Commit more memory from the current virtual space.
2224   bool vs_expanded = expand_node_by(current_virtual_space(),
2225                                     min_words,
2226                                     max_expansion_words);
2227   if (vs_expanded) {
2228      log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
2229                class_or_not);
2230      return true;
2231   }
2232   log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
2233             class_or_not);
2234   retire_current_virtual_space();
2235 
2236   // Get another virtual space.
2237   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
2238   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
2239 
2240   if (create_new_virtual_space(grow_vs_words)) {
2241     if (current_virtual_space()->is_pre_committed()) {
2242       // The memory was pre-committed, so we are done here.
2243       assert(min_words <= current_virtual_space()->committed_words(),
2244           "The new VirtualSpace was pre-committed, so it"
2245           "should be large enough to fit the alloc request.");
2246       return true;
2247     }
2248 
2249     return expand_node_by(current_virtual_space(),
2250                           min_words,
2251                           max_expansion_words);
2252   }
2253 
2254   return false;
2255 }
2256 
2257 // Given a chunk, calculate the largest possible padding space which
2258 // could be required when allocating it.
2259 static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
2260   const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
2261   if (chunk_type != HumongousIndex) {
2262     // Normal, non-humongous chunks are allocated at chunk size
2263     // boundaries, so the largest padding space required would be that
2264     // minus the smallest chunk size.
2265     const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
2266     return chunk_word_size - smallest_chunk_size;
2267   } else {
2268     // Humongous chunks are allocated at smallest-chunksize
2269     // boundaries, so there is no padding required.
2270     return 0;
2271   }
2272 }
2273 
2274 
2275 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
2276 
2277   // Allocate a chunk out of the current virtual space.
2278   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2279 
2280   if (next != NULL) {
2281     return next;
2282   }
2283 
2284   // The expand amount is currently only determined by the requested sizes
2285   // and not how much committed memory is left in the current virtual space.
2286 
2287   // We must have enough space for the requested size and any
2288   // additional required padding chunks.
2289   const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
2290 
2291   size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
2292   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
2293   if (min_word_size >= preferred_word_size) {
2294     // Can happen when humongous chunks are allocated.
2295     preferred_word_size = min_word_size;
2296   }
2297 
2298   bool expanded = expand_by(min_word_size, preferred_word_size);
2299   if (expanded) {
2300     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2301     assert(next != NULL, "The allocation was expected to succeed after the expansion");
2302   }
2303 
2304   return next;
2305 }
2306 
2307 void VirtualSpaceList::print_on(outputStream* st) const {
2308   VirtualSpaceListIterator iter(virtual_space_list());
2309   while (iter.repeat()) {
2310     VirtualSpaceNode* node = iter.get_next();
2311     node->print_on(st);
2312   }
2313 }
2314 
2315 void VirtualSpaceList::print_map(outputStream* st) const {
2316   VirtualSpaceNode* list = virtual_space_list();
2317   VirtualSpaceListIterator iter(list);
2318   unsigned i = 0;
2319   while (iter.repeat()) {
2320     st->print_cr("Node %u:", i);
2321     VirtualSpaceNode* node = iter.get_next();
2322     node->print_map(st, this->is_class());
2323     i ++;
2324   }
2325 }
2326 
2327 // MetaspaceGC methods
2328 
2329 // VM_CollectForMetadataAllocation is the vm operation used to GC.
2330 // Within the VM operation after the GC the attempt to allocate the metadata
2331 // should succeed.  If the GC did not free enough space for the metaspace
2332 // allocation, the HWM is increased so that another virtualspace will be
2333 // allocated for the metadata.  With perm gen the increase in the perm
2334 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
2335 // metaspace policy uses those as the small and large steps for the HWM.
2336 //
2337 // After the GC the compute_new_size() for MetaspaceGC is called to
2338 // resize the capacity of the metaspaces.  The current implementation
2339 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
2340 // to resize the Java heap by some GC's.  New flags can be implemented
2341 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
2342 // free space is desirable in the metaspace capacity to decide how much
2343 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
2344 // free space is desirable in the metaspace capacity before decreasing
2345 // the HWM.
2346 
2347 // Calculate the amount to increase the high water mark (HWM).
2348 // Increase by a minimum amount (MinMetaspaceExpansion) so that
2349 // another expansion is not requested too soon.  If that is not
2350 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
2351 // If that is still not enough, expand by the size of the allocation
2352 // plus some.
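     // For example, a request that is no larger than MinMetaspaceExpansion (after
     // alignment) grows the HWM by MinMetaspaceExpansion; one between the two flags
     // grows it by MaxMetaspaceExpansion; anything larger grows it by the aligned
     // request plus MinMetaspaceExpansion.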
2353 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
2354   size_t min_delta = MinMetaspaceExpansion;
2355   size_t max_delta = MaxMetaspaceExpansion;
2356   size_t delta = align_up(bytes, Metaspace::commit_alignment());
2357 
2358   if (delta <= min_delta) {
2359     delta = min_delta;
2360   } else if (delta <= max_delta) {
2361     // Don't want to hit the high water mark on the next
2362     // allocation so make the delta greater than just enough
2363     // for this allocation.
2364     delta = max_delta;
2365   } else {
2366     // This allocation is large but the next ones are probably not
2367     // so increase by the minimum.
2368     delta = delta + min_delta;
2369   }
2370 
2371   assert_is_aligned(delta, Metaspace::commit_alignment());
2372 
2373   return delta;
2374 }
2375 
2376 size_t MetaspaceGC::capacity_until_GC() {
2377   size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
2378   assert(value >= MetaspaceSize, "Not initialized properly?");
2379   return value;
2380 }
2381 
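     // Try to raise _capacity_until_GC by v with a single compare-and-swap.
     // Returns false if another thread changed the value concurrently; in that
     // case nothing is updated and the caller may retry.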
2382 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
2383   assert_is_aligned(v, Metaspace::commit_alignment());
2384 
2385   intptr_t capacity_until_GC = _capacity_until_GC;
2386   intptr_t new_value = capacity_until_GC + v;
2387 
2388   if (new_value < capacity_until_GC) {
2389     // The addition wrapped around, set new_value to aligned max value.
2390     new_value = align_down(max_uintx, Metaspace::commit_alignment());
2391   }
2392 
2393   intptr_t expected = _capacity_until_GC;
2394   intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);
2395 
2396   if (expected != actual) {
2397     return false;
2398   }
2399 
2400   if (new_cap_until_GC != NULL) {
2401     *new_cap_until_GC = new_value;
2402   }
2403   if (old_cap_until_GC != NULL) {
2404     *old_cap_until_GC = capacity_until_GC;
2405   }
2406   return true;
2407 }
2408 
2409 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
2410   assert_is_aligned(v, Metaspace::commit_alignment());
2411 
2412   return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
2413 }
2414 
2415 void MetaspaceGC::initialize() {
2416   // Set the high-water mark to MaxMetaspaceSize during VM initialization since
2417   // we can't do a GC during initialization.
2418   _capacity_until_GC = MaxMetaspaceSize;
2419 }
2420 
2421 void MetaspaceGC::post_initialize() {
2422   // Reset the high-water mark once the VM initialization is done.
2423   _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
2424 }
2425 
2426 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
2427   // Check if the compressed class space is full.
2428   if (is_class && Metaspace::using_class_space()) {
2429     size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
2430     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
2431       log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
2432                 (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
2433       return false;
2434     }
2435   }
2436 
2437   // Check if the user has imposed a limit on the metaspace memory.
2438   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2439   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
2440     log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
2441               (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
2442     return false;
2443   }
2444 
2445   return true;
2446 }
2447 
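     // Returns how many words may still be committed before hitting either the
     // current GC threshold (capacity_until_GC) or MaxMetaspaceSize, whichever
     // is smaller.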
2448 size_t MetaspaceGC::allowed_expansion() {
2449   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2450   size_t capacity_until_gc = capacity_until_GC();
2451 
2452   assert(capacity_until_gc >= committed_bytes,
2453          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
2454          capacity_until_gc, committed_bytes);
2455 
2456   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
2457   size_t left_until_GC = capacity_until_gc - committed_bytes;
2458   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
2459   log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
2460             " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ".",
2461             left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);
2462 
2463   return left_to_commit / BytesPerWord;
2464 }
2465 
2466 void MetaspaceGC::compute_new_size() {
2467   assert(_shrink_factor <= 100, "invalid shrink factor");
2468   uint current_shrink_factor = _shrink_factor;
2469   _shrink_factor = 0;
2470 
2471   // Using committed_bytes() for used_after_gc is an overestimation, since the
2472   // chunk free lists are included in committed_bytes() and the memory in an
2473   // un-fragmented chunk free list is available for future allocations.
2474   // However, if the chunk free lists become fragmented, then the memory may
2475   // not be available for future allocations and the memory is therefore "in use".
2476   // Including the chunk free lists in the definition of "in use" is therefore
2477   // necessary. Not including the chunk free lists can cause capacity_until_GC to
2478   // shrink below committed_bytes() and this has caused serious bugs in the past.
2479   const size_t used_after_gc = MetaspaceUtils::committed_bytes();
2480   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
2481 
2482   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
2483   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
2484 
2485   const double min_tmp = used_after_gc / maximum_used_percentage;
2486   size_t minimum_desired_capacity =
2487     (size_t)MIN2(min_tmp, double(max_uintx));
2488   // Don't shrink less than the initial generation size
2489   minimum_desired_capacity = MAX2(minimum_desired_capacity,
2490                                   MetaspaceSize);
2491 
2492   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
2493   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
2494                            minimum_free_percentage, maximum_used_percentage);
2495   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
2496 
2497 
2498   size_t shrink_bytes = 0;
2499   if (capacity_until_GC < minimum_desired_capacity) {
2500     // If the current capacity (HWM) is below the minimum desired capacity,
2501     // then increase the HWM.
2502     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
2503     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
2504     // Don't expand unless it's significant
2505     if (expand_bytes >= MinMetaspaceExpansion) {
2506       size_t new_capacity_until_GC = 0;
2507       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
2508       assert(succeeded, "Should always successfully increment HWM when at safepoint");
2509 
2510       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2511                                                new_capacity_until_GC,
2512                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
2513       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
2514                                minimum_desired_capacity / (double) K,
2515                                expand_bytes / (double) K,
2516                                MinMetaspaceExpansion / (double) K,
2517                                new_capacity_until_GC / (double) K);
2518     }
2519     return;
2520   }
2521 
2522   // No expansion, now see if we want to shrink
2523   // We would never want to shrink more than this
2524   assert(capacity_until_GC >= minimum_desired_capacity,
2525          SIZE_FORMAT " >= " SIZE_FORMAT,
2526          capacity_until_GC, minimum_desired_capacity);
2527   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
2528 
2529   // Should shrinking be considered?
2530   if (MaxMetaspaceFreeRatio < 100) {
2531     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
2532     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
2533     const double max_tmp = used_after_gc / minimum_used_percentage;
2534     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
2535     maximum_desired_capacity = MAX2(maximum_desired_capacity,
2536                                     MetaspaceSize);
2537     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
2538                              maximum_free_percentage, minimum_used_percentage);
2539     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
2540                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
2541 
2542     assert(minimum_desired_capacity <= maximum_desired_capacity,
2543            "sanity check");
2544 
2545     if (capacity_until_GC > maximum_desired_capacity) {
2546       // Capacity too large, compute shrinking size
2547       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
2548       // We don't want to shrink all the way back to initSize if people call
2549       // System.gc(), because some programs do that between "phases" and then
2550       // we'd just have to grow the heap up again for the next phase.  So we
2551       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
2552       // on the third call, and 100% by the fourth call.  But if we recompute
2553       // size without shrinking, it goes back to 0%.
2554       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
2555 
2556       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
2557 
2558       assert(shrink_bytes <= max_shrink_bytes,
2559              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
2560              shrink_bytes, max_shrink_bytes);
2561       if (current_shrink_factor == 0) {
2562         _shrink_factor = 10;
2563       } else {
2564         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
2565       }
2566       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
2567                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
2568       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
2569                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
2570     }
2571   }
2572 
2573   // Don't shrink unless it's significant
2574   if (shrink_bytes >= MinMetaspaceExpansion &&
2575       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
2576     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
2577     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2578                                              new_capacity_until_GC,
2579                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
2580   }
2581 }
2582 
2583 // Metadebug methods
2584 
2585 void Metadebug::init_allocation_fail_alot_count() {
2586   if (MetadataAllocationFailALot) {
2587     _allocation_fail_alot_count =
2588       1 + (long)((double)MetadataAllocationFailALotInterval * os::random() / (max_jint + 1.0));
2589   }
2590 }
2591 
2592 #ifdef ASSERT
2593 bool Metadebug::test_metadata_failure() {
2594   if (MetadataAllocationFailALot &&
2595       Threads::is_vm_complete()) {
2596     if (_allocation_fail_alot_count > 0) {
2597       _allocation_fail_alot_count--;
2598     } else {
2599       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
2600       init_allocation_fail_alot_count();
2601       return true;
2602     }
2603   }
2604   return false;
2605 }
2606 #endif
2607 
2608 // ChunkManager methods
2609 size_t ChunkManager::free_chunks_total_words() {
2610   return _free_chunks_total;
2611 }
2612 
2613 size_t ChunkManager::free_chunks_total_bytes() {
2614   return free_chunks_total_words() * BytesPerWord;
2615 }
2616 
2617 // Update internal accounting after a chunk was added
2618 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
2619   assert_lock_strong(SpaceManager::expand_lock());
2620   _free_chunks_count ++;
2621   _free_chunks_total += c->word_size();
2622 }
2623 
2624 // Update internal accounting after a chunk was removed
2625 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
2626   assert_lock_strong(SpaceManager::expand_lock());
2627   assert(_free_chunks_count >= 1,
2628     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
2629   assert(_free_chunks_total >= c->word_size(),
2630     "ChunkManager::_free_chunks_total: about to go negative "
2631      "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
2632   _free_chunks_count --;
2633   _free_chunks_total -= c->word_size();
2634 }
2635 
2636 size_t ChunkManager::free_chunks_count() {
2637 #ifdef ASSERT
2638   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
2639     MutexLockerEx cl(SpaceManager::expand_lock(),
2640                      Mutex::_no_safepoint_check_flag);
2641     // This lock is only needed in debug because the verification
2642     // of the _free_chunks_totals walks the list of free chunks
2643     slow_locked_verify_free_chunks_count();
2644   }
2645 #endif
2646   return _free_chunks_count;
2647 }
2648 
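     // Map a chunk word size to its free list index. Any size larger than the
     // medium chunk size is considered humongous.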
2649 ChunkIndex ChunkManager::list_index(size_t size) {
2650   if (size_by_index(SpecializedIndex) == size) {
2651     return SpecializedIndex;
2652   }
2653   if (size_by_index(SmallIndex) == size) {
2654     return SmallIndex;
2655   }
2656   const size_t med_size = size_by_index(MediumIndex);
2657   if (med_size == size) {
2658     return MediumIndex;
2659   }
2660 
2661   assert(size > med_size, "Not a humongous chunk");
2662   return HumongousIndex;
2663 }
2664 
2665 size_t ChunkManager::size_by_index(ChunkIndex index) const {
2666   index_bounds_check(index);
2667   assert(index != HumongousIndex, "Do not call for humongous chunks.");
2668   return _free_chunks[index].size();
2669 }
2670 
2671 void ChunkManager::locked_verify_free_chunks_total() {
2672   assert_lock_strong(SpaceManager::expand_lock());
2673   assert(sum_free_chunks() == _free_chunks_total,
2674          "_free_chunks_total " SIZE_FORMAT " is not the"
2675          " same as sum " SIZE_FORMAT, _free_chunks_total,
2676          sum_free_chunks());
2677 }
2678 
2679 void ChunkManager::verify_free_chunks_total() {
2680   MutexLockerEx cl(SpaceManager::expand_lock(),
2681                      Mutex::_no_safepoint_check_flag);
2682   locked_verify_free_chunks_total();
2683 }
2684 
2685 void ChunkManager::locked_verify_free_chunks_count() {
2686   assert_lock_strong(SpaceManager::expand_lock());
2687   assert(sum_free_chunks_count() == _free_chunks_count,
2688          "_free_chunks_count " SIZE_FORMAT " is not the"
2689          " same as sum " SIZE_FORMAT, _free_chunks_count,
2690          sum_free_chunks_count());
2691 }
2692 
2693 void ChunkManager::verify_free_chunks_count() {
2694 #ifdef ASSERT
2695   MutexLockerEx cl(SpaceManager::expand_lock(),
2696                      Mutex::_no_safepoint_check_flag);
2697   locked_verify_free_chunks_count();
2698 #endif
2699 }
2700 
2701 void ChunkManager::verify() {
2702   MutexLockerEx cl(SpaceManager::expand_lock(),
2703                      Mutex::_no_safepoint_check_flag);
2704   locked_verify();
2705 }
2706 
2707 void ChunkManager::locked_verify() {
2708   locked_verify_free_chunks_count();
2709   locked_verify_free_chunks_total();
2710   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2711     ChunkList* list = free_chunks(i);
2712     if (list != NULL) {
2713       Metachunk* chunk = list->head();
2714       while (chunk) {
2715         DEBUG_ONLY(do_verify_chunk(chunk);)
2716         assert(chunk->is_tagged_free(), "Chunk should be tagged as free.");
2717         chunk = chunk->next();
2718       }
2719     }
2720   }
2721 }
2722 
2723 void ChunkManager::locked_print_free_chunks(outputStream* st) {
2724   assert_lock_strong(SpaceManager::expand_lock());
2725   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2726                 _free_chunks_total, _free_chunks_count);
2727 }
2728 
2729 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
2730   assert_lock_strong(SpaceManager::expand_lock());
2731   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2732                 sum_free_chunks(), sum_free_chunks_count());
2733 }
2734 
2735 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
2736   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
2737          "Bad index: %d", (int)index);
2738 
2739   return &_free_chunks[index];
2740 }
2741 
2742 // These methods, which sum over the free chunk lists, are used by printing
2743 // methods that run in product builds.
2744 size_t ChunkManager::sum_free_chunks() {
2745   assert_lock_strong(SpaceManager::expand_lock());
2746   size_t result = 0;
2747   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2748     ChunkList* list = free_chunks(i);
2749 
2750     if (list == NULL) {
2751       continue;
2752     }
2753 
2754     result = result + list->count() * list->size();
2755   }
2756   result = result + humongous_dictionary()->total_size();
2757   return result;
2758 }
2759 
2760 size_t ChunkManager::sum_free_chunks_count() {
2761   assert_lock_strong(SpaceManager::expand_lock());
2762   size_t count = 0;
2763   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2764     ChunkList* list = free_chunks(i);
2765     if (list == NULL) {
2766       continue;
2767     }
2768     count = count + list->count();
2769   }
2770   count = count + humongous_dictionary()->total_free_blocks();
2771   return count;
2772 }
2773 
2774 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
2775   ChunkIndex index = list_index(word_size);
2776   assert(index < HumongousIndex, "No humongous list");
2777   return free_chunks(index);
2778 }
2779 
2780 // Helper for chunk splitting: given a target chunk size and a larger free chunk,
2781 // split the larger chunk into n smaller chunks, at least one of which is a chunk
2782 // of the target chunk size. The smaller chunks, including the target chunk, are
2783 // returned to the freelist. A pointer to the target chunk is returned.
2784 // Note that the caller is expected to remove this chunk from the freelist right away.
2785 Metachunk* ChunkManager::split_chunk(size_t target_chunk_word_size, Metachunk* larger_chunk) {
2786   assert(larger_chunk->word_size() > target_chunk_word_size, "Sanity");
2787 
2788   const ChunkIndex larger_chunk_index = larger_chunk->get_chunk_type();
2789   const ChunkIndex target_chunk_index = get_chunk_type_by_size(target_chunk_word_size, is_class());
2790 
2791   MetaWord* const region_start = (MetaWord*)larger_chunk;
2792   const size_t region_word_len = larger_chunk->word_size();
2793   MetaWord* const region_end = region_start + region_word_len;
2794   VirtualSpaceNode* const vsn = larger_chunk->container();
2795   OccupancyMap* const ocmap = vsn->occupancy_map();
2796 
2797   // Any larger non-humongous chunk size is a multiple of any smaller chunk size.
2798   // Since non-humongous chunks are aligned to their chunk size, the larger chunk should start
2799   // at an address suitable to place the smaller target chunk.
2800   assert_is_aligned(region_start, target_chunk_word_size);
2801 
2802   // Remove old chunk.
2803   free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
2804   larger_chunk->remove_sentinel();
2805 
2806   // Prevent access to the old chunk from here on.
2807   larger_chunk = NULL;
2808   // ... and wipe it.
2809   DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));
2810 
2811   // In its place create first the target chunk...
2812   MetaWord* p = region_start;
2813   Metachunk* target_chunk = ::new (p) Metachunk(target_chunk_index, is_class(), target_chunk_word_size, vsn);
2814   assert(target_chunk == (Metachunk*)p, "Sanity");
2815   target_chunk->set_origin(origin_split);
2816 
2817   // Note: we do not need to mark its start in the occupancy map
2818   // because it coincides with the old chunk start.
2819 
2820   // Mark chunk as free and return to the freelist.
2821   do_update_in_use_info_for_chunk(target_chunk, false);
2822   free_chunks(target_chunk_index)->return_chunk_at_head(target_chunk);
2823 
2824   // This chunk should now be valid and can be verified.
2825   DEBUG_ONLY(do_verify_chunk(target_chunk));
2826 
2827   // In the remaining space create the remainder chunks.
2828   p += target_chunk->word_size();
2829   assert(p < region_end, "Sanity");
2830 
2831   while (p < region_end) {
2832 
2833     // Find the largest chunk size which fits the alignment requirements at address p.
2834     ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index);
2835     size_t this_chunk_word_size = 0;
2836     for(;;) {
2837       this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class());
2838       if (is_aligned(p, this_chunk_word_size * BytesPerWord)) {
2839         break;
2840       } else {
2841         this_chunk_index = prev_chunk_index(this_chunk_index);
2842         assert(this_chunk_index >= target_chunk_index, "Sanity");
2843       }
2844     }
2845 
2846     assert(this_chunk_word_size >= target_chunk_word_size, "Sanity");
2847     assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
2848     assert(p + this_chunk_word_size <= region_end, "Sanity");
2849 
2850     // Create splitting chunk.
2851     Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
2852     assert(this_chunk == (Metachunk*)p, "Sanity");
2853     this_chunk->set_origin(origin_split);
2854     ocmap->set_chunk_starts_at_address(p, true);
2855     do_update_in_use_info_for_chunk(this_chunk, false);
2856 
2857     // This chunk should be valid and can be verified.
2858     DEBUG_ONLY(do_verify_chunk(this_chunk));
2859 
2860     // Return this chunk to freelist and correct counter.
2861     free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
2862     _free_chunks_count ++;
2863 
2864     log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
2865       SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
2866       p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
2867       p2i(region_start), p2i(region_end));
2868 
2869     p += this_chunk_word_size;
2870 
2871   }
2872 
2873   return target_chunk;
2874 }
2875 
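     // Take a chunk of the given word size from the free lists, splitting a larger
     // free chunk if necessary, or from the humongous chunk dictionary for humongous
     // sizes. Returns NULL if no fitting chunk is found. Expects the expand lock to be held.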
2876 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
2877   assert_lock_strong(SpaceManager::expand_lock());
2878 
2879   slow_locked_verify();
2880 
2881   Metachunk* chunk = NULL;
2882   bool we_did_split_a_chunk = false;
2883 
2884   if (list_index(word_size) != HumongousIndex) {
2885 
2886     ChunkList* free_list = find_free_chunks_list(word_size);
2887     assert(free_list != NULL, "Sanity check");
2888 
2889     chunk = free_list->head();
2890 
2891     if (chunk == NULL) {
2892       // There is no free chunk of the requested size, so split a larger free chunk into smaller chunks.
2893       // This is the counterpart of the coalescing-upon-chunk-return.
2894 
2895       ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());
2896 
2897       // Is there a larger chunk we could split?
2898       Metachunk* larger_chunk = NULL;
2899       ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
2900       while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
2901         larger_chunk = free_chunks(larger_chunk_index)->head();
2902         if (larger_chunk == NULL) {
2903           larger_chunk_index = next_chunk_index(larger_chunk_index);
2904         }
2905       }
2906 
2907       if (larger_chunk != NULL) {
2908         assert(larger_chunk->word_size() > word_size, "Sanity");
2909         assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");
2910 
2911         // We found a larger chunk. Let's split it up:
2912         // - remove old chunk
2913         // - in its place, create new smaller chunks, with at least one chunk
2914         //   being of target size, the others sized as large as possible. This
2915         //   is to make sure the resulting chunks are "as coalesced as possible"
2916         //   (similar to VirtualSpaceNode::retire()).
2917         // Note: during this operation both ChunkManager and VirtualSpaceNode
2918         //  are temporarily invalid, so be careful with asserts.
2919 
2920         log_trace(gc, metaspace, freelist)("%s: splitting chunk " PTR_FORMAT
2921            ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
2922           (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
2923           chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));
2924 
2925         chunk = split_chunk(word_size, larger_chunk);
2926 
2927         // This should have worked.
2928         assert(chunk != NULL, "Sanity");
2929         assert(chunk->word_size() == word_size, "Sanity");
2930         assert(chunk->is_tagged_free(), "Sanity");
2931 
2932         we_did_split_a_chunk = true;
2933 
2934       }
2935     }
2936 
2937     if (chunk == NULL) {
2938       return NULL;
2939     }
2940 
2941     // Remove the chunk as the head of the list.
2942     free_list->remove_chunk(chunk);
2943 
2944     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list: " PTR_FORMAT " chunks left: " SSIZE_FORMAT ".",
2945                                        p2i(free_list), free_list->count());
2946 
2947   } else {
2948     chunk = humongous_dictionary()->get_chunk(word_size);
2949 
2950     if (chunk == NULL) {
2951       return NULL;
2952     }
2953 
2954     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
2955                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
2956   }
2957 
2958   // Chunk has been removed from the chunk manager; update counters.
2959   account_for_removed_chunk(chunk);
2960   do_update_in_use_info_for_chunk(chunk, true);
2961   chunk->container()->inc_container_count();
2962   chunk->inc_use_count();
2963 
2964   // Detach the chunk from the freelist links
2965   chunk->set_next(NULL);
2966   chunk->set_prev(NULL);
2967 
2968   // Run some verifications (some more if we did a chunk split)
2969 #ifdef ASSERT
2970   locked_verify();
2971   VirtualSpaceNode* const vsn = chunk->container();
2972   vsn->verify();
2973   if (we_did_split_a_chunk) {
2974     vsn->verify_free_chunks_are_ideally_merged();
2975   }
2976 #endif
2977 
2978   return chunk;
2979 }
2980 
2981 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
2982   assert_lock_strong(SpaceManager::expand_lock());
2983   slow_locked_verify();
2984 
2985   // Take from the beginning of the list
2986   Metachunk* chunk = free_chunks_get(word_size);
2987   if (chunk == NULL) {
2988     return NULL;
2989   }
2990 
2991   assert((word_size <= chunk->word_size()) ||
2992          (list_index(chunk->word_size()) == HumongousIndex),
2993          "Non-humongous variable sized chunk");
2994   LogTarget(Debug, gc, metaspace, freelist) lt;
2995   if (lt.is_enabled()) {
2996     size_t list_count;
2997     if (list_index(word_size) < HumongousIndex) {
2998       ChunkList* list = find_free_chunks_list(word_size);
2999       list_count = list->count();
3000     } else {
3001       list_count = humongous_dictionary()->total_count();
3002     }
3003     LogStream ls(lt);
3004     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
3005              p2i(this), p2i(chunk), chunk->word_size(), list_count);
3006     ResourceMark rm;
3007     locked_print_free_chunks(&ls);
3008   }
3009 
3010   return chunk;
3011 }
3012 
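     // Return a single chunk of the given type to the ChunkManager: mark it free,
     // add it to the matching freelist (or, for humongous chunks, the dictionary),
     // and attempt to coalesce it with neighboring free chunks where possible.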
3013 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
3014   assert_lock_strong(SpaceManager::expand_lock());
3015   assert(chunk != NULL, "Expected chunk.");
3016   DEBUG_ONLY(do_verify_chunk(chunk);)
3017   assert(chunk->get_chunk_type() == index, "Chunk does not match expected index.");
3018   assert(chunk->container() != NULL, "Container should have been set.");
3019   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
3020   index_bounds_check(index);
3021 
3022   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
3023   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
3024   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
3025   DEBUG_ONLY(chunk->mangle(badMetaWordVal);)
3026 
3027   if (index != HumongousIndex) {
3028     // Return non-humongous chunk to freelist.
3029     ChunkList* list = free_chunks(index);
3030     assert(list->size() == chunk->word_size(), "Wrong chunk type.");
3031     list->return_chunk_at_head(chunk);
3032     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
3033         chunk_size_name(index), p2i(chunk));
3034   } else {
3035     // Return humongous chunk to dictionary.
3036     assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
3037     assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
3038            "Humongous chunk has wrong alignment.");
3039     _humongous_dictionary.return_chunk(chunk);
3040     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
3041         chunk_size_name(index), p2i(chunk), chunk->word_size());
3042   }
3043   chunk->container()->dec_container_count();
3044   do_update_in_use_info_for_chunk(chunk, false);
3045 
3046   // Chunk has been added; update counters.
3047   account_for_added_chunk(chunk);
3048 
3049   // Attempt to coalesce the returned chunk with its neighboring chunks:
3050   // if this chunk is small or special, attempt to coalesce to a medium chunk.
3051   if (index == SmallIndex || index == SpecializedIndex) {
3052     if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
3053       // This did not work. But if this chunk is special, we may still be able to form a small chunk.
3054       if (index == SpecializedIndex) {
3055         if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
3056           // give up.
3057         }
3058       }
3059     }
3060   }
3061 
3062 }
3063 
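     // Return a NULL-terminated list of chunks, all of the given type, to the ChunkManager.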
3064 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
3065   index_bounds_check(index);
3066   if (chunks == NULL) {
3067     return;
3068   }
3069   LogTarget(Trace, gc, metaspace, freelist) log;
3070   if (log.is_enabled()) { // tracing
3071     log.print("returning list of %s chunks...", chunk_size_name(index));
3072   }
3073   unsigned num_chunks_returned = 0;
3074   size_t size_chunks_returned = 0;
3075   Metachunk* cur = chunks;
3076   while (cur != NULL) {
3077     // Capture the next link before it is changed
3078     // by the call to return_chunk_at_head();
3079     Metachunk* next = cur->next();
3080     if (log.is_enabled()) { // tracing
3081       num_chunks_returned ++;
3082       size_chunks_returned += cur->word_size();
3083     }
3084     return_single_chunk(index, cur);
3085     cur = next;
3086   }
3087   if (log.is_enabled()) { // tracing
3088     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
3089         num_chunks_returned, chunk_size_name(index), size_chunks_returned);
3090     if (index != HumongousIndex) {
3091       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
3092     } else {
3093       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
3094     }
3095   }
3096 }
3097 
3098 void ChunkManager::print_on(outputStream* out) const {
3099   _humongous_dictionary.report_statistics(out);
3100 }
3101 
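     // Collect free chunk statistics (per-type chunk count, chunk size and total size,
     // plus humongous totals). Expects the expand lock to be held.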
3102 void ChunkManager::locked_get_statistics(ChunkManagerStatistics* stat) const {
3103   assert_lock_strong(SpaceManager::expand_lock());
3104   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3105     stat->num_by_type[i] = num_free_chunks(i);
3106     stat->single_size_by_type[i] = size_by_index(i);
3107     stat->total_size_by_type[i] = size_free_chunks_in_bytes(i);
3108   }
3109   stat->num_humongous_chunks = num_free_chunks(HumongousIndex);
3110   stat->total_size_humongous_chunks = size_free_chunks_in_bytes(HumongousIndex);
3111 }
3112 
3113 void ChunkManager::get_statistics(ChunkManagerStatistics* stat) const {
3114   MutexLockerEx cl(SpaceManager::expand_lock(),
3115                    Mutex::_no_safepoint_check_flag);
3116   locked_get_statistics(stat);
3117 }
3118 
3119 void ChunkManager::print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale) {
3120   size_t total = 0;
3121   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3122 
3123   const char* unit = scale_unit(scale);
3124   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3125     out->print("  " SIZE_FORMAT " %s (" SIZE_FORMAT " bytes) chunks, total ",
3126                    stat->num_by_type[i], chunk_size_name(i),
3127                    stat->single_size_by_type[i]);
3128     if (scale == 1) {
3129       out->print_cr(SIZE_FORMAT " bytes", stat->total_size_by_type[i]);
3130     } else {
3131       out->print_cr("%.2f%s", (float)stat->total_size_by_type[i] / scale, unit);
3132     }
3133 
3134     total += stat->total_size_by_type[i];
3135   }
3136 
3137 
3138   total += stat->total_size_humongous_chunks;
3139 
3140   if (scale == 1) {
3141     out->print_cr("  " SIZE_FORMAT " humongous chunks, total " SIZE_FORMAT " bytes",
3142                   stat->num_humongous_chunks, stat->total_size_humongous_chunks);
3143 
3144     out->print_cr("  total size: " SIZE_FORMAT " bytes.", total);
3145   } else {
3146     out->print_cr("  " SIZE_FORMAT " humongous chunks, total %.2f%s",
3147                   stat->num_humongous_chunks,
3148                   (float)stat->total_size_humongous_chunks / scale, unit);
3149 
3150     out->print_cr("  total size: %.2f%s.", (float)total / scale, unit);
3151   }
3152 
3153 }
3154 
3155 void ChunkManager::print_all_chunkmanagers(outputStream* out, size_t scale) {
3156   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3157 
3158   // Note: take the lock only while retrieving the statistics; do the printing
3159   // outside of lock protection.
3160   ChunkManagerStatistics stat;
3161   out->print_cr("Chunkmanager (non-class):");
3162   const ChunkManager* const non_class_cm = Metaspace::chunk_manager_metadata();
3163   if (non_class_cm != NULL) {
3164     non_class_cm->get_statistics(&stat);
3165     ChunkManager::print_statistics(&stat, out, scale);
3166   } else {
3167     out->print_cr("unavailable.");
3168   }
3169   out->print_cr("Chunkmanager (class):");
3170   const ChunkManager* const class_cm = Metaspace::chunk_manager_class();
3171   if (class_cm != NULL) {
3172     class_cm->get_statistics(&stat);
3173     ChunkManager::print_statistics(&stat, out, scale);
3174   } else {
3175     out->print_cr("unavailable.");
3176   }
3177 }
3178 
3179 // SpaceManager methods
3180 
3181 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
3182   size_t chunk_sizes[] = {
3183       specialized_chunk_size(is_class_space),
3184       small_chunk_size(is_class_space),
3185       medium_chunk_size(is_class_space)
3186   };
3187 
3188   // Adjust up to one of the fixed chunk sizes ...
3189   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
3190     if (requested <= chunk_sizes[i]) {
3191       return chunk_sizes[i];
3192     }
3193   }
3194 
3195   // ... or return the size as a humongous chunk.
3196   return requested;
3197 }
3198 
3199 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
3200   return adjust_initial_chunk_size(requested, is_class());
3201 }
3202 
3203 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
3204   size_t requested;
3205 
3206   if (is_class()) {
3207     switch (type) {
3208     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
3209     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
3210     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
3211     default:                                 requested = ClassSmallChunk; break;
3212     }
3213   } else {
3214     switch (type) {
3215     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
3216     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
3217     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
3218     default:                                 requested = SmallChunk; break;
3219     }
3220   }
3221 
3222   // Adjust to one of the fixed chunk sizes (unless humongous)
3223   const size_t adjusted = adjust_initial_chunk_size(requested);
3224 
3225   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
3226          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
3227 
3228   return adjusted;
3229 }
3230 
3231 size_t SpaceManager::sum_free_in_chunks_in_use() const {
3232   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3233   size_t free = 0;
3234   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3235     Metachunk* chunk = chunks_in_use(i);
3236     while (chunk != NULL) {
3237       free += chunk->free_word_size();
3238       chunk = chunk->next();
3239     }
3240   }
3241   return free;
3242 }
3243 
3244 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
3245   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3246   size_t result = 0;
3247   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3248    result += sum_waste_in_chunks_in_use(i);
3249   }
3250 
3251   return result;
3252 }
3253 
3254 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
3255   size_t result = 0;
3256   Metachunk* chunk = chunks_in_use(index);
3257   // Count the free space in all the chunks but not the
3258   // current chunk, from which allocations are still being done.
3259   while (chunk != NULL) {
3260     if (chunk != current_chunk()) {
3261       result += chunk->free_word_size();
3262     }
3263     chunk = chunk->next();
3264   }
3265   return result;
3266 }
3267 
3268 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
3269   // For CMS use "allocated_chunks_words()", which does not need the
3270   // Metaspace lock.  For the other collectors sum over the in-use
3271   // lists.  Use both methods as a check that "allocated_chunks_words()"
3272   // is correct.  That is, sum_capacity_in_chunks_in_use() is too expensive
3273   // to use in the product, so allocated_chunks_words() should be used,
3274   // but we allow for checking that allocated_chunks_words() returns the same
3275   // value as sum_capacity_in_chunks_in_use(), which is the definitive
3276   // answer.
3277   if (UseConcMarkSweepGC) {
3278     return allocated_chunks_words();
3279   } else {
3280     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3281     size_t sum = 0;
3282     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3283       Metachunk* chunk = chunks_in_use(i);
3284       while (chunk != NULL) {
3285         sum += chunk->word_size();
3286         chunk = chunk->next();
3287       }
3288     }
3289     return sum;
3290   }
3291 }
3292 
3293 size_t SpaceManager::sum_count_in_chunks_in_use() {
3294   size_t count = 0;
3295   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3296     count = count + sum_count_in_chunks_in_use(i);
3297   }
3298 
3299   return count;
3300 }
3301 
3302 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
3303   size_t count = 0;
3304   Metachunk* chunk = chunks_in_use(i);
3305   while (chunk != NULL) {
3306     count++;
3307     chunk = chunk->next();
3308   }
3309   return count;
3310 }
3311 
3312 
3313 size_t SpaceManager::sum_used_in_chunks_in_use() const {
3314   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3315   size_t used = 0;
3316   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3317     Metachunk* chunk = chunks_in_use(i);
3318     while (chunk != NULL) {
3319       used += chunk->used_word_size();
3320       chunk = chunk->next();
3321     }
3322   }
3323   return used;
3324 }
3325 
3326 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
3327 
3328   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3329     Metachunk* chunk = chunks_in_use(i);
3330     st->print("SpaceManager: %s " PTR_FORMAT,
3331                  chunk_size_name(i), p2i(chunk));
3332     if (chunk != NULL) {
3333       st->print_cr(" free " SIZE_FORMAT,
3334                    chunk->free_word_size());
3335     } else {
3336       st->cr();
3337     }
3338   }
3339 
3340   chunk_manager()->locked_print_free_chunks(st);
3341   chunk_manager()->locked_print_sum_free_chunks(st);
3342 }
3343 
3344 size_t SpaceManager::calc_chunk_size(size_t word_size) {
3345 
3346   // Decide between a small chunk and a medium chunk.  Up to
3347   // _small_chunk_limit small chunks can be allocated.
3348   // After that a medium chunk is preferred.
3349   size_t chunk_word_size;
3350 
3351   // Special case for anonymous metadata space.
3352   // Anonymous metadata space is usually small, with the majority within the 1K - 2K range and
3353   // rarely around 4K (64-bit JVM).
3354   // Instead of jumping to SmallChunk after the initial chunk is exhausted, keeping allocations
3355   // in SpecializedChunk up to _anon_metadata_specialize_chunk_limit (4) reduces space waste
3356   // from 60+% to around 30%.
3357   if (_space_type == Metaspace::AnonymousMetaspaceType &&
3358       _mdtype == Metaspace::NonClassType &&
3359       sum_count_in_chunks_in_use(SpecializedIndex) < _anon_metadata_specialize_chunk_limit &&
3360       word_size + Metachunk::overhead() <= SpecializedChunk) {
3361     return SpecializedChunk;
3362   }
3363 
3364   if (chunks_in_use(MediumIndex) == NULL &&
3365       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
3366     chunk_word_size = (size_t) small_chunk_size();
3367     if (word_size + Metachunk::overhead() > small_chunk_size()) {
3368       chunk_word_size = medium_chunk_size();
3369     }
3370   } else {
3371     chunk_word_size = medium_chunk_size();
3372   }
3373 
3374   // Might still need a humongous chunk.  Enforce
3375   // humongous allocation sizes to be aligned up to
3376   // the smallest chunk size.
3377   size_t if_humongous_sized_chunk =
3378     align_up(word_size + Metachunk::overhead(),
3379                   smallest_chunk_size());
3380   chunk_word_size =
3381     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
3382 
3383   assert(!SpaceManager::is_humongous(word_size) ||
3384          chunk_word_size == if_humongous_sized_chunk,
3385          "Size calculation is wrong, word_size " SIZE_FORMAT
3386          " chunk_word_size " SIZE_FORMAT,
3387          word_size, chunk_word_size);
3388   Log(gc, metaspace, alloc) log;
3389   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
3390     log.debug("Metadata humongous allocation:");
3391     log.debug("  word_size " PTR_FORMAT, word_size);
3392     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
3393     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
3394   }
3395   return chunk_word_size;
3396 }
3397 
3398 void SpaceManager::track_metaspace_memory_usage() {
3399   if (is_init_completed()) {
3400     if (is_class()) {
3401       MemoryService::track_compressed_class_memory_usage();
3402     }
3403     MemoryService::track_metaspace_memory_usage();
3404   }
3405 }
3406 
3407 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
3408   assert(vs_list()->current_virtual_space() != NULL,
3409          "Should have been set");
3410   assert(current_chunk() == NULL ||
3411          current_chunk()->allocate(word_size) == NULL,
3412          "Don't need to expand");
3413   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3414 
3415   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3416     size_t words_left = 0;
3417     size_t words_used = 0;
3418     if (current_chunk() != NULL) {
3419       words_left = current_chunk()->free_word_size();
3420       words_used = current_chunk()->used_word_size();
3421     }
3422     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
3423                                        word_size, words_used, words_left);
3424   }
3425 
3426   // Get another chunk
3427   size_t chunk_word_size = calc_chunk_size(word_size);
3428   Metachunk* next = get_new_chunk(chunk_word_size);
3429 
3430   MetaWord* mem = NULL;
3431 
3432   // If a chunk was available, add it to the in-use chunk list
3433   // and do an allocation from it.
3434   if (next != NULL) {
3435     // Add to this manager's list of chunks in use.
3436     add_chunk(next, false);
3437     mem = next->allocate(word_size);
3438   }
3439 
3440   // Track metaspace memory usage statistic.
3441   track_metaspace_memory_usage();
3442 
3443   return mem;
3444 }
3445 
3446 void SpaceManager::print_on(outputStream* st) const {
3447 
3448   for (ChunkIndex i = ZeroIndex;
3449        i < NumberOfInUseLists ;
3450        i = next_chunk_index(i) ) {
3451     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
3452                  p2i(chunks_in_use(i)),
3453                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
3454   }
3455   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
3456                " Humongous " SIZE_FORMAT,
3457                sum_waste_in_chunks_in_use(SmallIndex),
3458                sum_waste_in_chunks_in_use(MediumIndex),
3459                sum_waste_in_chunks_in_use(HumongousIndex));
3460   // block free lists
3461   if (block_freelists() != NULL) {
3462     st->print_cr("total in block free lists " SIZE_FORMAT,
3463       block_freelists()->total_size());
3464   }
3465 }
3466 
3467 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
3468                            Metaspace::MetaspaceType space_type,
3469                            Mutex* lock) :
3470   _mdtype(mdtype),
3471   _space_type(space_type),
3472   _allocated_blocks_words(0),
3473   _allocated_chunks_words(0),
3474   _allocated_chunks_count(0),
3475   _block_freelists(NULL),
3476   _lock(lock)
3477 {
3478   initialize();
3479 }
3480 
3481 void SpaceManager::inc_size_metrics(size_t words) {
3482   assert_lock_strong(SpaceManager::expand_lock());
3483   // Total words in allocated Metachunks and count of allocated Metachunks
3484   // for this SpaceManager
3485   _allocated_chunks_words = _allocated_chunks_words + words;
3486   _allocated_chunks_count++;
3487   // Global total of capacity in allocated Metachunks
3488   MetaspaceUtils::inc_capacity(mdtype(), words);
3489   // Global total of allocated Metablocks.
3490   // used_words_slow() includes the overhead in each
3491   // Metachunk so include it in the used when the
3492   // Metachunk is first added (so only added once per
3493   // Metachunk).
3494   MetaspaceUtils::inc_used(mdtype(), Metachunk::overhead());
3495 }
3496 
3497 void SpaceManager::inc_used_metrics(size_t words) {
3498   // Add to the per SpaceManager total
3499   Atomic::add(words, &_allocated_blocks_words);
3500   // Add to the global total
3501   MetaspaceUtils::inc_used(mdtype(), words);
3502 }
3503 
3504 void SpaceManager::dec_total_from_size_metrics() {
3505   MetaspaceUtils::dec_capacity(mdtype(), allocated_chunks_words());
3506   MetaspaceUtils::dec_used(mdtype(), allocated_blocks_words());
3507   // Also deduct the overhead per Metachunk
3508   MetaspaceUtils::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
3509 }
3510 
3511 void SpaceManager::initialize() {
3512   Metadebug::init_allocation_fail_alot_count();
3513   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3514     _chunks_in_use[i] = NULL;
3515   }
3516   _current_chunk = NULL;
3517   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
3518 }
3519 
3520 SpaceManager::~SpaceManager() {
3521   // This call takes this->_lock, which can't be done while holding expand_lock()
3522   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
3523          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
3524          " allocated_chunks_words() " SIZE_FORMAT,
3525          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
3526 
3527   MutexLockerEx fcl(SpaceManager::expand_lock(),
3528                     Mutex::_no_safepoint_check_flag);
3529 
3530   assert(sum_count_in_chunks_in_use() == allocated_chunks_count(),
3531          "sum_count_in_chunks_in_use() " SIZE_FORMAT
3532          " allocated_chunks_count() " SIZE_FORMAT,
3533          sum_count_in_chunks_in_use(), allocated_chunks_count());
3534 
3535   chunk_manager()->slow_locked_verify();
3536 
3537   dec_total_from_size_metrics();
3538 
3539   Log(gc, metaspace, freelist) log;
3540   if (log.is_trace()) {
3541     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
3542     ResourceMark rm;
3543     LogStream ls(log.trace());
3544     locked_print_chunks_in_use_on(&ls);
3545     if (block_freelists() != NULL) {
3546       block_freelists()->print_on(&ls);
3547     }
3548   }
3549 
3550   // Add all the chunks in use by this space manager
3551   // to the global list of free chunks.
3552 
3553   // Follow each list of chunks-in-use and add them to the
3554   // free lists.  Each list is NULL terminated.
3555 
3556   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
3557     Metachunk* chunks = chunks_in_use(i);
3558     chunk_manager()->return_chunk_list(i, chunks);
3559     set_chunks_in_use(i, NULL);
3560   }
3561 
3562   chunk_manager()->slow_locked_verify();
3563 
3564   if (_block_freelists != NULL) {
3565     delete _block_freelists;
3566   }
3567 }
3568 
3569 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
3570   assert_lock_strong(_lock);
3571   // Allocations and deallocations are in raw_word_size
3572   size_t raw_word_size = get_allocation_word_size(word_size);
3573   // Lazily create a block_freelist
3574   if (block_freelists() == NULL) {
3575     _block_freelists = new BlockFreelist();
3576   }
3577   block_freelists()->return_block(p, raw_word_size);
3578 }
3579 
3580 // Adds a chunk to the list of chunks in use.
3581 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
3582 
3583   assert(new_chunk != NULL, "Should not be NULL");
3584   assert(new_chunk->next() == NULL, "Should not be on a list");
3585 
3586   new_chunk->reset_empty();
3587 
3588   // Find the correct list and set the current
3589   // chunk for that list.
3590   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
3591 
3592   if (index != HumongousIndex) {
3593     retire_current_chunk();
3594     set_current_chunk(new_chunk);
3595     new_chunk->set_next(chunks_in_use(index));
3596     set_chunks_in_use(index, new_chunk);
3597   } else {
3598     // For null class loader data and DumpSharedSpaces, the first chunk isn't
3599     // small, so small will be null.  Link this first chunk as the current
3600     // chunk.
3601     if (make_current) {
3602       // Set as the current chunk but otherwise treat as a humongous chunk.
3603       set_current_chunk(new_chunk);
3604     }
3605     // Link at head.  The _current_chunk only points to a humongous chunk for
3606     // the null class loader metaspace (class and data virtual space managers),
3607     // so it will never point into the tail
3608     // of the humongous chunks list.
3609     new_chunk->set_next(chunks_in_use(HumongousIndex));
3610     set_chunks_in_use(HumongousIndex, new_chunk);
3611 
3612     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
3613   }
3614 
3615   // Add to the running sum of capacity
3616   inc_size_metrics(new_chunk->word_size());
3617 
3618   assert(new_chunk->is_empty(), "Not ready for reuse");
3619   Log(gc, metaspace, freelist) log;
3620   if (log.is_trace()) {
3621     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT, sum_count_in_chunks_in_use());
3622     ResourceMark rm;
3623     LogStream ls(log.trace());
3624     new_chunk->print_on(&ls);
3625     chunk_manager()->locked_print_free_chunks(&ls);
3626   }
3627 }
3628 
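     // Retire the current chunk: if its remaining free space is large enough to be
     // tracked by the block freelist, allocate that space and immediately return it
     // via deallocate() so it can serve future small allocations.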
3629 void SpaceManager::retire_current_chunk() {
3630   if (current_chunk() != NULL) {
3631     size_t remaining_words = current_chunk()->free_word_size();
3632     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
3633       MetaWord* ptr = current_chunk()->allocate(remaining_words);
3634       deallocate(ptr, remaining_words);
3635       inc_used_metrics(remaining_words);
3636     }
3637   }
3638 }
3639 
3640 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
3641   // Get a chunk from the chunk freelist
3642   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
3643 
3644   if (next == NULL) {
3645     next = vs_list()->get_new_chunk(chunk_word_size,
3646                                     medium_chunk_bunch());
3647   }
3648 
3649   Log(gc, metaspace, alloc) log;
3650   if (log.is_debug() && next != NULL &&
3651       SpaceManager::is_humongous(next->word_size())) {
3652     log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());
3653   }
3654 
3655   return next;
3656 }
3657 
3658 MetaWord* SpaceManager::allocate(size_t word_size) {
3659   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3660   size_t raw_word_size = get_allocation_word_size(word_size);
3661   BlockFreelist* fl =  block_freelists();
3662   MetaWord* p = NULL;
3663   // Allocation from the dictionary is expensive in the sense that
3664   // the dictionary has to be searched for a size.  Don't allocate
3665   // from the dictionary until it starts to get fat.  Is this
3666   // a reasonable policy?  Maybe a skinny dictionary is fast enough
3667   // for allocations.  Do some profiling.  JJJ
3668   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
3669     p = fl->get_block(raw_word_size);
3670   }
3671   if (p == NULL) {
3672     p = allocate_work(raw_word_size);
3673   }
3674 
3675   return p;
3676 }
3677 
3678 // Returns the address of space allocated for "word_size".
3679 // This method does not know about blocks (Metablocks)
3680 MetaWord* SpaceManager::allocate_work(size_t word_size) {
3681   assert_lock_strong(_lock);
3682 #ifdef ASSERT
3683   if (Metadebug::test_metadata_failure()) {
3684     return NULL;
3685   }
3686 #endif
3687   // Is there space in the current chunk?
3688   MetaWord* result = NULL;
3689 
3690   if (current_chunk() != NULL) {
3691     result = current_chunk()->allocate(word_size);
3692   }
3693 
3694   if (result == NULL) {
3695     result = grow_and_allocate(word_size);
3696   }
3697 
3698   if (result != NULL) {
3699     inc_used_metrics(word_size);
3700     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
3701            "Head of the list is being allocated");
3702   }
3703 
3704   return result;
3705 }
3706 
3707 void SpaceManager::verify() {
3708   // If there are blocks in the dictionary, then
3709   // verification of chunks does not work since
3710   // being in the dictionary alters a chunk.
3711   if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
3712     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3713       Metachunk* curr = chunks_in_use(i);
3714       while (curr != NULL) {
3715         DEBUG_ONLY(do_verify_chunk(curr);)
3716         assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
3717         curr = curr->next();
3718       }
3719     }
3720   }
3721 }
3722 
3723 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
3724   assert(is_humongous(chunk->word_size()) ||
3725          chunk->word_size() == medium_chunk_size() ||
3726          chunk->word_size() == small_chunk_size() ||
3727          chunk->word_size() == specialized_chunk_size(),
3728          "Chunk size is wrong");
3729   return;
3730 }
3731 
3732 #ifdef ASSERT
3733 void SpaceManager::verify_allocated_blocks_words() {
3734   // Verification is only guaranteed at a safepoint.
3735   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
3736     "Verification can fail if the application is running");
3737   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
3738          "allocation total is not consistent " SIZE_FORMAT
3739          " vs " SIZE_FORMAT,
3740          allocated_blocks_words(), sum_used_in_chunks_in_use());
3741 }
3742 
3743 #endif
3744 
3745 void SpaceManager::dump(outputStream* const out) const {
3746   size_t curr_total = 0;
3747   size_t waste = 0;
3748   uint i = 0;
3749   size_t used = 0;
3750   size_t capacity = 0;
3751 
3752   // Add up statistics for all chunks in this SpaceManager.
3753   for (ChunkIndex index = ZeroIndex;
3754        index < NumberOfInUseLists;
3755        index = next_chunk_index(index)) {
3756     for (Metachunk* curr = chunks_in_use(index);
3757          curr != NULL;
3758          curr = curr->next()) {
3759       out->print("%d) ", i++);
3760       curr->print_on(out);
3761       curr_total += curr->word_size();
3762       used += curr->used_word_size();
3763       capacity += curr->word_size();
3764       waste += curr->free_word_size() + curr->overhead();
3765     }
3766   }
3767 
3768   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3769     if (block_freelists() != NULL) block_freelists()->print_on(out);
3770   }
3771 
3772   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
3773   // Free space isn't wasted.
3774   waste -= free;
3775 
3776   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
3777                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
3778                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
3779 }
3780 
3781 // MetaspaceUtils
3782 
3783 
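     // Running totals, in words, indexed by Metaspace::MetadataType.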
3784 size_t MetaspaceUtils::_capacity_words[] = {0, 0};
3785 volatile size_t MetaspaceUtils::_used_words[] = {0, 0};
3786 
3787 size_t MetaspaceUtils::free_bytes(Metaspace::MetadataType mdtype) {
3788   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3789   return list == NULL ? 0 : list->free_bytes();
3790 }
3791 
3792 size_t MetaspaceUtils::free_bytes() {
3793   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
3794 }
3795 
3796 void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
3797   assert_lock_strong(SpaceManager::expand_lock());
3798   assert(words <= capacity_words(mdtype),
3799          "About to decrement below 0: words " SIZE_FORMAT
3800          " is greater than _capacity_words[%u] " SIZE_FORMAT,
3801          words, mdtype, capacity_words(mdtype));
3802   _capacity_words[mdtype] -= words;
3803 }
3804 
3805 void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
3806   assert_lock_strong(SpaceManager::expand_lock());
3807   // Needs to be atomic
3808   _capacity_words[mdtype] += words;
3809 }
3810 
3811 void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
3812   assert(words <= used_words(mdtype),
3813          "About to decrement below 0: words " SIZE_FORMAT
3814          " is greater than _used_words[%u] " SIZE_FORMAT,
3815          words, mdtype, used_words(mdtype));
3816   // For CMS deallocation of the Metaspaces occurs during the
3817   // sweep which is a concurrent phase.  Protection by the expand_lock()
3818   // is not enough since allocation is on a per Metaspace basis
3819   // and protected by the Metaspace lock.
3820   Atomic::sub(words, &_used_words[mdtype]);
3821 }
3822 
3823 void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
3824   // _used_words tracks allocations for
3825   // each piece of metadata.  Those allocations are
3826   // generally done concurrently by different application
3827   // threads so must be done atomically.
3828   Atomic::add(words, &_used_words[mdtype]);
3829 }
3830 
3831 size_t MetaspaceUtils::used_bytes_slow(Metaspace::MetadataType mdtype) {
3832   size_t used = 0;
3833   ClassLoaderDataGraphMetaspaceIterator iter;
3834   while (iter.repeat()) {
3835     ClassLoaderMetaspace* msp = iter.get_next();
3836     // Sum allocated_blocks_words for each metaspace
3837     if (msp != NULL) {
3838       used += msp->used_words_slow(mdtype);
3839     }
3840   }
3841   return used * BytesPerWord;
3842 }
3843 
3844 size_t MetaspaceUtils::free_bytes_slow(Metaspace::MetadataType mdtype) {
3845   size_t free = 0;
3846   ClassLoaderDataGraphMetaspaceIterator iter;
3847   while (iter.repeat()) {
3848     ClassLoaderMetaspace* msp = iter.get_next();
3849     if (msp != NULL) {
3850       free += msp->free_words_slow(mdtype);
3851     }
3852   }
3853   return free * BytesPerWord;
3854 }
3855 
3856 size_t MetaspaceUtils::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
3857   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
3858     return 0;
3859   }
3860   // Don't count the space in the freelists.  That space will be
3861   // added to the capacity calculation as needed.
3862   size_t capacity = 0;
3863   ClassLoaderDataGraphMetaspaceIterator iter;
3864   while (iter.repeat()) {
3865     ClassLoaderMetaspace* msp = iter.get_next();
3866     if (msp != NULL) {
3867       capacity += msp->capacity_words_slow(mdtype);
3868     }
3869   }
3870   return capacity * BytesPerWord;
3871 }
3872 
3873 size_t MetaspaceUtils::capacity_bytes_slow() {
3874 #ifdef PRODUCT
3875   // Use capacity_bytes() in PRODUCT instead of this function.
3876   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
3877 #endif
3878   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
3879   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
3880   assert(capacity_bytes() == class_capacity + non_class_capacity,
3881          "bad accounting: capacity_bytes() " SIZE_FORMAT
3882          " class_capacity + non_class_capacity " SIZE_FORMAT
3883          " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
3884          capacity_bytes(), class_capacity + non_class_capacity,
3885          class_capacity, non_class_capacity);
3886 
3887   return class_capacity + non_class_capacity;
3888 }
3889 
3890 size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
3891   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3892   return list == NULL ? 0 : list->reserved_bytes();
3893 }
3894 
3895 size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
3896   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3897   return list == NULL ? 0 : list->committed_bytes();
3898 }
3899 
3900 size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
3901 
3902 size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
3903   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
3904   if (chunk_manager == NULL) {
3905     return 0;
3906   }
3907   chunk_manager->slow_verify();
3908   return chunk_manager->free_chunks_total_words();
3909 }
3910 
3911 size_t MetaspaceUtils::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
3912   return free_chunks_total_words(mdtype) * BytesPerWord;
3913 }
3914 
3915 size_t MetaspaceUtils::free_chunks_total_words() {
3916   return free_chunks_total_words(Metaspace::ClassType) +
3917          free_chunks_total_words(Metaspace::NonClassType);
3918 }
3919 
3920 size_t MetaspaceUtils::free_chunks_total_bytes() {
3921   return free_chunks_total_words() * BytesPerWord;
3922 }
3923 
3924 bool MetaspaceUtils::has_chunk_free_list(Metaspace::MetadataType mdtype) {
3925   return Metaspace::get_chunk_manager(mdtype) != NULL;
3926 }
3927 
3928 MetaspaceChunkFreeListSummary MetaspaceUtils::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
3929   if (!has_chunk_free_list(mdtype)) {
3930     return MetaspaceChunkFreeListSummary();
3931   }
3932 
3933   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
3934   return cm->chunk_free_list_summary();
3935 }
3936 
3937 void MetaspaceUtils::print_metaspace_change(size_t prev_metadata_used) {
3938   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
3939                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
3940 }
3941 
3942 void MetaspaceUtils::print_on(outputStream* out) {
3943   Metaspace::MetadataType nct = Metaspace::NonClassType;
3944 
3945   out->print_cr(" Metaspace       "
3946                 "used "      SIZE_FORMAT "K, "
3947                 "capacity "  SIZE_FORMAT "K, "
3948                 "committed " SIZE_FORMAT "K, "
3949                 "reserved "  SIZE_FORMAT "K",
3950                 used_bytes()/K,
3951                 capacity_bytes()/K,
3952                 committed_bytes()/K,
3953                 reserved_bytes()/K);
3954 
3955   if (Metaspace::using_class_space()) {
3956     Metaspace::MetadataType ct = Metaspace::ClassType;
3957     out->print_cr("  class space    "
3958                   "used "      SIZE_FORMAT "K, "
3959                   "capacity "  SIZE_FORMAT "K, "
3960                   "committed " SIZE_FORMAT "K, "
3961                   "reserved "  SIZE_FORMAT "K",
3962                   used_bytes(ct)/K,
3963                   capacity_bytes(ct)/K,
3964                   committed_bytes(ct)/K,
3965                   reserved_bytes(ct)/K);
3966   }
3967 }
3968 
3969 // Print information for class space and data space separately.
3970 // This is almost the same as above.
3971 void MetaspaceUtils::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
3972   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
3973   size_t capacity_bytes = capacity_bytes_slow(mdtype);
3974   size_t used_bytes = used_bytes_slow(mdtype);
3975   size_t free_bytes = free_bytes_slow(mdtype);
3976   size_t used_and_free = used_bytes + free_bytes +
3977                            free_chunks_capacity_bytes;
3978   out->print_cr("  Chunk accounting: (used in chunks " SIZE_FORMAT
3979              "K + unused in chunks " SIZE_FORMAT "K  + "
3980              " capacity in free chunks " SIZE_FORMAT "K) = " SIZE_FORMAT
3981              "K  capacity in allocated chunks " SIZE_FORMAT "K",
3982              used_bytes / K,
3983              free_bytes / K,
3984              free_chunks_capacity_bytes / K,
3985              used_and_free / K,
3986              capacity_bytes / K);
3987   // Accounting can only be correct if we got the values during a safepoint
3988   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
3989 }
3990 
3991 // Print total fragmentation for class metaspaces
3992 void MetaspaceUtils::print_class_waste(outputStream* out) {
3993   assert(Metaspace::using_class_space(), "class metaspace not used");
3994   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
3995   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
3996   ClassLoaderDataGraphMetaspaceIterator iter;
3997   while (iter.repeat()) {
3998     ClassLoaderMetaspace* msp = iter.get_next();
3999     if (msp != NULL) {
4000       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
4001       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
4002       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
4003       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
4004       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
4005       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
4006       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
4007     }
4008   }
4009   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
4010                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
4011                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
4012                 "large count " SIZE_FORMAT,
4013                 cls_specialized_count, cls_specialized_waste,
4014                 cls_small_count, cls_small_waste,
4015                 cls_medium_count, cls_medium_waste, cls_humongous_count);
4016 }
4017 
4018 // Print total fragmentation for data and class metaspaces separately
4019 void MetaspaceUtils::print_waste(outputStream* out) {
4020   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
4021   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
4022 
4023   ClassLoaderDataGraphMetaspaceIterator iter;
4024   while (iter.repeat()) {
4025     ClassLoaderMetaspace* msp = iter.get_next();
4026     if (msp != NULL) {
4027       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
4028       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
4029       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
4030       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
4031       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
4032       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
4033       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
4034     }
4035   }
4036   out->print_cr("Total fragmentation waste (words), not counting free space");
4037   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
4038                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
4039                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
4040                         "large count " SIZE_FORMAT,
4041              specialized_count, specialized_waste, small_count,
4042              small_waste, medium_count, medium_waste, humongous_count);
4043   if (Metaspace::using_class_space()) {
4044     print_class_waste(out);
4045   }
4046 }
4047 
4048 class MetadataStats VALUE_OBJ_CLASS_SPEC {
4049 private:
4050   size_t _capacity;
4051   size_t _used;
4052   size_t _free;
4053   size_t _waste;
4054 
4055 public:
4056   MetadataStats() : _capacity(0), _used(0), _free(0), _waste(0) { }
4057   MetadataStats(size_t capacity, size_t used, size_t free, size_t waste)
4058   : _capacity(capacity), _used(used), _free(free), _waste(waste) { }
4059 
4060   void add(const MetadataStats& stats) {
4061     _capacity += stats.capacity();
4062     _used += stats.used();
4063     _free += stats.free();
4064     _waste += stats.waste();
4065   }
4066 
4067   size_t capacity() const { return _capacity; }
4068   size_t used() const     { return _used; }
4069   size_t free() const     { return _free; }
4070   size_t waste() const    { return _waste; }
4071 
4072   void print_on(outputStream* out, size_t scale) const;
4073 };
4074 
4075 
4076 void MetadataStats::print_on(outputStream* out, size_t scale) const {
4077   const char* unit = scale_unit(scale);
4078   out->print_cr("capacity=%10.2f%s used=%10.2f%s free=%10.2f%s waste=%10.2f%s",
4079     (float)capacity() / scale, unit,
4080     (float)used() / scale, unit,
4081     (float)free() / scale, unit,
4082     (float)waste() / scale, unit);
4083 }
4084 
4085 class PrintCLDMetaspaceInfoClosure : public CLDClosure {
4086 private:
4087   outputStream*  _out;
4088   size_t         _scale;
4089 
4090   size_t         _total_count;
4091   MetadataStats  _total_metadata;
4092   MetadataStats  _total_class;
4093 
4094   size_t         _total_anon_count;
4095   MetadataStats  _total_anon_metadata;
4096   MetadataStats  _total_anon_class;
4097 
4098 public:
4099   PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale = K)
4100   : _out(out), _scale(scale), _total_count(0), _total_anon_count(0) { }
4101 
4102   ~PrintCLDMetaspaceInfoClosure() {
4103     print_summary();
4104   }
4105 
4106   void do_cld(ClassLoaderData* cld) {
4107     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
4108 
4109     if (cld->is_unloading()) return;
4110     ClassLoaderMetaspace* msp = cld->metaspace_or_null();
4111     if (msp == NULL) {
4112       return;
4113     }
4114 
4115     bool anonymous = false;
4116     if (cld->is_anonymous()) {
4117       _out->print_cr("ClassLoader: for anonymous class");
4118       anonymous = true;
4119     } else {
4120       ResourceMark rm;
4121       _out->print_cr("ClassLoader: %s", cld->loader_name());
4122     }
4123 
4124     print_metaspace(msp, anonymous);
4125     _out->cr();
4126   }
4127 
4128 private:
4129   void print_metaspace(ClassLoaderMetaspace* msp, bool anonymous);
4130   void print_summary() const;
4131 };
4132 
4133 void PrintCLDMetaspaceInfoClosure::print_metaspace(ClassLoaderMetaspace* msp, bool anonymous){
4134   assert(msp != NULL, "Sanity");
4135   SpaceManager* vsm = msp->vsm();
4137 
4138   size_t capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4139   size_t used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4140   size_t free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4141   size_t waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4142 
4143   _total_count ++;
4144   MetadataStats metadata_stats(capacity, used, free, waste);
4145   _total_metadata.add(metadata_stats);
4146 
4147   if (anonymous) {
4148     _total_anon_count ++;
4149     _total_anon_metadata.add(metadata_stats);
4150   }
4151 
4152   _out->print("  Metadata   ");
4153   metadata_stats.print_on(_out, _scale);
4154 
4155   if (Metaspace::using_class_space()) {
4156     vsm = msp->class_vsm();
4157 
4158     capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4159     used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4160     free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4161     waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4162 
4163     MetadataStats class_stats(capacity, used, free, waste);
4164     _total_class.add(class_stats);
4165 
4166     if (anonymous) {
4167       _total_anon_class.add(class_stats);
4168     }
4169 
4170     _out->print("  Class data ");
4171     class_stats.print_on(_out, _scale);
4172   }
4173 }
4174 
4175 void PrintCLDMetaspaceInfoClosure::print_summary() const {
4177   _out->cr();
4178   _out->print_cr("Summary:");
4179 
4180   MetadataStats total;
4181   total.add(_total_metadata);
4182   total.add(_total_class);
4183 
4184   _out->print("  Total class loaders=" SIZE_FORMAT_W(6) " ", _total_count);
4185   total.print_on(_out, _scale);
4186 
4187   _out->print("                    Metadata ");
4188   _total_metadata.print_on(_out, _scale);
4189 
4190   if (Metaspace::using_class_space()) {
4191     _out->print("                  Class data ");
4192     _total_class.print_on(_out, _scale);
4193   }
4194   _out->cr();
4195 
4196   MetadataStats total_anon;
4197   total_anon.add(_total_anon_metadata);
4198   total_anon.add(_total_anon_class);
4199 
4200   _out->print("For anonymous classes=" SIZE_FORMAT_W(6) " ", _total_anon_count);
4201   total_anon.print_on(_out, _scale);
4202 
4203   _out->print("                    Metadata ");
4204   _total_anon_metadata.print_on(_out, _scale);
4205 
4206   if (Metaspace::using_class_space()) {
4207     _out->print("                  Class data ");
4208     _total_anon_class.print_on(_out, _scale);
4209   }
4210 }
4211 
4212 void MetaspaceUtils::print_metadata_for_nmt(outputStream* out, size_t scale) {
4213   const char* unit = scale_unit(scale);
4214   out->print_cr("Metaspaces:");
4215   out->print_cr("  Metadata space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
4216     reserved_bytes(Metaspace::NonClassType) / scale, unit,
4217     committed_bytes(Metaspace::NonClassType) / scale, unit);
4218   if (Metaspace::using_class_space()) {
4219     out->print_cr("  Class    space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
4220     reserved_bytes(Metaspace::ClassType) / scale, unit,
4221     committed_bytes(Metaspace::ClassType) / scale, unit);
4222   }
4223 
4224   out->cr();
4225   ChunkManager::print_all_chunkmanagers(out, scale);
4226 
4227   out->cr();
4228   out->print_cr("Per-classloader metadata:");
4229   out->cr();
4230 
4231   PrintCLDMetaspaceInfoClosure cl(out, scale);
4232   ClassLoaderDataGraph::cld_do(&cl);
4233 }
4234 
4235 
4236 // Dump global metaspace things from the end of ClassLoaderDataGraph
4237 void MetaspaceUtils::dump(outputStream* out) {
4238   out->print_cr("All Metaspace:");
4239   out->print("data space: "); print_on(out, Metaspace::NonClassType);
4240   out->print("class space: "); print_on(out, Metaspace::ClassType);
4241   print_waste(out);
4242 }
4243 
4244 // Prints an ASCII representation of the given space.
4245 void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
4246   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4247   const bool for_class = (mdtype == Metaspace::ClassType);
4248   VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
4249   if (vsl != NULL) {
4250     if (for_class) {
4251       if (!Metaspace::using_class_space()) {
4252         out->print_cr("No Class Space.");
4253         return;
4254       }
4255       out->print_raw("---- Metaspace Map (Class Space) ----");
4256     } else {
4257       out->print_raw("---- Metaspace Map (Non-Class Space) ----");
4258     }
4259     // Print legend:
4260     out->cr();
4261     out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
4262     out->cr();
4264     vsl->print_map(out);
4265     out->cr();
4266   }
4267 }
4268 
4269 void MetaspaceUtils::verify_free_chunks() {
4270   Metaspace::chunk_manager_metadata()->verify();
4271   if (Metaspace::using_class_space()) {
4272     Metaspace::chunk_manager_class()->verify();
4273   }
4274 }
4275 
4276 void MetaspaceUtils::verify_capacity() {
4277 #ifdef ASSERT
4278   size_t running_sum_capacity_bytes = capacity_bytes();
4279   // For purposes of the running sum of capacity, verify against capacity
4280   size_t capacity_in_use_bytes = capacity_bytes_slow();
4281   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
4282          "capacity_bytes() " SIZE_FORMAT
4283          " capacity_bytes_slow() " SIZE_FORMAT,
4284          running_sum_capacity_bytes, capacity_in_use_bytes);
4285   for (Metaspace::MetadataType i = Metaspace::ClassType;
4286        i < Metaspace::MetadataTypeCount;
4287        i = (Metaspace::MetadataType)(i + 1)) {
4288     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
4289     assert(capacity_bytes(i) == capacity_in_use_bytes,
4290            "capacity_bytes(%u) " SIZE_FORMAT
4291            " capacity_bytes_slow(%u) " SIZE_FORMAT,
4292            i, capacity_bytes(i), i, capacity_in_use_bytes);
4293   }
4294 #endif
4295 }
4296 
4297 void MetaspaceUtils::verify_used() {
4298 #ifdef ASSERT
4299   size_t running_sum_used_bytes = used_bytes();
4300   // For purposes of the running sum of used, verify against used
4301   size_t used_in_use_bytes = used_bytes_slow();
4302   assert(running_sum_used_bytes == used_in_use_bytes,
4303          "used_bytes() " SIZE_FORMAT
4304          " used_bytes_slow() " SIZE_FORMAT,
4305          running_sum_used_bytes, used_in_use_bytes);
4306   for (Metaspace::MetadataType i = Metaspace::ClassType;
4307        i < Metaspace::MetadataTypeCount;
4308        i = (Metaspace::MetadataType)(i + 1)) {
4309     size_t used_in_use_bytes = used_bytes_slow(i);
4310     assert(used_bytes(i) == used_in_use_bytes,
4311            "used_bytes(%u) " SIZE_FORMAT
4312            " used_bytes_slow(%u) " SIZE_FORMAT,
4313            i, used_bytes(i), i, used_in_use_bytes);
4314   }
4315 #endif
4316 }
4317 
4318 void MetaspaceUtils::verify_metrics() {
4319   verify_capacity();
4320   verify_used();
4321 }
4322 
4323 
4324 // Metaspace methods
4325 
4326 size_t Metaspace::_first_chunk_word_size = 0;
4327 size_t Metaspace::_first_class_chunk_word_size = 0;
4328 
4329 size_t Metaspace::_commit_alignment = 0;
4330 size_t Metaspace::_reserve_alignment = 0;
4331 
4332 ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type) {
4333   initialize(lock, type);
4334 }
4335 
4336 ClassLoaderMetaspace::~ClassLoaderMetaspace() {
4337   delete _vsm;
4338   if (Metaspace::using_class_space()) {
4339     delete _class_vsm;
4340   }
4341 }
4342 
4343 VirtualSpaceList* Metaspace::_space_list = NULL;
4344 VirtualSpaceList* Metaspace::_class_space_list = NULL;
4345 
4346 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
4347 ChunkManager* Metaspace::_chunk_manager_class = NULL;
4348 
4349 #define VIRTUALSPACEMULTIPLIER 2
4350 
4351 #ifdef _LP64
4352 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
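// I.e. 4G: the largest range a 32-bit narrow Klass pointer can cover with a shift of zero.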
4353 
4354 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
4355   assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
4356   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
4357   // narrow_klass_base is the lower of the metaspace base and the cds base
4358   // (if cds is enabled).  The narrow_klass_shift depends on the distance
4359   // between the lower base and higher address.
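  // Illustrative example (hypothetical addresses): with CDS off, a metaspace_base of 32G
  // and a 1G compressed class space give a higher_address of 33G, which is above the 32G
  // unscaled limit, so lower_base stays at metaspace_base; the 1G span still fits in
  // UnscaledClassSpaceMax, so a shift of 0 is chosen.  Had the whole range ended at or
  // below 32G, lower_base would have been set to 0 (zero-based encoding).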
4360   address lower_base;
4361   address higher_address;
4362 #if INCLUDE_CDS
4363   if (UseSharedSpaces) {
4364     higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4365                           (address)(metaspace_base + compressed_class_space_size()));
4366     lower_base = MIN2(metaspace_base, cds_base);
4367   } else
4368 #endif
4369   {
4370     higher_address = metaspace_base + compressed_class_space_size();
4371     lower_base = metaspace_base;
4372 
4373     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
4374     // If compressed class space fits in lower 32G, we don't need a base.
4375     if (higher_address <= (address)klass_encoding_max) {
4376       lower_base = 0; // Effectively lower base is zero.
4377     }
4378   }
4379 
4380   Universe::set_narrow_klass_base(lower_base);
4381 
4382   // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
4383   // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
4384   // how dump time narrow_klass_shift is set. Although, CDS can work
4385   // with zero-shift mode also, to be consistent with AOT it uses
4386   // LogKlassAlignmentInBytes for klass shift so archived java heap objects
4387   // can be used at same time as AOT code.
4388   if (!UseSharedSpaces
4389       && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
4390     Universe::set_narrow_klass_shift(0);
4391   } else {
4392     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
4393   }
4394   AOTLoader::set_narrow_klass_shift();
4395 }
4396 
4397 #if INCLUDE_CDS
4398 // Return TRUE if the specified metaspace_base and cds_base are close enough
4399 // to work with compressed klass pointers.
4400 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
4401   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
4402   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4403   address lower_base = MIN2((address)metaspace_base, cds_base);
4404   address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4405                                 (address)(metaspace_base + compressed_class_space_size()));
4406   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
4407 }
4408 #endif
4409 
4410 // Try to allocate the metaspace at the requested addr.
4411 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
4412   assert(!DumpSharedSpaces, "compress klass space is allocated by MetaspaceShared class.");
4413   assert(using_class_space(), "called improperly");
4414   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4415   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
4416          "Metaspace size is too big");
4417   assert_is_aligned(requested_addr, _reserve_alignment);
4418   assert_is_aligned(cds_base, _reserve_alignment);
4419   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
4420 
4421   // Don't use large pages for the class space.
4422   bool large_pages = false;
4423 
4424 #if !(defined(AARCH64) || defined(AIX))
4425   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
4426                                              _reserve_alignment,
4427                                              large_pages,
4428                                              requested_addr);
4429 #else // AARCH64 || AIX
4430   ReservedSpace metaspace_rs;
4431 
4432   // Our compressed klass pointers may fit nicely into the lower 32
4433   // bits.
4434   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
4435     metaspace_rs = ReservedSpace(compressed_class_space_size(),
4436                                  _reserve_alignment,
4437                                  large_pages,
4438                                  requested_addr);
4439   }
4440 
4441   if (!metaspace_rs.is_reserved()) {
4442     // Aarch64: Try to align metaspace so that we can decode a compressed
4443     // klass with a single MOVK instruction.  We can do this iff the
4444     // compressed class base is a multiple of 4G.
4445     // Aix: Search for a place where we can find memory. If we need to load
4446     // the base, 4G alignment is helpful, too.
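    // In other words, requested_addr is first rounded up to the increment (4G on AArch64,
    // 1G on AIX), and each candidate address is probed in turn until a reservation
    // succeeds or the 1024G upper bound is reached.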
4447     size_t increment = AARCH64_ONLY(4*)G;
4448     for (char *a = align_up(requested_addr, increment);
4449          a < (char*)(1024*G);
4450          a += increment) {
4451       if (a == (char *)(32*G)) {
4452         // Go faster from here on. Zero-based is no longer possible.
4453         increment = 4*G;
4454       }
4455 
4456 #if INCLUDE_CDS
4457       if (UseSharedSpaces
4458           && !can_use_cds_with_metaspace_addr(a, cds_base)) {
4459         // We failed to find an aligned base that will reach.  Fall
4460         // back to using our requested addr.
4461         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4462                                      _reserve_alignment,
4463                                      large_pages,
4464                                      requested_addr);
4465         break;
4466       }
4467 #endif
4468 
4469       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4470                                    _reserve_alignment,
4471                                    large_pages,
4472                                    a);
4473       if (metaspace_rs.is_reserved())
4474         break;
4475     }
4476   }
4477 
4478 #endif // AARCH64 || AIX
4479 
4480   if (!metaspace_rs.is_reserved()) {
4481 #if INCLUDE_CDS
4482     if (UseSharedSpaces) {
4483       size_t increment = align_up(1*G, _reserve_alignment);
4484 
4485       // Keep trying to allocate the metaspace, increasing the requested_addr
4486       // by 1GB each time, until we reach an address that will no longer allow
4487       // use of CDS with compressed klass pointers.
4488       char *addr = requested_addr;
4489       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
4490              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
4491         addr = addr + increment;
4492         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4493                                      _reserve_alignment, large_pages, addr);
4494       }
4495     }
4496 #endif
4497     // If no successful allocation then try to allocate the space anywhere.  If
4498     // that fails then OOM doom.  At this point we cannot try allocating the
4499     // metaspace as if UseCompressedClassPointers is off because too much
4500     // initialization has happened that depends on UseCompressedClassPointers.
4501     // So, UseCompressedClassPointers cannot be turned off at this point.
4502     if (!metaspace_rs.is_reserved()) {
4503       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4504                                    _reserve_alignment, large_pages);
4505       if (!metaspace_rs.is_reserved()) {
4506         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
4507                                               compressed_class_space_size()));
4508       }
4509     }
4510   }
4511 
4512   // If we got here then the metaspace got allocated.
4513   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
4514 
4515 #if INCLUDE_CDS
4516   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
4517   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
4518     FileMapInfo::stop_sharing_and_unmap(
4519         "Could not allocate metaspace at a compatible address");
4520   }
4521 #endif
4522   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
4523                                   UseSharedSpaces ? (address)cds_base : 0);
4524 
4525   initialize_class_space(metaspace_rs);
4526 
4527   LogTarget(Trace, gc, metaspace) lt;
4528   if (lt.is_enabled()) {
4529     ResourceMark rm;
4530     LogStream ls(lt);
4531     print_compressed_class_space(&ls, requested_addr);
4532   }
4533 }
4534 
4535 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
4536   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
4537                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
4538   if (_class_space_list != NULL) {
4539     address base = (address)_class_space_list->current_virtual_space()->bottom();
4540     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
4541                  compressed_class_space_size(), p2i(base));
4542     if (requested_addr != NULL) {
4543       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
4544     }
4545     st->cr();
4546   }
4547 }
4548 
4549 // For UseCompressedClassPointers the class space is reserved above the top of
4550 // the Java heap.  The argument passed in is at the base of the compressed space.
4551 void Metaspace::initialize_class_space(ReservedSpace rs) {
4552   // The reserved space size may be bigger because of alignment, esp with UseLargePages
4553   assert(rs.size() >= CompressedClassSpaceSize,
4554          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
4555   assert(using_class_space(), "Must be using class space");
4556   _class_space_list = new VirtualSpaceList(rs);
4557   _chunk_manager_class = new ChunkManager(true/*is_class*/);
4558 
4559   if (!_class_space_list->initialization_succeeded()) {
4560     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
4561   }
4562 }
4563 
4564 #endif
4565 
4566 void Metaspace::ergo_initialize() {
4567   if (DumpSharedSpaces) {
4568     // Using large pages when dumping the shared archive is currently not implemented.
4569     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
4570   }
4571 
4572   size_t page_size = os::vm_page_size();
4573   if (UseLargePages && UseLargePagesInMetaspace) {
4574     page_size = os::large_page_size();
4575   }
4576 
4577   _commit_alignment  = page_size;
4578   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
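  // Commits happen at page granularity; reservations must additionally respect the OS
  // allocation granularity (typically 64K on Windows), hence the MAX2 above.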
4579 
4580   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
4581   // override if MaxMetaspaceSize was set on the command line or not.
4582   // This information is needed later to conform to the specification of the
4583   // java.lang.management.MemoryUsage API.
4584   //
4585   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
4586   // globals.hpp to the aligned value, but this is not possible, since the
4587   // alignment depends on other flags being parsed.
4588   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
4589 
4590   if (MetaspaceSize > MaxMetaspaceSize) {
4591     MetaspaceSize = MaxMetaspaceSize;
4592   }
4593 
4594   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
4595 
4596   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
4597 
4598   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
4599   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
4600 
4601   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
4602 
4603   // Initial virtual space size will be calculated at global_initialize()
4604   size_t min_metaspace_sz =
4605       VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
4606   if (UseCompressedClassPointers) {
4607     if ((min_metaspace_sz + CompressedClassSpaceSize) >  MaxMetaspaceSize) {
4608       if (min_metaspace_sz >= MaxMetaspaceSize) {
4609         vm_exit_during_initialization("MaxMetaspaceSize is too small.");
4610       } else {
4611         FLAG_SET_ERGO(size_t, CompressedClassSpaceSize,
4612                       MaxMetaspaceSize - min_metaspace_sz);
4613       }
4614     }
4615   } else if (min_metaspace_sz >= MaxMetaspaceSize) {
4616     FLAG_SET_ERGO(size_t, InitialBootClassLoaderMetaspaceSize,
4617                   min_metaspace_sz);
4618   }
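  // Illustrative example for the CompressedClassSpaceSize adjustment above (made-up
  // values): with MaxMetaspaceSize at 100*M and min_metaspace_sz at 8*M, the class space
  // would be ergonomically reduced to 92*M so that both fit under MaxMetaspaceSize.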
4619 
4620   set_compressed_class_space_size(CompressedClassSpaceSize);
4621 }
4622 
4623 void Metaspace::global_initialize() {
4624   MetaspaceGC::initialize();
4625 
4626 #if INCLUDE_CDS
4627   if (DumpSharedSpaces) {
4628     MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
4629   } else if (UseSharedSpaces) {
4630     // If any of the archived space fails to map, UseSharedSpaces
4631     // is reset to false. Fall through to the
4632     // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
4633     // metaspace.
4634     MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
4635   }
4636 
4637   if (!DumpSharedSpaces && !UseSharedSpaces)
4638 #endif // INCLUDE_CDS
4639   {
4640 #ifdef _LP64
4641     if (using_class_space()) {
4642       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
4643       allocate_metaspace_compressed_klass_ptrs(base, 0);
4644     }
4645 #endif // _LP64
4646   }
4647 
4648   // Initialize these before initializing the VirtualSpaceList
4649   _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
4650   _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
4651   // Make the first class chunk bigger than a medium chunk so it's not put
4652   // on the medium chunk list.  The next chunk will be small and progress
4653   // from there.  This size was determined empirically by running with -version.
4654   _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
4655                                      (CompressedClassSpaceSize/BytesPerWord)*2);
4656   _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
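  // For example, with an 8K-word MediumChunk this works out to 48K words (384K bytes on
  // 64-bit platforms), unless CompressedClassSpaceSize is small enough that
  // (CompressedClassSpaceSize/BytesPerWord)*2 is smaller still.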
4657   // Arbitrarily set the initial virtual space to a multiple
4658   // of the boot class loader size.
4659   size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
4660   word_size = align_up(word_size, Metaspace::reserve_alignment_words());
4661 
4662   // Initialize the list of virtual spaces.
4663   _space_list = new VirtualSpaceList(word_size);
4664   _chunk_manager_metadata = new ChunkManager(false/*is_class*/);
4665 
4666   if (!_space_list->initialization_succeeded()) {
4667     vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
4668   }
4669 
4670   _tracer = new MetaspaceTracer();
4671 }
4672 
4673 void Metaspace::post_initialize() {
4674   MetaspaceGC::post_initialize();
4675 }
4676 
4677 void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4678   Metachunk* chunk = get_initialization_chunk(type, mdtype);
4679   if (chunk != NULL) {
4680     // Add to this manager's list of chunks in use and current_chunk().
4681     get_space_manager(mdtype)->add_chunk(chunk, true);
4682   }
4683 }
4684 
4685 Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4686   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
4687 
4688   // Get a chunk from the chunk freelist
4689   Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
4690 
4691   if (chunk == NULL) {
4692     chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
4693                                                   get_space_manager(mdtype)->medium_chunk_bunch());
4694   }
4695 
4696   return chunk;
4697 }
4698 
4699 void Metaspace::verify_global_initialization() {
4700   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
4701   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
4702 
4703   if (using_class_space()) {
4704     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
4705     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
4706   }
4707 }
4708 
4709 void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
4710   Metaspace::verify_global_initialization();
4711 
4712   // Allocate SpaceManager for metadata objects.
4713   _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);
4714 
4715   if (Metaspace::using_class_space()) {
4716     // Allocate SpaceManager for classes.
4717     _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
4718   }
4719 
4720   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4721 
4722   // Allocate chunk for metadata objects
4723   initialize_first_chunk(type, Metaspace::NonClassType);
4724 
4725   // Allocate chunk for class metadata objects
4726   if (Metaspace::using_class_space()) {
4727     initialize_first_chunk(type, Metaspace::ClassType);
4728   }
4729 }
4730 
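// Rounds a word-sized request up to the ReservedSpace allocation alignment.
// For example, with a (platform-dependent) 64K allocation alignment and 8-byte words,
// any request of up to 8192 words rounds up to 8192 words.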
4731 size_t Metaspace::align_word_size_up(size_t word_size) {
4732   size_t byte_size = word_size * wordSize;
4733   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
4734 }
4735 
4736 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4737   Metaspace::assert_not_frozen();
4738   // Don't use class_vsm() unless UseCompressedClassPointers is true.
4739   if (Metaspace::is_class_space_allocation(mdtype)) {
4740     return  class_vsm()->allocate(word_size);
4741   } else {
4742     return  vsm()->allocate(word_size);
4743   }
4744 }
4745 
4746 MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4747   Metaspace::assert_not_frozen();
4748   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
4749   assert(delta_bytes > 0, "Must be");
4750 
4751   size_t before = 0;
4752   size_t after = 0;
4753   MetaWord* res;
4754   bool incremented;
4755 
4756   // Each thread increments the HWM at most once. Even if the thread fails to increment
4757   // the HWM, an allocation is still attempted. This is because another thread must then
4758   // have incremented the HWM and therefore the allocation might still succeed.
4759   do {
4760     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
4761     res = allocate(word_size, mdtype);
4762   } while (!incremented && res == NULL);
4763 
4764   if (incremented) {
4765     Metaspace::tracer()->report_gc_threshold(before, after,
4766                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
4767     log_trace(gc, metaspace)("Increased capacity until GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
4768   }
4769 
4770   return res;
4771 }
4772 
4773 size_t ClassLoaderMetaspace::used_words_slow(Metaspace::MetadataType mdtype) const {
4774   if (mdtype == Metaspace::ClassType) {
4775     return Metaspace::using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
4776   } else {
4777     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
4778   }
4779 }
4780 
4781 size_t ClassLoaderMetaspace::free_words_slow(Metaspace::MetadataType mdtype) const {
4782   Metaspace::assert_not_frozen();
4783   if (mdtype == Metaspace::ClassType) {
4784     return Metaspace::using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
4785   } else {
4786     return vsm()->sum_free_in_chunks_in_use();
4787   }
4788 }
4789 
4790 // Space capacity in the Metaspace.  It includes
4791 // space in the list of chunks from which allocations
4792 // have been made.  It does not include space in the global freelist;
4793 // space available in the block dictionary is not added separately either,
4794 // since it is already counted in some chunk.
4795 size_t ClassLoaderMetaspace::capacity_words_slow(Metaspace::MetadataType mdtype) const {
4796   if (mdtype == Metaspace::ClassType) {
4797     return Metaspace::using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
4798   } else {
4799     return vsm()->sum_capacity_in_chunks_in_use();
4800   }
4801 }
4802 
4803 size_t ClassLoaderMetaspace::used_bytes_slow(Metaspace::MetadataType mdtype) const {
4804   return used_words_slow(mdtype) * BytesPerWord;
4805 }
4806 
4807 size_t ClassLoaderMetaspace::capacity_bytes_slow(Metaspace::MetadataType mdtype) const {
4808   return capacity_words_slow(mdtype) * BytesPerWord;
4809 }
4810 
4811 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
4812   return vsm()->allocated_blocks_bytes() +
4813       (Metaspace::using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
4814 }
4815 
4816 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
4817   return vsm()->allocated_chunks_bytes() +
4818       (Metaspace::using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
4819 }
4820 
4821 void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
4822   Metaspace::assert_not_frozen();
4823   assert(!SafepointSynchronize::is_at_safepoint()
4824          || Thread::current()->is_VM_thread(), "should be the VM thread");
4825 
4826   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
4827 
4828   if (is_class && Metaspace::using_class_space()) {
4829     class_vsm()->deallocate(ptr, word_size);
4830   } else {
4831     vsm()->deallocate(ptr, word_size);
4832   }
4833 }
4834 
4835 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
4836                               MetaspaceObj::Type type, TRAPS) {
4837   assert(!_frozen, "sanity");
4838   if (HAS_PENDING_EXCEPTION) {
4839     assert(false, "Should not allocate with exception pending");
4840     return NULL;  // caller does a CHECK_NULL too
4841   }
4842 
4843   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
4844         "ClassLoaderData::the_null_class_loader_data() should have been used.");
4845 
4846   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
4847 
4848   // Try to allocate metadata.
4849   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
4850 
4851   if (result == NULL) {
4852     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
4853 
4854     // Allocation failed.
4855     if (is_init_completed()) {
4856       // Only start a GC if the bootstrapping has completed.
4857 
4858       // Try to clean out some memory and retry.
4859       result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
4860     }
4861   }
4862 
4863   if (result == NULL) {
4864     report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
4865   }
4866 
4867   // Zero initialize.
4868   Copy::fill_to_words((HeapWord*)result, word_size, 0);
4869 
4870   return result;
4871 }
4872 
4873 size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
4874   assert(Metaspace::using_class_space(), "Has to use class space");
4875   return class_vsm()->calc_chunk_size(word_size);
4876 }
4877 
4878 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
4879   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
4880 
4881   // If result is still null, we are out of memory.
4882   Log(gc, metaspace, freelist) log;
4883   if (log.is_info()) {
4884     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
4885              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
4886     ResourceMark rm;
4887     if (log.is_debug()) {
4888       if (loader_data->metaspace_or_null() != NULL) {
4889         LogStream ls(log.debug());
4890         loader_data->dump(&ls);
4891       }
4892     }
4893     LogStream ls(log.info());
4894     MetaspaceUtils::dump(&ls);
4895     MetaspaceUtils::print_metaspace_map(&ls, mdtype);
4896     ChunkManager::print_all_chunkmanagers(&ls);
4897   }
4898 
4899   bool out_of_compressed_class_space = false;
4900   if (is_class_space_allocation(mdtype)) {
4901     ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
4902     out_of_compressed_class_space =
4903       MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
4904       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
4905       CompressedClassSpaceSize;
4906   }
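  // That is, the failure is attributed to compressed class space exhaustion if committing
  // the chunk needed for this request would push committed class space beyond
  // CompressedClassSpaceSize; otherwise it is reported as a general Metaspace failure.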
4907 
4908   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
4909   const char* space_string = out_of_compressed_class_space ?
4910     "Compressed class space" : "Metaspace";
4911 
4912   report_java_out_of_memory(space_string);
4913 
4914   if (JvmtiExport::should_post_resource_exhausted()) {
4915     JvmtiExport::post_resource_exhausted(
4916         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
4917         space_string);
4918   }
4919 
4920   if (!is_init_completed()) {
4921     vm_exit_during_initialization("OutOfMemoryError", space_string);
4922   }
4923 
4924   if (out_of_compressed_class_space) {
4925     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
4926   } else {
4927     THROW_OOP(Universe::out_of_memory_error_metaspace());
4928   }
4929 }
4930 
4931 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
4932   switch (mdtype) {
4933     case Metaspace::ClassType: return "Class";
4934     case Metaspace::NonClassType: return "Metadata";
4935     default:
4936       assert(false, "Got bad mdtype: %d", (int) mdtype);
4937       return NULL;
4938   }
4939 }
4940 
4941 void Metaspace::purge(MetadataType mdtype) {
4942   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
4943 }
4944 
4945 void Metaspace::purge() {
4946   MutexLockerEx cl(SpaceManager::expand_lock(),
4947                    Mutex::_no_safepoint_check_flag);
4948   purge(NonClassType);
4949   if (using_class_space()) {
4950     purge(ClassType);
4951   }
4952 }
4953 
4954 void ClassLoaderMetaspace::print_on(outputStream* out) const {
4955   // Print both class virtual space counts and metaspace.
4956   if (Verbose) {
4957     vsm()->print_on(out);
4958     if (Metaspace::using_class_space()) {
4959       class_vsm()->print_on(out);
4960     }
4961   }
4962 }
4963 
4964 bool Metaspace::contains(const void* ptr) {
4965   if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
4966     return true;
4967   }
4968   return contains_non_shared(ptr);
4969 }
4970 
4971 bool Metaspace::contains_non_shared(const void* ptr) {
4972   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
4973      return true;
4974   }
4975 
4976   return get_space_list(NonClassType)->contains(ptr);
4977 }
4978 
4979 void ClassLoaderMetaspace::verify() {
4980   vsm()->verify();
4981   if (Metaspace::using_class_space()) {
4982     class_vsm()->verify();
4983   }
4984 }
4985 
4986 void ClassLoaderMetaspace::dump(outputStream* const out) const {
4987   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
4988   vsm()->dump(out);
4989   if (Metaspace::using_class_space()) {
4990     out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
4991     class_vsm()->dump(out);
4992   }
4993 }
4994 
4995 #ifdef ASSERT
4996 static void do_verify_chunk(Metachunk* chunk) {
4997   guarantee(chunk != NULL, "Sanity");
4998   // Verify chunk itself; then verify that it is consistent with the
4999   // occupancy map of its containing node.
5000   chunk->verify();
5001   VirtualSpaceNode* const vsn = chunk->container();
5002   OccupancyMap* const ocmap = vsn->occupancy_map();
5003   ocmap->verify_for_chunk(chunk);
5004 }
5005 #endif
5006 
5007 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
5008   chunk->set_is_tagged_free(!inuse);
5009   OccupancyMap* const ocmap = chunk->container()->occupancy_map();
5010   ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
5011 }
5012 
5013 /////////////// Unit tests ///////////////
5014 
5015 #ifndef PRODUCT
5016 
5017 class TestMetaspaceUtilsTest : AllStatic {
5018  public:
5019   static void test_reserved() {
5020     size_t reserved = MetaspaceUtils::reserved_bytes();
5021 
5022     assert(reserved > 0, "assert");
5023 
5024     size_t committed  = MetaspaceUtils::committed_bytes();
5025     assert(committed <= reserved, "assert");
5026 
5027     size_t reserved_metadata = MetaspaceUtils::reserved_bytes(Metaspace::NonClassType);
5028     assert(reserved_metadata > 0, "assert");
5029     assert(reserved_metadata <= reserved, "assert");
5030 
5031     if (UseCompressedClassPointers) {
5032       size_t reserved_class    = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
5033       assert(reserved_class > 0, "assert");
5034       assert(reserved_class < reserved, "assert");
5035     }
5036   }
5037 
5038   static void test_committed() {
5039     size_t committed = MetaspaceUtils::committed_bytes();
5040 
5041     assert(committed > 0, "assert");
5042 
5043     size_t reserved  = MetaspaceUtils::reserved_bytes();
5044     assert(committed <= reserved, "assert");
5045 
5046     size_t committed_metadata = MetaspaceUtils::committed_bytes(Metaspace::NonClassType);
5047     assert(committed_metadata > 0, "assert");
5048     assert(committed_metadata <= committed, "assert");
5049 
5050     if (UseCompressedClassPointers) {
5051       size_t committed_class    = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
5052       assert(committed_class > 0, "assert");
5053       assert(committed_class < committed, "assert");
5054     }
5055   }
5056 
5057   static void test_virtual_space_list_large_chunk() {
5058     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
5059     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
5060     // Use a size larger than VirtualSpaceSize (256k), plus an extra page, so that the
5061     // request is _not_ vm_allocation_granularity aligned on Windows.
5062     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
5063     large_size += (os::vm_page_size()/BytesPerWord);
5064     vs_list->get_new_chunk(large_size, 0);
5065   }
5066 
5067   static void test() {
5068     test_reserved();
5069     test_committed();
5070     test_virtual_space_list_large_chunk();
5071   }
5072 };
5073 
5074 void TestMetaspaceUtils_test() {
5075   TestMetaspaceUtilsTest::test();
5076 }
5077 
5078 class TestVirtualSpaceNodeTest {
5079   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
5080                                           size_t& num_small_chunks,
5081                                           size_t& num_specialized_chunks) {
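    // Greedily splits words_left into medium, then small, then specialized chunks.
    // Worked example with the non-class chunk sizes (8K/512/128 words): 8064 words
    // split into 0 medium, 15 small and 3 specialized chunks (15*512 + 3*128 == 8064).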
5082     num_medium_chunks = words_left / MediumChunk;
5083     words_left = words_left % MediumChunk;
5084 
5085     num_small_chunks = words_left / SmallChunk;
5086     words_left = words_left % SmallChunk;
5087     // how many specialized chunks can we get?
5088     num_specialized_chunks = words_left / SpecializedChunk;
5089     assert(words_left % SpecializedChunk == 0, "should be nothing left");
5090   }
5091 
5092  public:
5093   static void test() {
5094     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
5095     const size_t vsn_test_size_words = MediumChunk  * 4;
5096     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
5097 
5098     // The chunk sizes must be multiples of each other, or this will fail
5099     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
5100     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
5101 
5102     { // No committed memory in VSN
5103       ChunkManager cm(false);
5104       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5105       vsn.initialize();
5106       vsn.retire(&cm);
5107       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
5108     }
5109 
5110     { // All of VSN is committed, half is used by chunks
5111       ChunkManager cm(false);
5112       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5113       vsn.initialize();
5114       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
5115       vsn.get_chunk_vs(MediumChunk);
5116       vsn.get_chunk_vs(MediumChunk);
5117       vsn.retire(&cm);
5118       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
5119       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
5120     }
5121 
5122     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
5123     // This doesn't work for systems with vm_page_size >= 16K.
5124     if (page_chunks < MediumChunk) {
5125       // 4 pages of VSN is committed, some is used by chunks
5126       ChunkManager cm(false);
5127       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5128 
5129       vsn.initialize();
5130       vsn.expand_by(page_chunks, page_chunks);
5131       vsn.get_chunk_vs(SmallChunk);
5132       vsn.get_chunk_vs(SpecializedChunk);
5133       vsn.retire(&cm);
5134 
5135       // committed - used = words left to retire
5136       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
5137 
5138       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5139       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5140 
5141       assert(num_medium_chunks == 0, "should not get any medium chunks");
5142       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5143       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5144     }
5145 
5146     { // Half of VSN is committed, a humongous chunk is used
5147       ChunkManager cm(false);
5148       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5149       vsn.initialize();
5150       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
5151       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
5152       vsn.retire(&cm);
5153 
5154       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
5155       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5156       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5157 
5158       assert(num_medium_chunks == 0, "should not get any medium chunks");
5159       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5160       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5161     }
5162 
5163   }
5164 
5165 #define assert_is_available_positive(word_size) \
5166   assert(vsn.is_available(word_size), \
5167          #word_size ": " PTR_FORMAT " bytes were not available in " \
5168          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5169          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5170 
5171 #define assert_is_available_negative(word_size) \
5172   assert(!vsn.is_available(word_size), \
5173          #word_size ": " PTR_FORMAT " bytes should not be available in " \
5174          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5175          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5176 
5177   static void test_is_available_positive() {
5178     // Reserve some memory.
5179     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5180     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5181 
5182     // Commit some memory.
5183     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5184     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5185     assert(expanded, "Failed to commit");
5186 
5187     // Check that is_available accepts the committed size.
5188     assert_is_available_positive(commit_word_size);
5189 
5190     // Check that is_available accepts half the committed size.
5191     size_t expand_word_size = commit_word_size / 2;
5192     assert_is_available_positive(expand_word_size);
5193   }
5194 
5195   static void test_is_available_negative() {
5196     // Reserve some memory.
5197     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5198     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5199 
5200     // Commit some memory.
5201     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5202     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5203     assert(expanded, "Failed to commit");
5204 
5205     // Check that is_available doesn't accept a too large size.
5206     size_t two_times_commit_word_size = commit_word_size * 2;
5207     assert_is_available_negative(two_times_commit_word_size);
5208   }
5209 
5210   static void test_is_available_overflow() {
5211     // Reserve some memory.
5212     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5213     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5214 
5215     // Commit some memory.
5216     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5217     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5218     assert(expanded, "Failed to commit");
5219 
5220     // Calculate a size that will overflow the virtual space size.
5221     void* virtual_space_max = (void*)(uintptr_t)-1;
5222     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
5223     size_t overflow_size = bottom_to_max + BytesPerWord;
5224     size_t overflow_word_size = overflow_size / BytesPerWord;
5225 
5226     // Check that is_available can handle the overflow.
5227     assert_is_available_negative(overflow_word_size);
5228   }
5229 
5230   static void test_is_available() {
5231     TestVirtualSpaceNodeTest::test_is_available_positive();
5232     TestVirtualSpaceNodeTest::test_is_available_negative();
5233     TestVirtualSpaceNodeTest::test_is_available_overflow();
5234   }
5235 };
5236 
5237 // The following test is placed here instead of a gtest / unittest file
5238 // because the ChunkManager class is only available in this file.
5239 void ChunkManager_test_list_index() {
5240   ChunkManager manager(true);
5241 
5242   // Test previous bug where a query for a humongous class metachunk,
5243   // incorrectly matched the non-class medium metachunk size.
5244   {
5245     assert(MediumChunk > ClassMediumChunk, "Precondition for test");
5246 
5247     ChunkIndex index = manager.list_index(MediumChunk);
5248 
5249     assert(index == HumongousIndex,
5250            "Requested size is larger than ClassMediumChunk,"
5251            " so should return HumongousIndex. Got index: %d", (int)index);
5252   }
5253 
5254   // Check the specified sizes as well.
5255   {
5256     ChunkIndex index = manager.list_index(ClassSpecializedChunk);
5257     assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
5258   }
5259   {
5260     ChunkIndex index = manager.list_index(ClassSmallChunk);
5261     assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
5262   }
5263   {
5264     ChunkIndex index = manager.list_index(ClassMediumChunk);
5265     assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
5266   }
5267   {
5268     ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
5269     assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index);
5270   }
5271 }
5272 
5273 #endif // !PRODUCT
5274 
5275 #ifdef ASSERT
5276 
5277 // The following test is placed here instead of a gtest / unittest file
5278 // because the ChunkManager class is only available in this file.
5279 class SpaceManagerTest : AllStatic {
5280   friend void SpaceManager_test_adjust_initial_chunk_size();
5281 
5282   static void test_adjust_initial_chunk_size(bool is_class) {
5283     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
5284     const size_t normal   = SpaceManager::small_chunk_size(is_class);
5285     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
5286 
5287 #define test_adjust_initial_chunk_size(value, expected, is_class_value)          \
5288     do {                                                                         \
5289       size_t v = value;                                                          \
5290       size_t e = expected;                                                       \
5291       assert(SpaceManager::adjust_initial_chunk_size(v, (is_class_value)) == e,  \
5292              "Expected: " SIZE_FORMAT " for requested size: " SIZE_FORMAT, e, v);     \
5293     } while (0)
5294 
5295     // Smallest (specialized)
5296     test_adjust_initial_chunk_size(1,            smallest, is_class);
5297     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
5298     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
5299 
5300     // Small
5301     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
5302     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
5303     test_adjust_initial_chunk_size(normal,       normal, is_class);
5304 
5305     // Medium
5306     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
5307     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
5308     test_adjust_initial_chunk_size(medium,     medium, is_class);
5309 
5310     // Humongous
5311     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
5312 
5313 #undef test_adjust_initial_chunk_size
5314   }
5315 
5316   static void test_adjust_initial_chunk_size() {
5317     test_adjust_initial_chunk_size(false);
5318     test_adjust_initial_chunk_size(true);
5319   }
5320 };
5321 
5322 void SpaceManager_test_adjust_initial_chunk_size() {
5323   SpaceManagerTest::test_adjust_initial_chunk_size();
5324 }
5325 
5326 #endif // ASSERT
5327 
5328 struct chunkmanager_statistics_t {
5329   int num_specialized_chunks;
5330   int num_small_chunks;
5331   int num_medium_chunks;
5332   int num_humongous_chunks;
5333 };
5334 
5335 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
5336   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
5337   ChunkManager::ChunkManagerStatistics stat;
5338   chunk_manager->get_statistics(&stat);
5339   out->num_specialized_chunks = (int)stat.num_by_type[SpecializedIndex];
5340   out->num_small_chunks = (int)stat.num_by_type[SmallIndex];
5341   out->num_medium_chunks = (int)stat.num_by_type[MediumIndex];
5342   out->num_humongous_chunks = (int)stat.num_humongous_chunks;
5343 }
5344 
5345 struct chunk_geometry_t {
5346   size_t specialized_chunk_word_size;
5347   size_t small_chunk_word_size;
5348   size_t medium_chunk_word_size;
5349 };
5350 
5351 extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
5352   if (mdType == Metaspace::NonClassType) {
5353     out->specialized_chunk_word_size = SpecializedChunk;
5354     out->small_chunk_word_size = SmallChunk;
5355     out->medium_chunk_word_size = MediumChunk;
5356   } else {
5357     out->specialized_chunk_word_size = ClassSpecializedChunk;
5358     out->small_chunk_word_size = ClassSmallChunk;
5359     out->medium_chunk_word_size = ClassMediumChunk;
5360   }
5361 }
5362