1 /*
   2  * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "gc/shared/gcLocker.hpp"
  29 #include "logging/log.hpp"
  30 #include "logging/logStream.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/binaryTreeDictionary.hpp"
  33 #include "memory/filemap.hpp"
  34 #include "memory/freeList.hpp"
  35 #include "memory/metachunk.hpp"
  36 #include "memory/metaspace.hpp"
  37 #include "memory/metaspaceGCThresholdUpdater.hpp"
  38 #include "memory/metaspaceShared.hpp"
  39 #include "memory/metaspaceTracer.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "memory/universe.hpp"
  42 #include "runtime/atomic.hpp"
  43 #include "runtime/globals.hpp"
  44 #include "runtime/init.hpp"
  45 #include "runtime/java.hpp"
  46 #include "runtime/mutex.hpp"
  47 #include "runtime/orderAccess.inline.hpp"
  48 #include "services/memTracker.hpp"
  49 #include "services/memoryService.hpp"
  50 #include "utilities/align.hpp"
  51 #include "utilities/copy.hpp"
  52 #include "utilities/debug.hpp"
  53 #include "utilities/macros.hpp"
  54 
  55 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  56 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
  57 
  58 // Set this constant to true to enable slow integrity checking of the free chunk lists
  59 const bool metaspace_slow_verify = false;
  60 
  61 // Helper function that performs a series of consistency checks on a chunk.
  62 DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
  63 
  64 // Given a Metachunk, update its in-use information (both in the
  65 // chunk and the occupancy map).
  66 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
  67 
  68 size_t const allocation_from_dictionary_limit = 4 * K;
  69 
  70 MetaWord* last_allocated = 0;
  71 
  72 size_t Metaspace::_compressed_class_space_size;
  73 const MetaspaceTracer* Metaspace::_tracer = NULL;
  74 
  75 DEBUG_ONLY(bool Metaspace::_frozen = false;)
  76 
  77 enum ChunkSizes {    // in words.
  78   ClassSpecializedChunk = 128,
  79   SpecializedChunk = 128,
  80   ClassSmallChunk = 256,
  81   SmallChunk = 512,
  82   ClassMediumChunk = 4 * K,
  83   MediumChunk = 8 * K
  84 };
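// Illustrative note (not part of the original code): with 8-byte MetaWords
// (64-bit), the word counts above correspond to these chunk byte sizes:
//   SpecializedChunk / ClassSpecializedChunk:  128 words =  1 KB
//   ClassSmallChunk:                           256 words =  2 KB
//   SmallChunk:                                512 words =  4 KB
//   ClassMediumChunk:                          4 K words = 32 KB
//   MediumChunk:                               8 K words = 64 KB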
  85 
  86 // Returns the chunk size, in words, for the given non-humongous chunk type.
  87 size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
  88   assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
  89   size_t size = 0;
  90   if (is_class) {
  91     switch(chunktype) {
  92       case SpecializedIndex: size = ClassSpecializedChunk; break;
  93       case SmallIndex: size = ClassSmallChunk; break;
  94       case MediumIndex: size = ClassMediumChunk; break;
  95       default:
  96         ShouldNotReachHere();
  97     }
  98   } else {
  99     switch(chunktype) {
 100       case SpecializedIndex: size = SpecializedChunk; break;
 101       case SmallIndex: size = SmallChunk; break;
 102       case MediumIndex: size = MediumChunk; break;
 103       default:
 104         ShouldNotReachHere();
 105     }
 106   }
 107   return size;
 108 }
 109 
 110 ChunkIndex get_chunk_type_by_size(size_t size, bool is_class) {
 111   if (is_class) {
 112     if (size == ClassSpecializedChunk) {
 113       return SpecializedIndex;
 114     } else if (size == ClassSmallChunk) {
 115       return SmallIndex;
 116     } else if (size == ClassMediumChunk) {
 117       return MediumIndex;
 118     } else if (size > ClassMediumChunk) {
 119       assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
 120       return HumongousIndex;
 121     }
 122   } else {
 123     if (size == SpecializedChunk) {
 124       return SpecializedIndex;
 125     } else if (size == SmallChunk) {
 126       return SmallIndex;
 127     } else if (size == MediumChunk) {
 128       return MediumIndex;
 129     } else if (size > MediumChunk) {
 130       assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
 131       return HumongousIndex;
 132     }
 133   }
 134   ShouldNotReachHere();
 135   return (ChunkIndex)-1;
 136 }
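// Example (illustrative, not part of the original code): for the class space,
// get_chunk_type_by_size(ClassSmallChunk, true) yields SmallIndex, while any
// size larger than ClassMediumChunk that is a multiple of ClassSpecializedChunk
// (e.g. 8 * K words) yields HumongousIndex.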
 137 
 138 
 139 static ChunkIndex next_chunk_index(ChunkIndex i) {
 140   assert(i < NumberOfInUseLists, "Out of bounds");
 141   return (ChunkIndex) (i+1);
 142 }
 143 
 144 static ChunkIndex prev_chunk_index(ChunkIndex i) {
 145   assert(i > ZeroIndex, "Out of bounds");
 146   return (ChunkIndex) (i-1);
 147 }
 148 
 149 static const char* scale_unit(size_t scale) {
 150   switch(scale) {
 151     case 1: return "BYTES";
 152     case K: return "KB";
 153     case M: return "MB";
 154     case G: return "GB";
 155     default:
 156       ShouldNotReachHere();
 157       return NULL;
 158   }
 159 }
 160 
 161 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 162 uint MetaspaceGC::_shrink_factor = 0;
 163 bool MetaspaceGC::_should_concurrent_collect = false;
 164 
 165 typedef class FreeList<Metachunk> ChunkList;
 166 
 167 // Manages the global free lists of chunks.
 168 class ChunkManager : public CHeapObj<mtInternal> {
 169   friend class TestVirtualSpaceNodeTest;
 170 
 171   // Free list of chunks of different sizes.
 172   //   SpecializedChunk
 173   //   SmallChunk
 174   //   MediumChunk
 175   ChunkList _free_chunks[NumberOfFreeLists];
 176 
 177   // Whether or not this is the class chunkmanager.
 178   const bool _is_class;
 179 
 180   // Return non-humongous chunk list by its index.
 181   ChunkList* free_chunks(ChunkIndex index);
 182 
 183   // Returns non-humongous chunk list for the given chunk word size.
 184   ChunkList* find_free_chunks_list(size_t word_size);
 185 
 186   //   HumongousChunk
 187   ChunkTreeDictionary _humongous_dictionary;
 188 
 189   // Returns the humongous chunk dictionary.
 190   ChunkTreeDictionary* humongous_dictionary() {
 191     return &_humongous_dictionary;
 192   }
 193 
 194   // Size, in metaspace words, of all chunks managed by this ChunkManager
 195   size_t _free_chunks_total;
 196   // Number of chunks in this ChunkManager
 197   size_t _free_chunks_count;
 198 
 199   // Update counters after a chunk was added or removed.
 200   void account_for_added_chunk(const Metachunk* c);
 201   void account_for_removed_chunk(const Metachunk* c);
 202 
 203   // Debug support
 204 
 205   size_t sum_free_chunks();
 206   size_t sum_free_chunks_count();
 207 
 208   void locked_verify_free_chunks_total();
 209   void slow_locked_verify_free_chunks_total() {
 210     if (metaspace_slow_verify) {
 211       locked_verify_free_chunks_total();
 212     }
 213   }
 214   void locked_verify_free_chunks_count();
 215   void slow_locked_verify_free_chunks_count() {
 216     if (metaspace_slow_verify) {
 217       locked_verify_free_chunks_count();
 218     }
 219   }
 220   void verify_free_chunks_count();
 221 
 222   // Given a pointer to a chunk, attempts to merge it with neighboring
 223   // free chunks to form a bigger chunk. Returns true if successful.
 224   bool attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type);
 225 
 226   // Helper for chunk merging:
 227   //  Given an address range with 1-n chunks which are all supposed to be
 228   //  free and hence currently managed by this ChunkManager, remove them
 229   //  from this ChunkManager and mark them as invalid.
 230   // - This does not correct the occupancy map.
 231   // - This does not adjust the counters in ChunkManager.
 232   // - Does not adjust the container count in the containing VirtualSpaceNode.
 233   // Returns number of chunks removed.
 234   int remove_chunks_in_area(MetaWord* p, size_t word_size);
 235 
 236   // Helper for chunk splitting: given a target chunk size and a larger free chunk,
 237   // split up the larger chunk into n smaller chunks, at least one of which should be
 238   // the target chunk of target chunk size. The smaller chunks, including the target
 239   // chunk, are returned to the freelist. The pointer to the target chunk is returned.
 240   // Note that this chunk is supposed to be removed from the freelist right away.
 241   Metachunk* split_chunk(size_t target_chunk_word_size, Metachunk* chunk);
 242 
 243  public:
 244 
 245   struct ChunkManagerStatistics {
 246     size_t num_by_type[NumberOfFreeLists];
 247     size_t single_size_by_type[NumberOfFreeLists];
 248     size_t total_size_by_type[NumberOfFreeLists];
 249     size_t num_humongous_chunks;
 250     size_t total_size_humongous_chunks;
 251   };
 252 
 253   void locked_get_statistics(ChunkManagerStatistics* stat) const;
 254   void get_statistics(ChunkManagerStatistics* stat) const;
 255   static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale);
 256 
 257 
 258   ChunkManager(bool is_class)
 259       : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
 260     _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
 261     _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
 262     _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
 263   }
 264 
 265   // Allocate (remove) a chunk of the given word size from the global freelist.
 266   Metachunk* chunk_freelist_allocate(size_t word_size);
 267 
 268   // Map a size to a list index assuming that there are lists
 269   // for special, small, medium, and humongous chunks.
 270   ChunkIndex list_index(size_t size);
 271 
 272   // Map a given index to the chunk size.
 273   size_t size_by_index(ChunkIndex index) const;
 274 
 275   bool is_class() const { return _is_class; }
 276 
 277   // Convenience accessors.
 278   size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); }
 279   size_t small_chunk_word_size() const { return size_by_index(SmallIndex); }
 280   size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); }
 281 
 282   // Take a chunk from the ChunkManager. The chunk is expected to be in
 283   // the chunk manager (the freelist if non-humongous, the dictionary if
 284   // humongous).
 285   void remove_chunk(Metachunk* chunk);
 286 
 287   // Return a single chunk of type index to the ChunkManager.
 288   void return_single_chunk(ChunkIndex index, Metachunk* chunk);
 289 
 290   // Add the simple linked list of chunks to the freelist of chunks
 291   // of type index.
 292   void return_chunk_list(ChunkIndex index, Metachunk* chunk);
 293 
 294   // Total of the space in the free chunks list
 295   size_t free_chunks_total_words();
 296   size_t free_chunks_total_bytes();
 297 
 298   // Number of chunks in the free chunks list
 299   size_t free_chunks_count();
 300 
 301   // Remove from a list by size.  Selects list based on size of chunk.
 302   Metachunk* free_chunks_get(size_t chunk_word_size);
 303 
 304 #define index_bounds_check(index)                                         \
 305   assert(index == SpecializedIndex ||                                     \
 306          index == SmallIndex ||                                           \
 307          index == MediumIndex ||                                          \
 308          index == HumongousIndex, "Bad index: %d", (int) index)
 309 
 310   size_t num_free_chunks(ChunkIndex index) const {
 311     index_bounds_check(index);
 312 
 313     if (index == HumongousIndex) {
 314       return _humongous_dictionary.total_free_blocks();
 315     }
 316 
 317     ssize_t count = _free_chunks[index].count();
 318     return count == -1 ? 0 : (size_t) count;
 319   }
 320 
 321   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 322     index_bounds_check(index);
 323 
 324     size_t word_size = 0;
 325     if (index == HumongousIndex) {
 326       word_size = _humongous_dictionary.total_size();
 327     } else {
 328       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 329       word_size = size_per_chunk_in_words * num_free_chunks(index);
 330     }
 331 
 332     return word_size * BytesPerWord;
 333   }
 334 
 335   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 336     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 337                                          num_free_chunks(SmallIndex),
 338                                          num_free_chunks(MediumIndex),
 339                                          num_free_chunks(HumongousIndex),
 340                                          size_free_chunks_in_bytes(SpecializedIndex),
 341                                          size_free_chunks_in_bytes(SmallIndex),
 342                                          size_free_chunks_in_bytes(MediumIndex),
 343                                          size_free_chunks_in_bytes(HumongousIndex));
 344   }
 345 
 346   // Debug support
 347   void verify();
 348   void slow_verify() {
 349     if (metaspace_slow_verify) {
 350       verify();
 351     }
 352   }
 353   void locked_verify();
 354   void slow_locked_verify() {
 355     if (metaspace_slow_verify) {
 356       locked_verify();
 357     }
 358   }
 359   void verify_free_chunks_total();
 360 
 361   void locked_print_free_chunks(outputStream* st);
 362   void locked_print_sum_free_chunks(outputStream* st);
 363 
 364   void print_on(outputStream* st) const;
 365 
 366   // Prints the composition of both the non-class and (if available)
 367   // the class chunk managers.
 368   static void print_all_chunkmanagers(outputStream* out, size_t scale = 1);
 369 };
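// Usage sketch (illustrative, assuming the locking convention used elsewhere in
// this file; not part of the original code): callers hold
// SpaceManager::expand_lock() while taking chunks from and returning chunks to
// a ChunkManager, e.g.:
//
//   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
//   Metachunk* c = chunk_manager->free_chunks_get(chunk_word_size);
//   // ... use the chunk ...
//   chunk_manager->return_single_chunk(chunk_manager->list_index(c->word_size()), c);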
 370 
 371 class SmallBlocks : public CHeapObj<mtClass> {
 372   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 373   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 374 
 375  private:
 376   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 377 
 378   FreeList<Metablock>& list_at(size_t word_size) {
 379     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 380     return _small_lists[word_size - _small_block_min_size];
 381   }
 382 
 383  public:
 384   SmallBlocks() {
 385     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 386       uint k = i - _small_block_min_size;
 387       _small_lists[k].set_size(i);
 388     }
 389   }
 390 
 391   size_t total_size() const {
 392     size_t result = 0;
 393     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 394       uint k = i - _small_block_min_size;
 395       result = result + _small_lists[k].count() * _small_lists[k].size();
 396     }
 397     return result;
 398   }
 399 
 400   static uint small_block_max_size() { return _small_block_max_size; }
 401   static uint small_block_min_size() { return _small_block_min_size; }
 402 
 403   MetaWord* get_block(size_t word_size) {
 404     if (list_at(word_size).count() > 0) {
 405       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 406       return new_block;
 407     } else {
 408       return NULL;
 409     }
 410   }
 411   void return_block(Metablock* free_chunk, size_t word_size) {
 412     list_at(word_size).return_chunk_at_head(free_chunk, false);
 413     assert(list_at(word_size).count() > 0, "Should have a chunk");
 414   }
 415 
 416   void print_on(outputStream* st) const {
 417     st->print_cr("SmallBlocks:");
 418     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 419       uint k = i - _small_block_min_size;
 420       st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
 421     }
 422   }
 423 };
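// Illustrative note (not part of the original code): SmallBlocks keeps one
// FreeList per block size from _small_block_min_size up to
// _small_block_max_size - 1 words; a block of N words lives in
// _small_lists[N - _small_block_min_size]. For example, assuming
// _small_block_min_size == 3, a freed 5-word block is kept in _small_lists[2]
// and is handed out again by get_block(5).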
 424 
 425 // Used to manage the free list of Metablocks (a block corresponds
 426 // to the allocation of a quantum of metadata).
 427 class BlockFreelist : public CHeapObj<mtClass> {
 428   BlockTreeDictionary* const _dictionary;
 429   SmallBlocks* _small_blocks;
 430 
 431   // Only allocate and split from freelist if the size of the allocation
 432   // is at least 1/4th the size of the available block.
 433   const static int WasteMultiplier = 4;
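  // Worked example (illustrative, not part of the original code): with
  // WasteMultiplier == 4, a request for 10 words may be satisfied by splitting
  // a free block of up to 40 words, while a 64-word free block is left alone
  // because 10 < 64 / 4 and splitting it would waste too much space.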
 434 
 435   // Accessors
 436   BlockTreeDictionary* dictionary() const { return _dictionary; }
 437   SmallBlocks* small_blocks() {
 438     if (_small_blocks == NULL) {
 439       _small_blocks = new SmallBlocks();
 440     }
 441     return _small_blocks;
 442   }
 443 
 444  public:
 445   BlockFreelist();
 446   ~BlockFreelist();
 447 
 448   // Get a block from, or return a block to, the free list
 449   MetaWord* get_block(size_t word_size);
 450   void return_block(MetaWord* p, size_t word_size);
 451 
 452   size_t total_size() const  {
 453     size_t result = dictionary()->total_size();
 454     if (_small_blocks != NULL) {
 455       result = result + _small_blocks->total_size();
 456     }
 457     return result;
 458   }
 459 
 460   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 461   void print_on(outputStream* st) const;
 462 };
 463 
 464 // Helper for Occupancy Bitmap. A type trait that yields an all-bits-are-one unsigned constant.
 465 template <typename T> struct all_ones  { static const T value; };
 466 template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
 467 template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
 468 
 469 // The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
 470 // keeps information about
 471 // - where a chunk starts
 472 // - whether a chunk is in-use or free
 473 // A bit in this bitmap represents one range of memory in the smallest
 474 // chunk size (SpecializedChunk or ClassSpecializedChunk).
 475 class OccupancyMap : public CHeapObj<mtInternal> {
 476 
 477   // The address range this map covers.
 478   const MetaWord* const _reference_address;
 479   const size_t _word_size;
 480 
 481   // The word size of a specialized chunk, aka the number of words one
 482   // bit in this map represents.
 483   const size_t _smallest_chunk_word_size;
 484 
 485   // map data
 486   // Data are organized in two bit layers:
 487   // The first layer is the chunk-start-map. Here, a bit is set to mark
 488   // the corresponding region as the head of a chunk.
 489   // The second layer is the in-use-map. Here, a set bit indicates that
 490   // the corresponding region belongs to a chunk which is in use.
 491   uint8_t* _map[2];
 492 
 493   enum { layer_chunk_start_map = 0, layer_in_use_map = 1 };
 494 
 495   // length, in bytes, of bitmap data
 496   size_t _map_size;
 497 
 498   // Returns true if bit at position pos at bit-layer layer is set.
 499   bool get_bit_at_position(unsigned pos, unsigned layer) const {
 500     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 501     const unsigned byteoffset = pos / 8;
 502     assert(byteoffset < _map_size,
 503            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 504     const unsigned mask = 1 << (pos % 8);
 505     return (_map[layer][byteoffset] & mask) > 0;
 506   }
 507 
 508   // Changes bit at position pos at bit-layer layer to value v.
 509   void set_bit_at_position(unsigned pos, unsigned layer, bool v) {
 510     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 511     const unsigned byteoffset = pos / 8;
 512     assert(byteoffset < _map_size,
 513            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 514     const unsigned mask = 1 << (pos % 8);
 515     if (v) {
 516       _map[layer][byteoffset] |= mask;
 517     } else {
 518       _map[layer][byteoffset] &= ~mask;
 519     }
 520   }
 521 
 522   // Optimized case of is_any_bit_set_in_region for 32/64bit aligned access:
 523   // pos is 32/64 aligned and num_bits is 32/64.
 524   // This is the typical case when coalescing to medium chunks, whose size is
 525   // 32 or 64 times the specialized chunk size (class or non-class case,
 526   // respectively), so they occupy 32 or 64 bits which should be 32/64-bit
 527   // aligned, because chunks are chunk-size aligned.
 528   template <typename T>
 529   bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const {
 530     assert(_map_size > 0, "not initialized");
 531     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 532     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos);
 533     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits);
 534     const size_t byteoffset = pos / 8;
 535     assert(byteoffset <= (_map_size - sizeof(T)),
 536            "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 537     const T w = *(T*)(_map[layer] + byteoffset);
 538     return w != 0;
 539   }
 540 
 541   // Returns true if any bit in region [pos1, pos1 + num_bits) is set in bit-layer layer.
 542   bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const {
 543     if (pos % 32 == 0 && num_bits == 32) {
 544       return is_any_bit_set_in_region_3264<uint32_t>(pos, num_bits, layer);
 545     } else if (pos % 64 == 0 && num_bits == 64) {
 546       return is_any_bit_set_in_region_3264<uint64_t>(pos, num_bits, layer);
 547     } else {
 548       for (unsigned n = 0; n < num_bits; n ++) {
 549         if (get_bit_at_position(pos + n, layer)) {
 550           return true;
 551         }
 552       }
 553     }
 554     return false;
 555   }
 556 
 557   // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer.
 558   bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const {
 559     assert(word_size % _smallest_chunk_word_size == 0,
 560         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 561     const unsigned pos = get_bitpos_for_address(p);
 562     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 563     return is_any_bit_set_in_region(pos, num_bits, layer);
 564   }
 565 
 566   // Optimized case of set_bits_of_region for 32/64bit aligned access:
 567   // pos is 32/64 aligned and num_bits is 32/64.
 568   // This is the typical case when coalescing to medium chunks, whose size
 569   // is 32 or 64 times the specialized chunk size (class or non-class case,
 570   // respectively), so they occupy 32 or 64 bits which should be 32/64-bit
 571   // aligned, because chunks are chunk-size aligned.
 572   template <typename T>
 573   void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 574     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).",
 575            (unsigned)(sizeof(T) * 8), pos);
 576     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.",
 577            num_bits, (unsigned)(sizeof(T) * 8));
 578     const size_t byteoffset = pos / 8;
 579     assert(byteoffset <= (_map_size - sizeof(T)),
 580            "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 581     T* const pw = (T*)(_map[layer] + byteoffset);
 582     *pw = v ? all_ones<T>::value : (T) 0;
 583   }
 584 
 585   // Set all bits in a region starting at pos to a value.
 586   void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 587     assert(_map_size > 0, "not initialized");
 588     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 589     if (pos % 32 == 0 && num_bits == 32) {
 590       set_bits_of_region_T<uint32_t>(pos, num_bits, layer, v);
 591     } else if (pos % 64 == 0 && num_bits == 64) {
 592       set_bits_of_region_T<uint64_t>(pos, num_bits, layer, v);
 593     } else {
 594       for (unsigned n = 0; n < num_bits; n ++) {
 595         set_bit_at_position(pos + n, layer, v);
 596       }
 597     }
 598   }
 599 
 600   // Helper: sets all bits in a region [p, p+word_size).
 601   void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) {
 602     assert(word_size % _smallest_chunk_word_size == 0,
 603         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 604     const unsigned pos = get_bitpos_for_address(p);
 605     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 606     set_bits_of_region(pos, num_bits, layer, v);
 607   }
 608 
 609   // Helper: given an address, return the bit position representing that address.
 610   unsigned get_bitpos_for_address(const MetaWord* p) const {
 611     assert(_reference_address != NULL, "not initialized");
 612     assert(p >= _reference_address && p < _reference_address + _word_size,
 613            "Address %p out of range for occupancy map [%p..%p).",
 614             p, _reference_address, _reference_address + _word_size);
 615     assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)),
 616            "Address not aligned (%p).", p);
 617     const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size;
 618     assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity.");
 619     return (unsigned) d;
 620   }
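  // Worked example (illustrative, not part of the original code): with
  // _smallest_chunk_word_size == 128, an address p == _reference_address + 4096
  // (words) maps to bit position 4096 / 128 == 32 in each layer.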
 621 
 622  public:
 623 
 624   OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) :
 625     _reference_address(reference_address), _word_size(word_size),
 626     _smallest_chunk_word_size(smallest_chunk_word_size) {
 627     assert(reference_address != NULL, "invalid reference address");
 628     assert(is_aligned(reference_address, smallest_chunk_word_size),
 629            "Reference address not aligned to smallest chunk size.");
 630     assert(is_aligned(word_size, smallest_chunk_word_size),
 631            "Word_size shall be a multiple of the smallest chunk size.");
 632     // Calculate bitmap size: one bit per smallest_chunk_word_size'd area.
 633     size_t num_bits = word_size / smallest_chunk_word_size;
 634     _map_size = (num_bits + 7) / 8;
 635     assert(_map_size * 8 >= num_bits, "sanity");
 636     _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal);
 637     _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal);
 638     assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed.");
 639     memset(_map[1], 0, _map_size);
 640     memset(_map[0], 0, _map_size);
 641     // Sanity test: the first and last possible chunk start addresses in the
 642     // covered range shall map to the first and last bits in the bitmap.
 643     assert(get_bitpos_for_address(reference_address) == 0,
 644       "First chunk address in range must map to first bit in bitmap.");
 645     assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1,
 646       "Last chunk address in range must map to last bit in bitmap.");
 647   }
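  // Sizing example (illustrative, not part of the original code): for a node
  // covering 256 K words with a smallest chunk size of 128 words, num_bits is
  // 2048, so each of the two layers needs _map_size == 256 bytes.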
 648 
 649   ~OccupancyMap() {
 650     os::free(_map[0]);
 651     os::free(_map[1]);
 652   }
 653 
 654   // Returns true if a chunk starts at address p.
 655   bool chunk_starts_at_address(MetaWord* p) const {
 656     const unsigned pos = get_bitpos_for_address(p);
 657     return get_bit_at_position(pos, layer_chunk_start_map);
 658   }
 659 
 660   void set_chunk_starts_at_address(MetaWord* p, bool v) {
 661     const unsigned pos = get_bitpos_for_address(p);
 662     set_bit_at_position(pos, layer_chunk_start_map, v);
 663   }
 664 
 665   // Removes all chunk-start-bits inside a region, typically as a
 666   // result of a chunk merge.
 667   void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) {
 668     set_bits_of_region(p, word_size, layer_chunk_start_map, false);
 669   }
 670 
 671   // Returns true if there are live (in use) chunks in the region limited
 672   // by [p, p+word_size).
 673   bool is_region_in_use(MetaWord* p, size_t word_size) const {
 674     return is_any_bit_set_in_region(p, word_size, layer_in_use_map);
 675   }
 676 
 677   // Marks the region starting at p with the size word_size as in use
 678   // or free, depending on v.
 679   void set_region_in_use(MetaWord* p, size_t word_size, bool v) {
 680     set_bits_of_region(p, word_size, layer_in_use_map, v);
 681   }
 682 
 683 #ifdef ASSERT
 684   // Verify occupancy map for the address range [from, to).
 685   // We need to tell it the address range, because the memory the
 686   // occupancy map is covering may not be fully committed yet.
 687   void verify(MetaWord* from, MetaWord* to) {
 688     Metachunk* chunk = NULL;
 689     int nth_bit_for_chunk = 0;
 690     MetaWord* chunk_end = NULL;
 691     for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) {
 692       const unsigned pos = get_bitpos_for_address(p);
 693       // Check the chunk-starts-info:
 694       if (get_bit_at_position(pos, layer_chunk_start_map)) {
 695         // Chunk start marked in bitmap.
 696         chunk = (Metachunk*) p;
 697         if (chunk_end != NULL) {
 698           assert(chunk_end == p, "Unexpected chunk start found at %p (expected "
 699                  "the next chunk to start at %p).", p, chunk_end);
 700         }
 701         assert(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p);
 702         if (chunk->get_chunk_type() != HumongousIndex) {
 703           guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p);
 704         }
 705         chunk_end = p + chunk->word_size();
 706         nth_bit_for_chunk = 0;
 707         assert(chunk_end <= to, "Chunk end overlaps test address range.");
 708       } else {
 709         // No chunk start marked in bitmap.
 710         assert(chunk != NULL, "Chunk should start at start of address range.");
 711         assert(p < chunk_end, "Did not find expected chunk start at %p.", p);
 712         nth_bit_for_chunk ++;
 713       }
 714       // Check the in-use-info:
 715       const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map);
 716       if (in_use_bit) {
 717         assert(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u).",
 718                chunk, nth_bit_for_chunk);
 719       } else {
 720         assert(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u).",
 721                chunk, nth_bit_for_chunk);
 722       }
 723     }
 724   }
 725 
 726   // Verify that a given chunk is correctly accounted for in the bitmap.
 727   void verify_for_chunk(Metachunk* chunk) {
 728     assert(chunk_starts_at_address((MetaWord*) chunk),
 729            "No chunk start marked in map for chunk %p.", chunk);
 730     // For chunks larger than the minimal chunk size, no other chunk
 731     // must start in its area.
 732     if (chunk->word_size() > _smallest_chunk_word_size) {
 733       assert(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size,
 734                                        chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map),
 735              "No chunk must start within another chunk.");
 736     }
 737     if (!chunk->is_tagged_free()) {
 738       assert(is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 739              "Chunk %p is in use but marked as free in map (%d %d).",
 740              chunk, chunk->get_chunk_type(), chunk->get_origin());
 741     } else {
 742       assert(!is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 743              "Chunk %p is free but marked as in-use in map (%d %d).",
 744              chunk, chunk->get_chunk_type(), chunk->get_origin());
 745     }
 746   }
 747 
 748 #endif // ASSERT
 749 
 750 };
 751 
 752 // A VirtualSpaceList node.
 753 class VirtualSpaceNode : public CHeapObj<mtClass> {
 754   friend class VirtualSpaceList;
 755 
 756   // Link to next VirtualSpaceNode
 757   VirtualSpaceNode* _next;
 758 
 759   // Whether this node is contained in class or metaspace.
 760   const bool _is_class;
 761 
 762   // total in the VirtualSpace
 763   MemRegion _reserved;
 764   ReservedSpace _rs;
 765   VirtualSpace _virtual_space;
 766   MetaWord* _top;
 767   // count of chunks contained in this VirtualSpace
 768   uintx _container_count;
 769 
 770   OccupancyMap* _occupancy_map;
 771 
 772   // Convenience functions to access the _virtual_space
 773   char* low()  const { return virtual_space()->low(); }
 774   char* high() const { return virtual_space()->high(); }
 775 
 776   // The first Metachunk will be allocated at the bottom of the
 777   // VirtualSpace
 778   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 779 
 780   // Committed but unused space in the virtual space
 781   size_t free_words_in_vs() const;
 782 
 783   // True if this node belongs to class metaspace.
 784   bool is_class() const { return _is_class; }
 785 
 786   // Helper function for take_from_committed: allocate padding chunks
 787   // until top is at the given address.
 788   void allocate_padding_chunks_until_top_is_at(MetaWord* target_top);
 789 
 790  public:
 791 
 792   VirtualSpaceNode(bool is_class, size_t byte_size);
 793   VirtualSpaceNode(bool is_class, ReservedSpace rs) :
 794     _is_class(is_class), _top(NULL), _next(NULL), _rs(rs), _container_count(0), _occupancy_map(NULL) {}
 795   ~VirtualSpaceNode();
 796 
 797   // Convenience functions for logical bottom and end
 798   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 799   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 800 
 801   const OccupancyMap* occupancy_map() const { return _occupancy_map; }
 802   OccupancyMap* occupancy_map() { return _occupancy_map; }
 803 
 804   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 805 
 806   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 807   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 808 
 809   bool is_pre_committed() const { return _virtual_space.special(); }
 810 
 812   // Accessors
 813   VirtualSpaceNode* next() { return _next; }
 814   void set_next(VirtualSpaceNode* v) { _next = v; }
 815 
 816   void set_reserved(MemRegion const v) { _reserved = v; }
 817   void set_top(MetaWord* v) { _top = v; }
 818 
 819   // Accessors
 820   MemRegion* reserved() { return &_reserved; }
 821   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 822 
 823   // Returns true if "word_size" is available in the VirtualSpace
 824   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 825 
 826   MetaWord* top() const { return _top; }  // Address of next available space in _virtual_space.
 827   void inc_top(size_t word_size) { _top += word_size; }
 828 
 829   uintx container_count() { return _container_count; }
 830   void inc_container_count();
 831   void dec_container_count();
 832 #ifdef ASSERT
 833   uintx container_count_slow();
 834   void verify_container_count();
 835 #endif
 836 
 837   // used and capacity in this single entry in the list
 838   size_t used_words_in_vs() const;
 839   size_t capacity_words_in_vs() const;
 840 
 841   bool initialize();
 842 
 843   // get space from the virtual space
 844   Metachunk* take_from_committed(size_t chunk_word_size);
 845 
 846   // Allocate a chunk from the virtual space and return it.
 847   Metachunk* get_chunk_vs(size_t chunk_word_size);
 848 
 849   // Expands/shrinks the committed space in a virtual space.  Delegates
 850   // to Virtualspace
 851   bool expand_by(size_t min_words, size_t preferred_words);
 852 
 853   // In preparation for deleting this node, remove all the chunks
 854   // in the node from any freelist.
 855   void purge(ChunkManager* chunk_manager);
 856 
 857   // If an allocation doesn't fit in the current node a new node is created.
 858   // Allocate chunks out of the remaining committed space in this node
 859   // to avoid wasting that memory.
 860   // This always adds up because all the chunk sizes are multiples of
 861   // the smallest chunk size.
 862   void retire(ChunkManager* chunk_manager);
 863 
 864 
 865   void print_on(outputStream* st) const;
 866   void print_map(outputStream* st, bool is_class) const;
 867 
 868   // Debug support
 869   DEBUG_ONLY(void mangle();)
 870   // Verify counters, all chunks in this list node and the occupancy map.
 871   DEBUG_ONLY(void verify();)
 872   // Verify that all free chunks in this node are ideally merged
 873   // (there should not be multiple small chunks where a large chunk could exist).
 874   DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
 875 
 876 };
 877 
 878 #define assert_is_aligned(value, alignment)                  \
 879   assert(is_aligned((value), (alignment)),                   \
 880          SIZE_FORMAT_HEX " is not aligned to "               \
 881          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 882 
 883 // Decide if large pages should be committed when the memory is reserved.
 884 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 885   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 886     size_t words = bytes / BytesPerWord;
 887     bool is_class = false; // We never reserve large pages for the class space.
 888     if (MetaspaceGC::can_expand(words, is_class) &&
 889         MetaspaceGC::allowed_expansion() >= words) {
 890       return true;
 891     }
 892   }
 893 
 894   return false;
 895 }
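// Note (descriptive, not part of the original code): this returns true only
// when large pages are requested for metaspace but the OS cannot commit
// large-paged memory incrementally; in that case the whole reservation is
// committed up front, provided MetaspaceGC would allow an expansion of that size.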
 896 
 897 // 'bytes' is the size, in bytes, of the associated virtual space.
 898 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
 899   _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
 900   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 901   bool large_pages = should_commit_large_pages_when_reserving(bytes);
 902   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 903 
 904   if (_rs.is_reserved()) {
 905     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 906     assert(_rs.size() != 0, "Catch if we get a 0 size");
 907     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
 908     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
 909 
 910     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 911   }
 912 }
 913 
 914 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 915   DEBUG_ONLY(this->verify();)
 916   Metachunk* chunk = first_chunk();
 917   Metachunk* invalid_chunk = (Metachunk*) top();
 918   while (chunk < invalid_chunk ) {
 919     assert(chunk->is_tagged_free(), "Should be tagged free");
 920     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 921     chunk_manager->remove_chunk(chunk);
 922     chunk->remove_sentinel();
 923     assert(chunk->next() == NULL &&
 924            chunk->prev() == NULL,
 925            "Was not removed from its list");
 926     chunk = (Metachunk*) next;
 927   }
 928 }
 929 
 930 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
 931 
 932   if (bottom() == top()) {
 933     return;
 934   }
 935 
 936   const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
 937   const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
 938   const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
 939 
 940   int line_len = 100;
 941   const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
 942   line_len = (int)(section_len / spec_chunk_size);
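  // Worked example (illustrative, not part of the original code): for the
  // non-class space, spec_chunk_size == 128 and med_chunk_size == 8 * K, so
  // section_len == align_up(128 * 100, 8 * K) == 16 K words and line_len
  // becomes 16 K / 128 == 128 positions per printed line.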
 943 
 944   static const int NUM_LINES = 4;
 945 
 946   char* lines[NUM_LINES];
 947   for (int i = 0; i < NUM_LINES; i ++) {
 948     lines[i] = (char*)os::malloc(line_len, mtInternal);
 949   }
 950   int pos = 0;
 951   const MetaWord* p = bottom();
 952   const Metachunk* chunk = (const Metachunk*)p;
 953   const MetaWord* chunk_end = p + chunk->word_size();
 954   while (p < top()) {
 955     if (pos == line_len) {
 956       pos = 0;
 957       for (int i = 0; i < NUM_LINES; i ++) {
 958         st->fill_to(22);
 959         st->print_raw(lines[i], line_len);
 960         st->cr();
 961       }
 962     }
 963     if (pos == 0) {
 964       st->print(PTR_FORMAT ":", p2i(p));
 965     }
 966     if (p == chunk_end) {
 967       chunk = (Metachunk*)p;
 968       chunk_end = p + chunk->word_size();
 969     }
 970     // line 1: chunk starting points (a dot if that area is a chunk start).
 971     lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
 972 
 973     // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
 974     // chunk is in use.
 975     const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
 976     if (chunk->word_size() == spec_chunk_size) {
 977       lines[1][pos] = chunk_is_free ? 'x' : 'X';
 978     } else if (chunk->word_size() == small_chunk_size) {
 979       lines[1][pos] = chunk_is_free ? 's' : 'S';
 980     } else if (chunk->word_size() == med_chunk_size) {
 981       lines[1][pos] = chunk_is_free ? 'm' : 'M';
 982     } else if (chunk->word_size() > med_chunk_size) {
 983       lines[1][pos] = chunk_is_free ? 'h' : 'H';
 984     } else {
 985       ShouldNotReachHere();
 986     }
 987 
 988     // Line 3: chunk origin
 989     const ChunkOrigin origin = chunk->get_origin();
 990     lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
 991 
 992     // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
 993     //         but were never used.
 994     lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
 995 
 996     p += spec_chunk_size;
 997     pos ++;
 998   }
 999   if (pos > 0) {
1000     for (int i = 0; i < NUM_LINES; i ++) {
1001       st->fill_to(22);
1002       st->print_raw(lines[i], line_len);
1003       st->cr();
1004     }
1005   }
1006   for (int i = 0; i < NUM_LINES; i ++) {
1007     os::free(lines[i]);
1008   }
1009 }
1010 
1011 
1012 #ifdef ASSERT
1013 uintx VirtualSpaceNode::container_count_slow() {
1014   uintx count = 0;
1015   Metachunk* chunk = first_chunk();
1016   Metachunk* invalid_chunk = (Metachunk*) top();
1017   while (chunk < invalid_chunk ) {
1018     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1019     do_verify_chunk(chunk);
1020     // Don't count the chunks on the free lists.  Those are
1021     // still part of the VirtualSpaceNode but not currently
1022     // counted.
1023     if (!chunk->is_tagged_free()) {
1024       count++;
1025     }
1026     chunk = (Metachunk*) next;
1027   }
1028   return count;
1029 }
1030 #endif
1031 
1032 #ifdef ASSERT
1033 // Verify counters, all chunks in this list node and the occupancy map.
1034 void VirtualSpaceNode::verify() {
1035   uintx num_in_use_chunks = 0;
1036   Metachunk* chunk = first_chunk();
1037   Metachunk* invalid_chunk = (Metachunk*) top();
1038 
1039   // Iterate the chunks in this node and verify each chunk.
1040   while (chunk < invalid_chunk ) {
1041     DEBUG_ONLY(do_verify_chunk(chunk);)
1042     if (!chunk->is_tagged_free()) {
1043       num_in_use_chunks ++;
1044     }
1045     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1046     chunk = (Metachunk*) next;
1047   }
1048   assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
1049          ", counter: " UINTX_FORMAT ").", num_in_use_chunks, _container_count);
1050   // Also verify the occupancy map.
1051   occupancy_map()->verify(this->bottom(), this->top());
1052 }
1053 #endif // ASSERT
1054 
1055 #ifdef ASSERT
1056 // Verify that all free chunks in this node are ideally merged
1057 // (there should not be multiple small chunks where a large chunk could exist).
1058 void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() {
1059   Metachunk* chunk = first_chunk();
1060   Metachunk* invalid_chunk = (Metachunk*) top();
1061   // Shorthands.
1062   const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
1063   const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
1064   int num_free_chunks_since_last_med_boundary = -1;
1065   int num_free_chunks_since_last_small_boundary = -1;
1066   while (chunk < invalid_chunk ) {
1067     // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary.
1068     // Reset the counter when encountering a non-free chunk.
1069     if (chunk->get_chunk_type() != HumongousIndex) {
1070       if (chunk->is_tagged_free()) {
1071         // Count successive free, non-humongous chunks.
1072         if (is_aligned(chunk, size_small)) {
1073           assert(num_free_chunks_since_last_small_boundary <= 1,
1074                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_small, size_small);
1075           num_free_chunks_since_last_small_boundary = 0;
1076         } else if (num_free_chunks_since_last_small_boundary != -1) {
1077           num_free_chunks_since_last_small_boundary ++;
1078         }
1079         if (is_aligned(chunk, size_med)) {
1080           assert(num_free_chunks_since_last_med_boundary <= 1,
1081                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_med, size_med);
1082           num_free_chunks_since_last_med_boundary = 0;
1083         } else if (num_free_chunks_since_last_med_boundary != -1) {
1084           num_free_chunks_since_last_med_boundary ++;
1085         }
1086       } else {
1087         // Encountering a non-free chunk, reset counters.
1088         num_free_chunks_since_last_med_boundary = -1;
1089         num_free_chunks_since_last_small_boundary = -1;
1090       }
1091     } else {
1092       // One cannot merge areas with a humongous chunk in the middle. Reset counters.
1093       num_free_chunks_since_last_med_boundary = -1;
1094       num_free_chunks_since_last_small_boundary = -1;
1095     }
1096 
1097     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1098     chunk = (Metachunk*) next;
1099   }
1100 }
1101 #endif // ASSERT
1102 
1103 // List of VirtualSpaces for metadata allocation.
1104 class VirtualSpaceList : public CHeapObj<mtClass> {
1105   friend class VirtualSpaceNode;
1106 
1107   enum VirtualSpaceSizes {
1108     VirtualSpaceSize = 256 * K
1109   };
1110 
1111   // Head of the list
1112   VirtualSpaceNode* _virtual_space_list;
1113   // virtual space currently being used for allocations
1114   VirtualSpaceNode* _current_virtual_space;
1115 
1116   // Is this VirtualSpaceList used for the compressed class space
1117   bool _is_class;
1118 
1119   // Sum of reserved and committed memory in the virtual spaces
1120   size_t _reserved_words;
1121   size_t _committed_words;
1122 
1123   // Number of virtual spaces
1124   size_t _virtual_space_count;
1125 
1126   ~VirtualSpaceList();
1127 
1128   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
1129 
1130   void set_virtual_space_list(VirtualSpaceNode* v) {
1131     _virtual_space_list = v;
1132   }
1133   void set_current_virtual_space(VirtualSpaceNode* v) {
1134     _current_virtual_space = v;
1135   }
1136 
1137   void link_vs(VirtualSpaceNode* new_entry);
1138 
1139   // Get another virtual space and add it to the list.  This
1140   // is typically prompted by a failed attempt to allocate a chunk
1141   // and is typically followed by the allocation of a chunk.
1142   bool create_new_virtual_space(size_t vs_word_size);
1143 
1144   // Chunk up the unused committed space in the current
1145   // virtual space and add the chunks to the free list.
1146   void retire_current_virtual_space();
1147 
1148  public:
1149   VirtualSpaceList(size_t word_size);
1150   VirtualSpaceList(ReservedSpace rs);
1151 
1152   size_t free_bytes();
1153 
1154   Metachunk* get_new_chunk(size_t chunk_word_size,
1155                            size_t suggested_commit_granularity);
1156 
1157   bool expand_node_by(VirtualSpaceNode* node,
1158                       size_t min_words,
1159                       size_t preferred_words);
1160 
1161   bool expand_by(size_t min_words,
1162                  size_t preferred_words);
1163 
1164   VirtualSpaceNode* current_virtual_space() {
1165     return _current_virtual_space;
1166   }
1167 
1168   bool is_class() const { return _is_class; }
1169 
1170   bool initialization_succeeded() { return _virtual_space_list != NULL; }
1171 
1172   size_t reserved_words()  { return _reserved_words; }
1173   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
1174   size_t committed_words() { return _committed_words; }
1175   size_t committed_bytes() { return committed_words() * BytesPerWord; }
1176 
1177   void inc_reserved_words(size_t v);
1178   void dec_reserved_words(size_t v);
1179   void inc_committed_words(size_t v);
1180   void dec_committed_words(size_t v);
1181   void inc_virtual_space_count();
1182   void dec_virtual_space_count();
1183 
1184   bool contains(const void* ptr);
1185 
1186   // Unlink empty VirtualSpaceNodes and free them.
1187   void purge(ChunkManager* chunk_manager);
1188 
1189   void print_on(outputStream* st) const;
1190   void print_map(outputStream* st) const;
1191 
1192   class VirtualSpaceListIterator : public StackObj {
1193     VirtualSpaceNode* _virtual_spaces;
1194    public:
1195     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
1196       _virtual_spaces(virtual_spaces) {}
1197 
1198     bool repeat() {
1199       return _virtual_spaces != NULL;
1200     }
1201 
1202     VirtualSpaceNode* get_next() {
1203       VirtualSpaceNode* result = _virtual_spaces;
1204       if (_virtual_spaces != NULL) {
1205         _virtual_spaces = _virtual_spaces->next();
1206       }
1207       return result;
1208     }
1209   };
1210 };
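// Usage sketch for VirtualSpaceListIterator (illustrative, not part of the
// original code):
//
//   VirtualSpaceListIterator iter(virtual_space_list());
//   while (iter.repeat()) {
//     VirtualSpaceNode* vsn = iter.get_next();
//     // ... inspect or process vsn ...
//   }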
1211 
1212 class Metadebug : AllStatic {
1213   // Debugging support for Metaspaces
1214   static int _allocation_fail_alot_count;
1215 
1216  public:
1217 
1218   static void init_allocation_fail_alot_count();
1219 #ifdef ASSERT
1220   static bool test_metadata_failure();
1221 #endif
1222 };
1223 
1224 int Metadebug::_allocation_fail_alot_count = 0;
1225 
1226 //  SpaceManager - used by Metaspace to handle allocations
1227 class SpaceManager : public CHeapObj<mtClass> {
1228   friend class Metaspace;
1229   friend class Metadebug;
1230 
1231  private:
1232 
1233   // protects allocations
1234   Mutex* const _lock;
1235 
1236   // Type of metadata allocated.
1237   const Metaspace::MetadataType   _mdtype;
1238 
1239   // Type of metaspace
1240   const Metaspace::MetaspaceType  _space_type;
1241 
1242   // List of chunks in use by this SpaceManager.  Allocations
1243   // are done from the current chunk.  The list is used for deallocating
1244   // chunks when the SpaceManager is freed.
1245   Metachunk* _chunks_in_use[NumberOfInUseLists];
1246   Metachunk* _current_chunk;
1247 
1248   // Maximum number of small chunks to allocate to a SpaceManager
1249   static uint const _small_chunk_limit;
1250 
1251   // Maximum number of specialized chunks to allocate for anonymous
1252   // metadata space to a SpaceManager
1253   static uint const _anon_metadata_specialize_chunk_limit;
1254 
1255   // Sum of all space in allocated chunks
1256   size_t _allocated_blocks_words;
1257 
1258   // Sum of all allocated chunks
1259   size_t _allocated_chunks_words;
1260   size_t _allocated_chunks_count;
1261 
1262   // Free lists of blocks are per SpaceManager since they
1263   // are assumed to be in chunks in use by the SpaceManager
1264   // and all chunks in use by a SpaceManager are freed when
1265   // the class loader using the SpaceManager is collected.
1266   BlockFreelist* _block_freelists;
1267 
1268   // protects virtualspace and chunk expansions
1269   static const char*  _expand_lock_name;
1270   static const int    _expand_lock_rank;
1271   static Mutex* const _expand_lock;
1272 
1273  private:
1274   // Accessors
1275   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
1276   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
1277     _chunks_in_use[index] = v;
1278   }
1279 
1280   BlockFreelist* block_freelists() const { return _block_freelists; }
1281 
1282   Metaspace::MetadataType mdtype() { return _mdtype; }
1283 
1284   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
1285   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
1286 
1287   Metachunk* current_chunk() const { return _current_chunk; }
1288   void set_current_chunk(Metachunk* v) {
1289     _current_chunk = v;
1290   }
1291 
1292   Metachunk* find_current_chunk(size_t word_size);
1293 
1294   // Add chunk to the list of chunks in use
1295   void add_chunk(Metachunk* v, bool make_current);
1296   void retire_current_chunk();
1297 
1298   Mutex* lock() const { return _lock; }
1299 
1300  protected:
1301   void initialize();
1302 
1303  public:
1304   SpaceManager(Metaspace::MetadataType mdtype,
1305                Metaspace::MetaspaceType space_type,
1306                Mutex* lock);
1307   ~SpaceManager();
1308 
1309   enum ChunkMultiples {
1310     MediumChunkMultiple = 4
1311   };
1312 
1313   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
1314   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
1315   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
1316 
1317   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
1318 
1319   // Accessors
1320   bool is_class() const { return _mdtype == Metaspace::ClassType; }
1321 
1322   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
1323   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
1324   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
1325 
1326   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
1327 
1328   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
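  // Worked example (illustrative, not part of the original code): for the
  // non-class space, medium_chunk_bunch() == MediumChunk * MediumChunkMultiple
  // == 8 K words * 4 == 32 K words (256 KB with 8-byte MetaWords).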
1329 
1330   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
1331   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
1332   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
1333   size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
1334   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
1335 
1336   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
1337 
1338   static Mutex* expand_lock() { return _expand_lock; }
1339 
1340   // Increment the per Metaspace and global running sums for Metachunks
1341   // by the given size.  This is used when a Metachunk is added to
1342   // the in-use list.
1343   void inc_size_metrics(size_t words);
1344   // Increment the per Metaspace and global running sums for Metablocks by the given
1345   // size.  This is used when a Metablock is allocated.
1346   void inc_used_metrics(size_t words);
1347   // Delete the portion of the running sums for this SpaceManager. That is,
1348   // the global running sums for the Metachunks and Metablocks are
1349   // decremented for all the Metachunks in-use by this SpaceManager.
1350   void dec_total_from_size_metrics();
1351 
1352   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
1353   // or return the unadjusted size if the requested size is humongous.
1354   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
1355   size_t adjust_initial_chunk_size(size_t requested) const;
1356 
1357   // Get the initial chunk size for this metaspace type.
1358   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
1359 
1360   size_t sum_capacity_in_chunks_in_use() const;
1361   size_t sum_used_in_chunks_in_use() const;
1362   size_t sum_free_in_chunks_in_use() const;
1363   size_t sum_waste_in_chunks_in_use() const;
1364   size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;
1365 
1366   size_t sum_count_in_chunks_in_use();
1367   size_t sum_count_in_chunks_in_use(ChunkIndex i);
1368 
1369   Metachunk* get_new_chunk(size_t chunk_word_size);
1370 
1371   // Block allocation and deallocation.
1372   // Allocates a block from the current chunk
1373   MetaWord* allocate(size_t word_size);
1374 
1375   // Helper for allocations
1376   MetaWord* allocate_work(size_t word_size);
1377 
1378   // Returns a block to the per manager freelist
1379   void deallocate(MetaWord* p, size_t word_size);
1380 
1381   // Based on the allocation size and a minimum chunk size,
1382   // return the chunk size to use (for expanding space for chunk allocation).
1383   size_t calc_chunk_size(size_t allocation_word_size);
1384 
1385   // Called when an allocation from the current chunk fails.
1386   // Gets a new chunk (may require getting a new virtual space),
1387   // and allocates from that chunk.
1388   MetaWord* grow_and_allocate(size_t word_size);
1389 
1390   // Report memory usage to MemoryService.
1391   void track_metaspace_memory_usage();
1392 
1393   // debugging support.
1394 
1395   void dump(outputStream* const out) const;
1396   void print_on(outputStream* st) const;
1397   void locked_print_chunks_in_use_on(outputStream* st) const;
1398 
1399   void verify();
1400   void verify_chunk_size(Metachunk* chunk);
1401 #ifdef ASSERT
1402   void verify_allocated_blocks_words();
1403 #endif
1404 
1405   // This adjusts the given size to be at least the minimum allocation size in
1406   // words for data in metaspace.  Essentially, the minimum size is currently 3 words.
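       // For example, a one-word request is first raised to sizeof(Metablock) and then aligned up to
       // Metachunk::object_alignment(), which yields the 3-word minimum mentioned above.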
1407   size_t get_allocation_word_size(size_t word_size) {
1408     size_t byte_size = word_size * BytesPerWord;
1409 
1410     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
1411     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
1412 
1413     size_t raw_word_size = raw_bytes_size / BytesPerWord;
1414     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
1415 
1416     return raw_word_size;
1417   }
1418 };
1419 
1420 uint const SpaceManager::_small_chunk_limit = 4;
1421 uint const SpaceManager::_anon_metadata_specialize_chunk_limit = 4;
1422 
1423 const char* SpaceManager::_expand_lock_name =
1424   "SpaceManager chunk allocation lock";
1425 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
1426 Mutex* const SpaceManager::_expand_lock =
1427   new Mutex(SpaceManager::_expand_lock_rank,
1428             SpaceManager::_expand_lock_name,
1429             Mutex::_allow_vm_block_flag,
1430             Monitor::_safepoint_check_never);
1431 
1432 void VirtualSpaceNode::inc_container_count() {
1433   assert_lock_strong(SpaceManager::expand_lock());
1434   _container_count++;
1435 }
1436 
1437 void VirtualSpaceNode::dec_container_count() {
1438   assert_lock_strong(SpaceManager::expand_lock());
1439   _container_count--;
1440 }
1441 
1442 #ifdef ASSERT
1443 void VirtualSpaceNode::verify_container_count() {
1444   assert(_container_count == container_count_slow(),
1445          "Inconsistency in container_count _container_count " UINTX_FORMAT
1446          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
1447 }
1448 #endif
1449 
1450 // BlockFreelist methods
1451 
1452 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
1453 
1454 BlockFreelist::~BlockFreelist() {
1455   delete _dictionary;
1456   if (_small_blocks != NULL) {
1457     delete _small_blocks;
1458   }
1459 }
1460 
1461 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
1462   assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
1463 
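       // Construct a Metablock header in place over the returned storage so the freelists can track it.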
1464   Metablock* free_chunk = ::new (p) Metablock(word_size);
1465   if (word_size < SmallBlocks::small_block_max_size()) {
1466     small_blocks()->return_block(free_chunk, word_size);
1467   } else {
1468     dictionary()->return_chunk(free_chunk);
1469   }
1470   log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
1471             SIZE_FORMAT, p2i(free_chunk), word_size);
1472 }
1473 
1474 MetaWord* BlockFreelist::get_block(size_t word_size) {
1475   assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
1476 
1477   // Try small_blocks first.
1478   if (word_size < SmallBlocks::small_block_max_size()) {
1479     // Don't create the small blocks list until needed: small_blocks() lazily allocates it for
1480     // this space manager.
1481     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
1482     if (new_block != NULL) {
1483       log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1484               p2i(new_block), word_size);
1485       return new_block;
1486     }
1487   }
1488 
1489   if (word_size < BlockFreelist::min_dictionary_size()) {
1490     // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
1491     return NULL;
1492   }
1493 
1494   Metablock* free_block = dictionary()->get_chunk(word_size);
1495   if (free_block == NULL) {
1496     return NULL;
1497   }
1498 
1499   const size_t block_size = free_block->size();
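       // Avoid handing out a block that is far larger than requested; the excess would be wasted.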
1500   if (block_size > WasteMultiplier * word_size) {
1501     return_block((MetaWord*)free_block, block_size);
1502     return NULL;
1503   }
1504 
1505   MetaWord* new_block = (MetaWord*)free_block;
1506   assert(block_size >= word_size, "Incorrect size of block from freelist");
1507   const size_t unused = block_size - word_size;
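       // Split off the unused tail and return it to the freelist if it is large enough to be tracked.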
1508   if (unused >= SmallBlocks::small_block_min_size()) {
1509     return_block(new_block + word_size, unused);
1510   }
1511 
1512   log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1513             p2i(new_block), word_size);
1514   return new_block;
1515 }
1516 
1517 void BlockFreelist::print_on(outputStream* st) const {
1518   dictionary()->print_free_lists(st);
1519   if (_small_blocks != NULL) {
1520     _small_blocks->print_on(st);
1521   }
1522 }
1523 
1524 // VirtualSpaceNode methods
1525 
1526 VirtualSpaceNode::~VirtualSpaceNode() {
1527   _rs.release();
1528   if (_occupancy_map != NULL) {
1529     delete _occupancy_map;
1530   }
1531 #ifdef ASSERT
1532   size_t word_size = sizeof(*this) / BytesPerWord;
1533   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
1534 #endif
1535 }
1536 
1537 size_t VirtualSpaceNode::used_words_in_vs() const {
1538   return pointer_delta(top(), bottom(), sizeof(MetaWord));
1539 }
1540 
1541 // Space committed in the VirtualSpace
1542 size_t VirtualSpaceNode::capacity_words_in_vs() const {
1543   return pointer_delta(end(), bottom(), sizeof(MetaWord));
1544 }
1545 
1546 size_t VirtualSpaceNode::free_words_in_vs() const {
1547   return pointer_delta(end(), top(), sizeof(MetaWord));
1548 }
1549 
1550 // Given an address larger than top(), allocate padding chunks until top is at the given address.
1551 void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) {
1552 
1553   assert(target_top > top(), "Sanity");
1554 
1555   // Padding chunks are added to the freelist.
1556   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1557 
1558   // shorthands
1559   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1560   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1561   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1562 
1563   while (top() < target_top) {
1564 
1565     // We could make this code more generic, but right now we only deal with two possible chunk sizes
1566     // for padding chunks, so it is not worth it.
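         // Prefer a small padding chunk; fall back to a specialized chunk if top() is not yet aligned
         // to the small chunk size.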
1567     size_t padding_chunk_word_size = small_word_size;
1568     if (!is_aligned(top(), small_word_size * sizeof(MetaWord))) {
1569       assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
1570       padding_chunk_word_size = spec_word_size;
1571     }
1572     MetaWord* here = top();
1573     assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
1574     inc_top(padding_chunk_word_size);
1575 
1576     // Create new padding chunk.
1577     ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
1578     assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
1579 
1580     Metachunk* const padding_chunk =
1581       ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
1582     assert(padding_chunk == (Metachunk*)here, "Sanity");
1583     DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
1584     log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
1585                                        PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
1586                                        (is_class() ? "class space " : "metaspace"),
1587                                        p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
1588 
1589     // Mark chunk start in occupancy map.
1590     occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
1591 
1592     // Chunks are born as in-use (see Metachunk ctor). So, before returning
1593     // the padding chunk to its chunk manager, mark it as in use (ChunkManager
1594     // will assert that).
1595     do_update_in_use_info_for_chunk(padding_chunk, true);
1596 
1597     // Return Chunk to freelist.
1598     inc_container_count();
1599     chunk_manager->return_single_chunk(padding_chunk_type, padding_chunk);
1600     // Please note: ChunkManager::return_single_chunk() may already have
1601     // merged the padding chunk with neighboring chunks, so the padding
1602     // chunk may have vanished. Do not reference the padding
1603     // chunk beyond this point.
1604   }
1605 
1606   assert(top() == target_top, "Sanity");
1607 
1608 } // allocate_padding_chunks_until_top_is_at()
1609 
1610 // Allocates the chunk from the virtual space only.
1611 // This interface is also used internally for debugging.  Not all
1612 // chunks removed here are necessarily used for allocation.
1613 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1614   // Non-humongous chunks are to be allocated aligned to their chunk
1615   // size. So, start addresses of medium chunks are aligned to medium
1616   // chunk size, those of small chunks to small chunk size and so
1617   // forth. This facilitates merging of free chunks and reduces
1618   // fragmentation. Chunk sizes are spec < small < medium, with each
1619   // larger chunk size being a multiple of the next smaller chunk
1620   // size.
1621   // Because of this alignment, we may need to create a number of padding
1622   // chunks. These chunks are created and added to the freelist.
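       // For example, if a medium chunk is requested but top() is only aligned to a smaller chunk
       // size, the gap up to the next medium-chunk boundary is first filled with padding chunks.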
1623 
1624   // The chunk manager to which we will give our padding chunks.
1625   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1626 
1627   // shorthands
1628   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1629   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1630   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1631 
1632   assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
1633          chunk_word_size >= med_word_size, "Invalid chunk size requested.");
1634 
1635   // Chunk alignment (in bytes) == chunk size unless humongous.
1636   // Humongous chunks are aligned to the smallest chunk size (spec).
1637   const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
1638                                            spec_word_size : chunk_word_size) * sizeof(MetaWord);
1639 
1640   // Do we have enough space to create the requested chunk plus
1641   // any padding chunks needed?
1642   MetaWord* const next_aligned =
1643     static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
1644   if (!is_available((next_aligned - top()) + chunk_word_size)) {
1645     return NULL;
1646   }
1647 
1648   // Before allocating the requested chunk, allocate padding chunks if necessary.
1649   // We only need to do this for small or medium chunks: specialized chunks are the
1650   // smallest size, hence always aligned. Humongous chunks are allocated unaligned
1651   // (implicitly, also aligned to smallest chunk size).
1652   if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top())  {
1653     log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
1654         (is_class() ? "class space " : "metaspace"),
1655         top(), next_aligned);
1656     allocate_padding_chunks_until_top_is_at(next_aligned);
1657     // Now, top should be aligned correctly.
1658     assert_is_aligned(top(), required_chunk_alignment);
1659   }
1660 
1661   // Now, top should be aligned correctly.
1662   assert_is_aligned(top(), required_chunk_alignment);
1663 
1664   // Bottom of the new chunk
1665   MetaWord* chunk_limit = top();
1666   assert(chunk_limit != NULL, "Not safe to call this method");
1667 
1668   // The virtual spaces are always expanded by the
1669   // commit granularity to enforce the following condition.
1670   // Without this the is_available check will not work correctly.
1671   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1672       "The committed memory doesn't match the expanded memory.");
1673 
1674   if (!is_available(chunk_word_size)) {
1675     LogTarget(Debug, gc, metaspace, freelist) lt;
1676     if (lt.is_enabled()) {
1677       LogStream ls(lt);
1678       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1679       // Dump some information about the virtual space that is nearly full
1680       print_on(&ls);
1681     }
1682     return NULL;
1683   }
1684 
1685   // Take the space (bump top on the current virtual space).
1686   inc_top(chunk_word_size);
1687 
1688   // Initialize the chunk
1689   ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
1690   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
1691   assert(result == (Metachunk*)chunk_limit, "Sanity");
1692   occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
1693   do_update_in_use_info_for_chunk(result, true);
1694 
1695   inc_container_count();
1696 
1697   DEBUG_ONLY(chunk_manager->locked_verify());
1698   DEBUG_ONLY(this->verify());
1699   DEBUG_ONLY(do_verify_chunk(result));
1700 
1701   result->inc_use_count();
1702 
1703   return result;
1704 }
1705 
1706 
1707 // Expand the virtual space (commit more of the reserved space)
1708 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1709   size_t min_bytes = min_words * BytesPerWord;
1710   size_t preferred_bytes = preferred_words * BytesPerWord;
1711 
1712   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1713 
1714   if (uncommitted < min_bytes) {
1715     return false;
1716   }
1717 
1718   size_t commit = MIN2(preferred_bytes, uncommitted);
1719   bool result = virtual_space()->expand_by(commit, false);
1720 
1721   if (result) {
1722     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
1723               (is_class() ? "class" : "non-class"), commit);
1724   } else {
1725     log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
1726               (is_class() ? "class" : "non-class"), commit);
1727   }
1728 
1729   assert(result, "Failed to commit memory");
1730 
1731   return result;
1732 }
1733 
1734 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1735   assert_lock_strong(SpaceManager::expand_lock());
1736   Metachunk* result = take_from_committed(chunk_word_size);
1737   return result;
1738 }
1739 
1740 bool VirtualSpaceNode::initialize() {
1741 
1742   if (!_rs.is_reserved()) {
1743     return false;
1744   }
1745 
1746   // These are necessary restrictions to make sure that the virtual space always
1747   // grows in steps of Metaspace::commit_alignment(). If both base and size are
1748   // aligned only the middle alignment of the VirtualSpace is used.
1749   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
1750   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
1751 
1752   // ReservedSpaces marked as special will have the entire memory
1753   // pre-committed. Setting a committed size will make sure that
1754   // committed_size and actual_committed_size agree.
1755   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1756 
1757   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1758                                             Metaspace::commit_alignment());
1759   if (result) {
1760     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1761         "Checking that the pre-committed memory was registered by the VirtualSpace");
1762 
1763     set_top((MetaWord*)virtual_space()->low());
1764     set_reserved(MemRegion((HeapWord*)_rs.base(),
1765                  (HeapWord*)(_rs.base() + _rs.size())));
1766 
1767     assert(reserved()->start() == (HeapWord*) _rs.base(),
1768            "Reserved start was not set properly " PTR_FORMAT
1769            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1770     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1771            "Reserved size was not set properly " SIZE_FORMAT
1772            " != " SIZE_FORMAT, reserved()->word_size(),
1773            _rs.size() / BytesPerWord);
1774   }
1775 
1776   // Initialize Occupancy Map.
1777   const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
1778   _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
1779 
1780   return result;
1781 }
1782 
1783 void VirtualSpaceNode::print_on(outputStream* st) const {
1784   size_t used = used_words_in_vs();
1785   size_t capacity = capacity_words_in_vs();
1786   VirtualSpace* vs = virtual_space();
1787   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
1788            "[" PTR_FORMAT ", " PTR_FORMAT ", "
1789            PTR_FORMAT ", " PTR_FORMAT ")",
1790            p2i(vs), capacity / K,
1791            capacity == 0 ? 0 : used * 100 / capacity,
1792            p2i(bottom()), p2i(top()), p2i(end()),
1793            p2i(vs->high_boundary()));
1794 }
1795 
1796 #ifdef ASSERT
1797 void VirtualSpaceNode::mangle() {
1798   size_t word_size = capacity_words_in_vs();
1799   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1800 }
1801 #endif // ASSERT
1802 
1803 // VirtualSpaceList methods
1804 // Space allocated from the VirtualSpace
1805 
1806 VirtualSpaceList::~VirtualSpaceList() {
1807   VirtualSpaceListIterator iter(virtual_space_list());
1808   while (iter.repeat()) {
1809     VirtualSpaceNode* vsl = iter.get_next();
1810     delete vsl;
1811   }
1812 }
1813 
1814 void VirtualSpaceList::inc_reserved_words(size_t v) {
1815   assert_lock_strong(SpaceManager::expand_lock());
1816   _reserved_words = _reserved_words + v;
1817 }
1818 void VirtualSpaceList::dec_reserved_words(size_t v) {
1819   assert_lock_strong(SpaceManager::expand_lock());
1820   _reserved_words = _reserved_words - v;
1821 }
1822 
1823 #define assert_committed_below_limit()                        \
1824   assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
1825          "Too much committed memory. Committed: " SIZE_FORMAT \
1826          " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1827          MetaspaceAux::committed_bytes(), MaxMetaspaceSize);
1828 
1829 void VirtualSpaceList::inc_committed_words(size_t v) {
1830   assert_lock_strong(SpaceManager::expand_lock());
1831   _committed_words = _committed_words + v;
1832 
1833   assert_committed_below_limit();
1834 }
1835 void VirtualSpaceList::dec_committed_words(size_t v) {
1836   assert_lock_strong(SpaceManager::expand_lock());
1837   _committed_words = _committed_words - v;
1838 
1839   assert_committed_below_limit();
1840 }
1841 
1842 void VirtualSpaceList::inc_virtual_space_count() {
1843   assert_lock_strong(SpaceManager::expand_lock());
1844   _virtual_space_count++;
1845 }
1846 void VirtualSpaceList::dec_virtual_space_count() {
1847   assert_lock_strong(SpaceManager::expand_lock());
1848   _virtual_space_count--;
1849 }
1850 
1851 void ChunkManager::remove_chunk(Metachunk* chunk) {
1852   size_t word_size = chunk->word_size();
1853   ChunkIndex index = list_index(word_size);
1854   if (index != HumongousIndex) {
1855     free_chunks(index)->remove_chunk(chunk);
1856   } else {
1857     humongous_dictionary()->remove_chunk(chunk);
1858   }
1859 
1860   // Chunk has been removed from the chunks free list, update counters.
1861   account_for_removed_chunk(chunk);
1862 }
1863 
1864 bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
1865   assert_lock_strong(SpaceManager::expand_lock());
1866   assert(chunk != NULL, "invalid chunk pointer");
1867   // Check for valid merge combinations.
1868   assert((chunk->get_chunk_type() == SpecializedIndex &&
1869           (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) ||
1870          (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex),
1871         "Invalid chunk merge combination.");
1872 
1873   const size_t target_chunk_word_size =
1874     get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class());
1875 
1876   // [ prospective merge region )
1877   MetaWord* const p_merge_region_start =
1878     (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord));
1879   MetaWord* const p_merge_region_end =
1880     p_merge_region_start + target_chunk_word_size;
1881 
1882   // We need the VirtualSpaceNode containing this chunk and its occupancy map.
1883   VirtualSpaceNode* const vsn = chunk->container();
1884   OccupancyMap* const ocmap = vsn->occupancy_map();
1885 
1886   // The prospective chunk merge range must be completely contained by the
1887   // committed range of the virtual space node.
1888   if (p_merge_region_start < vsn->bottom() || p_merge_region_end > vsn->top()) {
1889     return false;
1890   }
1891 
1892   // Only attempt to merge this range if at its start a chunk starts and at its end
1893   // a chunk ends. If a chunk (can only be humongous) straddles either start or end
1894   // of that range, we cannot merge.
1895   if (!ocmap->chunk_starts_at_address(p_merge_region_start)) {
1896     return false;
1897   }
1898   if (p_merge_region_end < vsn->top() &&
1899       !ocmap->chunk_starts_at_address(p_merge_region_end)) {
1900     return false;
1901   }
1902 
1903   // Now check if the prospective merge area contains live chunks. If it does we cannot merge.
1904   if (ocmap->is_region_in_use(p_merge_region_start, target_chunk_word_size)) {
1905     return false;
1906   }
1907 
1908   // Success! Remove all chunks in this region...
1909   log_trace(gc, metaspace, freelist)("%s: coalescing chunks in area [%p-%p)...",
1910     (is_class() ? "class space" : "metaspace"),
1911     p_merge_region_start, p_merge_region_end);
1912 
1913   const int num_chunks_removed =
1914     remove_chunks_in_area(p_merge_region_start, target_chunk_word_size);
1915 
1916   // ... and create a single new bigger chunk.
1917   Metachunk* const p_new_chunk =
1918       ::new (p_merge_region_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn);
1919   assert(p_new_chunk == (Metachunk*)p_merge_region_start, "Sanity");
1920   p_new_chunk->set_origin(origin_merge);
1921 
1922   log_trace(gc, metaspace, freelist)("%s: created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".",
1923     (is_class() ? "class space" : "metaspace"),
1924     p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord));
1925 
1926   // Fix occupancy map: remove old start bits of the small chunks and set new start bit.
1927   ocmap->wipe_chunk_start_bits_in_region(p_merge_region_start, target_chunk_word_size);
1928   ocmap->set_chunk_starts_at_address(p_merge_region_start, true);
1929 
1930   // Mark chunk as free. Note: it is not necessary to update the in-use
1931   // information in the occupancy map, because the old chunks were also free,
1932   // so nothing should have changed.
1933   p_new_chunk->set_is_tagged_free(true);
1934 
1935   // Add new chunk to its freelist.
1936   ChunkList* const list = free_chunks(target_chunk_type);
1937   list->return_chunk_at_head(p_new_chunk);
1938 
1939   // And adjust ChunkManager::_free_chunks_count (_free_chunks_total
1940   // should not have changed, because the size of the space should be the same).
1941   _free_chunks_count -= num_chunks_removed;
1942   _free_chunks_count++;
1943 
1944   // VirtualSpaceNode::container_count does not have to be modified:
1945   // it means "number of active (non-free) chunks", so merging free chunks
1946   // should not affect that count.
1947 
1948   // At the end of a chunk merge, run verification tests.
1949   DEBUG_ONLY(this->locked_verify());
1950   DEBUG_ONLY(vsn->verify());
1951 
1952   return true;
1953 }
1954 
1955 // Remove all chunks in the given area - the chunks are supposed to be free -
1956 // from their corresponding freelists. Mark them as invalid.
1957 // - This does not correct the occupancy map.
1958 // - This does not adjust the counters in ChunkManager.
1959 // - Does not adjust the container count in the containing VirtualSpaceNode.
1960 // Returns number of chunks removed.
1961 int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
1962   assert(p != NULL && word_size > 0, "Invalid range.");
1963   const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
1964   assert_is_aligned(word_size, smallest_chunk_size);
1965 
1966   Metachunk* const start = (Metachunk*) p;
1967   const Metachunk* const end = (Metachunk*)(p + word_size);
1968   Metachunk* cur = start;
1969   int num_removed = 0;
1970   while (cur < end) {
1971     Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
1972     DEBUG_ONLY(do_verify_chunk(cur));
1973     assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
1974     assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
1975     log_trace(gc, metaspace, freelist)("%s: removing chunk %p, size " SIZE_FORMAT_HEX ".",
1976       (is_class() ? "class space" : "metaspace"),
1977       cur, cur->word_size() * sizeof(MetaWord));
1978     cur->remove_sentinel();
1979     // Note: cannot call ChunkManager::remove_chunk, because that
1980     // modifies the counters in ChunkManager, which we do not want. So
1981     // we call remove_chunk on the freelist directly (see also the
1982     // splitting function which does the same).
1983     ChunkList* const list = free_chunks(list_index(cur->word_size()));
1984     list->remove_chunk(cur);
1985     num_removed++;
1986     cur = next;
1987   }
1988   return num_removed;
1989 }
1990 
1991 // Walk the list of VirtualSpaceNodes and delete
1992 // nodes with a 0 container_count.  Remove Metachunks in
1993 // the node from their respective freelists.
1994 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1995   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
1996   assert_lock_strong(SpaceManager::expand_lock());
1997   // Don't use a VirtualSpaceListIterator because this
1998   // list is being changed and a straightforward use of an iterator is not safe.
1999   VirtualSpaceNode* purged_vsl = NULL;
2000   VirtualSpaceNode* prev_vsl = virtual_space_list();
2001   VirtualSpaceNode* next_vsl = prev_vsl;
2002   while (next_vsl != NULL) {
2003     VirtualSpaceNode* vsl = next_vsl;
2004     DEBUG_ONLY(vsl->verify_container_count();)
2005     next_vsl = vsl->next();
2006     // Don't free the current virtual space since it will likely
2007     // be needed soon.
2008     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
2009       log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
2010                                          ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
2011       // Unlink it from the list
2012       if (prev_vsl == vsl) {
2013         // This is the case of the current node being the first node.
2014         assert(vsl == virtual_space_list(), "Expected to be the first node");
2015         set_virtual_space_list(vsl->next());
2016       } else {
2017         prev_vsl->set_next(vsl->next());
2018       }
2019 
2020       vsl->purge(chunk_manager);
2021       dec_reserved_words(vsl->reserved_words());
2022       dec_committed_words(vsl->committed_words());
2023       dec_virtual_space_count();
2024       purged_vsl = vsl;
2025       delete vsl;
2026     } else {
2027       prev_vsl = vsl;
2028     }
2029   }
2030 #ifdef ASSERT
2031   if (purged_vsl != NULL) {
2032     // List should be stable enough to use an iterator here.
2033     VirtualSpaceListIterator iter(virtual_space_list());
2034     while (iter.repeat()) {
2035       VirtualSpaceNode* vsl = iter.get_next();
2036       assert(vsl != purged_vsl, "Purge of vsl failed");
2037     }
2038   }
2039 #endif
2040 }
2041 
2042 
2043 // This function looks at the mmap regions in the metaspace without locking.
2044 // The chunks are added with store ordering and not deleted except at
2045 // unloading time during a safepoint.
2046 bool VirtualSpaceList::contains(const void* ptr) {
2047   // List should be stable enough to use an iterator here because removing virtual
2048   // space nodes is only allowed at a safepoint.
2049   VirtualSpaceListIterator iter(virtual_space_list());
2050   while (iter.repeat()) {
2051     VirtualSpaceNode* vsn = iter.get_next();
2052     if (vsn->contains(ptr)) {
2053       return true;
2054     }
2055   }
2056   return false;
2057 }
2058 
2059 void VirtualSpaceList::retire_current_virtual_space() {
2060   assert_lock_strong(SpaceManager::expand_lock());
2061 
2062   VirtualSpaceNode* vsn = current_virtual_space();
2063 
2064   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
2065                                   Metaspace::chunk_manager_metadata();
2066 
2067   vsn->retire(cm);
2068 }
2069 
2070 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
2071   DEBUG_ONLY(verify_container_count();)
2072   assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
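       // Carve the remaining committed space into the largest chunks that still fit, from medium
       // down to specialized, and hand them back to the chunk manager.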
2073   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
2074     ChunkIndex index = (ChunkIndex)i;
2075     size_t chunk_size = chunk_manager->size_by_index(index);
2076 
2077     while (free_words_in_vs() >= chunk_size) {
2078       Metachunk* chunk = get_chunk_vs(chunk_size);
2079       // The chunk will be allocated aligned, so the allocation may require
2080       // additional padding chunks. That may cause the above allocation to
2081       // fail. Just ignore the failed allocation and continue with the
2082       // next smaller chunk size. As the committed size of the VirtualSpaceNode
2083       // should be a multiple of the smallest chunk size, we
2084       // should always be able to fill the VirtualSpace completely.
2085       if (chunk == NULL) {
2086         break;
2087       }
2088       chunk_manager->return_single_chunk(index, chunk);
2089     }
2090     DEBUG_ONLY(verify_container_count();)
2091   }
2092   assert(free_words_in_vs() == 0, "should be empty now");
2093 }
2094 
2095 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
2096                                    _is_class(false),
2097                                    _virtual_space_list(NULL),
2098                                    _current_virtual_space(NULL),
2099                                    _reserved_words(0),
2100                                    _committed_words(0),
2101                                    _virtual_space_count(0) {
2102   MutexLockerEx cl(SpaceManager::expand_lock(),
2103                    Mutex::_no_safepoint_check_flag);
2104   create_new_virtual_space(word_size);
2105 }
2106 
2107 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
2108                                    _is_class(true),
2109                                    _virtual_space_list(NULL),
2110                                    _current_virtual_space(NULL),
2111                                    _reserved_words(0),
2112                                    _committed_words(0),
2113                                    _virtual_space_count(0) {
2114   MutexLockerEx cl(SpaceManager::expand_lock(),
2115                    Mutex::_no_safepoint_check_flag);
2116   VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
2117   bool succeeded = class_entry->initialize();
2118   if (succeeded) {
2119     link_vs(class_entry);
2120   }
2121 }
2122 
2123 size_t VirtualSpaceList::free_bytes() {
2124   return current_virtual_space()->free_words_in_vs() * BytesPerWord;
2125 }
2126 
2127 // Allocate another meta virtual space and add it to the list.
2128 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
2129   assert_lock_strong(SpaceManager::expand_lock());
2130 
2131   if (is_class()) {
2132     assert(false, "We currently don't support more than one VirtualSpace for"
2133                   " the compressed class space. The initialization of the"
2134                   " CCS uses another code path and should not hit this path.");
2135     return false;
2136   }
2137 
2138   if (vs_word_size == 0) {
2139     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
2140     return false;
2141   }
2142 
2143   // Reserve the space
2144   size_t vs_byte_size = vs_word_size * BytesPerWord;
2145   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
2146 
2147   // Allocate the meta virtual space and initialize it.
2148   VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
2149   if (!new_entry->initialize()) {
2150     delete new_entry;
2151     return false;
2152   } else {
2153     assert(new_entry->reserved_words() == vs_word_size,
2154         "Reserved memory size differs from requested memory size");
2155     // ensure lock-free iteration sees fully initialized node
2156     OrderAccess::storestore();
2157     link_vs(new_entry);
2158     return true;
2159   }
2160 }
2161 
2162 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
2163   if (virtual_space_list() == NULL) {
2164     set_virtual_space_list(new_entry);
2165   } else {
2166     current_virtual_space()->set_next(new_entry);
2167   }
2168   set_current_virtual_space(new_entry);
2169   inc_reserved_words(new_entry->reserved_words());
2170   inc_committed_words(new_entry->committed_words());
2171   inc_virtual_space_count();
2172 #ifdef ASSERT
2173   new_entry->mangle();
2174 #endif
2175   LogTarget(Trace, gc, metaspace) lt;
2176   if (lt.is_enabled()) {
2177     LogStream ls(lt);
2178     VirtualSpaceNode* vsl = current_virtual_space();
2179     ResourceMark rm;
2180     vsl->print_on(&ls);
2181   }
2182 }
2183 
2184 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
2185                                       size_t min_words,
2186                                       size_t preferred_words) {
2187   size_t before = node->committed_words();
2188 
2189   bool result = node->expand_by(min_words, preferred_words);
2190 
2191   size_t after = node->committed_words();
2192 
2193   // after and before can be the same if the memory was pre-committed.
2194   assert(after >= before, "Inconsistency");
2195   inc_committed_words(after - before);
2196 
2197   return result;
2198 }
2199 
2200 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
2201   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
2202   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
2203   assert(min_words <= preferred_words, "Invalid arguments");
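       // Strategy: first try to commit more memory in the current virtual space node; if that is
       // not possible, retire the current node and reserve a new one.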
2204 
2205   const char* const class_or_not = (is_class() ? "class" : "non-class");
2206 
2207   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
2208     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
2209               class_or_not);
2210     return false;
2211   }
2212 
2213   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
2214   if (allowed_expansion_words < min_words) {
2215     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
2216               class_or_not);
2217     return false;
2218   }
2219 
2220   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
2221 
2222   // Commit more memory from the current virtual space.
2223   bool vs_expanded = expand_node_by(current_virtual_space(),
2224                                     min_words,
2225                                     max_expansion_words);
2226   if (vs_expanded) {
2227     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
2228               class_or_not);
2229     return true;
2230   }
2231   log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
2232             class_or_not);
2233   retire_current_virtual_space();
2234 
2235   // Get another virtual space.
2236   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
2237   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
2238 
2239   if (create_new_virtual_space(grow_vs_words)) {
2240     if (current_virtual_space()->is_pre_committed()) {
2241       // The memory was pre-committed, so we are done here.
2242       assert(min_words <= current_virtual_space()->committed_words(),
2243           "The new VirtualSpace was pre-committed, so it "
2244           "should be large enough to fit the alloc request.");
2245       return true;
2246     }
2247 
2248     return expand_node_by(current_virtual_space(),
2249                           min_words,
2250                           max_expansion_words);
2251   }
2252 
2253   return false;
2254 }
2255 
2256 // Given a chunk, calculate the largest possible padding space which
2257 // could be required when allocating it.
2258 static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
2259   const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
2260   if (chunk_type != HumongousIndex) {
2261     // Normal, non-humongous chunks are allocated at chunk size
2262     // boundaries, so the largest padding space required would be that
2263     // minus the smallest chunk size.
2264     const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
2265     return chunk_word_size - smallest_chunk_size;
2266   } else {
2267     // Humongous chunks are allocated at smallest-chunksize
2268     // boundaries, so there is no padding required.
2269     return 0;
2270   }
2271 }
2272 
2273 
2274 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
2275 
2276   // Allocate a chunk out of the current virtual space.
2277   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2278 
2279   if (next != NULL) {
2280     return next;
2281   }
2282 
2283   // The expand amount is currently only determined by the requested sizes
2284   // and not how much committed memory is left in the current virtual space.
2285 
2286   // We must have enough space for the requested size and any
2287   // additional required padding chunks.
2288   const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
2289 
2290   size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
2291   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
2292   if (min_word_size >= preferred_word_size) {
2293     // Can happen when humongous chunks are allocated.
2294     preferred_word_size = min_word_size;
2295   }
2296 
2297   bool expanded = expand_by(min_word_size, preferred_word_size);
2298   if (expanded) {
2299     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2300     assert(next != NULL, "The allocation was expected to succeed after the expansion");
2301   }
2302 
2303   return next;
2304 }
2305 
2306 void VirtualSpaceList::print_on(outputStream* st) const {
2307   VirtualSpaceListIterator iter(virtual_space_list());
2308   while (iter.repeat()) {
2309     VirtualSpaceNode* node = iter.get_next();
2310     node->print_on(st);
2311   }
2312 }
2313 
2314 void VirtualSpaceList::print_map(outputStream* st) const {
2315   VirtualSpaceNode* list = virtual_space_list();
2316   VirtualSpaceListIterator iter(list);
2317   unsigned i = 0;
2318   while (iter.repeat()) {
2319     st->print_cr("Node %u:", i);
2320     VirtualSpaceNode* node = iter.get_next();
2321     node->print_map(st, this->is_class());
2322     i++;
2323   }
2324 }
2325 
2326 // MetaspaceGC methods
2327 
2328 // VM_CollectForMetadataAllocation is the vm operation used to GC.
2329 // Within the VM operation after the GC the attempt to allocate the metadata
2330 // should succeed.  If the GC did not free enough space for the metaspace
2331 // allocation, the HWM is increased so that another virtualspace will be
2332 // allocated for the metadata.  With perm gen the increase in the perm
2333 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
2334 // metaspace policy uses those as the small and large steps for the HWM.
2335 //
2336 // After the GC the compute_new_size() for MetaspaceGC is called to
2337 // resize the capacity of the metaspaces.  The current implementation
2338 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
2339 // to resize the Java heap by some GCs.  New flags can be implemented
2340 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
2341 // free space is desirable in the metaspace capacity to decide how much
2342 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
2343 // free space is desirable in the metaspace capacity before decreasing
2344 // the HWM.
2345 
2346 // Calculate the amount to increase the high water mark (HWM).
2347 // Increase by a minimum amount (MinMetaspaceExpansion) so that
2348 // another expansion is not requested too soon.  If that is not
2349 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
2350 // If that is still not enough, expand by the size of the allocation
2351 // plus some.
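     // For example, a tiny allocation grows the HWM by MinMetaspaceExpansion, a mid-sized one by
     // MaxMetaspaceExpansion, and a very large one by its own (aligned) size plus MinMetaspaceExpansion.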
2352 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
2353   size_t min_delta = MinMetaspaceExpansion;
2354   size_t max_delta = MaxMetaspaceExpansion;
2355   size_t delta = align_up(bytes, Metaspace::commit_alignment());
2356 
2357   if (delta <= min_delta) {
2358     delta = min_delta;
2359   } else if (delta <= max_delta) {
2360     // Don't want to hit the high water mark on the next
2361     // allocation so make the delta greater than just enough
2362     // for this allocation.
2363     delta = max_delta;
2364   } else {
2365     // This allocation is large but the next ones are probably not
2366     // so increase by the minimum.
2367     delta = delta + min_delta;
2368   }
2369 
2370   assert_is_aligned(delta, Metaspace::commit_alignment());
2371 
2372   return delta;
2373 }
2374 
2375 size_t MetaspaceGC::capacity_until_GC() {
2376   size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
2377   assert(value >= MetaspaceSize, "Not initialized properly?");
2378   return value;
2379 }
2380 
2381 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
2382   assert_is_aligned(v, Metaspace::commit_alignment());
2383 
2384   intptr_t capacity_until_GC = _capacity_until_GC;
2385   intptr_t new_value = capacity_until_GC + v;
2386 
2387   if (new_value < capacity_until_GC) {
2388     // The addition wrapped around, set new_value to aligned max value.
2389     new_value = align_down(max_uintx, Metaspace::commit_alignment());
2390   }
2391 
2392   intptr_t expected = _capacity_until_GC;
2393   intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);
2394 
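       // The compare-and-swap lost a race with another thread; report failure to the caller.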
2395   if (expected != actual) {
2396     return false;
2397   }
2398 
2399   if (new_cap_until_GC != NULL) {
2400     *new_cap_until_GC = new_value;
2401   }
2402   if (old_cap_until_GC != NULL) {
2403     *old_cap_until_GC = capacity_until_GC;
2404   }
2405   return true;
2406 }
2407 
2408 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
2409   assert_is_aligned(v, Metaspace::commit_alignment());
2410 
2411   return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
2412 }
2413 
2414 void MetaspaceGC::initialize() {
2415   // Set the high-water mark to MaxMetaspaceSize during VM initialization since
2416   // we can't do a GC during initialization.
2417   _capacity_until_GC = MaxMetaspaceSize;
2418 }
2419 
2420 void MetaspaceGC::post_initialize() {
2421   // Reset the high-water mark once the VM initialization is done.
2422   _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
2423 }
2424 
2425 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
2426   // Check if the compressed class space is full.
2427   if (is_class && Metaspace::using_class_space()) {
2428     size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
2429     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
2430       log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
2431                 (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
2432       return false;
2433     }
2434   }
2435 
2436   // Check if the user has imposed a limit on the metaspace memory.
2437   size_t committed_bytes = MetaspaceAux::committed_bytes();
2438   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
2439     log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
2440               (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
2441     return false;
2442   }
2443 
2444   return true;
2445 }
2446 
2447 size_t MetaspaceGC::allowed_expansion() {
2448   size_t committed_bytes = MetaspaceAux::committed_bytes();
2449   size_t capacity_until_gc = capacity_until_GC();
2450 
2451   assert(capacity_until_gc >= committed_bytes,
2452          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
2453          capacity_until_gc, committed_bytes);
2454 
2455   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
2456   size_t left_until_GC = capacity_until_gc - committed_bytes;
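       // Expansion is limited both by the current GC threshold and by MaxMetaspaceSize.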
2457   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
2458   log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
2459             " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ").",
2460             left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);
2461 
2462   return left_to_commit / BytesPerWord;
2463 }
2464 
2465 void MetaspaceGC::compute_new_size() {
2466   assert(_shrink_factor <= 100, "invalid shrink factor");
2467   uint current_shrink_factor = _shrink_factor;
2468   _shrink_factor = 0;
2469 
2470   // Using committed_bytes() for used_after_gc is an overestimation, since the
2471   // chunk free lists are included in committed_bytes() and the memory in an
2472   // un-fragmented chunk free list is available for future allocations.
2473   // However, if the chunk free lists become fragmented, then the memory may
2474   // not be available for future allocations and the memory is therefore "in use".
2475   // Including the chunk free lists in the definition of "in use" is therefore
2476   // necessary. Not including the chunk free lists can cause capacity_until_GC to
2477   // shrink below committed_bytes() and this has caused serious bugs in the past.
2478   const size_t used_after_gc = MetaspaceAux::committed_bytes();
2479   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
2480 
2481   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
2482   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
2483 
2484   const double min_tmp = used_after_gc / maximum_used_percentage;
2485   size_t minimum_desired_capacity =
2486     (size_t)MIN2(min_tmp, double(max_uintx));
2487   // Don't shrink less than the initial generation size
2488   minimum_desired_capacity = MAX2(minimum_desired_capacity,
2489                                   MetaspaceSize);
2490 
2491   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
2492   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
2493                            minimum_free_percentage, maximum_used_percentage);
2494   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
2495 
2496 
2497   size_t shrink_bytes = 0;
2498   if (capacity_until_GC < minimum_desired_capacity) {
2499     // If we have less capacity below the metaspace HWM, then
2500     // increment the HWM.
2501     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
2502     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
2503     // Don't expand unless it's significant
2504     if (expand_bytes >= MinMetaspaceExpansion) {
2505       size_t new_capacity_until_GC = 0;
2506       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
2507       assert(succeeded, "Should always successfully increment HWM when at safepoint");
2508 
2509       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2510                                                new_capacity_until_GC,
2511                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
2512       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
2513                                minimum_desired_capacity / (double) K,
2514                                expand_bytes / (double) K,
2515                                MinMetaspaceExpansion / (double) K,
2516                                new_capacity_until_GC / (double) K);
2517     }
2518     return;
2519   }
2520 
2521   // No expansion, now see if we want to shrink
2522   // We would never want to shrink more than this
2523   assert(capacity_until_GC >= minimum_desired_capacity,
2524          SIZE_FORMAT " >= " SIZE_FORMAT,
2525          capacity_until_GC, minimum_desired_capacity);
2526   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
2527 
2528   // Should shrinking be considered?
2529   if (MaxMetaspaceFreeRatio < 100) {
2530     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
2531     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
2532     const double max_tmp = used_after_gc / minimum_used_percentage;
2533     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
2534     maximum_desired_capacity = MAX2(maximum_desired_capacity,
2535                                     MetaspaceSize);
2536     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
2537                              maximum_free_percentage, minimum_used_percentage);
2538     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
2539                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
2540 
2541     assert(minimum_desired_capacity <= maximum_desired_capacity,
2542            "sanity check");
2543 
2544     if (capacity_until_GC > maximum_desired_capacity) {
2545       // Capacity too large, compute shrinking size
2546       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
2547       // We don't want to shrink all the way back to initSize if people call
2548       // System.gc(), because some programs do that between "phases" and then
2549       // we'd just have to grow the heap up again for the next phase.  So we
2550       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
2551       // on the third call, and 100% by the fourth call.  But if we recompute
2552       // size without shrinking, it goes back to 0%.
2553       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
2554 
2555       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
2556 
2557       assert(shrink_bytes <= max_shrink_bytes,
2558              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
2559              shrink_bytes, max_shrink_bytes);
2560       if (current_shrink_factor == 0) {
2561         _shrink_factor = 10;
2562       } else {
2563         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
2564       }
2565       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
2566                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
2567       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
2568                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
2569     }
2570   }
2571 
2572   // Don't shrink unless it's significant
2573   if (shrink_bytes >= MinMetaspaceExpansion &&
2574       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
2575     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
2576     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2577                                              new_capacity_until_GC,
2578                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
2579   }
2580 }
2581 
2582 // Metadebug methods
2583 
2584 void Metadebug::init_allocation_fail_alot_count() {
2585   if (MetadataAllocationFailALot) {
2586     _allocation_fail_alot_count =
2587       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
2588   }
2589 }
2590 
2591 #ifdef ASSERT
2592 bool Metadebug::test_metadata_failure() {
2593   if (MetadataAllocationFailALot &&
2594       Threads::is_vm_complete()) {
2595     if (_allocation_fail_alot_count > 0) {
2596       _allocation_fail_alot_count--;
2597     } else {
2598       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
2599       init_allocation_fail_alot_count();
2600       return true;
2601     }
2602   }
2603   return false;
2604 }
2605 #endif
2606 
2607 // ChunkManager methods
2608 size_t ChunkManager::free_chunks_total_words() {
2609   return _free_chunks_total;
2610 }
2611 
2612 size_t ChunkManager::free_chunks_total_bytes() {
2613   return free_chunks_total_words() * BytesPerWord;
2614 }
2615 
2616 // Update internal accounting after a chunk was added
2617 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
2618   assert_lock_strong(SpaceManager::expand_lock());
2619   _free_chunks_count ++;
2620   _free_chunks_total += c->word_size();
2621 }
2622 
2623 // Update internal accounting after a chunk was removed
2624 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
2625   assert_lock_strong(SpaceManager::expand_lock());
2626   assert(_free_chunks_count >= 1,
2627     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
2628   assert(_free_chunks_total >= c->word_size(),
2629     "ChunkManager::_free_chunks_total: about to go negative"
2630      "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
2631   _free_chunks_count --;
2632   _free_chunks_total -= c->word_size();
2633 }
2634 
2635 size_t ChunkManager::free_chunks_count() {
2636 #ifdef ASSERT
2637   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
2638     MutexLockerEx cl(SpaceManager::expand_lock(),
2639                      Mutex::_no_safepoint_check_flag);
2640     // This lock is only needed in debug because the verification
2641     // of the _free_chunks_totals walks the list of free chunks
2642     slow_locked_verify_free_chunks_count();
2643   }
2644 #endif
2645   return _free_chunks_count;
2646 }
2647 
2648 ChunkIndex ChunkManager::list_index(size_t size) {
2649   if (size_by_index(SpecializedIndex) == size) {
2650     return SpecializedIndex;
2651   }
2652   if (size_by_index(SmallIndex) == size) {
2653     return SmallIndex;
2654   }
2655   const size_t med_size = size_by_index(MediumIndex);
2656   if (med_size == size) {
2657     return MediumIndex;
2658   }
2659 
2660   assert(size > med_size, "Not a humongous chunk");
2661   return HumongousIndex;
2662 }
2663 
2664 size_t ChunkManager::size_by_index(ChunkIndex index) const {
2665   index_bounds_check(index);
2666   assert(index != HumongousIndex, "Do not call for humongous chunks.");
2667   return _free_chunks[index].size();
2668 }
2669 
2670 void ChunkManager::locked_verify_free_chunks_total() {
2671   assert_lock_strong(SpaceManager::expand_lock());
2672   assert(sum_free_chunks() == _free_chunks_total,
2673          "_free_chunks_total " SIZE_FORMAT " is not the"
2674          " same as sum " SIZE_FORMAT, _free_chunks_total,
2675          sum_free_chunks());
2676 }
2677 
2678 void ChunkManager::verify_free_chunks_total() {
2679   MutexLockerEx cl(SpaceManager::expand_lock(),
2680                      Mutex::_no_safepoint_check_flag);
2681   locked_verify_free_chunks_total();
2682 }
2683 
2684 void ChunkManager::locked_verify_free_chunks_count() {
2685   assert_lock_strong(SpaceManager::expand_lock());
2686   assert(sum_free_chunks_count() == _free_chunks_count,
2687          "_free_chunks_count " SIZE_FORMAT " is not the"
2688          " same as sum " SIZE_FORMAT, _free_chunks_count,
2689          sum_free_chunks_count());
2690 }
2691 
2692 void ChunkManager::verify_free_chunks_count() {
2693 #ifdef ASSERT
2694   MutexLockerEx cl(SpaceManager::expand_lock(),
2695                      Mutex::_no_safepoint_check_flag);
2696   locked_verify_free_chunks_count();
2697 #endif
2698 }
2699 
2700 void ChunkManager::verify() {
2701   MutexLockerEx cl(SpaceManager::expand_lock(),
2702                      Mutex::_no_safepoint_check_flag);
2703   locked_verify();
2704 }
2705 
2706 void ChunkManager::locked_verify() {
2707   locked_verify_free_chunks_count();
2708   locked_verify_free_chunks_total();
2709   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2710     ChunkList* list = free_chunks(i);
2711     if (list != NULL) {
2712       Metachunk* chunk = list->head();
2713       while (chunk) {
2714         DEBUG_ONLY(do_verify_chunk(chunk);)
2715         assert(chunk->is_tagged_free(), "Chunk should be tagged as free.");
2716         chunk = chunk->next();
2717       }
2718     }
2719   }
2720 }
2721 
2722 void ChunkManager::locked_print_free_chunks(outputStream* st) {
2723   assert_lock_strong(SpaceManager::expand_lock());
2724   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2725                 _free_chunks_total, _free_chunks_count);
2726 }
2727 
2728 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
2729   assert_lock_strong(SpaceManager::expand_lock());
2730   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2731                 sum_free_chunks(), sum_free_chunks_count());
2732 }
2733 
2734 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
2735   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
2736          "Bad index: %d", (int)index);
2737 
2738   return &_free_chunks[index];
2739 }
2740 
2741 // The following methods, which sum the free chunk lists, are used by
2742 // printing code that runs in product builds.
2743 size_t ChunkManager::sum_free_chunks() {
2744   assert_lock_strong(SpaceManager::expand_lock());
2745   size_t result = 0;
2746   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2747     ChunkList* list = free_chunks(i);
2748 
2749     if (list == NULL) {
2750       continue;
2751     }
2752 
2753     result = result + list->count() * list->size();
2754   }
2755   result = result + humongous_dictionary()->total_size();
2756   return result;
2757 }
2758 
2759 size_t ChunkManager::sum_free_chunks_count() {
2760   assert_lock_strong(SpaceManager::expand_lock());
2761   size_t count = 0;
2762   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2763     ChunkList* list = free_chunks(i);
2764     if (list == NULL) {
2765       continue;
2766     }
2767     count = count + list->count();
2768   }
2769   count = count + humongous_dictionary()->total_free_blocks();
2770   return count;
2771 }
2772 
2773 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
2774   ChunkIndex index = list_index(word_size);
2775   assert(index < HumongousIndex, "No humongous list");
2776   return free_chunks(index);
2777 }
2778 
2779 // Helper for chunk splitting: given a target chunk size and a larger free chunk,
2780 // split the larger chunk into n smaller chunks, at least one of which is the
2781 // target chunk of the requested target size. The smaller chunks, including the
2782 // target chunk, are returned to the freelist. The pointer to the target chunk is
2783 // returned; the caller is expected to remove it from the freelist right away.
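// Illustrative example (non-class space, default chunk sizes assumed): splitting a
// free medium chunk (8 K words, aligned to its own size) to obtain a specialized
// chunk (128 words) places the target chunk at the region start, followed by three
// more specialized chunks (until the address is aligned for a small chunk) and
// fifteen small chunks of 512 words each: 4 * 128 + 15 * 512 = 8192 words, so the
// region is exactly covered and every piece is returned to its freelist.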
2784 Metachunk* ChunkManager::split_chunk(size_t target_chunk_word_size, Metachunk* larger_chunk) {
2785   assert(larger_chunk->word_size() > target_chunk_word_size, "Sanity");
2786 
2787   const ChunkIndex larger_chunk_index = larger_chunk->get_chunk_type();
2788   const ChunkIndex target_chunk_index = get_chunk_type_by_size(target_chunk_word_size, is_class());
2789 
2790   MetaWord* const region_start = (MetaWord*)larger_chunk;
2791   const size_t region_word_len = larger_chunk->word_size();
2792   MetaWord* const region_end = region_start + region_word_len;
2793   VirtualSpaceNode* const vsn = larger_chunk->container();
2794   OccupancyMap* const ocmap = vsn->occupancy_map();
2795 
2796   // Any larger non-humongous chunk size is a multiple of any smaller chunk size.
2797   // Since non-humongous chunks are aligned to their chunk size, the larger chunk should start
2798   // at an address suitable to place the smaller target chunk.
2799   assert_is_aligned(region_start, target_chunk_word_size);
2800 
2801   // Remove old chunk.
2802   free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
2803   larger_chunk->remove_sentinel();
2804 
2805   // Prevent access to the old chunk from here on.
2806   larger_chunk = NULL;
2807   // ... and wipe it.
2808   DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));
2809 
2810   // In its place create first the target chunk...
2811   MetaWord* p = region_start;
2812   Metachunk* target_chunk = ::new (p) Metachunk(target_chunk_index, is_class(), target_chunk_word_size, vsn);
2813   assert(target_chunk == (Metachunk*)p, "Sanity");
2814   target_chunk->set_origin(origin_split);
2815 
2816   // Note: we do not need to mark its start in the occupancy map
2817   // because it coincides with the old chunk start.
2818 
2819   // Mark chunk as free and return to the freelist.
2820   do_update_in_use_info_for_chunk(target_chunk, false);
2821   free_chunks(target_chunk_index)->return_chunk_at_head(target_chunk);
2822 
2823   // This chunk should now be valid and can be verified.
2824   DEBUG_ONLY(do_verify_chunk(target_chunk));
2825 
2826   // In the remaining space create the remainder chunks.
2827   p += target_chunk->word_size();
2828   assert(p < region_end, "Sanity");
2829 
2830   while (p < region_end) {
2831 
2832     // Find the largest chunk size which fits the alignment requirements at address p.
2833     ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index);
2834     size_t this_chunk_word_size = 0;
2835     for(;;) {
2836       this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class());
2837       if (is_aligned(p, this_chunk_word_size * BytesPerWord)) {
2838         break;
2839       } else {
2840         this_chunk_index = prev_chunk_index(this_chunk_index);
2841         assert(this_chunk_index >= target_chunk_index, "Sanity");
2842       }
2843     }
2844 
2845     assert(this_chunk_word_size >= target_chunk_word_size, "Sanity");
2846     assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
2847     assert(p + this_chunk_word_size <= region_end, "Sanity");
2848 
2849     // Create splitting chunk.
2850     Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
2851     assert(this_chunk == (Metachunk*)p, "Sanity");
2852     this_chunk->set_origin(origin_split);
2853     ocmap->set_chunk_starts_at_address(p, true);
2854     do_update_in_use_info_for_chunk(this_chunk, false);
2855 
2856     // This chunk should be valid and can be verified.
2857     DEBUG_ONLY(do_verify_chunk(this_chunk));
2858 
2859     // Return this chunk to freelist and correct counter.
2860     free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
2861     _free_chunks_count ++;
2862 
2863     log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
2864       SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
2865       p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
2866       p2i(region_start), p2i(region_end));
2867 
2868     p += this_chunk_word_size;
2869 
2870   }
2871 
2872   return target_chunk;
2873 }
2874 
2875 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
2876   assert_lock_strong(SpaceManager::expand_lock());
2877 
2878   slow_locked_verify();
2879 
2880   Metachunk* chunk = NULL;
2881   bool we_did_split_a_chunk = false;
2882 
2883   if (list_index(word_size) != HumongousIndex) {
2884 
2885     ChunkList* free_list = find_free_chunks_list(word_size);
2886     assert(free_list != NULL, "Sanity check");
2887 
2888     chunk = free_list->head();
2889 
2890     if (chunk == NULL) {
2891       // Split large chunks into smaller chunks if there are no smaller chunks, just large chunks.
2892       // This is the counterpart of the coalescing-upon-chunk-return.
2893 
2894       ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());
2895 
2896       // Is there a larger chunk we could split?
2897       Metachunk* larger_chunk = NULL;
2898       ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
2899       while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
2900         larger_chunk = free_chunks(larger_chunk_index)->head();
2901         if (larger_chunk == NULL) {
2902           larger_chunk_index = next_chunk_index(larger_chunk_index);
2903         }
2904       }
2905 
2906       if (larger_chunk != NULL) {
2907         assert(larger_chunk->word_size() > word_size, "Sanity");
2908         assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");
2909 
2910         // We found a larger chunk. Let's split it up:
2911         // - remove old chunk
2912         // - in its place, create new smaller chunks, with at least one chunk
2913         //   being of target size, the others sized as large as possible. This
2914         //   is to make sure the resulting chunks are "as coalesced as possible"
2915         //   (similar to VirtualSpaceNode::retire()).
2916         // Note: during this operation both ChunkManager and VirtualSpaceNode
2917         //  are temporarily invalid, so be careful with asserts.
2918 
2919         log_trace(gc, metaspace, freelist)("%s: splitting chunk " PTR_FORMAT
2920            ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
2921           (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
2922           chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));
2923 
2924         chunk = split_chunk(word_size, larger_chunk);
2925 
2926         // This should have worked.
2927         assert(chunk != NULL, "Sanity");
2928         assert(chunk->word_size() == word_size, "Sanity");
2929         assert(chunk->is_tagged_free(), "Sanity");
2930 
2931         we_did_split_a_chunk = true;
2932 
2933       }
2934     }
2935 
2936     if (chunk == NULL) {
2937       return NULL;
2938     }
2939 
2940     // Remove the chunk as the head of the list.
2941     free_list->remove_chunk(chunk);
2942 
2943     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list: " PTR_FORMAT " chunks left: " SSIZE_FORMAT ".",
2944                                        p2i(free_list), free_list->count());
2945 
2946   } else {
2947     chunk = humongous_dictionary()->get_chunk(word_size);
2948 
2949     if (chunk == NULL) {
2950       return NULL;
2951     }
2952 
2953     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
2954                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
2955   }
2956 
2957   // Chunk has been removed from the chunk manager; update counters.
2958   account_for_removed_chunk(chunk);
2959   do_update_in_use_info_for_chunk(chunk, true);
2960   chunk->container()->inc_container_count();
2961   chunk->inc_use_count();
2962 
2963   // Remove it from the links to this freelist
2964   chunk->set_next(NULL);
2965   chunk->set_prev(NULL);
2966 
2967   // Run some verifications (some more if we did a chunk split)
2968 #ifdef ASSERT
2969   locked_verify();
2970   VirtualSpaceNode* const vsn = chunk->container();
2971   vsn->verify();
2972   if (we_did_split_a_chunk) {
2973     vsn->verify_free_chunks_are_ideally_merged();
2974   }
2975 #endif
2976 
2977   return chunk;
2978 }
2979 
2980 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
2981   assert_lock_strong(SpaceManager::expand_lock());
2982   slow_locked_verify();
2983 
2984   // Take from the beginning of the list
2985   Metachunk* chunk = free_chunks_get(word_size);
2986   if (chunk == NULL) {
2987     return NULL;
2988   }
2989 
2990   assert((word_size <= chunk->word_size()) ||
2991          (list_index(chunk->word_size()) == HumongousIndex),
2992          "Non-humongous variable sized chunk");
2993   LogTarget(Debug, gc, metaspace, freelist) lt;
2994   if (lt.is_enabled()) {
2995     size_t list_count;
2996     if (list_index(word_size) < HumongousIndex) {
2997       ChunkList* list = find_free_chunks_list(word_size);
2998       list_count = list->count();
2999     } else {
3000       list_count = humongous_dictionary()->total_count();
3001     }
3002     LogStream ls(lt);
3003     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
3004              p2i(this), p2i(chunk), chunk->word_size(), list_count);
3005     ResourceMark rm;
3006     locked_print_free_chunks(&ls);
3007   }
3008 
3009   return chunk;
3010 }
3011 
3012 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
3013   assert_lock_strong(SpaceManager::expand_lock());
3014   assert(chunk != NULL, "Expected chunk.");
3015   DEBUG_ONLY(do_verify_chunk(chunk);)
3016   assert(chunk->get_chunk_type() == index, "Chunk does not match expected index.");
3017   assert(chunk->container() != NULL, "Container should have been set.");
3018   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
3019   index_bounds_check(index);
3020 
3021   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
3022   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
3023   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
3024   DEBUG_ONLY(chunk->mangle(badMetaWordVal);)
3025 
3026   if (index != HumongousIndex) {
3027     // Return non-humongous chunk to freelist.
3028     ChunkList* list = free_chunks(index);
3029     assert(list->size() == chunk->word_size(), "Wrong chunk type.");
3030     list->return_chunk_at_head(chunk);
3031     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
3032         chunk_size_name(index), p2i(chunk));
3033   } else {
3034     // Return humongous chunk to dictionary.
3035     assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
3036     assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
3037            "Humongous chunk has wrong alignment.");
3038     _humongous_dictionary.return_chunk(chunk);
3039     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
3040         chunk_size_name(index), p2i(chunk), chunk->word_size());
3041   }
3042   chunk->container()->dec_container_count();
3043   do_update_in_use_info_for_chunk(chunk, false);
3044 
3045   // Chunk has been added; update counters.
3046   account_for_added_chunk(chunk);
3047 
3048   // Attempt to coalesce the returned chunk with its neighboring chunks:
3049   // if this chunk is small or special, attempt to coalesce to a medium chunk.
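  // Sketch of the idea (see attempt_to_coalesce_around_chunk() for the actual
  // mechanics): this is the counterpart of split_chunk() above; if the returned
  // chunk and its free neighbors together fill a suitably aligned area of the
  // larger chunk size, they can be merged back into one larger free chunk.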
3050   if (index == SmallIndex || index == SpecializedIndex) {
3051     if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
3052       // That did not work. But if this chunk is specialized, we may still be able to form a small chunk.
3053       if (index == SpecializedIndex) {
3054         if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
3055           // give up.
3056         }
3057       }
3058     }
3059   }
3060 
3061 }
3062 
3063 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
3064   index_bounds_check(index);
3065   if (chunks == NULL) {
3066     return;
3067   }
3068   LogTarget(Trace, gc, metaspace, freelist) log;
3069   if (log.is_enabled()) { // tracing
3070     log.print("returning list of %s chunks...", chunk_size_name(index));
3071   }
3072   unsigned num_chunks_returned = 0;
3073   size_t size_chunks_returned = 0;
3074   Metachunk* cur = chunks;
3075   while (cur != NULL) {
3076     // Capture the next link before it is changed
3077     // by the call to return_chunk_at_head();
3078     Metachunk* next = cur->next();
3079     if (log.is_enabled()) { // tracing
3080       num_chunks_returned ++;
3081       size_chunks_returned += cur->word_size();
3082     }
3083     return_single_chunk(index, cur);
3084     cur = next;
3085   }
3086   if (log.is_enabled()) { // tracing
3087     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
3088         num_chunks_returned, chunk_size_name(index), size_chunks_returned);
3089     if (index != HumongousIndex) {
3090       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
3091     } else {
3092       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
3093     }
3094   }
3095 }
3096 
3097 void ChunkManager::print_on(outputStream* out) const {
3098   _humongous_dictionary.report_statistics(out);
3099 }
3100 
3101 void ChunkManager::locked_get_statistics(ChunkManagerStatistics* stat) const {
3102   assert_lock_strong(SpaceManager::expand_lock());
3103   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3104     stat->num_by_type[i] = num_free_chunks(i);
3105     stat->single_size_by_type[i] = size_by_index(i);
3106     stat->total_size_by_type[i] = size_free_chunks_in_bytes(i);
3107   }
3108   stat->num_humongous_chunks = num_free_chunks(HumongousIndex);
3109   stat->total_size_humongous_chunks = size_free_chunks_in_bytes(HumongousIndex);
3110 }
3111 
3112 void ChunkManager::get_statistics(ChunkManagerStatistics* stat) const {
3113   MutexLockerEx cl(SpaceManager::expand_lock(),
3114                    Mutex::_no_safepoint_check_flag);
3115   locked_get_statistics(stat);
3116 }
3117 
3118 void ChunkManager::print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale) {
3119   size_t total = 0;
3120   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3121 
3122   const char* unit = scale_unit(scale);
3123   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3124     out->print("  " SIZE_FORMAT " %s (" SIZE_FORMAT " bytes) chunks, total ",
3125                    stat->num_by_type[i], chunk_size_name(i),
3126                    stat->single_size_by_type[i]);
3127     if (scale == 1) {
3128       out->print_cr(SIZE_FORMAT " bytes", stat->total_size_by_type[i]);
3129     } else {
3130       out->print_cr("%.2f%s", (float)stat->total_size_by_type[i] / scale, unit);
3131     }
3132 
3133     total += stat->total_size_by_type[i];
3134   }
3135 
3136 
3137   total += stat->total_size_humongous_chunks;
3138 
3139   if (scale == 1) {
3140     out->print_cr("  " SIZE_FORMAT " humongous chunks, total " SIZE_FORMAT " bytes",
3141     stat->num_humongous_chunks, stat->total_size_humongous_chunks);
3142 
3143     out->print_cr("  total size: " SIZE_FORMAT " bytes.", total);
3144   } else {
3145     out->print_cr("  " SIZE_FORMAT " humongous chunks, total %.2f%s",
3146     stat->num_humongous_chunks,
3147     (float)stat->total_size_humongous_chunks / scale, unit);
3148 
3149     out->print_cr("  total size: %.2f%s.", (float)total / scale, unit);
3150   }
3151 
3152 }
3153 
3154 void ChunkManager::print_all_chunkmanagers(outputStream* out, size_t scale) {
3155   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3156 
3157   // Note: restrict lock protection to retrieving the statistics;
3158   // do the printing outside of the lock.
3159   ChunkManagerStatistics stat;
3160   out->print_cr("Chunkmanager (non-class):");
3161   const ChunkManager* const non_class_cm = Metaspace::chunk_manager_metadata();
3162   if (non_class_cm != NULL) {
3163     non_class_cm->get_statistics(&stat);
3164     ChunkManager::print_statistics(&stat, out, scale);
3165   } else {
3166     out->print_cr("unavailable.");
3167   }
3168   out->print_cr("Chunkmanager (class):");
3169   const ChunkManager* const class_cm = Metaspace::chunk_manager_class();
3170   if (class_cm != NULL) {
3171     class_cm->get_statistics(&stat);
3172     ChunkManager::print_statistics(&stat, out, scale);
3173   } else {
3174     out->print_cr("unavailable.");
3175   }
3176 }
3177 
3178 // SpaceManager methods
3179 
3180 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
3181   size_t chunk_sizes[] = {
3182       specialized_chunk_size(is_class_space),
3183       small_chunk_size(is_class_space),
3184       medium_chunk_size(is_class_space)
3185   };
3186 
3187   // Adjust up to one of the fixed chunk sizes ...
3188   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
3189     if (requested <= chunk_sizes[i]) {
3190       return chunk_sizes[i];
3191     }
3192   }
3193 
3194   // ... or return the size as a humongous chunk.
3195   return requested;
3196 }
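// For example: a request of 100 words is rounded up to the specialized chunk size,
// a request exactly equal to the medium chunk size is returned unchanged, and any
// larger request is returned as-is and will be satisfied with a humongous chunk.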
3197 
3198 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
3199   return adjust_initial_chunk_size(requested, is_class());
3200 }
3201 
3202 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
3203   size_t requested;
3204 
3205   if (is_class()) {
3206     switch (type) {
3207     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
3208     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
3209     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
3210     default:                                 requested = ClassSmallChunk; break;
3211     }
3212   } else {
3213     switch (type) {
3214     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
3215     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
3216     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
3217     default:                                 requested = SmallChunk; break;
3218     }
3219   }
3220 
3221   // Adjust to one of the fixed chunk sizes (unless humongous)
3222   const size_t adjusted = adjust_initial_chunk_size(requested);
3223 
3224   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
3225          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
3226 
3227   return adjusted;
3228 }
3229 
3230 size_t SpaceManager::sum_free_in_chunks_in_use() const {
3231   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3232   size_t free = 0;
3233   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3234     Metachunk* chunk = chunks_in_use(i);
3235     while (chunk != NULL) {
3236       free += chunk->free_word_size();
3237       chunk = chunk->next();
3238     }
3239   }
3240   return free;
3241 }
3242 
3243 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
3244   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3245   size_t result = 0;
3246   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3247    result += sum_waste_in_chunks_in_use(i);
3248   }
3249 
3250   return result;
3251 }
3252 
3253 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
3254   size_t result = 0;
3255   Metachunk* chunk = chunks_in_use(index);
3256   // Count the free space in all the chunks except the
3257   // current chunk, from which allocations are still being done.
3258   while (chunk != NULL) {
3259     if (chunk != current_chunk()) {
3260       result += chunk->free_word_size();
3261     }
3262     chunk = chunk->next();
3263   }
3264   return result;
3265 }
3266 
3267 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
3268   // For CMS use "allocated_chunks_words()", which does not need the
3269   // Metaspace lock.  For the other collectors sum over the lists.
3270   // Use both methods as a check that "allocated_chunks_words()" is
3271   // correct.  That is, sum_capacity_in_chunks_in_use() is too expensive
3272   // to use in the product, so allocated_chunks_words() should be used,
3273   // but allow for checking that allocated_chunks_words() returns the
3274   // same value as sum_capacity_in_chunks_in_use(), which is the
3275   // definitive answer.
3276   if (UseConcMarkSweepGC) {
3277     return allocated_chunks_words();
3278   } else {
3279     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3280     size_t sum = 0;
3281     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3282       Metachunk* chunk = chunks_in_use(i);
3283       while (chunk != NULL) {
3284         sum += chunk->word_size();
3285         chunk = chunk->next();
3286       }
3287     }
3288   return sum;
3289   }
3290 }
3291 
3292 size_t SpaceManager::sum_count_in_chunks_in_use() {
3293   size_t count = 0;
3294   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3295     count = count + sum_count_in_chunks_in_use(i);
3296   }
3297 
3298   return count;
3299 }
3300 
3301 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
3302   size_t count = 0;
3303   Metachunk* chunk = chunks_in_use(i);
3304   while (chunk != NULL) {
3305     count++;
3306     chunk = chunk->next();
3307   }
3308   return count;
3309 }
3310 
3311 
3312 size_t SpaceManager::sum_used_in_chunks_in_use() const {
3313   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3314   size_t used = 0;
3315   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3316     Metachunk* chunk = chunks_in_use(i);
3317     while (chunk != NULL) {
3318       used += chunk->used_word_size();
3319       chunk = chunk->next();
3320     }
3321   }
3322   return used;
3323 }
3324 
3325 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
3326 
3327   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3328     Metachunk* chunk = chunks_in_use(i);
3329     st->print("SpaceManager: %s " PTR_FORMAT,
3330                  chunk_size_name(i), p2i(chunk));
3331     if (chunk != NULL) {
3332       st->print_cr(" free " SIZE_FORMAT,
3333                    chunk->free_word_size());
3334     } else {
3335       st->cr();
3336     }
3337   }
3338 
3339   chunk_manager()->locked_print_free_chunks(st);
3340   chunk_manager()->locked_print_sum_free_chunks(st);
3341 }
3342 
3343 size_t SpaceManager::calc_chunk_size(size_t word_size) {
3344 
3345   // Decide between a small chunk and a medium chunk.  Up to
3346   // _small_chunk_limit small chunks can be allocated.
3347   // After that a medium chunk is preferred.
3348   size_t chunk_word_size;
3349 
3350   // Special case for anonymous metadata space.
3351   // Anonymous metadata space is usually small, with majority within 1K - 2K range and
3352   // rarely about 4K (64-bit JVM).
3353   // Instead of jumping to SmallChunk after the initial chunk is exhausted, keeping allocation
3354   // from SpecializedChunk up to _anon_metadata_specialize_chunk_limit (4) reduces space waste
3355   // from 60+% to around 30%.
3356   if (_space_type == Metaspace::AnonymousMetaspaceType &&
3357       _mdtype == Metaspace::NonClassType &&
3358       sum_count_in_chunks_in_use(SpecializedIndex) < _anon_metadata_specialize_chunk_limit &&
3359       word_size + Metachunk::overhead() <= SpecializedChunk) {
3360     return SpecializedChunk;
3361   }
3362 
3363   if (chunks_in_use(MediumIndex) == NULL &&
3364       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
3365     chunk_word_size = (size_t) small_chunk_size();
3366     if (word_size + Metachunk::overhead() > small_chunk_size()) {
3367       chunk_word_size = medium_chunk_size();
3368     }
3369   } else {
3370     chunk_word_size = medium_chunk_size();
3371   }
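  // Worked example (assuming the default chunk sizes; the exact value of
  // _small_chunk_limit is defined elsewhere in this file): a non-class,
  // non-anonymous SpaceManager requesting 100 words with no medium chunk in use
  // and fewer than _small_chunk_limit small chunks gets a small chunk; once that
  // limit is reached, or if the request plus Metachunk::overhead() does not fit
  // a small chunk, a medium chunk is handed out instead.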
3372 
3373   // Might still need a humongous chunk.  Enforce that
3374   // humongous allocation sizes are aligned up to
3375   // the smallest chunk size.
3376   size_t if_humongous_sized_chunk =
3377     align_up(word_size + Metachunk::overhead(),
3378                   smallest_chunk_size());
3379   chunk_word_size =
3380     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
3381 
3382   assert(!SpaceManager::is_humongous(word_size) ||
3383          chunk_word_size == if_humongous_sized_chunk,
3384          "Size calculation is wrong, word_size " SIZE_FORMAT
3385          " chunk_word_size " SIZE_FORMAT,
3386          word_size, chunk_word_size);
3387   Log(gc, metaspace, alloc) log;
3388   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
3389     log.debug("Metadata humongous allocation:");
3390     log.debug("  word_size " PTR_FORMAT, word_size);
3391     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
3392     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
3393   }
3394   return chunk_word_size;
3395 }
3396 
3397 void SpaceManager::track_metaspace_memory_usage() {
3398   if (is_init_completed()) {
3399     if (is_class()) {
3400       MemoryService::track_compressed_class_memory_usage();
3401     }
3402     MemoryService::track_metaspace_memory_usage();
3403   }
3404 }
3405 
3406 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
3407   assert(vs_list()->current_virtual_space() != NULL,
3408          "Should have been set");
3409   assert(current_chunk() == NULL ||
3410          current_chunk()->allocate(word_size) == NULL,
3411          "Don't need to expand");
3412   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3413 
3414   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3415     size_t words_left = 0;
3416     size_t words_used = 0;
3417     if (current_chunk() != NULL) {
3418       words_left = current_chunk()->free_word_size();
3419       words_used = current_chunk()->used_word_size();
3420     }
3421     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
3422                                        word_size, words_used, words_left);
3423   }
3424 
3425   // Get another chunk
3426   size_t chunk_word_size = calc_chunk_size(word_size);
3427   Metachunk* next = get_new_chunk(chunk_word_size);
3428 
3429   MetaWord* mem = NULL;
3430 
3431   // If a chunk was available, add it to the in-use chunk list
3432   // and do an allocation from it.
3433   if (next != NULL) {
3434     // Add to this manager's list of chunks in use.
3435     add_chunk(next, false);
3436     mem = next->allocate(word_size);
3437   }
3438 
3439   // Track metaspace memory usage statistic.
3440   track_metaspace_memory_usage();
3441 
3442   return mem;
3443 }
3444 
3445 void SpaceManager::print_on(outputStream* st) const {
3446 
3447   for (ChunkIndex i = ZeroIndex;
3448        i < NumberOfInUseLists ;
3449        i = next_chunk_index(i) ) {
3450     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
3451                  p2i(chunks_in_use(i)),
3452                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
3453   }
3454   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
3455                " Humongous " SIZE_FORMAT,
3456                sum_waste_in_chunks_in_use(SmallIndex),
3457                sum_waste_in_chunks_in_use(MediumIndex),
3458                sum_waste_in_chunks_in_use(HumongousIndex));
3459   // block free lists
3460   if (block_freelists() != NULL) {
3461     st->print_cr("total in block free lists " SIZE_FORMAT,
3462       block_freelists()->total_size());
3463   }
3464 }
3465 
3466 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
3467                            Metaspace::MetaspaceType space_type,
3468                            Mutex* lock) :
3469   _mdtype(mdtype),
3470   _space_type(space_type),
3471   _allocated_blocks_words(0),
3472   _allocated_chunks_words(0),
3473   _allocated_chunks_count(0),
3474   _block_freelists(NULL),
3475   _lock(lock)
3476 {
3477   initialize();
3478 }
3479 
3480 void SpaceManager::inc_size_metrics(size_t words) {
3481   assert_lock_strong(SpaceManager::expand_lock());
3482   // Total of allocated Metachunk words and allocated Metachunk count
3483   // for each SpaceManager
3484   _allocated_chunks_words = _allocated_chunks_words + words;
3485   _allocated_chunks_count++;
3486   // Global total of capacity in allocated Metachunks
3487   MetaspaceAux::inc_capacity(mdtype(), words);
3488   // Global total of allocated Metablocks.
3489   // used_words_slow() includes the overhead in each
3490   // Metachunk so include it in the used when the
3491   // Metachunk is first added (so only added once per
3492   // Metachunk).
3493   MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
3494 }
3495 
3496 void SpaceManager::inc_used_metrics(size_t words) {
3497   // Add to the per SpaceManager total
3498   Atomic::add(words, &_allocated_blocks_words);
3499   // Add to the global total
3500   MetaspaceAux::inc_used(mdtype(), words);
3501 }
3502 
3503 void SpaceManager::dec_total_from_size_metrics() {
3504   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
3505   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
3506   // Also deduct the overhead per Metachunk
3507   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
3508 }
3509 
3510 void SpaceManager::initialize() {
3511   Metadebug::init_allocation_fail_alot_count();
3512   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3513     _chunks_in_use[i] = NULL;
3514   }
3515   _current_chunk = NULL;
3516   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
3517 }
3518 
3519 SpaceManager::~SpaceManager() {
3520   // This call this->_lock which can't be done while holding expand_lock()
3521   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
3522          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
3523          " allocated_chunks_words() " SIZE_FORMAT,
3524          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
3525 
3526   MutexLockerEx fcl(SpaceManager::expand_lock(),
3527                     Mutex::_no_safepoint_check_flag);
3528 
3529   assert(sum_count_in_chunks_in_use() == allocated_chunks_count(),
3530          "sum_count_in_chunks_in_use() " SIZE_FORMAT
3531          " allocated_chunks_count() " SIZE_FORMAT,
3532          sum_count_in_chunks_in_use(), allocated_chunks_count());
3533 
3534   chunk_manager()->slow_locked_verify();
3535 
3536   dec_total_from_size_metrics();
3537 
3538   Log(gc, metaspace, freelist) log;
3539   if (log.is_trace()) {
3540     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
3541     ResourceMark rm;
3542     LogStream ls(log.trace());
3543     locked_print_chunks_in_use_on(&ls);
3544     if (block_freelists() != NULL) {
3545       block_freelists()->print_on(&ls);
3546     }
3547   }
3548 
3549   // Add all the chunks in use by this space manager
3550   // to the global list of free chunks.
3551 
3552   // Follow each list of chunks-in-use and add them to the
3553   // free lists.  Each list is NULL terminated.
3554 
3555   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
3556     Metachunk* chunks = chunks_in_use(i);
3557     chunk_manager()->return_chunk_list(i, chunks);
3558     set_chunks_in_use(i, NULL);
3559   }
3560 
3561   chunk_manager()->slow_locked_verify();
3562 
3563   if (_block_freelists != NULL) {
3564     delete _block_freelists;
3565   }
3566 }
3567 
3568 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
3569   assert_lock_strong(_lock);
3570   // Allocations and deallocations are in raw_word_size
3571   size_t raw_word_size = get_allocation_word_size(word_size);
3572   // Lazily create a block_freelist
3573   if (block_freelists() == NULL) {
3574     _block_freelists = new BlockFreelist();
3575   }
3576   block_freelists()->return_block(p, raw_word_size);
3577 }
3578 
3579 // Adds a chunk to the list of chunks in use.
3580 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
3581 
3582   assert(new_chunk != NULL, "Should not be NULL");
3583   assert(new_chunk->next() == NULL, "Should not be on a list");
3584 
3585   new_chunk->reset_empty();
3586 
3587   // Find the correct list and set the current
3588   // chunk for that list.
3589   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
3590 
3591   if (index != HumongousIndex) {
3592     retire_current_chunk();
3593     set_current_chunk(new_chunk);
3594     new_chunk->set_next(chunks_in_use(index));
3595     set_chunks_in_use(index, new_chunk);
3596   } else {
3597     // For null class loader data and DumpSharedSpaces, the first chunk isn't
3598     // small, so the small chunk list will be empty.  Link this first chunk as
3599     // the current chunk.
3600     if (make_current) {
3601       // Set as the current chunk but otherwise treat as a humongous chunk.
3602       set_current_chunk(new_chunk);
3603     }
3604     // Link at head.  The _current_chunk only points to a humongous chunk for
3605     // the null class loader metaspace (class and data virtual space managers).
3606     // Since humongous chunks are linked at the head, _current_chunk will not
3607     // point to the tail of the humongous chunks list.
3608     new_chunk->set_next(chunks_in_use(HumongousIndex));
3609     set_chunks_in_use(HumongousIndex, new_chunk);
3610 
3611     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
3612   }
3613 
3614   // Add to the running sum of capacity
3615   inc_size_metrics(new_chunk->word_size());
3616 
3617   assert(new_chunk->is_empty(), "Not ready for reuse");
3618   Log(gc, metaspace, freelist) log;
3619   if (log.is_trace()) {
3620     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT ") ", sum_count_in_chunks_in_use());
3621     ResourceMark rm;
3622     LogStream ls(log.trace());
3623     new_chunk->print_on(&ls);
3624     chunk_manager()->locked_print_free_chunks(&ls);
3625   }
3626 }
3627 
3628 void SpaceManager::retire_current_chunk() {
3629   if (current_chunk() != NULL) {
3630     size_t remaining_words = current_chunk()->free_word_size();
3631     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
3632       MetaWord* ptr = current_chunk()->allocate(remaining_words);
3633       deallocate(ptr, remaining_words);
3634       inc_used_metrics(remaining_words);
3635     }
3636   }
3637 }
3638 
3639 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
3640   // Get a chunk from the chunk freelist
3641   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
3642 
3643   if (next == NULL) {
3644     next = vs_list()->get_new_chunk(chunk_word_size,
3645                                     medium_chunk_bunch());
3646   }
3647 
3648   Log(gc, metaspace, alloc) log;
3649   if (log.is_debug() && next != NULL &&
3650       SpaceManager::is_humongous(next->word_size())) {
3651     log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());
3652   }
3653 
3654   return next;
3655 }
3656 
3657 MetaWord* SpaceManager::allocate(size_t word_size) {
3658   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3659   size_t raw_word_size = get_allocation_word_size(word_size);
3660   BlockFreelist* fl =  block_freelists();
3661   MetaWord* p = NULL;
3662   // Allocation from the dictionary is expensive in the sense that
3663   // the dictionary has to be searched for a size.  Don't allocate
3664   // from the dictionary until it starts to get fat.  Is this
3665   // a reasonable policy?  Maybe a skinny dictionary is fast enough
3666   // for allocations.  Do some profiling.  JJJ
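  // For example, with allocation_from_dictionary_limit at its file-scope value of
  // 4 * K: as long as the block freelist's total_size() is at or below that limit,
  // the freelist is bypassed and the allocation is taken from the current chunk.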
3667   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
3668     p = fl->get_block(raw_word_size);
3669   }
3670   if (p == NULL) {
3671     p = allocate_work(raw_word_size);
3672   }
3673 
3674   return p;
3675 }
3676 
3677 // Returns the address of the space allocated for "word_size".
3678 // This method does not know about blocks (Metablocks).
3679 MetaWord* SpaceManager::allocate_work(size_t word_size) {
3680   assert_lock_strong(_lock);
3681 #ifdef ASSERT
3682   if (Metadebug::test_metadata_failure()) {
3683     return NULL;
3684   }
3685 #endif
3686   // Is there space in the current chunk?
3687   MetaWord* result = NULL;
3688 
3689   if (current_chunk() != NULL) {
3690     result = current_chunk()->allocate(word_size);
3691   }
3692 
3693   if (result == NULL) {
3694     result = grow_and_allocate(word_size);
3695   }
3696 
3697   if (result != NULL) {
3698     inc_used_metrics(word_size);
3699     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
3700            "Head of the list is being allocated");
3701   }
3702 
3703   return result;
3704 }
3705 
3706 void SpaceManager::verify() {
3707   // If there are blocks in the dictionary, then
3708   // verification of chunks does not work since
3709   // being in the dictionary alters a chunk.
3710   if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
3711     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3712       Metachunk* curr = chunks_in_use(i);
3713       while (curr != NULL) {
3714         DEBUG_ONLY(do_verify_chunk(curr);)
3715         assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
3716         curr = curr->next();
3717       }
3718     }
3719   }
3720 }
3721 
3722 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
3723   assert(is_humongous(chunk->word_size()) ||
3724          chunk->word_size() == medium_chunk_size() ||
3725          chunk->word_size() == small_chunk_size() ||
3726          chunk->word_size() == specialized_chunk_size(),
3727          "Chunk size is wrong");
3728   return;
3729 }
3730 
3731 #ifdef ASSERT
3732 void SpaceManager::verify_allocated_blocks_words() {
3733   // Verification is only guaranteed at a safepoint.
3734   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
3735     "Verification can fail if the applications is running");
3736   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
3737          "allocation total is not consistent " SIZE_FORMAT
3738          " vs " SIZE_FORMAT,
3739          allocated_blocks_words(), sum_used_in_chunks_in_use());
3740 }
3741 
3742 #endif
3743 
3744 void SpaceManager::dump(outputStream* const out) const {
3745   size_t curr_total = 0;
3746   size_t waste = 0;
3747   uint i = 0;
3748   size_t used = 0;
3749   size_t capacity = 0;
3750 
3751   // Add up statistics for all chunks in this SpaceManager.
3752   for (ChunkIndex index = ZeroIndex;
3753        index < NumberOfInUseLists;
3754        index = next_chunk_index(index)) {
3755     for (Metachunk* curr = chunks_in_use(index);
3756          curr != NULL;
3757          curr = curr->next()) {
3758       out->print("%d) ", i++);
3759       curr->print_on(out);
3760       curr_total += curr->word_size();
3761       used += curr->used_word_size();
3762       capacity += curr->word_size();
3763       waste += curr->free_word_size() + curr->overhead();;
3764     }
3765   }
3766 
3767   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3768     if (block_freelists() != NULL) block_freelists()->print_on(out);
3769   }
3770 
3771   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
3772   // Free space isn't wasted.
3773   waste -= free;
3774 
3775   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
3776                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
3777                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
3778 }
3779 
3780 // MetaspaceAux
3781 
3782 
3783 size_t MetaspaceAux::_capacity_words[] = {0, 0};
3784 volatile size_t MetaspaceAux::_used_words[] = {0, 0};
3785 
3786 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
3787   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3788   return list == NULL ? 0 : list->free_bytes();
3789 }
3790 
3791 size_t MetaspaceAux::free_bytes() {
3792   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
3793 }
3794 
3795 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
3796   assert_lock_strong(SpaceManager::expand_lock());
3797   assert(words <= capacity_words(mdtype),
3798          "About to decrement below 0: words " SIZE_FORMAT
3799          " is greater than _capacity_words[%u] " SIZE_FORMAT,
3800          words, mdtype, capacity_words(mdtype));
3801   _capacity_words[mdtype] -= words;
3802 }
3803 
3804 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
3805   assert_lock_strong(SpaceManager::expand_lock());
3806   // Needs to be atomic
3807   _capacity_words[mdtype] += words;
3808 }
3809 
3810 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
3811   assert(words <= used_words(mdtype),
3812          "About to decrement below 0: words " SIZE_FORMAT
3813          " is greater than _used_words[%u] " SIZE_FORMAT,
3814          words, mdtype, used_words(mdtype));
3815   // For CMS, deallocation of the Metaspaces occurs during the
3816   // sweep, which is a concurrent phase.  Protection by the expand_lock()
3817   // is not enough since allocation is on a per Metaspace basis
3818   // and protected by the Metaspace lock.
3819   Atomic::sub(words, &_used_words[mdtype]);
3820 }
3821 
3822 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
3823   // _used_words tracks allocations for
3824   // each piece of metadata.  Those allocations are
3825   // generally done concurrently by different application
3826   // threads so must be done atomically.
3827   Atomic::add(words, &_used_words[mdtype]);
3828 }
3829 
3830 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
3831   size_t used = 0;
3832   ClassLoaderDataGraphMetaspaceIterator iter;
3833   while (iter.repeat()) {
3834     Metaspace* msp = iter.get_next();
3835     // Sum allocated_blocks_words for each metaspace
3836     if (msp != NULL) {
3837       used += msp->used_words_slow(mdtype);
3838     }
3839   }
3840   return used * BytesPerWord;
3841 }
3842 
3843 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
3844   size_t free = 0;
3845   ClassLoaderDataGraphMetaspaceIterator iter;
3846   while (iter.repeat()) {
3847     Metaspace* msp = iter.get_next();
3848     if (msp != NULL) {
3849       free += msp->free_words_slow(mdtype);
3850     }
3851   }
3852   return free * BytesPerWord;
3853 }
3854 
3855 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
3856   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
3857     return 0;
3858   }
3859   // Don't count the space in the freelists.  That space will be
3860   // added to the capacity calculation as needed.
3861   size_t capacity = 0;
3862   ClassLoaderDataGraphMetaspaceIterator iter;
3863   while (iter.repeat()) {
3864     Metaspace* msp = iter.get_next();
3865     if (msp != NULL) {
3866       capacity += msp->capacity_words_slow(mdtype);
3867     }
3868   }
3869   return capacity * BytesPerWord;
3870 }
3871 
3872 size_t MetaspaceAux::capacity_bytes_slow() {
3873 #ifdef PRODUCT
3874   // Use capacity_bytes() in PRODUCT instead of this function.
3875   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
3876 #endif
3877   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
3878   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
3879   assert(capacity_bytes() == class_capacity + non_class_capacity,
3880          "bad accounting: capacity_bytes() " SIZE_FORMAT
3881          " class_capacity + non_class_capacity " SIZE_FORMAT
3882          " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
3883          capacity_bytes(), class_capacity + non_class_capacity,
3884          class_capacity, non_class_capacity);
3885 
3886   return class_capacity + non_class_capacity;
3887 }
3888 
3889 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
3890   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3891   return list == NULL ? 0 : list->reserved_bytes();
3892 }
3893 
3894 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
3895   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3896   return list == NULL ? 0 : list->committed_bytes();
3897 }
3898 
3899 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
3900 
3901 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
3902   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
3903   if (chunk_manager == NULL) {
3904     return 0;
3905   }
3906   chunk_manager->slow_verify();
3907   return chunk_manager->free_chunks_total_words();
3908 }
3909 
3910 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
3911   return free_chunks_total_words(mdtype) * BytesPerWord;
3912 }
3913 
3914 size_t MetaspaceAux::free_chunks_total_words() {
3915   return free_chunks_total_words(Metaspace::ClassType) +
3916          free_chunks_total_words(Metaspace::NonClassType);
3917 }
3918 
3919 size_t MetaspaceAux::free_chunks_total_bytes() {
3920   return free_chunks_total_words() * BytesPerWord;
3921 }
3922 
3923 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
3924   return Metaspace::get_chunk_manager(mdtype) != NULL;
3925 }
3926 
3927 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
3928   if (!has_chunk_free_list(mdtype)) {
3929     return MetaspaceChunkFreeListSummary();
3930   }
3931 
3932   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
3933   return cm->chunk_free_list_summary();
3934 }
3935 
3936 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
3937   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
3938                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
3939 }
3940 
3941 void MetaspaceAux::print_on(outputStream* out) {
3942   Metaspace::MetadataType nct = Metaspace::NonClassType;
3943 
3944   out->print_cr(" Metaspace       "
3945                 "used "      SIZE_FORMAT "K, "
3946                 "capacity "  SIZE_FORMAT "K, "
3947                 "committed " SIZE_FORMAT "K, "
3948                 "reserved "  SIZE_FORMAT "K",
3949                 used_bytes()/K,
3950                 capacity_bytes()/K,
3951                 committed_bytes()/K,
3952                 reserved_bytes()/K);
3953 
3954   if (Metaspace::using_class_space()) {
3955     Metaspace::MetadataType ct = Metaspace::ClassType;
3956     out->print_cr("  class space    "
3957                   "used "      SIZE_FORMAT "K, "
3958                   "capacity "  SIZE_FORMAT "K, "
3959                   "committed " SIZE_FORMAT "K, "
3960                   "reserved "  SIZE_FORMAT "K",
3961                   used_bytes(ct)/K,
3962                   capacity_bytes(ct)/K,
3963                   committed_bytes(ct)/K,
3964                   reserved_bytes(ct)/K);
3965   }
3966 }
3967 
3968 // Print information for class space and data space separately.
3969 // This is almost the same as above.
3970 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
3971   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
3972   size_t capacity_bytes = capacity_bytes_slow(mdtype);
3973   size_t used_bytes = used_bytes_slow(mdtype);
3974   size_t free_bytes = free_bytes_slow(mdtype);
3975   size_t used_and_free = used_bytes + free_bytes +
3976                            free_chunks_capacity_bytes;
3977   out->print_cr("  Chunk accounting: (used in chunks " SIZE_FORMAT
3978              "K + unused in chunks " SIZE_FORMAT "K  + "
3979              " capacity in free chunks " SIZE_FORMAT "K) = " SIZE_FORMAT
3980              "K  capacity in allocated chunks " SIZE_FORMAT "K",
3981              used_bytes / K,
3982              free_bytes / K,
3983              free_chunks_capacity_bytes / K,
3984              used_and_free / K,
3985              capacity_bytes / K);
3986   // Accounting can only be correct if we got the values during a safepoint
3987   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
3988 }
3989 
3990 // Print total fragmentation for class metaspaces
3991 void MetaspaceAux::print_class_waste(outputStream* out) {
3992   assert(Metaspace::using_class_space(), "class metaspace not used");
3993   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
3994   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
3995   ClassLoaderDataGraphMetaspaceIterator iter;
3996   while (iter.repeat()) {
3997     Metaspace* msp = iter.get_next();
3998     if (msp != NULL) {
3999       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
4000       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
4001       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
4002       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
4003       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
4004       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
4005       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
4006     }
4007   }
4008   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
4009                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
4010                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
4011                 "large count " SIZE_FORMAT,
4012                 cls_specialized_count, cls_specialized_waste,
4013                 cls_small_count, cls_small_waste,
4014                 cls_medium_count, cls_medium_waste, cls_humongous_count);
4015 }
4016 
4017 // Print total fragmentation for data and class metaspaces separately
4018 void MetaspaceAux::print_waste(outputStream* out) {
4019   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
4020   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
4021 
4022   ClassLoaderDataGraphMetaspaceIterator iter;
4023   while (iter.repeat()) {
4024     Metaspace* msp = iter.get_next();
4025     if (msp != NULL) {
4026       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
4027       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
4028       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
4029       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
4030       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
4031       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
4032       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
4033     }
4034   }
4035   out->print_cr("Total fragmentation waste (words) doesn't count free space");
4036   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
4037                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
4038                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
4039                         "large count " SIZE_FORMAT,
4040              specialized_count, specialized_waste, small_count,
4041              small_waste, medium_count, medium_waste, humongous_count);
4042   if (Metaspace::using_class_space()) {
4043     print_class_waste(out);
4044   }
4045 }
4046 
4047 class MetadataStats {
4048 private:
4049   size_t _capacity;
4050   size_t _used;
4051   size_t _free;
4052   size_t _waste;
4053 
4054 public:
4055   MetadataStats() : _capacity(0), _used(0), _free(0), _waste(0) { }
4056   MetadataStats(size_t capacity, size_t used, size_t free, size_t waste)
4057   : _capacity(capacity), _used(used), _free(free), _waste(waste) { }
4058 
4059   void add(const MetadataStats& stats) {
4060     _capacity += stats.capacity();
4061     _used += stats.used();
4062     _free += stats.free();
4063     _waste += stats.waste();
4064   }
4065 
4066   size_t capacity() const { return _capacity; }
4067   size_t used() const     { return _used; }
4068   size_t free() const     { return _free; }
4069   size_t waste() const    { return _waste; }
4070 
4071   void print_on(outputStream* out, size_t scale) const;
4072 };
4073 
4074 
4075 void MetadataStats::print_on(outputStream* out, size_t scale) const {
4076   const char* unit = scale_unit(scale);
4077   out->print_cr("capacity=%10.2f%s used=%10.2f%s free=%10.2f%s waste=%10.2f%s",
4078     (float)capacity() / scale, unit,
4079     (float)used() / scale, unit,
4080     (float)free() / scale, unit,
4081     (float)waste() / scale, unit);
4082 }
4083 
4084 class PrintCLDMetaspaceInfoClosure : public CLDClosure {
4085 private:
4086   outputStream*  _out;
4087   size_t         _scale;
4088 
4089   size_t         _total_count;
4090   MetadataStats  _total_metadata;
4091   MetadataStats  _total_class;
4092 
4093   size_t         _total_anon_count;
4094   MetadataStats  _total_anon_metadata;
4095   MetadataStats  _total_anon_class;
4096 
4097 public:
4098   PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale = K)
4099   : _out(out), _scale(scale), _total_count(0), _total_anon_count(0) { }
4100 
4101   ~PrintCLDMetaspaceInfoClosure() {
4102     print_summary();
4103   }
4104 
4105   void do_cld(ClassLoaderData* cld) {
4106     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
4107 
4108     if (cld->is_unloading()) return;
4109     Metaspace* msp = cld->metaspace_or_null();
4110     if (msp == NULL) {
4111       return;
4112     }
4113 
4114     bool anonymous = false;
4115     if (cld->is_anonymous()) {
4116       _out->print_cr("ClassLoader: for anonymous class");
4117       anonymous = true;
4118     } else {
4119       ResourceMark rm;
4120       _out->print_cr("ClassLoader: %s", cld->loader_name());
4121     }
4122 
4123     print_metaspace(msp, anonymous);
4124     _out->cr();
4125   }
4126 
4127 private:
4128   void print_metaspace(Metaspace* msp, bool anonymous);
4129   void print_summary() const;
4130 };
4131 
4132 void PrintCLDMetaspaceInfoClosure::print_metaspace(Metaspace* msp, bool anonymous){
4133   assert(msp != NULL, "Sanity");
4134   SpaceManager* vsm = msp->vsm();
4136 
4137   size_t capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4138   size_t used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4139   size_t free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4140   size_t waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4141 
4142   _total_count ++;
4143   MetadataStats metadata_stats(capacity, used, free, waste);
4144   _total_metadata.add(metadata_stats);
4145 
4146   if (anonymous) {
4147     _total_anon_count ++;
4148     _total_anon_metadata.add(metadata_stats);
4149   }
4150 
4151   _out->print("  Metadata   ");
4152   metadata_stats.print_on(_out, _scale);
4153 
4154   if (Metaspace::using_class_space()) {
4155     vsm = msp->class_vsm();
4156 
4157     capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4158     used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4159     free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4160     waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4161 
4162     MetadataStats class_stats(capacity, used, free, waste);
4163     _total_class.add(class_stats);
4164 
4165     if (anonymous) {
4166       _total_anon_class.add(class_stats);
4167     }
4168 
4169     _out->print("  Class data ");
4170     class_stats.print_on(_out, _scale);
4171   }
4172 }
4173 
4174 void PrintCLDMetaspaceInfoClosure::print_summary() const {
4176   _out->cr();
4177   _out->print_cr("Summary:");
4178 
4179   MetadataStats total;
4180   total.add(_total_metadata);
4181   total.add(_total_class);
4182 
4183   _out->print("  Total class loaders=" SIZE_FORMAT_W(6) " ", _total_count);
4184   total.print_on(_out, _scale);
4185 
4186   _out->print("                    Metadata ");
4187   _total_metadata.print_on(_out, _scale);
4188 
4189   if (Metaspace::using_class_space()) {
4190     _out->print("                  Class data ");
4191     _total_class.print_on(_out, _scale);
4192   }
4193   _out->cr();
4194 
4195   MetadataStats total_anon;
4196   total_anon.add(_total_anon_metadata);
4197   total_anon.add(_total_anon_class);
4198 
4199   _out->print("For anonymous classes=" SIZE_FORMAT_W(6) " ", _total_anon_count);
4200   total_anon.print_on(_out, _scale);
4201 
4202   _out->print("                    Metadata ");
4203   _total_anon_metadata.print_on(_out, _scale);
4204 
4205   if (Metaspace::using_class_space()) {
4206     _out->print("                  Class data ");
4207     _total_anon_class.print_on(_out, _scale);
4208   }
4209 }
4210 
4211 void MetaspaceAux::print_metadata_for_nmt(outputStream* out, size_t scale) {
4212   const char* unit = scale_unit(scale);
4213   out->print_cr("Metaspaces:");
4214   out->print_cr("  Metadata space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
4215     reserved_bytes(Metaspace::NonClassType) / scale, unit,
4216     committed_bytes(Metaspace::NonClassType) / scale, unit);
4217   if (Metaspace::using_class_space()) {
4218     out->print_cr("  Class    space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
4219     reserved_bytes(Metaspace::ClassType) / scale, unit,
4220     committed_bytes(Metaspace::ClassType) / scale, unit);
4221   }
4222 
4223   out->cr();
4224   ChunkManager::print_all_chunkmanagers(out, scale);
4225 
4226   out->cr();
4227   out->print_cr("Per-classloader metadata:");
4228   out->cr();
4229 
4230   PrintCLDMetaspaceInfoClosure cl(out, scale);
4231   ClassLoaderDataGraph::cld_do(&cl);
4232 }
4233 
4234 
4235 // Dump global metaspace things from the end of ClassLoaderDataGraph
4236 void MetaspaceAux::dump(outputStream* out) {
4237   out->print_cr("All Metaspace:");
4238   out->print("data space: "); print_on(out, Metaspace::NonClassType);
4239   out->print("class space: "); print_on(out, Metaspace::ClassType);
4240   print_waste(out);
4241 }
4242 
4243 // Prints an ASCII representation of the given space.
4244 void MetaspaceAux::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
4245   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4246   const bool for_class = (mdtype == Metaspace::ClassType);
4247   VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
4248   if (vsl != NULL) {
4249     if (for_class) {
4250       if (!Metaspace::using_class_space()) {
4251         out->print_cr("No Class Space.");
4252         return;
4253       }
4254       out->print_raw("---- Metaspace Map (Class Space) ----");
4255     } else {
4256       out->print_raw("---- Metaspace Map (Non-Class Space) ----");
4257     }
4258     // Print legend:
4259     out->cr();
4260     out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
4261     out->cr();
4263     vsl->print_map(out);
4264     out->cr();
4265   }
4266 }
4267 
4268 void MetaspaceAux::verify_free_chunks() {
4269   Metaspace::chunk_manager_metadata()->verify();
4270   if (Metaspace::using_class_space()) {
4271     Metaspace::chunk_manager_class()->verify();
4272   }
4273 }
4274 
4275 void MetaspaceAux::verify_capacity() {
4276 #ifdef ASSERT
4277   size_t running_sum_capacity_bytes = capacity_bytes();
4278   // Verify the running sum of capacity against the value computed by walking all metaspaces.
4279   size_t capacity_in_use_bytes = capacity_bytes_slow();
4280   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
4281          "capacity_bytes() " SIZE_FORMAT
4282          " capacity_bytes_slow() " SIZE_FORMAT,
4283          running_sum_capacity_bytes, capacity_in_use_bytes);
4284   for (Metaspace::MetadataType i = Metaspace::ClassType;
4285        i < Metaspace::MetadataTypeCount;
4286        i = (Metaspace::MetadataType)(i + 1)) {
4287     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
4288     assert(capacity_bytes(i) == capacity_in_use_bytes,
4289            "capacity_bytes(%u) " SIZE_FORMAT
4290            " capacity_bytes_slow(%u) " SIZE_FORMAT,
4291            i, capacity_bytes(i), i, capacity_in_use_bytes);
4292   }
4293 #endif
4294 }
4295 
4296 void MetaspaceAux::verify_used() {
4297 #ifdef ASSERT
4298   size_t running_sum_used_bytes = used_bytes();
4299   // Verify the running sum of used bytes against the value computed by walking all metaspaces.
4300   size_t used_in_use_bytes = used_bytes_slow();
4301   assert(running_sum_used_bytes == used_in_use_bytes,
4302          "used_bytes() " SIZE_FORMAT
4303          " used_bytes_slow() " SIZE_FORMAT,
4304          running_sum_used_bytes, used_in_use_bytes);
4305   for (Metaspace::MetadataType i = Metaspace::ClassType;
4306        i < Metaspace::MetadataTypeCount;
4307        i = (Metaspace::MetadataType)(i + 1)) {
4308     size_t used_in_use_bytes = used_bytes_slow(i);
4309     assert(used_bytes(i) == used_in_use_bytes,
4310            "used_bytes(%u) " SIZE_FORMAT
4311            " used_bytes_slow(%u) " SIZE_FORMAT,
4312            i, used_bytes(i), i, used_in_use_bytes);
4313   }
4314 #endif
4315 }
4316 
4317 void MetaspaceAux::verify_metrics() {
4318   verify_capacity();
4319   verify_used();
4320 }
4321 
4322 
4323 // Metaspace methods
4324 
4325 size_t Metaspace::_first_chunk_word_size = 0;
4326 size_t Metaspace::_first_class_chunk_word_size = 0;
4327 
4328 size_t Metaspace::_commit_alignment = 0;
4329 size_t Metaspace::_reserve_alignment = 0;
4330 
4331 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
4332   initialize(lock, type);
4333 }
4334 
4335 Metaspace::~Metaspace() {
4336   delete _vsm;
4337   if (using_class_space()) {
4338     delete _class_vsm;
4339   }
4340 }
4341 
4342 VirtualSpaceList* Metaspace::_space_list = NULL;
4343 VirtualSpaceList* Metaspace::_class_space_list = NULL;
4344 
4345 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
4346 ChunkManager* Metaspace::_chunk_manager_class = NULL;
4347 
4348 #define VIRTUALSPACEMULTIPLIER 2
4349 
4350 #ifdef _LP64
4351 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
4352 
4353 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
4354   assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
4355   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
4356   // narrow_klass_base is the lower of the metaspace base and the cds base
4357   // (if cds is enabled).  The narrow_klass_shift depends on the distance
4358   // between the lower base and higher address.
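       //
       // Worked example (illustrative numbers only, not taken from any real
       // run): with CDS off, a metaspace_base of 28*G and a 1*G compressed
       // class space, higher_address is 29*G. That is within reach of
       // klass_encoding_max (UnscaledClassSpaceMax shifted by
       // LogKlassAlignmentInBytes, i.e. 32*G), so lower_base becomes zero;
       // but because 29*G exceeds UnscaledClassSpaceMax (4*G), the shift is
       // set to LogKlassAlignmentInBytes rather than zero.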
4359   address lower_base;
4360   address higher_address;
4361 #if INCLUDE_CDS
4362   if (UseSharedSpaces) {
4363     higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4364                           (address)(metaspace_base + compressed_class_space_size()));
4365     lower_base = MIN2(metaspace_base, cds_base);
4366   } else
4367 #endif
4368   {
4369     higher_address = metaspace_base + compressed_class_space_size();
4370     lower_base = metaspace_base;
4371 
4372     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
4373     // If compressed class space fits in lower 32G, we don't need a base.
4374     if (higher_address <= (address)klass_encoding_max) {
4375       lower_base = 0; // Effectively lower base is zero.
4376     }
4377   }
4378 
4379   Universe::set_narrow_klass_base(lower_base);
4380 
4381   // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
4382   // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
4383   // how the dump time narrow_klass_shift is set. Although CDS could also
4384   // work in zero-shift mode, it uses LogKlassAlignmentInBytes for the klass
4385   // shift to stay consistent with AOT, so archived Java heap objects can be
4386   // used at the same time as AOT code.
4387   if (!UseSharedSpaces
4388       && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
4389     Universe::set_narrow_klass_shift(0);
4390   } else {
4391     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
4392   }
4393   AOTLoader::set_narrow_klass_shift();
4394 }
4395 
4396 #if INCLUDE_CDS
4397 // Return TRUE if the specified metaspace_base and cds_base are close enough
4398 // to work with compressed klass pointers.
4399 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
4400   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
4401   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4402   address lower_base = MIN2((address)metaspace_base, cds_base);
4403   address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4404                                 (address)(metaspace_base + compressed_class_space_size()));
4405   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
4406 }
4407 #endif
4408 
4409 // Try to allocate the metaspace at the requested addr.
4410 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
4411   assert(!DumpSharedSpaces, "compressed class space is allocated by MetaspaceShared class.");
4412   assert(using_class_space(), "called improperly");
4413   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4414   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
4415          "Metaspace size is too big");
4416   assert_is_aligned(requested_addr, _reserve_alignment);
4417   assert_is_aligned(cds_base, _reserve_alignment);
4418   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
4419 
4420   // Don't use large pages for the class space.
4421   bool large_pages = false;
4422 
4423 #if !(defined(AARCH64) || defined(AIX))
4424   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
4425                                              _reserve_alignment,
4426                                              large_pages,
4427                                              requested_addr);
4428 #else // AARCH64 || AIX
4429   ReservedSpace metaspace_rs;
4430 
4431   // Our compressed klass pointers may fit nicely into the lower 32
4432   // bits.
4433   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
4434     metaspace_rs = ReservedSpace(compressed_class_space_size(),
4435                                  _reserve_alignment,
4436                                  large_pages,
4437                                  requested_addr);
4438   }
4439 
4440   if (! metaspace_rs.is_reserved()) {
4441     // Aarch64: Try to align metaspace so that we can decode a compressed
4442     // klass with a single MOVK instruction.  We can do this iff the
4443     // compressed class base is a multiple of 4G.
4444     // Aix: Search for a place where we can find memory. If we need to load
4445     // the base, 4G alignment is helpful, too.
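         // (Rationale: a base that is a multiple of 4*G has zero low 32 bits, so,
         // provided it lies below 2^48, decoding only needs to insert bits 32..47
         // of the base, which a single MOVK can do.)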
4446     size_t increment = AARCH64_ONLY(4*)G;
4447     for (char *a = align_up(requested_addr, increment);
4448          a < (char*)(1024*G);
4449          a += increment) {
4450       if (a == (char *)(32*G)) {
4451         // Go faster from here on. Zero-based is no longer possible.
4452         increment = 4*G;
4453       }
4454 
4455 #if INCLUDE_CDS
4456       if (UseSharedSpaces
4457           && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
4458         // We failed to find an aligned base that will reach.  Fall
4459         // back to using our requested addr.
4460         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4461                                      _reserve_alignment,
4462                                      large_pages,
4463                                      requested_addr);
4464         break;
4465       }
4466 #endif
4467 
4468       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4469                                    _reserve_alignment,
4470                                    large_pages,
4471                                    a);
4472       if (metaspace_rs.is_reserved())
4473         break;
4474     }
4475   }
4476 
4477 #endif // AARCH64 || AIX
4478 
4479   if (!metaspace_rs.is_reserved()) {
4480 #if INCLUDE_CDS
4481     if (UseSharedSpaces) {
4482       size_t increment = align_up(1*G, _reserve_alignment);
4483 
4484       // Keep trying to allocate the metaspace, increasing the requested_addr
4485       // by 1GB each time, until we reach an address that will no longer allow
4486       // use of CDS with compressed klass pointers.
4487       char *addr = requested_addr;
4488       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
4489              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
4490         addr = addr + increment;
4491         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4492                                      _reserve_alignment, large_pages, addr);
4493       }
4494     }
4495 #endif
4496     // If the allocation has not succeeded so far, try to allocate the space
4497     // anywhere. If that also fails, we are out of memory. At this point we cannot try allocating the
4498     // metaspace as if UseCompressedClassPointers is off because too much
4499     // initialization has happened that depends on UseCompressedClassPointers.
4500     // So, UseCompressedClassPointers cannot be turned off at this point.
4501     if (!metaspace_rs.is_reserved()) {
4502       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4503                                    _reserve_alignment, large_pages);
4504       if (!metaspace_rs.is_reserved()) {
4505         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
4506                                               compressed_class_space_size()));
4507       }
4508     }
4509   }
4510 
4511   // If we got here then the metaspace got allocated.
4512   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
4513 
4514 #if INCLUDE_CDS
4515   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
4516   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
4517     FileMapInfo::stop_sharing_and_unmap(
4518         "Could not allocate metaspace at a compatible address");
4519   }
4520 #endif
4521   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
4522                                   UseSharedSpaces ? (address)cds_base : 0);
4523 
4524   initialize_class_space(metaspace_rs);
4525 
4526   LogTarget(Trace, gc, metaspace) lt;
4527   if (lt.is_enabled()) {
4528     ResourceMark rm;
4529     LogStream ls(lt);
4530     print_compressed_class_space(&ls, requested_addr);
4531   }
4532 }
4533 
4534 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
4535   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
4536                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
4537   if (_class_space_list != NULL) {
4538     address base = (address)_class_space_list->current_virtual_space()->bottom();
4539     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
4540                  compressed_class_space_size(), p2i(base));
4541     if (requested_addr != 0) {
4542       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
4543     }
4544     st->cr();
4545   }
4546 }
4547 
4548 // For UseCompressedClassPointers the class space is reserved above the top of
4549 // the Java heap.  The argument passed in is at the base of the compressed space.
4550 void Metaspace::initialize_class_space(ReservedSpace rs) {
4551   // The reserved space size may be bigger because of alignment, esp with UseLargePages
4552   assert(rs.size() >= CompressedClassSpaceSize,
4553          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
4554   assert(using_class_space(), "Must be using class space");
4555   _class_space_list = new VirtualSpaceList(rs);
4556   _chunk_manager_class = new ChunkManager(true/*is_class*/);
4557 
4558   if (!_class_space_list->initialization_succeeded()) {
4559     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
4560   }
4561 }
4562 
4563 #endif
4564 
4565 void Metaspace::ergo_initialize() {
4566   if (DumpSharedSpaces) {
4567     // Using large pages when dumping the shared archive is currently not implemented.
4568     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
4569   }
4570 
4571   size_t page_size = os::vm_page_size();
4572   if (UseLargePages && UseLargePagesInMetaspace) {
4573     page_size = os::large_page_size();
4574   }
4575 
4576   _commit_alignment  = page_size;
4577   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
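       // For illustration only (typical values, not guaranteed on every
       // platform): with 4K pages and a 4K allocation granularity both
       // alignments end up as 4K; with 2M large pages enabled for Metaspace
       // they both become 2M.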
4578 
4579   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since that would
4580   // lose the information about whether MaxMetaspaceSize was set on the command line.
4581   // This information is needed later to conform to the specification of the
4582   // java.lang.management.MemoryUsage API.
4583   //
4584   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
4585   // globals.hpp to the aligned value, but this is not possible, since the
4586   // alignment depends on other flags being parsed.
4587   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
4588 
4589   if (MetaspaceSize > MaxMetaspaceSize) {
4590     MetaspaceSize = MaxMetaspaceSize;
4591   }
4592 
4593   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
4594 
4595   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
4596 
4597   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
4598   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
4599 
4600   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
4601 
4602   // Initial virtual space size will be calculated at global_initialize()
4603   size_t min_metaspace_sz =
4604       VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
4605   if (UseCompressedClassPointers) {
4606     if ((min_metaspace_sz + CompressedClassSpaceSize) >  MaxMetaspaceSize) {
4607       if (min_metaspace_sz >= MaxMetaspaceSize) {
4608         vm_exit_during_initialization("MaxMetaspaceSize is too small.");
4609       } else {
4610         FLAG_SET_ERGO(size_t, CompressedClassSpaceSize,
4611                       MaxMetaspaceSize - min_metaspace_sz);
4612       }
4613     }
4614   } else if (min_metaspace_sz >= MaxMetaspaceSize) {
4615     FLAG_SET_ERGO(size_t, InitialBootClassLoaderMetaspaceSize,
4616                   min_metaspace_sz);
4617   }
4618 
4619   set_compressed_class_space_size(CompressedClassSpaceSize);
4620 }
4621 
4622 void Metaspace::global_initialize() {
4623   MetaspaceGC::initialize();
4624 
4625 #if INCLUDE_CDS
4626   if (DumpSharedSpaces) {
4627     MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
4628   } else if (UseSharedSpaces) {
4629     // If any of the archived space fails to map, UseSharedSpaces
4630     // is reset to false. Fall through to the
4631     // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
4632     // metaspace.
4633     MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
4634   }
4635 
4636   if (!DumpSharedSpaces && !UseSharedSpaces)
4637 #endif // INCLUDE_CDS
4638   {
4639 #ifdef _LP64
4640     if (using_class_space()) {
4641       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
4642       allocate_metaspace_compressed_klass_ptrs(base, 0);
4643     }
4644 #endif // _LP64
4645   }
4646 
4647   // Initialize these before initializing the VirtualSpaceList
4648   _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
4649   _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
4650   // Make the first class chunk bigger than a medium chunk so it's not put
4651   // on the medium chunk list. The next chunk will be small and progress
4652   // from there. This size was determined empirically by running -version.
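       // For illustration only: assuming the default 1*G CompressedClassSpaceSize
       // and 8-byte words, (CompressedClassSpaceSize/BytesPerWord)*2 is 256*M
       // words, so the MIN2 below normally resolves to MediumChunk*6 (48*K words).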
4653   _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
4654                                      (CompressedClassSpaceSize/BytesPerWord)*2);
4655   _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
4656   // Arbitrarily set the initial virtual space to a multiple
4657   // of the boot class loader size.
4658   size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
4659   word_size = align_up(word_size, Metaspace::reserve_alignment_words());
4660 
4661   // Initialize the list of virtual spaces.
4662   _space_list = new VirtualSpaceList(word_size);
4663   _chunk_manager_metadata = new ChunkManager(false/*is_class*/);
4664 
4665   if (!_space_list->initialization_succeeded()) {
4666     vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
4667   }
4668 
4669   _tracer = new MetaspaceTracer();
4670 }
4671 
4672 void Metaspace::post_initialize() {
4673   MetaspaceGC::post_initialize();
4674 }
4675 
4676 void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) {
4677   Metachunk* chunk = get_initialization_chunk(type, mdtype);
4678   if (chunk != NULL) {
4679     // Add to this manager's list of chunks in use and current_chunk().
4680     get_space_manager(mdtype)->add_chunk(chunk, true);
4681   }
4682 }
4683 
4684 Metachunk* Metaspace::get_initialization_chunk(MetaspaceType type, MetadataType mdtype) {
4685   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
4686 
4687   // Get a chunk from the chunk freelist
4688   Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
4689 
4690   if (chunk == NULL) {
4691     chunk = get_space_list(mdtype)->get_new_chunk(chunk_word_size,
4692                                                   get_space_manager(mdtype)->medium_chunk_bunch());
4693   }
4694 
4695   return chunk;
4696 }
4697 
4698 void Metaspace::verify_global_initialization() {
4699   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
4700   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
4701 
4702   if (using_class_space()) {
4703     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
4704     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
4705   }
4706 }
4707 
4708 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
4709   verify_global_initialization();
4710 
4711   // Allocate SpaceManager for metadata objects.
4712   _vsm = new SpaceManager(NonClassType, type, lock);
4713 
4714   if (using_class_space()) {
4715     // Allocate SpaceManager for classes.
4716     _class_vsm = new SpaceManager(ClassType, type, lock);
4717   }
4718 
4719   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4720 
4721   // Allocate chunk for metadata objects
4722   initialize_first_chunk(type, NonClassType);
4723 
4724   // Allocate chunk for class metadata objects
4725   if (using_class_space()) {
4726     initialize_first_chunk(type, ClassType);
4727   }
4728 }
4729 
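     // Rounds the given word size up so that the corresponding byte size is
     // aligned to the ReservedSpace allocation alignment, then converts it
     // back to words.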
4730 size_t Metaspace::align_word_size_up(size_t word_size) {
4731   size_t byte_size = word_size * wordSize;
4732   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
4733 }
4734 
4735 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
4736   assert(!_frozen, "sanity");
4737   // Don't use class_vsm() unless UseCompressedClassPointers is true.
4738   if (is_class_space_allocation(mdtype)) {
4739     return  class_vsm()->allocate(word_size);
4740   } else {
4741     return  vsm()->allocate(word_size);
4742   }
4743 }
4744 
4745 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
4746   assert(!_frozen, "sanity");
4747   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
4748   assert(delta_bytes > 0, "Must be");
4749 
4750   size_t before = 0;
4751   size_t after = 0;
4752   MetaWord* res;
4753   bool incremented;
4754 
4755   // Each thread increments the HWM at most once. Even if the thread fails to increment
4756   // the HWM, an allocation is still attempted. This is because another thread must then
4757   // have incremented the HWM and therefore the allocation might still succeed.
4758   do {
4759     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
4760     res = allocate(word_size, mdtype);
4761   } while (!incremented && res == NULL);
4762 
4763   if (incremented) {
4764     tracer()->report_gc_threshold(before, after,
4765                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
4766     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
4767   }
4768 
4769   return res;
4770 }
4771 
4772 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
4773   if (mdtype == ClassType) {
4774     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
4775   } else {
4776     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
4777   }
4778 }
4779 
4780 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
4781   assert(!_frozen, "sanity");
4782   if (mdtype == ClassType) {
4783     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
4784   } else {
4785     return vsm()->sum_free_in_chunks_in_use();
4786   }
4787 }
4788 
4789 // Space capacity in the Metaspace.  It includes
4790 // space in the list of chunks from which allocations
4791 // have been made. It does not include space in the global freelist, nor
4792 // the space available in the dictionary, since that space
4793 // is already counted in some chunk.
4794 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
4795   if (mdtype == ClassType) {
4796     return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
4797   } else {
4798     return vsm()->sum_capacity_in_chunks_in_use();
4799   }
4800 }
4801 
4802 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
4803   return used_words_slow(mdtype) * BytesPerWord;
4804 }
4805 
4806 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
4807   return capacity_words_slow(mdtype) * BytesPerWord;
4808 }
4809 
4810 size_t Metaspace::allocated_blocks_bytes() const {
4811   return vsm()->allocated_blocks_bytes() +
4812       (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
4813 }
4814 
4815 size_t Metaspace::allocated_chunks_bytes() const {
4816   return vsm()->allocated_chunks_bytes() +
4817       (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
4818 }
4819 
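     // Returns a previously allocated metadata block to the owning SpaceManager
     // (the class-space SpaceManager for class metadata when a compressed class
     // space is in use) so the space can be reused by later allocations.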
4820 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
4821   assert(!_frozen, "sanity");
4822   assert(!SafepointSynchronize::is_at_safepoint()
4823          || Thread::current()->is_VM_thread(), "should be the VM thread");
4824 
4825   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
4826 
4827   if (is_class && using_class_space()) {
4828     class_vsm()->deallocate(ptr, word_size);
4829   } else {
4830     vsm()->deallocate(ptr, word_size);
4831   }
4832 }
4833 
4834 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
4835                               MetaspaceObj::Type type, TRAPS) {
4836   assert(!_frozen, "sanity");
4837   if (HAS_PENDING_EXCEPTION) {
4838     assert(false, "Should not allocate with exception pending");
4839     return NULL;  // caller does a CHECK_NULL too
4840   }
4841 
4842   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
4843         "ClassLoaderData::the_null_class_loader_data() should have been used.");
4844 
4845   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
4846 
4847   // Try to allocate metadata.
4848   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
4849 
4850   if (result == NULL) {
4851     if (DumpSharedSpaces && THREAD->is_VM_thread()) {
4852       tty->print_cr("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
4853           MetaspaceObj::type_name(type), word_size * BytesPerWord);
4854       vm_exit(1);
4855     }
4856 
4857     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
4858 
4859     // Allocation failed.
4860     if (is_init_completed()) {
4861       // Only start a GC if the bootstrapping has completed.
4862 
4863       // Try to clean out some memory and retry.
4864       result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
4865     }
4866   }
4867 
4868   if (result == NULL) {
4869     report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
4870   }
4871 
4872   // Zero initialize.
4873   Copy::fill_to_words((HeapWord*)result, word_size, 0);
4874 
4875   return result;
4876 }
4877 
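     // Returns the chunk size the class-space SpaceManager would choose for an
     // allocation request of the given word size.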
4878 size_t Metaspace::class_chunk_size(size_t word_size) {
4879   assert(using_class_space(), "Has to use class space");
4880   return class_vsm()->calc_chunk_size(word_size);
4881 }
4882 
4883 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
4884   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
4885 
4886   // If result is still null, we are out of memory.
4887   Log(gc, metaspace, freelist) log;
4888   if (log.is_info()) {
4889     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
4890              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
4891     ResourceMark rm;
4892     if (log.is_debug()) {
4893       if (loader_data->metaspace_or_null() != NULL) {
4894         LogStream ls(log.debug());
4895         loader_data->print_value_on(&ls);
4896       }
4897     }
4898     LogStream ls(log.info());
4899     MetaspaceAux::dump(&ls);
4900     MetaspaceAux::print_metaspace_map(&ls, mdtype);
4901     ChunkManager::print_all_chunkmanagers(&ls);
4902   }
4903 
4904   bool out_of_compressed_class_space = false;
4905   if (is_class_space_allocation(mdtype)) {
4906     Metaspace* metaspace = loader_data->metaspace_non_null();
4907     out_of_compressed_class_space =
4908       MetaspaceAux::committed_bytes(Metaspace::ClassType) +
4909       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
4910       CompressedClassSpaceSize;
4911   }
4912 
4913   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
4914   const char* space_string = out_of_compressed_class_space ?
4915     "Compressed class space" : "Metaspace";
4916 
4917   report_java_out_of_memory(space_string);
4918 
4919   if (JvmtiExport::should_post_resource_exhausted()) {
4920     JvmtiExport::post_resource_exhausted(
4921         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
4922         space_string);
4923   }
4924 
4925   if (!is_init_completed()) {
4926     vm_exit_during_initialization("OutOfMemoryError", space_string);
4927   }
4928 
4929   if (out_of_compressed_class_space) {
4930     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
4931   } else {
4932     THROW_OOP(Universe::out_of_memory_error_metaspace());
4933   }
4934 }
4935 
4936 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
4937   switch (mdtype) {
4938     case Metaspace::ClassType: return "Class";
4939     case Metaspace::NonClassType: return "Metadata";
4940     default:
4941       assert(false, "Got bad mdtype: %d", (int) mdtype);
4942       return NULL;
4943   }
4944 }
4945 
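     // Let the virtual space list of the given metadata type purge itself in
     // cooperation with its chunk manager, typically releasing virtual space
     // nodes that no longer contain any in-use chunks.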
4946 void Metaspace::purge(MetadataType mdtype) {
4947   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
4948 }
4949 
4950 void Metaspace::purge() {
4951   MutexLockerEx cl(SpaceManager::expand_lock(),
4952                    Mutex::_no_safepoint_check_flag);
4953   purge(NonClassType);
4954   if (using_class_space()) {
4955     purge(ClassType);
4956   }
4957 }
4958 
4959 void Metaspace::print_on(outputStream* out) const {
4960   // Print both class virtual space counts and metaspace.
4961   if (Verbose) {
4962     vsm()->print_on(out);
4963     if (using_class_space()) {
4964       class_vsm()->print_on(out);
4965     }
4966   }
4967 }
4968 
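     // Returns true if ptr points into metaspace, i.e. either into the shared
     // (CDS) metaspace or into one of the reserved metaspace virtual space lists.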
4969 bool Metaspace::contains(const void* ptr) {
4970   if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
4971     return true;
4972   }
4973   return contains_non_shared(ptr);
4974 }
4975 
4976 bool Metaspace::contains_non_shared(const void* ptr) {
4977   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
4978      return true;
4979   }
4980 
4981   return get_space_list(NonClassType)->contains(ptr);
4982 }
4983 
4984 void Metaspace::verify() {
4985   vsm()->verify();
4986   if (using_class_space()) {
4987     class_vsm()->verify();
4988   }
4989 }
4990 
4991 void Metaspace::dump(outputStream* const out) const {
4992   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
4993   vsm()->dump(out);
4994   if (using_class_space()) {
4995     out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
4996     class_vsm()->dump(out);
4997   }
4998 }
4999 
5000 #ifdef ASSERT
5001 static void do_verify_chunk(Metachunk* chunk) {
5002   guarantee(chunk != NULL, "Sanity");
5003   // Verify the chunk itself; then verify that it is consistent with the
5004   // occupancy map of its containing node.
5005   chunk->verify();
5006   VirtualSpaceNode* const vsn = chunk->container();
5007   OccupancyMap* const ocmap = vsn->occupancy_map();
5008   ocmap->verify_for_chunk(chunk);
5009 }
5010 #endif
5011 
5012 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
5013   chunk->set_is_tagged_free(!inuse);
5014   OccupancyMap* const ocmap = chunk->container()->occupancy_map();
5015   ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
5016 }
5017 
5018 /////////////// Unit tests ///////////////
5019 
5020 #ifndef PRODUCT
5021 
5022 class TestMetaspaceAuxTest : AllStatic {
5023  public:
5024   static void test_reserved() {
5025     size_t reserved = MetaspaceAux::reserved_bytes();
5026 
5027     assert(reserved > 0, "assert");
5028 
5029     size_t committed  = MetaspaceAux::committed_bytes();
5030     assert(committed <= reserved, "assert");
5031 
5032     size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
5033     assert(reserved_metadata > 0, "assert");
5034     assert(reserved_metadata <= reserved, "assert");
5035 
5036     if (UseCompressedClassPointers) {
5037       size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
5038       assert(reserved_class > 0, "assert");
5039       assert(reserved_class < reserved, "assert");
5040     }
5041   }
5042 
5043   static void test_committed() {
5044     size_t committed = MetaspaceAux::committed_bytes();
5045 
5046     assert(committed > 0, "assert");
5047 
5048     size_t reserved  = MetaspaceAux::reserved_bytes();
5049     assert(committed <= reserved, "assert");
5050 
5051     size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
5052     assert(committed_metadata > 0, "assert");
5053     assert(committed_metadata <= committed, "assert");
5054 
5055     if (UseCompressedClassPointers) {
5056       size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
5057       assert(committed_class > 0, "assert");
5058       assert(committed_class < committed, "assert");
5059     }
5060   }
5061 
5062   static void test_virtual_space_list_large_chunk() {
5063     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
5064     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
5065     // Use a size larger than VirtualSpaceSize (256k), with extra pages added
5066     // so that it is _not_ vm_allocation_granularity aligned on Windows.
5067     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
5068     large_size += (os::vm_page_size()/BytesPerWord);
5069     vs_list->get_new_chunk(large_size, 0);
5070   }
5071 
5072   static void test() {
5073     test_reserved();
5074     test_committed();
5075     test_virtual_space_list_large_chunk();
5076   }
5077 };
5078 
5079 void TestMetaspaceAux_test() {
5080   TestMetaspaceAuxTest::test();
5081 }
5082 
5083 class TestVirtualSpaceNodeTest {
5084   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
5085                                           size_t& num_small_chunks,
5086                                           size_t& num_specialized_chunks) {
5087     num_medium_chunks = words_left / MediumChunk;
5088     words_left = words_left % MediumChunk;
5089 
5090     num_small_chunks = words_left / SmallChunk;
5091     words_left = words_left % SmallChunk;
5092     // how many specialized chunks can we get?
5093     num_specialized_chunks = words_left / SpecializedChunk;
5094     assert(words_left % SpecializedChunk == 0, "should be nothing left");
5095   }
5096 
5097  public:
5098   static void test() {
5099     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
5100     const size_t vsn_test_size_words = MediumChunk  * 4;
5101     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
5102 
5103     // The chunk sizes must be multiples of each other, or this will fail
5104     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
5105     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
5106 
5107     { // No committed memory in VSN
5108       ChunkManager cm(false);
5109       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5110       vsn.initialize();
5111       vsn.retire(&cm);
5112       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
5113     }
5114 
5115     { // All of VSN is committed, half is used by chunks
5116       ChunkManager cm(false);
5117       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5118       vsn.initialize();
5119       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
5120       vsn.get_chunk_vs(MediumChunk);
5121       vsn.get_chunk_vs(MediumChunk);
5122       vsn.retire(&cm);
5123       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
5124       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
5125     }
5126 
5127     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
5128     // This doesn't work for systems with vm_page_size >= 16K.
5129     if (page_chunks < MediumChunk) {
5130       // 4 pages of VSN is committed, some is used by chunks
5131       ChunkManager cm(false);
5132       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5133 
5134       vsn.initialize();
5135       vsn.expand_by(page_chunks, page_chunks);
5136       vsn.get_chunk_vs(SmallChunk);
5137       vsn.get_chunk_vs(SpecializedChunk);
5138       vsn.retire(&cm);
5139 
5140       // committed - used = words left to retire
5141       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
5142 
5143       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5144       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5145 
5146       assert(num_medium_chunks == 0, "should not get any medium chunks");
5147       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5148       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5149     }
5150 
5151     { // Half of VSN is committed, a humongous chunk is used
5152       ChunkManager cm(false);
5153       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5154       vsn.initialize();
5155       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
5156       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
5157       vsn.retire(&cm);
5158 
5159       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
5160       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5161       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5162 
5163       assert(num_medium_chunks == 0, "should not get any medium chunks");
5164       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5165       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5166     }
5167 
5168   }
5169 
5170 #define assert_is_available_positive(word_size) \
5171   assert(vsn.is_available(word_size), \
5172          #word_size ": " PTR_FORMAT " bytes were not available in " \
5173          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5174          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5175 
5176 #define assert_is_available_negative(word_size) \
5177   assert(!vsn.is_available(word_size), \
5178          #word_size ": " PTR_FORMAT " bytes should not be available in " \
5179          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5180          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5181 
5182   static void test_is_available_positive() {
5183     // Reserve some memory.
5184     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5185     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5186 
5187     // Commit some memory.
5188     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5189     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5190     assert(expanded, "Failed to commit");
5191 
5192     // Check that is_available accepts the committed size.
5193     assert_is_available_positive(commit_word_size);
5194 
5195     // Check that is_available accepts half the committed size.
5196     size_t expand_word_size = commit_word_size / 2;
5197     assert_is_available_positive(expand_word_size);
5198   }
5199 
5200   static void test_is_available_negative() {
5201     // Reserve some memory.
5202     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5203     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5204 
5205     // Commit some memory.
5206     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5207     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5208     assert(expanded, "Failed to commit");
5209 
5210     // Check that is_available doesn't accept a too large size.
5211     size_t two_times_commit_word_size = commit_word_size * 2;
5212     assert_is_available_negative(two_times_commit_word_size);
5213   }
5214 
5215   static void test_is_available_overflow() {
5216     // Reserve some memory.
5217     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5218     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5219 
5220     // Commit some memory.
5221     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5222     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5223     assert(expanded, "Failed to commit");
5224 
5225     // Calculate a size that will overflow the virtual space size.
5226     void* virtual_space_max = (void*)(uintptr_t)-1;
5227     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
5228     size_t overflow_size = bottom_to_max + BytesPerWord;
5229     size_t overflow_word_size = overflow_size / BytesPerWord;
5230 
5231     // Check that is_available can handle the overflow.
5232     assert_is_available_negative(overflow_word_size);
5233   }
5234 
5235   static void test_is_available() {
5236     TestVirtualSpaceNodeTest::test_is_available_positive();
5237     TestVirtualSpaceNodeTest::test_is_available_negative();
5238     TestVirtualSpaceNodeTest::test_is_available_overflow();
5239   }
5240 };
5241 
5242 // The following test is placed here instead of a gtest / unittest file
5243 // because the ChunkManager class is only available in this file.
5244 void ChunkManager_test_list_index() {
5245   ChunkManager manager(true);
5246 
5247   // Test previous bug where a query for a humongous class metachunk,
5248   // incorrectly matched the non-class medium metachunk size.
5249   {
5250     assert(MediumChunk > ClassMediumChunk, "Precondition for test");
5251 
5252     ChunkIndex index = manager.list_index(MediumChunk);
5253 
5254     assert(index == HumongousIndex,
5255            "Requested size is larger than ClassMediumChunk,"
5256            " so should return HumongousIndex. Got index: %d", (int)index);
5257   }
5258 
5259   // Check the specified sizes as well.
5260   {
5261     ChunkIndex index = manager.list_index(ClassSpecializedChunk);
5262     assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
5263   }
5264   {
5265     ChunkIndex index = manager.list_index(ClassSmallChunk);
5266     assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
5267   }
5268   {
5269     ChunkIndex index = manager.list_index(ClassMediumChunk);
5270     assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
5271   }
5272   {
5273     ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
5274     assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index);
5275   }
5276 }
5277 
5278 #endif // !PRODUCT
5279 
5280 #ifdef ASSERT
5281 
5282 // The following test is placed here instead of a gtest / unittest file
5283 // because the ChunkManager class is only available in this file.
5284 class SpaceManagerTest : AllStatic {
5285   friend void SpaceManager_test_adjust_initial_chunk_size();
5286 
5287   static void test_adjust_initial_chunk_size(bool is_class) {
5288     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
5289     const size_t normal   = SpaceManager::small_chunk_size(is_class);
5290     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
5291 
5292 #define test_adjust_initial_chunk_size(value, expected, is_class_value)          \
5293     do {                                                                         \
5294       size_t v = value;                                                          \
5295       size_t e = expected;                                                       \
5296       size_t a = SpaceManager::adjust_initial_chunk_size(v, (is_class_value));   \
5297       assert(a == e, "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, a);       \
5298     } while (0)
5299 
5300     // Smallest (specialized)
5301     test_adjust_initial_chunk_size(1,            smallest, is_class);
5302     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
5303     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
5304 
5305     // Small
5306     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
5307     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
5308     test_adjust_initial_chunk_size(normal,       normal, is_class);
5309 
5310     // Medium
5311     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
5312     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
5313     test_adjust_initial_chunk_size(medium,     medium, is_class);
5314 
5315     // Humongous
5316     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
5317 
5318 #undef test_adjust_initial_chunk_size
5319   }
5320 
5321   static void test_adjust_initial_chunk_size() {
5322     test_adjust_initial_chunk_size(false);
5323     test_adjust_initial_chunk_size(true);
5324   }
5325 };
5326 
5327 void SpaceManager_test_adjust_initial_chunk_size() {
5328   SpaceManagerTest::test_adjust_initial_chunk_size();
5329 }
5330 
5331 #endif // ASSERT
5332 
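     // The plain structs and extern test_metaspace_* functions below are
     // presumably consumed by test code outside this file, which cannot see the
     // file-local ChunkManager internals directly.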
5333 struct chunkmanager_statistics_t {
5334   int num_specialized_chunks;
5335   int num_small_chunks;
5336   int num_medium_chunks;
5337   int num_humongous_chunks;
5338 };
5339 
5340 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
5341   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
5342   ChunkManager::ChunkManagerStatistics stat;
5343   chunk_manager->get_statistics(&stat);
5344   out->num_specialized_chunks = (int)stat.num_by_type[SpecializedIndex];
5345   out->num_small_chunks = (int)stat.num_by_type[SmallIndex];
5346   out->num_medium_chunks = (int)stat.num_by_type[MediumIndex];
5347   out->num_humongous_chunks = (int)stat.num_humongous_chunks;
5348 }
5349 
5350 struct chunk_geometry_t {
5351   size_t specialized_chunk_word_size;
5352   size_t small_chunk_word_size;
5353   size_t medium_chunk_word_size;
5354 };
5355 
5356 extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
5357   if (mdType == Metaspace::NonClassType) {
5358     out->specialized_chunk_word_size = SpecializedChunk;
5359     out->small_chunk_word_size = SmallChunk;
5360     out->medium_chunk_word_size = MediumChunk;
5361   } else {
5362     out->specialized_chunk_word_size = ClassSpecializedChunk;
5363     out->small_chunk_word_size = ClassSmallChunk;
5364     out->medium_chunk_word_size = ClassMediumChunk;
5365   }
5366 }
5367