1 /*
   2  * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "gc/shared/gcLocker.hpp"
  29 #include "logging/log.hpp"
  30 #include "logging/logStream.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/binaryTreeDictionary.hpp"
  33 #include "memory/filemap.hpp"
  34 #include "memory/freeList.hpp"
  35 #include "memory/metachunk.hpp"
  36 #include "memory/metaspace.hpp"
  37 #include "memory/metaspaceGCThresholdUpdater.hpp"
  38 #include "memory/metaspaceShared.hpp"
  39 #include "memory/metaspaceTracer.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "memory/universe.hpp"
  42 #include "runtime/atomic.hpp"
  43 #include "runtime/globals.hpp"
  44 #include "runtime/init.hpp"
  45 #include "runtime/java.hpp"
  46 #include "runtime/mutex.hpp"
  47 #include "runtime/orderAccess.inline.hpp"
  48 #include "services/memTracker.hpp"
  49 #include "services/memoryService.hpp"
  50 #include "utilities/align.hpp"
  51 #include "utilities/copy.hpp"
  52 #include "utilities/debug.hpp"
  53 #include "utilities/macros.hpp"
  54 
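     // Binary-tree-backed free lists: BlockTreeDictionary holds deallocated
     // metadata blocks (see BlockFreelist), ChunkTreeDictionary holds free
     // humongous chunks (see ChunkManager).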
  55 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  56 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
  57 
  58 // Set this constant to enable slow integrity checking of the free chunk lists
  59 const bool metaspace_slow_verify = DEBUG_ONLY(true) NOT_DEBUG(false);
  60 
  61 // Helper function that performs a number of consistency checks on a chunk.
  62 DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
  63 
  64 // Given a Metachunk, update its in-use information (both in the
  65 // chunk and the occupancy map).
  66 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
  67 
  68 size_t const allocation_from_dictionary_limit = 4 * K;
  69 
  70 MetaWord* last_allocated = 0;
  71 
  72 size_t Metaspace::_compressed_class_space_size;
  73 const MetaspaceTracer* Metaspace::_tracer = NULL;
  74 
  75 DEBUG_ONLY(bool Metaspace::_frozen = false;)
  76 
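     // Fixed sizes for the non-humongous chunk types; the Class* values are
     // used for the compressed class space.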
  77 enum ChunkSizes {    // in words.
  78   ClassSpecializedChunk = 128,
  79   SpecializedChunk = 128,
  80   ClassSmallChunk = 256,
  81   SmallChunk = 512,
  82   ClassMediumChunk = 4 * K,
  83   MediumChunk = 8 * K
  84 };
  85 
  86 // Returns the word size of a non-humongous chunk of the given type.
  87 size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
  88   assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
  89   size_t size = 0;
  90   if (is_class) {
  91     switch(chunktype) {
  92       case SpecializedIndex: size = ClassSpecializedChunk; break;
  93       case SmallIndex: size = ClassSmallChunk; break;
  94       case MediumIndex: size = ClassMediumChunk; break;
  95       default:
  96         ShouldNotReachHere();
  97     }
  98   } else {
  99     switch(chunktype) {
 100       case SpecializedIndex: size = SpecializedChunk; break;
 101       case SmallIndex: size = SmallChunk; break;
 102       case MediumIndex: size = MediumChunk; break;
 103       default:
 104         ShouldNotReachHere();
 105     }
 106   }
 107   return size;
 108 }
 109 
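     // Maps a chunk word size back to its chunk type. Sizes larger than the
     // medium chunk size map to HumongousIndex.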
 110 ChunkIndex get_chunk_type_by_size(size_t size, bool is_class) {
 111   if (is_class) {
 112     if (size == ClassSpecializedChunk) {
 113       return SpecializedIndex;
 114     } else if (size == ClassSmallChunk) {
 115       return SmallIndex;
 116     } else if (size == ClassMediumChunk) {
 117       return MediumIndex;
 118     } else if (size > ClassMediumChunk) {
 119       assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
 120       return HumongousIndex;
 121     }
 122   } else {
 123     if (size == SpecializedChunk) {
 124       return SpecializedIndex;
 125     } else if (size == SmallChunk) {
 126       return SmallIndex;
 127     } else if (size == MediumChunk) {
 128       return MediumIndex;
 129     } else if (size > MediumChunk) {
 130       assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
 131       return HumongousIndex;
 132     }
 133   }
 134   ShouldNotReachHere();
 135   return (ChunkIndex)-1;
 136 }
 137 
 138 
 139 static ChunkIndex next_chunk_index(ChunkIndex i) {
 140   assert(i < NumberOfInUseLists, "Out of bound");
 141   return (ChunkIndex) (i+1);
 142 }
 143 
 144 static ChunkIndex prev_chunk_index(ChunkIndex i) {
 145   assert(i > ZeroIndex, "Out of bound");
 146   return (ChunkIndex) (i-1);
 147 }
 148 
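     // Returns a human-readable unit string for a byte scale factor (1, K, M or G).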
 149 static const char* scale_unit(size_t scale) {
 150   switch(scale) {
 151     case 1: return "BYTES";
 152     case K: return "KB";
 153     case M: return "MB";
 154     case G: return "GB";
 155     default:
 156       ShouldNotReachHere();
 157       return NULL;
 158   }
 159 }
 160 
 161 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 162 uint MetaspaceGC::_shrink_factor = 0;
 163 bool MetaspaceGC::_should_concurrent_collect = false;
 164 
 165 typedef class FreeList<Metachunk> ChunkList;
 166 
 167 // Manages the global free lists of chunks.
 168 class ChunkManager : public CHeapObj<mtInternal> {
 169   friend class TestVirtualSpaceNodeTest;
 170 
 171   // Free list of chunks of different sizes.
 172   //   SpecializedChunk
 173   //   SmallChunk
 174   //   MediumChunk
 175   ChunkList _free_chunks[NumberOfFreeLists];
 176 
 177   // Whether or not this is the class chunkmanager.
 178   const bool _is_class;
 179 
 180   // Return non-humongous chunk list by its index.
 181   ChunkList* free_chunks(ChunkIndex index);
 182 
 183   // Returns non-humongous chunk list for the given chunk word size.
 184   ChunkList* find_free_chunks_list(size_t word_size);
 185 
 186   //   HumongousChunk
 187   ChunkTreeDictionary _humongous_dictionary;
 188 
 189   // Returns the humongous chunk dictionary.
 190   ChunkTreeDictionary* humongous_dictionary() {
 191     return &_humongous_dictionary;
 192   }
 193 
 194   // Size, in metaspace words, of all chunks managed by this ChunkManager
 195   size_t _free_chunks_total;
 196   // Number of chunks in this ChunkManager
 197   size_t _free_chunks_count;
 198 
 199   // Update counters after a chunk was added or removed.
 200   void account_for_added_chunk(const Metachunk* c);
 201   void account_for_removed_chunk(const Metachunk* c);
 202 
 203   // Debug support
 204 
 205   size_t sum_free_chunks();
 206   size_t sum_free_chunks_count();
 207 
 208   void locked_verify_free_chunks_total();
 209   void slow_locked_verify_free_chunks_total() {
 210     if (metaspace_slow_verify) {
 211       locked_verify_free_chunks_total();
 212     }
 213   }
 214   void locked_verify_free_chunks_count();
 215   void slow_locked_verify_free_chunks_count() {
 216     if (metaspace_slow_verify) {
 217       locked_verify_free_chunks_count();
 218     }
 219   }
 220   void verify_free_chunks_count();
 221 
 222   // Given a pointer to a chunk, attempts to merge it with neighboring
 223   // free chunks to form a bigger chunk. Returns true if successful.
 224   bool attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type);
 225 
 226   // Helper for chunk coalescing:
 227   //  Given an address range with 1-n chunks which are all supposed to be
 228   //  free and hence currently managed by this ChunkManager, remove them
 229   //  from this ChunkManager and mark them as invalid.
 230   // - This does not correct the occupancy map.
 231   // - This does not adjust the counters in ChunkManager.
 232   // - Does not adjust container count counter in containing VirtualSpaceNode.
 233   // Returns number of chunks removed.
 234   int remove_chunks_in_area(MetaWord* p, size_t word_size);
 235 
 236  public:
 237 
 238   struct ChunkManagerStatistics {
 239     size_t num_by_type[NumberOfFreeLists];
 240     size_t single_size_by_type[NumberOfFreeLists];
 241     size_t total_size_by_type[NumberOfFreeLists];
 242     size_t num_humongous_chunks;
 243     size_t total_size_humongous_chunks;
 244   };
 245 
 246   void locked_get_statistics(ChunkManagerStatistics* stat) const;
 247   void get_statistics(ChunkManagerStatistics* stat) const;
 248   static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale);
 249 
 250 
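       // Sets up the three non-humongous free lists with their fixed chunk
       // sizes (class or non-class, depending on is_class).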
 251   ChunkManager(bool is_class)
 252       : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
 253     _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
 254     _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
 255     _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
 256   }
 257 
 258   // Allocate (remove) a chunk from the global freelist.
 259   Metachunk* chunk_freelist_allocate(size_t word_size);
 260 
 261   // Map a size to a list index assuming that there are lists
 262   // for special, small, medium, and humongous chunks.
 263   ChunkIndex list_index(size_t size);
 264 
 265   // Map a given index to the chunk size.
 266   size_t size_by_index(ChunkIndex index) const;
 267 
 268   bool is_class() const { return _is_class; }
 269 
 270   // Convenience accessors.
 271   size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); }
 272   size_t small_chunk_word_size() const { return size_by_index(SmallIndex); }
 273   size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); }
 274 
 275   // Take a chunk from the ChunkManager. The chunk is expected to be in
 276   // the chunk manager (the freelist if non-humongous, the dictionary if
 277   // humongous).
 278   void remove_chunk(Metachunk* chunk);
 279 
 280   // Return a single chunk of type index to the ChunkManager.
 281   void return_single_chunk(ChunkIndex index, Metachunk* chunk);
 282 
 283   // Add the simple linked list of chunks to the freelist of chunks
 284   // of type index.
 285   void return_chunk_list(ChunkIndex index, Metachunk* chunk);
 286 
 287   // Total of the space in the free chunks list
 288   size_t free_chunks_total_words();
 289   size_t free_chunks_total_bytes();
 290 
 291   // Number of chunks in the free chunks list
 292   size_t free_chunks_count();
 293 
 294   // Remove from a list by size.  Selects list based on size of chunk.
 295   Metachunk* free_chunks_get(size_t chunk_word_size);
 296 
 297 #define index_bounds_check(index)                                         \
 298   assert(index == SpecializedIndex ||                                     \
 299          index == SmallIndex ||                                           \
 300          index == MediumIndex ||                                          \
 301          index == HumongousIndex, "Bad index: %d", (int) index)
 302 
 303   size_t num_free_chunks(ChunkIndex index) const {
 304     index_bounds_check(index);
 305 
 306     if (index == HumongousIndex) {
 307       return _humongous_dictionary.total_free_blocks();
 308     }
 309 
 310     ssize_t count = _free_chunks[index].count();
 311     return count == -1 ? 0 : (size_t) count;
 312   }
 313 
 314   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 315     index_bounds_check(index);
 316 
 317     size_t word_size = 0;
 318     if (index == HumongousIndex) {
 319       word_size = _humongous_dictionary.total_size();
 320     } else {
 321       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 322       word_size = size_per_chunk_in_words * num_free_chunks(index);
 323     }
 324 
 325     return word_size * BytesPerWord;
 326   }
 327 
 328   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 329     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 330                                          num_free_chunks(SmallIndex),
 331                                          num_free_chunks(MediumIndex),
 332                                          num_free_chunks(HumongousIndex),
 333                                          size_free_chunks_in_bytes(SpecializedIndex),
 334                                          size_free_chunks_in_bytes(SmallIndex),
 335                                          size_free_chunks_in_bytes(MediumIndex),
 336                                          size_free_chunks_in_bytes(HumongousIndex));
 337   }
 338 
 339   // Debug support
 340   void verify();
 341   void slow_verify() {
 342     if (metaspace_slow_verify) {
 343       verify();
 344     }
 345   }
 346   void locked_verify();
 347   void slow_locked_verify() {
 348     if (metaspace_slow_verify) {
 349       locked_verify();
 350     }
 351   }
 352   void verify_free_chunks_total();
 353 
 354   void locked_print_free_chunks(outputStream* st);
 355   void locked_print_sum_free_chunks(outputStream* st);
 356 
 357   void print_on(outputStream* st) const;
 358 
 359   // Prints composition for both non-class and (if available)
 360   // class chunk manager.
 361   static void print_all_chunkmanagers(outputStream* out, size_t scale = 1);
 362 };
 363 
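     // SmallBlocks keeps an array of free lists for small deallocated metadata
     // blocks, one list per block size between _small_block_min_size and
     // _small_block_max_size words.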
 364 class SmallBlocks : public CHeapObj<mtClass> {
 365   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 366   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 367 
 368  private:
 369   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 370 
 371   FreeList<Metablock>& list_at(size_t word_size) {
 372     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 373     return _small_lists[word_size - _small_block_min_size];
 374   }
 375 
 376  public:
 377   SmallBlocks() {
 378     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 379       uint k = i - _small_block_min_size;
 380       _small_lists[k].set_size(i);
 381     }
 382   }
 383 
 384   size_t total_size() const {
 385     size_t result = 0;
 386     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 387       uint k = i - _small_block_min_size;
 388       result = result + _small_lists[k].count() * _small_lists[k].size();
 389     }
 390     return result;
 391   }
 392 
 393   static uint small_block_max_size() { return _small_block_max_size; }
 394   static uint small_block_min_size() { return _small_block_min_size; }
 395 
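       // Returns a block of exactly word_size words from the matching free
       // list, or NULL if that list is empty.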
 396   MetaWord* get_block(size_t word_size) {
 397     if (list_at(word_size).count() > 0) {
 398       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 399       return new_block;
 400     } else {
 401       return NULL;
 402     }
 403   }
 404   void return_block(Metablock* free_chunk, size_t word_size) {
 405     list_at(word_size).return_chunk_at_head(free_chunk, false);
 406     assert(list_at(word_size).count() > 0, "Should have a chunk");
 407   }
 408 
 409   void print_on(outputStream* st) const {
 410     st->print_cr("SmallBlocks:");
 411     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 412       uint k = i - _small_block_min_size;
 413       st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
 414     }
 415   }
 416 };
 417 
 418 // Used to manage the free list of Metablocks (a block corresponds
 419 // to the allocation of a quantum of metadata).
 420 class BlockFreelist : public CHeapObj<mtClass> {
 421   BlockTreeDictionary* const _dictionary;
 422   SmallBlocks* _small_blocks;
 423 
 424   // Only allocate and split from freelist if the size of the allocation
 425   // is at least 1/4th the size of the available block.
 426   const static int WasteMultiplier = 4;
 427 
 428   // Accessors
 429   BlockTreeDictionary* dictionary() const { return _dictionary; }
 430   SmallBlocks* small_blocks() {
 431     if (_small_blocks == NULL) {
 432       _small_blocks = new SmallBlocks();
 433     }
 434     return _small_blocks;
 435   }
 436 
 437  public:
 438   BlockFreelist();
 439   ~BlockFreelist();
 440 
 441   // Get and return a block to the free list
 442   MetaWord* get_block(size_t word_size);
 443   void return_block(MetaWord* p, size_t word_size);
 444 
 445   size_t total_size() const  {
 446     size_t result = dictionary()->total_size();
 447     if (_small_blocks != NULL) {
 448       result = result + _small_blocks->total_size();
 449     }
 450     return result;
 451   }
 452 
 453   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 454   void print_on(outputStream* st) const;
 455 };
 456 
 457 // Helper for the occupancy bitmap: a type trait that yields an all-bits-set constant of an unsigned type.
 458 template <typename T> struct all_ones  { static const T value; };
 459 template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
 460 template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
 461 
 462 // The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
 463 // keeps information about
 464 // - where a chunk starts
 465 // - whether a chunk is in-use or free
 466 // Each bit in this bitmap represents one region of memory the size of the
 467 // smallest chunk (SpecializedChunk or ClassSpecializedChunk).
 468 class OccupancyMap : public CHeapObj<mtInternal> {
 469 
 470   // The address range this map covers.
 471   const MetaWord* const _reference_address;
 472   const size_t _word_size;
 473 
 474   // The word size of a specialized chunk, aka the number of words one
 475   // bit in this map represents.
 476   const size_t _smallest_chunk_word_size;
 477 
 478   // map data
 479   // Data are organized in two bit layers:
 480   // The first layer is the chunk-start-map. Here, a bit is set to mark
 481   // the corresponding region as the head of a chunk.
 482   // The second layer is the in-use-map. Here, a set bit indicates that
 483   // the corresponding region belongs to a chunk which is in use.
 484   uint8_t* _map[2];
 485 
 486   enum { layer_chunk_start_map = 0, layer_in_use_map = 1 };
 487 
 488   // length, in bytes, of bitmap data
 489   size_t _map_size;
 490 
 491   // Returns true if bit at position pos at bit-layer layer is set.
 492   bool get_bit_at_position(unsigned pos, unsigned layer) const {
 493     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 494     const unsigned byteoffset = pos / 8;
 495     assert(byteoffset < _map_size,
 496            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 497     const unsigned mask = 1 << (pos % 8);
 498     return (_map[layer][byteoffset] & mask) > 0;
 499   }
 500 
 501   // Changes bit at position pos at bit-layer layer to value v.
 502   void set_bit_at_position(unsigned pos, unsigned layer, bool v) {
 503     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 504     const unsigned byteoffset = pos / 8;
 505     assert(byteoffset < _map_size,
 506            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 507     const unsigned mask = 1 << (pos % 8);
 508     if (v) {
 509       _map[layer][byteoffset] |= mask;
 510     } else {
 511       _map[layer][byteoffset] &= ~mask;
 512     }
 513   }
 514 
 515   // Optimized case of is_any_bit_set_in_region for 32/64bit aligned access:
 516   // pos is 32/64 aligned and num_bits is 32/64.
 517   // This is the typical case when coalescing to medium chunks, whose size is
 518   // 32 or 64 times the specialized chunk size (depending on class or non class
 519   // case), so they occupy 32 or 64 bits which should be 32/64-bit aligned, because
 520   // chunks are chunk-size aligned.
 521   template <typename T>
 522   bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const {
 523     assert(_map_size > 0, "not initialized");
 524     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 525     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos);
 526     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits);
 527     const size_t byteoffset = pos / 8;
 528     assert(byteoffset <= (_map_size - sizeof(T)),
 529            "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 530     const T w = *(T*)(_map[layer] + byteoffset);
 531     return w > 0 ? true : false;
 532   }
 533 
 534   // Returns true if any bit in region [pos1, pos1 + num_bits) is set in bit-layer layer.
 535   bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const {
 536     if (pos % 32 == 0 && num_bits == 32) {
 537       return is_any_bit_set_in_region_3264<uint32_t>(pos, num_bits, layer);
 538     } else if (pos % 64 == 0 && num_bits == 64) {
 539       return is_any_bit_set_in_region_3264<uint64_t>(pos, num_bits, layer);
 540     } else {
 541       for (unsigned n = 0; n < num_bits; n ++) {
 542         if (get_bit_at_position(pos + n, layer)) {
 543           return true;
 544         }
 545       }
 546     }
 547     return false;
 548   }
 549 
 550   // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer.
 551   bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const {
 552     assert(word_size % _smallest_chunk_word_size == 0,
 553         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 554     const unsigned pos = get_bitpos_for_address(p);
 555     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 556     return is_any_bit_set_in_region(pos, num_bits, layer);
 557   }
 558 
 559   // Optimized case of set_bits_of_region for 32/64bit aligned access:
 560   // pos is 32/64 aligned and num_bits is 32/64.
 561   // This is the typical case when coalescing to medium chunks, whose size
 562   // is 32 or 64 times the specialized chunk size (depending on class or non
 563   // class case), so they occupy 32 or 64 bits which should be 32/64-bit aligned,
 564   // because chunks are chunk-size aligned.
 565   template <typename T>
 566   void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 567     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).",
 568            (unsigned)(sizeof(T) * 8), pos);
 569     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.",
 570            num_bits, (unsigned)(sizeof(T) * 8));
 571     const size_t byteoffset = pos / 8;
 572     assert(byteoffset <= (_map_size - sizeof(T)),
 573            "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 574     T* const pw = (T*)(_map[layer] + byteoffset);
 575     *pw = v ? all_ones<T>::value : (T) 0;
 576   }
 577 
 578   // Set all bits in a region starting at pos to a value.
 579   void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 580     assert(_map_size > 0, "not initialized");
 581     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 582     if (pos % 32 == 0 && num_bits == 32) {
 583       set_bits_of_region_T<uint32_t>(pos, num_bits, layer, v);
 584     } else if (pos % 64 == 0 && num_bits == 64) {
 585       set_bits_of_region_T<uint64_t>(pos, num_bits, layer, v);
 586     } else {
 587       for (unsigned n = 0; n < num_bits; n ++) {
 588         set_bit_at_position(pos + n, layer, v);
 589       }
 590     }
 591   }
 592 
 593   // Helper: sets all bits in a region [p, p+word_size).
 594   void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) {
 595     assert(word_size % _smallest_chunk_word_size == 0,
 596         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 597     const unsigned pos = get_bitpos_for_address(p);
 598     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 599     set_bits_of_region(pos, num_bits, layer, v);
 600   }
 601 
 602   // Helper: given an address, return the bit position representing that address.
 603   unsigned get_bitpos_for_address(const MetaWord* p) const {
 604     assert(_reference_address != NULL, "not initialized");
 605     assert(p >= _reference_address && p < _reference_address + _word_size,
 606            "Address %p out of range for occupancy map [%p..%p).",
 607             p, _reference_address, _reference_address + _word_size);
 608     assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)),
 609            "Address not aligned (%p).", p);
 610     const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size;
 611     assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity.");
 612     return (unsigned) d;
 613   }
 614 
 615  public:
 616 
 617   OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) :
 618     _reference_address(reference_address), _word_size(word_size),
 619     _smallest_chunk_word_size(smallest_chunk_word_size) {
 620     assert(reference_address != NULL, "invalid reference address");
 621     assert(is_aligned(reference_address, smallest_chunk_word_size),
 622            "Reference address not aligned to smallest chunk size.");
 623     assert(is_aligned(word_size, smallest_chunk_word_size),
 624            "Word_size shall be a multiple of the smallest chunk size.");
 625     // Calculate bitmap size: one bit per smallest_chunk_word_size'd area.
 626     size_t num_bits = word_size / smallest_chunk_word_size;
 627     _map_size = (num_bits + 7) / 8;
 628     assert(_map_size * 8 >= num_bits, "sanity");
 629     _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal);
 630     _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal);
 631     assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed.");
 632     memset(_map[1], 0, _map_size);
 633     memset(_map[0], 0, _map_size);
 634     // Sanity check: the first and last possible chunk start addresses in
 635     // the covered range shall map to the first and last bit in the bitmap.
 636     assert(get_bitpos_for_address(reference_address) == 0,
 637       "First chunk address in range must map to first bit in bitmap.");
 638     assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1,
 639       "Last chunk address in range must map to last bit in bitmap.");
 640   }
 641 
 642   ~OccupancyMap() {
 643     os::free(_map[0]);
 644     os::free(_map[1]);
 645   }
 646 
 647   // Returns true if a chunk starts at address p.
 648   bool chunk_starts_at_address(MetaWord* p) const {
 649     const unsigned pos = get_bitpos_for_address(p);
 650     return get_bit_at_position(pos, layer_chunk_start_map);
 651   }
 652 
 653   void set_chunk_starts_at_address(MetaWord* p, bool v) {
 654     const unsigned pos = get_bitpos_for_address(p);
 655     set_bit_at_position(pos, layer_chunk_start_map, v);
 656   }
 657 
 658   // Removes all chunk-start-bits inside a region, typically as a
 659   // result of chunks being coalesced.
 660   void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) {
 661     set_bits_of_region(p, word_size, layer_chunk_start_map, false);
 662   }
 663 
 664   // Returns true if there are live (in-use) chunks in the region limited
 665   // by [p, p+word_size).
 666   bool is_region_in_use(MetaWord* p, size_t word_size) const {
 667     return is_any_bit_set_in_region(p, word_size, layer_in_use_map);
 668   }
 669 
 670   // Marks the region starting at p with the size word_size as in use
 671   // or free, depending on v.
 672   void set_region_in_use(MetaWord* p, size_t word_size, bool v) {
 673     set_bits_of_region(p, word_size, layer_in_use_map, v);
 674   }
 675 
 676 #ifdef ASSERT
 677   // Verify occupancy map for the address range [from, to).
 678   // We need to tell it the address range, because the memory the
 679   // occupancy map is covering may not be fully committed yet.
 680   void verify(MetaWord* from, MetaWord* to) {
 681     Metachunk* chunk = NULL;
 682     int nth_bit_for_chunk = 0;
 683     MetaWord* chunk_end = NULL;
 684     for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) {
 685       const unsigned pos = get_bitpos_for_address(p);
 686       // Check the chunk-starts-info:
 687       if (get_bit_at_position(pos, layer_chunk_start_map)) {
 688         // Chunk start marked in bitmap.
 689         chunk = (Metachunk*) p;
 690         if (chunk_end != NULL) {
 691           assert(chunk_end == p, "Unexpected chunk start found at %p (expected "
 692                  "the next chunk to start at %p).", p, chunk_end);
 693         }
 694         assert(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p);
 695         if (chunk->get_chunk_type() != HumongousIndex) {
 696           guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p);
 697         }
 698         chunk_end = p + chunk->word_size();
 699         nth_bit_for_chunk = 0;
 700         assert(chunk_end <= to, "Chunk end overlaps test address range.");
 701       } else {
 702         // No chunk start marked in bitmap.
 703         assert(chunk != NULL, "Chunk should start at start of address range.");
 704         assert(p < chunk_end, "Did not find expected chunk start at %p.", p);
 705         nth_bit_for_chunk ++;
 706       }
 707       // Check the in-use-info:
 708       const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map);
 709       if (in_use_bit) {
 710         assert(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u).",
 711                chunk, nth_bit_for_chunk);
 712       } else {
 713         assert(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u).",
 714                chunk, nth_bit_for_chunk);
 715       }
 716     }
 717   }
 718 
 719   // Verify that a given chunk is correctly accounted for in the bitmap.
 720   void verify_for_chunk(Metachunk* chunk) {
 721     assert(chunk_starts_at_address((MetaWord*) chunk),
 722            "No chunk start marked in map for chunk %p.", chunk);
 723     // For chunks larger than the minimal chunk size, no other chunk
 724     // must start in its area.
 725     if (chunk->word_size() > _smallest_chunk_word_size) {
 726       assert(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size,
 727                                        chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map),
 728              "No chunk must start within another chunk.");
 729     }
 730     if (!chunk->is_tagged_free()) {
 731       assert(is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 732              "Chunk %p is in use but marked as free in map (%d %d).",
 733              chunk, chunk->get_chunk_type(), chunk->get_origin());
 734     } else {
 735       assert(!is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 736              "Chunk %p is free but marked as in-use in map (%d %d).",
 737              chunk, chunk->get_chunk_type(), chunk->get_origin());
 738     }
 739   }
 740 
 741 #endif // ASSERT
 742 
 743 };
 744 
 745 // A VirtualSpaceList node.
 746 class VirtualSpaceNode : public CHeapObj<mtClass> {
 747   friend class VirtualSpaceList;
 748 
 749   // Link to next VirtualSpaceNode
 750   VirtualSpaceNode* _next;
 751 
 752   // Whether this node belongs to the class space or to the non-class metaspace.
 753   const bool _is_class;
 754 
 755   // total in the VirtualSpace
 756   MemRegion _reserved;
 757   ReservedSpace _rs;
 758   VirtualSpace _virtual_space;
 759   MetaWord* _top;
 760   // count of chunks contained in this VirtualSpace
 761   uintx _container_count;
 762 
 763   OccupancyMap* _occupancy_map;
 764 
 765   // Convenience functions to access the _virtual_space
 766   char* low()  const { return virtual_space()->low(); }
 767   char* high() const { return virtual_space()->high(); }
 768 
 769   // The first Metachunk will be allocated at the bottom of the
 770   // VirtualSpace
 771   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 772 
 773   // Committed but unused space in the virtual space
 774   size_t free_words_in_vs() const;
 775 
 776   // True if this node belongs to class metaspace.
 777   bool is_class() const { return _is_class; }
 778 
 779  public:
 780 
 781   VirtualSpaceNode(bool is_class, size_t byte_size);
 782   VirtualSpaceNode(bool is_class, ReservedSpace rs) :
 783     _is_class(is_class), _top(NULL), _next(NULL), _rs(rs), _container_count(0), _occupancy_map(NULL) {}
 784   ~VirtualSpaceNode();
 785 
 786   // Convenience functions for logical bottom and end
 787   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 788   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 789 
 790   const OccupancyMap* occupancy_map() const { return _occupancy_map; }
 791   OccupancyMap* occupancy_map() { return _occupancy_map; }
 792 
 793   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 794 
 795   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 796   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 797 
 798   bool is_pre_committed() const { return _virtual_space.special(); }
 799 
 800   // address of next available space in _virtual_space;
 801   // Accessors
 802   VirtualSpaceNode* next() { return _next; }
 803   void set_next(VirtualSpaceNode* v) { _next = v; }
 804 
 805   void set_reserved(MemRegion const v) { _reserved = v; }
 806   void set_top(MetaWord* v) { _top = v; }
 807 
 808   // Accessors
 809   MemRegion* reserved() { return &_reserved; }
 810   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 811 
 812   // Returns true if "word_size" is available in the VirtualSpace
 813   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 814 
 815   MetaWord* top() const { return _top; }
 816   void inc_top(size_t word_size) { _top += word_size; }
 817 
 818   uintx container_count() { return _container_count; }
 819   void inc_container_count();
 820   void dec_container_count();
 821 #ifdef ASSERT
 822   uintx container_count_slow();
 823   void verify_container_count();
 824 #endif
 825 
 826   // used and capacity in this single entry in the list
 827   size_t used_words_in_vs() const;
 828   size_t capacity_words_in_vs() const;
 829 
 830   bool initialize();
 831 
 832   // get space from the virtual space
 833   Metachunk* take_from_committed(size_t chunk_word_size);
 834 
 835   // Allocate a chunk from the virtual space and return it.
 836   Metachunk* get_chunk_vs(size_t chunk_word_size);
 837 
 838   // Expands/shrinks the committed space in a virtual space.  Delegates
 839   // to VirtualSpace.
 840   bool expand_by(size_t min_words, size_t preferred_words);
 841 
 842   // In preparation for deleting this node, remove all the chunks
 843   // in the node from any freelist.
 844   void purge(ChunkManager* chunk_manager);
 845 
 846   // If an allocation doesn't fit in the current node a new node is created.
 847   // Allocate chunks out of the remaining committed space in this node
 848   // to avoid wasting that memory.
 849   // This always adds up because all the chunk sizes are multiples of
 850   // the smallest chunk size.
 851   void retire(ChunkManager* chunk_manager);
 852 
 853 #ifdef ASSERT
 854   // Debug support
 855   void mangle();
 856 #endif
 857 
 858   void print_on(outputStream* st) const;
 859   void print_map(outputStream* st, bool is_class) const;
 860 
 861   // Verify all chunks in this node.
 862   void verify();
 863 
 864 };
 865 
 866 #define assert_is_aligned(value, alignment)                  \
 867   assert(is_aligned((value), (alignment)),                   \
 868          SIZE_FORMAT_HEX " is not aligned to "               \
 869          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 870 
 871 // Decide if large pages should be committed when the memory is reserved.
 872 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 873   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 874     size_t words = bytes / BytesPerWord;
 875     bool is_class = false; // We never reserve large pages for the class space.
 876     if (MetaspaceGC::can_expand(words, is_class) &&
 877         MetaspaceGC::allowed_expansion() >= words) {
 878       return true;
 879     }
 880   }
 881 
 882   return false;
 883 }
 884 
 885 // bytes is the size, in bytes, of the virtual space to reserve.
 886 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
 887   _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
 888   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 889   bool large_pages = should_commit_large_pages_when_reserving(bytes);
 890   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 891 
 892   if (_rs.is_reserved()) {
 893     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 894     assert(_rs.size() != 0, "Catch if we get a 0 size");
 895     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
 896     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
 897 
 898     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 899   }
 900 }
 901 
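     // Walks the chunks in this node from bottom to top and removes each one
     // from the given ChunkManager's free lists, in preparation for deleting
     // this node.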
 902 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 903   DEBUG_ONLY(this->verify();)
 904   Metachunk* chunk = first_chunk();
 905   Metachunk* invalid_chunk = (Metachunk*) top();
 906   while (chunk < invalid_chunk ) {
 907     assert(chunk->is_tagged_free(), "Should be tagged free");
 908     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 909     chunk_manager->remove_chunk(chunk);
 910     DEBUG_ONLY(chunk->remove_sentinel();)
 911     assert(chunk->next() == NULL &&
 912            chunk->prev() == NULL,
 913            "Was not removed from its list");
 914     chunk = (Metachunk*) next;
 915   }
 916 }
 917 
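     // Prints an ASCII map of this node's chunks: one character per
     // smallest-chunk-sized area, showing chunk starts, chunk type and in-use
     // state, and (in debug builds) chunk origin and use count.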
 918 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
 919 
 920   if (bottom() == top()) {
 921     return;
 922   }
 923 
 924   const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
 925   const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
 926   const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
 927 
 928   int line_len = 100;
 929   const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
 930   line_len = (int)(section_len / spec_chunk_size);
 931 
 932 #ifdef ASSERT
 933 #define NUM_LINES 4
 934 #else
 935 #define NUM_LINES 2
 936 #endif
 937 
 938   char* lines[NUM_LINES];
 939   for (int i = 0; i < NUM_LINES; i ++) {
 940     lines[i] = (char*)os::malloc(line_len, mtInternal);
 941   }
 942   int pos = 0;
 943   const MetaWord* p = bottom();
 944   const Metachunk* chunk = (const Metachunk*)p;
 945   const MetaWord* chunk_end = p + chunk->word_size();
 946   while (p < top()) {
 947     if (pos == line_len) {
 948       pos = 0;
 949       for (int i = 0; i < NUM_LINES; i ++) {
 950         st->fill_to(22);
 951         st->print_raw(lines[i], line_len);
 952         st->cr();
 953       }
 954     }
 955     if (pos == 0) {
 956       st->print(PTR_FORMAT ":", p2i(p));
 957     }
 958     if (p == chunk_end) {
 959       chunk = (Metachunk*)p;
 960       chunk_end = p + chunk->word_size();
 961     }
 962     // line 1: chunk starting points (a dot if that area is a chunk start).
 963     lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
 964 
 965     // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
 966     // chunk is in use.
 967     const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
 968     if (chunk->word_size() == spec_chunk_size) {
 969       lines[1][pos] = chunk_is_free ? 'x' : 'X';
 970     } else if (chunk->word_size() == small_chunk_size) {
 971       lines[1][pos] = chunk_is_free ? 's' : 'S';
 972     } else if (chunk->word_size() == med_chunk_size) {
 973       lines[1][pos] = chunk_is_free ? 'm' : 'M';
 974     } else if (chunk->word_size() > med_chunk_size) {
 975       lines[1][pos] = chunk_is_free ? 'h' : 'H';
 976     } else {
 977       ShouldNotReachHere();
 978     }
 979 
 980 #ifdef ASSERT
 981     // Line 3: chunk origin
 982     const ChunkOrigin origin = chunk->get_origin();
 983     lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
 984 
 985     // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
 986     //         but were never used.
 987     lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
 988 #endif
 989 
 990     p += spec_chunk_size;
 991     pos ++;
 992   }
 993   if (pos > 0) {
 994     for (int i = 0; i < NUM_LINES; i ++) {
 995       st->fill_to(22);
 996       st->print_raw(lines[i], line_len);
 997       st->cr();
 998     }
 999   }
1000   for (int i = 0; i < NUM_LINES; i ++) {
1001     os::free(lines[i]);
1002   }
1003 }
1004 
1005 
1006 #ifdef ASSERT
1007 uintx VirtualSpaceNode::container_count_slow() {
1008   uintx count = 0;
1009   Metachunk* chunk = first_chunk();
1010   Metachunk* invalid_chunk = (Metachunk*) top();
1011   while (chunk < invalid_chunk ) {
1012     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1013     do_verify_chunk(chunk);
1014     // Don't count the chunks on the free lists.  Those are
1015     // still part of the VirtualSpaceNode but not currently
1016     // counted.
1017     if (!chunk->is_tagged_free()) {
1018       count++;
1019     }
1020     chunk = (Metachunk*) next;
1021   }
1022   return count;
1023 }
1024 #endif
1025 
1026 // Verify all chunks in this list node.
1027 void VirtualSpaceNode::verify() {
1028   DEBUG_ONLY(verify_container_count();)
1029   Metachunk* chunk = first_chunk();
1030   Metachunk* invalid_chunk = (Metachunk*) top();
1031   // Iterate the chunks in this node and verify each chunk.
1032   // Also verify that space is ideally coalesced, i.e. we did not miss any coalescing
1033   // opportunities (there shall be no runs of free chunks that could have been merged).
1034   const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
1035   const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
1036   int num_free_chunks_since_last_med_boundary = -1;
1037   int num_free_chunks_since_last_small_boundary = -1;
1038   while (chunk < invalid_chunk ) {
1039     // verify each chunk.
1040     DEBUG_ONLY(do_verify_chunk(chunk);)
1041     // Test for missed coalescing opportunities: count the number of free chunks since the last chunk boundary.
1042     // Reset the counter when encountering a non-free chunk.
1043     if (chunk->get_chunk_type() != HumongousIndex) {
1044       if (chunk->is_tagged_free()) {
1045         if (is_aligned(chunk, size_small)) {
1046           assert(num_free_chunks_since_last_small_boundary <= 1,
1047                  "Missed coalescation opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_small, size_small);
1048           num_free_chunks_since_last_small_boundary = 0;
1049         } else if (num_free_chunks_since_last_small_boundary != -1) {
1050           num_free_chunks_since_last_small_boundary ++;
1051         }
1052         if (is_aligned(chunk, size_med)) {
1053           assert(num_free_chunks_since_last_med_boundary <= 1,
1054                  "Missed coalescation opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_med, size_med);
1055           num_free_chunks_since_last_med_boundary = 0;
1056         } else if (num_free_chunks_since_last_med_boundary != -1) {
1057           num_free_chunks_since_last_med_boundary ++;
1058         }
1059       } else {
1060         // Encountering a non-free chunk, reset counters.
1061         num_free_chunks_since_last_med_boundary = -1;
1062         num_free_chunks_since_last_small_boundary = -1;
1063       }
1064     } else {
1065       // One cannot merge areas with a humongous chunk in the middle. Reset counters.
1066       num_free_chunks_since_last_med_boundary = -1;
1067       num_free_chunks_since_last_small_boundary = -1;
1068     }
1069 
1070     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1071     chunk = (Metachunk*) next;
1072   }
1073   // Also verify the whole occupancy map
1074   DEBUG_ONLY(occupancy_map()->verify(this->bottom(), this->top());)
1075 }
1076 
1077 // List of VirtualSpaces for metadata allocation.
1078 class VirtualSpaceList : public CHeapObj<mtClass> {
1079   friend class VirtualSpaceNode;
1080 
1081   enum VirtualSpaceSizes {
1082     VirtualSpaceSize = 256 * K
1083   };
1084 
1085   // Head of the list
1086   VirtualSpaceNode* _virtual_space_list;
1087   // virtual space currently being used for allocations
1088   VirtualSpaceNode* _current_virtual_space;
1089 
1090   // Is this VirtualSpaceList used for the compressed class space
1091   bool _is_class;
1092 
1093   // Sum of reserved and committed memory in the virtual spaces
1094   size_t _reserved_words;
1095   size_t _committed_words;
1096 
1097   // Number of virtual spaces
1098   size_t _virtual_space_count;
1099 
1100   ~VirtualSpaceList();
1101 
1102   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
1103 
1104   void set_virtual_space_list(VirtualSpaceNode* v) {
1105     _virtual_space_list = v;
1106   }
1107   void set_current_virtual_space(VirtualSpaceNode* v) {
1108     _current_virtual_space = v;
1109   }
1110 
1111   void link_vs(VirtualSpaceNode* new_entry);
1112 
1113   // Get another virtual space and add it to the list.  This
1114   // is typically prompted by a failed attempt to allocate a chunk
1115   // and is typically followed by the allocation of a chunk.
1116   bool create_new_virtual_space(size_t vs_word_size);
1117 
1118   // Chunk up the unused committed space in the current
1119   // virtual space and add the chunks to the free list.
1120   void retire_current_virtual_space();
1121 
1122  public:
1123   VirtualSpaceList(size_t word_size);
1124   VirtualSpaceList(ReservedSpace rs);
1125 
1126   size_t free_bytes();
1127 
1128   Metachunk* get_new_chunk(size_t chunk_word_size,
1129                            size_t suggested_commit_granularity);
1130 
1131   bool expand_node_by(VirtualSpaceNode* node,
1132                       size_t min_words,
1133                       size_t preferred_words);
1134 
1135   bool expand_by(size_t min_words,
1136                  size_t preferred_words);
1137 
1138   VirtualSpaceNode* current_virtual_space() {
1139     return _current_virtual_space;
1140   }
1141 
1142   bool is_class() const { return _is_class; }
1143 
1144   bool initialization_succeeded() { return _virtual_space_list != NULL; }
1145 
1146   size_t reserved_words()  { return _reserved_words; }
1147   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
1148   size_t committed_words() { return _committed_words; }
1149   size_t committed_bytes() { return committed_words() * BytesPerWord; }
1150 
1151   void inc_reserved_words(size_t v);
1152   void dec_reserved_words(size_t v);
1153   void inc_committed_words(size_t v);
1154   void dec_committed_words(size_t v);
1155   void inc_virtual_space_count();
1156   void dec_virtual_space_count();
1157 
1158   bool contains(const void* ptr);
1159 
1160   // Unlink empty VirtualSpaceNodes and free them.
1161   void purge(ChunkManager* chunk_manager);
1162 
1163   void print_on(outputStream* st) const;
1164   void print_map(outputStream* st) const;
1165 
1166   class VirtualSpaceListIterator : public StackObj {
1167     VirtualSpaceNode* _virtual_spaces;
1168    public:
1169     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
1170       _virtual_spaces(virtual_spaces) {}
1171 
1172     bool repeat() {
1173       return _virtual_spaces != NULL;
1174     }
1175 
1176     VirtualSpaceNode* get_next() {
1177       VirtualSpaceNode* result = _virtual_spaces;
1178       if (_virtual_spaces != NULL) {
1179         _virtual_spaces = _virtual_spaces->next();
1180       }
1181       return result;
1182     }
1183   };
1184 };
1185 
1186 class Metadebug : AllStatic {
1187   // Debugging support for Metaspaces
1188   static int _allocation_fail_alot_count;
1189 
1190  public:
1191 
1192   static void init_allocation_fail_alot_count();
1193 #ifdef ASSERT
1194   static bool test_metadata_failure();
1195 #endif
1196 };
1197 
1198 int Metadebug::_allocation_fail_alot_count = 0;
1199 
1200 //  SpaceManager - used by Metaspace to handle allocations
1201 class SpaceManager : public CHeapObj<mtClass> {
1202   friend class Metaspace;
1203   friend class Metadebug;
1204 
1205  private:
1206 
1207   // protects allocations
1208   Mutex* const _lock;
1209 
1210   // Type of metadata allocated.
1211   const Metaspace::MetadataType   _mdtype;
1212 
1213   // Type of metaspace
1214   const Metaspace::MetaspaceType  _space_type;
1215 
1216   // List of chunks in use by this SpaceManager.  Allocations
1217   // are done from the current chunk.  The list is used for deallocating
1218   // chunks when the SpaceManager is freed.
1219   Metachunk* _chunks_in_use[NumberOfInUseLists];
1220   Metachunk* _current_chunk;
1221 
1222   // Maximum number of small chunks to allocate to a SpaceManager
1223   static uint const _small_chunk_limit;
1224 
1225   // Maximum number of specialized chunks to allocate for anonymous
1226   // metadata space to a SpaceManager
1227   static uint const _anon_metadata_specialize_chunk_limit;
1228 
1229   // Sum of all space in allocated chunks
1230   size_t _allocated_blocks_words;
1231 
1232   // Sum of all allocated chunks
1233   size_t _allocated_chunks_words;
1234   size_t _allocated_chunks_count;
1235 
1236   // Free lists of blocks are per SpaceManager since they
1237   // are assumed to be in chunks in use by the SpaceManager
1238   // and all chunks in use by a SpaceManager are freed when
1239   // the class loader using the SpaceManager is collected.
1240   BlockFreelist* _block_freelists;
1241 
1242   // protects virtualspace and chunk expansions
1243   static const char*  _expand_lock_name;
1244   static const int    _expand_lock_rank;
1245   static Mutex* const _expand_lock;
1246 
1247  private:
1248   // Accessors
1249   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
1250   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
1251     _chunks_in_use[index] = v;
1252   }
1253 
1254   BlockFreelist* block_freelists() const { return _block_freelists; }
1255 
1256   Metaspace::MetadataType mdtype() { return _mdtype; }
1257 
1258   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
1259   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
1260 
1261   Metachunk* current_chunk() const { return _current_chunk; }
1262   void set_current_chunk(Metachunk* v) {
1263     _current_chunk = v;
1264   }
1265 
1266   Metachunk* find_current_chunk(size_t word_size);
1267 
1268   // Add chunk to the list of chunks in use
1269   void add_chunk(Metachunk* v, bool make_current);
1270   void retire_current_chunk();
1271 
1272   Mutex* lock() const { return _lock; }
1273 
1274  protected:
1275   void initialize();
1276 
1277  public:
1278   SpaceManager(Metaspace::MetadataType mdtype,
1279                Metaspace::MetaspaceType space_type,
1280                Mutex* lock);
1281   ~SpaceManager();
1282 
1283   enum ChunkMultiples {
1284     MediumChunkMultiple = 4
1285   };
1286 
1287   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
1288   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
1289   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
1290 
1291   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
1292 
1293   // Accessors
1294   bool is_class() const { return _mdtype == Metaspace::ClassType; }
1295 
1296   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
1297   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
1298   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
1299 
1300   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
1301 
1302   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
1303 
1304   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
1305   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
1306   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
1307   size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
1308   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
1309 
1310   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
1311 
1312   static Mutex* expand_lock() { return _expand_lock; }
1313 
1314   // Increment the per Metaspace and global running sums for Metachunks
1315   // by the given size.  This is used when a Metachunk is added to
1316   // the in-use list.
1317   void inc_size_metrics(size_t words);
1318   // Increment the per Metaspace and global running sums for Metablocks by the
1319   // given size.  This is used when a Metablock is allocated.
1320   void inc_used_metrics(size_t words);
1321   // Delete the portion of the running sums for this SpaceManager. That is,
1322   // the global running sums for the Metachunks and Metablocks are
1323   // decremented for all the Metachunks in-use by this SpaceManager.
1324   void dec_total_from_size_metrics();
1325 
1326   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
1327   // or return the unadjusted size if the requested size is humongous.
1328   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
1329   size_t adjust_initial_chunk_size(size_t requested) const;
1330 
1331   // Get the initial chunk size for this metaspace type.
1332   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
1333 
1334   size_t sum_capacity_in_chunks_in_use() const;
1335   size_t sum_used_in_chunks_in_use() const;
1336   size_t sum_free_in_chunks_in_use() const;
1337   size_t sum_waste_in_chunks_in_use() const;
1338   size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
1339 
1340   size_t sum_count_in_chunks_in_use();
1341   size_t sum_count_in_chunks_in_use(ChunkIndex i);
1342 
1343   Metachunk* get_new_chunk(size_t chunk_word_size);
1344 
1345   // Block allocation and deallocation.
1346   // Allocates a block from the current chunk
1347   MetaWord* allocate(size_t word_size);
1348 
1349   // Helper for allocations
1350   MetaWord* allocate_work(size_t word_size);
1351 
1352   // Returns a block to the per manager freelist
1353   void deallocate(MetaWord* p, size_t word_size);
1354 
1355   // Based on the allocation size and a minimum chunk size, returns the
1356   // chunk size to use (for expanding space for chunk allocation).
1357   size_t calc_chunk_size(size_t allocation_word_size);
1358 
1359   // Called when an allocation from the current chunk fails.
1360   // Gets a new chunk (may require getting a new virtual space),
1361   // and allocates from that chunk.
1362   MetaWord* grow_and_allocate(size_t word_size);
1363 
1364   // Notify memory usage to MemoryService.
1365   void track_metaspace_memory_usage();
1366 
1367   // debugging support.
1368 
1369   void dump(outputStream* const out) const;
1370   void print_on(outputStream* st) const;
1371   void locked_print_chunks_in_use_on(outputStream* st) const;
1372 
1373   void verify();
1374   void verify_chunk_size(Metachunk* chunk);
1375 #ifdef ASSERT
1376   void verify_allocated_blocks_words();
1377 #endif
1378 
  // This adjusts the given size to be at least the minimum allocation size in
  // words for data in metaspace.  Essentially the minimum size is currently 3 words.
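  //
  // Worked example (illustrative only, assuming a 64-bit VM where
  // BytesPerWord == 8 and sizeof(Metablock) occupies three words): a request
  // for one word is first raised to sizeof(Metablock) bytes and then aligned
  // up to Metachunk::object_alignment(), so even the smallest request is
  // rounded up to a raw size of at least 3 words.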
1381   size_t get_allocation_word_size(size_t word_size) {
1382     size_t byte_size = word_size * BytesPerWord;
1383 
1384     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
1385     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
1386 
1387     size_t raw_word_size = raw_bytes_size / BytesPerWord;
1388     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
1389 
1390     return raw_word_size;
1391   }
1392 };
1393 
1394 uint const SpaceManager::_small_chunk_limit = 4;
1395 uint const SpaceManager::_anon_metadata_specialize_chunk_limit = 4;
1396 
1397 const char* SpaceManager::_expand_lock_name =
1398   "SpaceManager chunk allocation lock";
1399 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
1400 Mutex* const SpaceManager::_expand_lock =
1401   new Mutex(SpaceManager::_expand_lock_rank,
1402             SpaceManager::_expand_lock_name,
1403             Mutex::_allow_vm_block_flag,
1404             Monitor::_safepoint_check_never);
1405 
1406 void VirtualSpaceNode::inc_container_count() {
1407   assert_lock_strong(SpaceManager::expand_lock());
1408   _container_count++;
1409 }
1410 
1411 void VirtualSpaceNode::dec_container_count() {
1412   assert_lock_strong(SpaceManager::expand_lock());
1413   _container_count--;
1414 }
1415 
1416 #ifdef ASSERT
1417 void VirtualSpaceNode::verify_container_count() {
1418   assert(_container_count == container_count_slow(),
1419          "Inconsistency in container_count _container_count " UINTX_FORMAT
1420          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
1421 }
1422 #endif
1423 
1424 // BlockFreelist methods
1425 
1426 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
1427 
1428 BlockFreelist::~BlockFreelist() {
1429   delete _dictionary;
1430   if (_small_blocks != NULL) {
1431     delete _small_blocks;
1432   }
1433 }
1434 
1435 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
1436   assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
1437 
1438   Metablock* free_chunk = ::new (p) Metablock(word_size);
1439   if (word_size < SmallBlocks::small_block_max_size()) {
1440     small_blocks()->return_block(free_chunk, word_size);
  } else {
    dictionary()->return_chunk(free_chunk);
  }
1444   log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
1445             SIZE_FORMAT, p2i(free_chunk), word_size);
1446 }
1447 
1448 MetaWord* BlockFreelist::get_block(size_t word_size) {
1449   assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
1450 
1451   // Try small_blocks first.
1452   if (word_size < SmallBlocks::small_block_max_size()) {
1453     // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
1454     // this space manager.
1455     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
1456     if (new_block != NULL) {
1457       log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1458               p2i(new_block), word_size);
1459       return new_block;
1460     }
1461   }
1462 
1463   if (word_size < BlockFreelist::min_dictionary_size()) {
1464     // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
1465     return NULL;
1466   }
1467 
1468   Metablock* free_block = dictionary()->get_chunk(word_size);
1469   if (free_block == NULL) {
1470     return NULL;
1471   }
1472 
1473   const size_t block_size = free_block->size();
1474   if (block_size > WasteMultiplier * word_size) {
1475     return_block((MetaWord*)free_block, block_size);
1476     return NULL;
1477   }
1478 
1479   MetaWord* new_block = (MetaWord*)free_block;
1480   assert(block_size >= word_size, "Incorrect size of block from freelist");
1481   const size_t unused = block_size - word_size;
1482   if (unused >= SmallBlocks::small_block_min_size()) {
1483     return_block(new_block + word_size, unused);
1484   }
1485 
1486   log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1487             p2i(new_block), word_size);
1488   return new_block;
1489 }
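
// Note on the block reuse policy above (illustrative summary, not a separate
// code path): for a request of W words whose best-fit dictionary block is B
// words, the block is only used if B <= WasteMultiplier * W; otherwise it is
// put back and NULL is returned so the caller falls back to allocating from a
// chunk. When the block is used, any unused tail of at least
// SmallBlocks::small_block_min_size() words is returned to the freelist;
// smaller remainders stay attached to the allocation as waste ("dark matter").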
1490 
1491 void BlockFreelist::print_on(outputStream* st) const {
1492   dictionary()->print_free_lists(st);
1493   if (_small_blocks != NULL) {
1494     _small_blocks->print_on(st);
1495   }
1496 }
1497 
1498 // VirtualSpaceNode methods
1499 
1500 VirtualSpaceNode::~VirtualSpaceNode() {
1501   _rs.release();
1502   if (_occupancy_map != NULL) {
1503     delete _occupancy_map;
1504   }
1505 #ifdef ASSERT
1506   size_t word_size = sizeof(*this) / BytesPerWord;
1507   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
1508 #endif
1509 }
1510 
1511 size_t VirtualSpaceNode::used_words_in_vs() const {
1512   return pointer_delta(top(), bottom(), sizeof(MetaWord));
1513 }
1514 
1515 // Space committed in the VirtualSpace
1516 size_t VirtualSpaceNode::capacity_words_in_vs() const {
1517   return pointer_delta(end(), bottom(), sizeof(MetaWord));
1518 }
1519 
1520 size_t VirtualSpaceNode::free_words_in_vs() const {
1521   return pointer_delta(end(), top(), sizeof(MetaWord));
1522 }
1523 
1524 // Allocates the chunk from the virtual space only.
1525 // This interface is also used internally for debugging.  Not all
1526 // chunks removed here are necessarily used for allocation.
1527 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1528   // Non-humongous chunks are to be allocated aligned to their chunk
1529   // size. So, start addresses of medium chunks are aligned to medium
1530   // chunk size, those of small chunks to small chunk size and so
1531   // forth. This facilitates free chunk coalescation and reduces
1532   // fragmentation. Chunk sizes are spec < small < medium, with each
1533   // larger chunk size being a multiple of the next smaller chunk
1534   // size.
  // Because of this alignment, we may need to create a number of padding
1536   // chunks. These chunks are created and added to the freelist.
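  //
  // Example (illustrative only): if top() currently lies on a specialized-chunk
  // boundary but not on a medium-chunk boundary and a medium chunk is requested,
  // the gap up to the next medium-aligned address is filled with small chunks
  // (wherever top() is small-aligned) and specialized chunks (wherever it is
  // not), and those padding chunks are handed to the ChunkManager as free
  // chunks before the requested chunk itself is carved out at the aligned
  // address.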
1537 
1538   // The chunk manager to which we will give our padding chunks.
1539   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1540 
1541   // shorthands
1542   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1543   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1544   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1545 
1546   assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
1547          chunk_word_size >= med_word_size, "Invalid chunk size requested.");
1548 
1549   // Chunk alignment (in bytes) == chunk size unless humongous.
1550   // Humongous chunks are aligned to the smallest chunk size (spec).
1551   const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
1552                                            spec_word_size : chunk_word_size) * sizeof(MetaWord);
1553 
1554   // Do we have enough space to create the requested chunk plus
1555   // any padding chunks needed?
1556   MetaWord* const next_aligned =
1557     static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
1558   if (!is_available((next_aligned - top()) + chunk_word_size)) {
1559     return NULL;
1560   }
1561 
1562   // Before allocating the requested chunk, allocate padding chunks if necessary.
1563   // We only need to do this for small or medium chunks: specialized chunks are the
  // smallest size, hence always aligned. Humongous chunks are allocated unaligned
1565   // (implicitly, also aligned to smallest chunk size).
1566   if (chunk_word_size == med_word_size || chunk_word_size == small_word_size) {
1567 
1568     if (next_aligned > top()) {
1569       log_trace(gc, metaspace, freelist)("Coalescation (%s): creating padding chunks between %p and %p...",
1570           (is_class() ? "class space " : "metaspace"),
1571           top(), next_aligned);
1572     }
1573 
1574     // Allocate padding chunks.
1575     while (next_aligned > top()) {
1576       size_t padding_chunk_word_size = small_word_size;
1577       if (is_aligned(top(), small_word_size * sizeof(MetaWord)) == false) {
1578         assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
1579         padding_chunk_word_size = spec_word_size;
1580       }
1581       MetaWord* here = top();
1582       assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
1583       inc_top(padding_chunk_word_size);
1584 
1585       // Create new padding chunk.
1586       ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
1587       assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
1588 
1589       Metachunk* const padding_chunk =
1590         ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
1591       assert(padding_chunk == (Metachunk*)here, "Sanity");
1592       DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
1593       log_trace(gc, metaspace, freelist)("Coalescation (%s): created padding chunk at "
1594                                          PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
1595                                          (is_class() ? "class space " : "metaspace"),
1596                                          p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
1597 
1598       // Mark chunk start in occupancy map.
1599       occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
1600 
      // Chunks are born as in-use (see Metachunk ctor). So, before returning
1602       // the padding chunk to its chunk manager, mark it as in use (ChunkManager
1603       // will assert that).
1604       do_update_in_use_info_for_chunk(padding_chunk, true);
1605 
1606       // Return Chunk to freelist.
1607       inc_container_count();
1608       chunk_manager->return_single_chunk(padding_chunk_type, padding_chunk);
      // Please note: at this point, ChunkManager::return_single_chunk()
      // may have merged the padding chunk with neighboring chunks, so
      // the padding chunk may already have vanished. Do not reference it
      // beyond this point.
1613     }
1614 
1615   } // End: create padding chunks if necessary.
1616 
1617   // Now, top should be aligned correctly.
1618   assert_is_aligned(top(), required_chunk_alignment);
1619 
1620   // Bottom of the new chunk
1621   MetaWord* chunk_limit = top();
1622   assert(chunk_limit != NULL, "Not safe to call this method");
1623 
1624   // The virtual spaces are always expanded by the
1625   // commit granularity to enforce the following condition.
1626   // Without this the is_available check will not work correctly.
1627   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1628       "The committed memory doesn't match the expanded memory.");
1629 
1630   if (!is_available(chunk_word_size)) {
1631     LogTarget(Debug, gc, metaspace, freelist) lt;
1632     if (lt.is_enabled()) {
1633       LogStream ls(lt);
1634       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1635       // Dump some information about the virtual space that is nearly full
1636       print_on(&ls);
1637     }
1638     return NULL;
1639   }
1640 
1641   // Take the space  (bump top on the current virtual space).
1642   inc_top(chunk_word_size);
1643 
1644   // Initialize the chunk
1645   ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
1646   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
1647   assert(result == (Metachunk*)chunk_limit, "Sanity");
1648   occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
1649   do_update_in_use_info_for_chunk(result, true);
1650 
1651   inc_container_count();
1652 
1653   DEBUG_ONLY(chunk_manager->locked_verify());
1654   DEBUG_ONLY(this->verify());
1655   DEBUG_ONLY(do_verify_chunk(result));
1656 
1657   DEBUG_ONLY(result->inc_use_count();)
1658 
1659   return result;
1660 }
1661 
1662 
1663 // Expand the virtual space (commit more of the reserved space)
1664 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1665   size_t min_bytes = min_words * BytesPerWord;
1666   size_t preferred_bytes = preferred_words * BytesPerWord;
1667 
1668   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1669 
1670   if (uncommitted < min_bytes) {
1671     return false;
1672   }
1673 
1674   size_t commit = MIN2(preferred_bytes, uncommitted);
1675   bool result = virtual_space()->expand_by(commit, false);
1676 
1677   if (result) {
1678     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
1679               (is_class() ? "class" : "non-class"), commit);
1680   } else {
1681     log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
1682               (is_class() ? "class" : "non-class"), commit);
1683   }
1684 
1685   assert(result, "Failed to commit memory");
1686 
1687   return result;
1688 }
1689 
1690 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1691   assert_lock_strong(SpaceManager::expand_lock());
1692   Metachunk* result = take_from_committed(chunk_word_size);
1693   return result;
1694 }
1695 
1696 bool VirtualSpaceNode::initialize() {
1697 
1698   if (!_rs.is_reserved()) {
1699     return false;
1700   }
1701 
  // These are necessary restrictions to make sure that the virtual space always
1703   // grows in steps of Metaspace::commit_alignment(). If both base and size are
1704   // aligned only the middle alignment of the VirtualSpace is used.
1705   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
1706   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
1707 
1708   // ReservedSpaces marked as special will have the entire memory
1709   // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
1711   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1712 
1713   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1714                                             Metaspace::commit_alignment());
1715   if (result) {
1716     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1717         "Checking that the pre-committed memory was registered by the VirtualSpace");
1718 
1719     set_top((MetaWord*)virtual_space()->low());
1720     set_reserved(MemRegion((HeapWord*)_rs.base(),
1721                  (HeapWord*)(_rs.base() + _rs.size())));
1722 
1723     assert(reserved()->start() == (HeapWord*) _rs.base(),
1724            "Reserved start was not set properly " PTR_FORMAT
1725            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1726     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1727            "Reserved size was not set properly " SIZE_FORMAT
1728            " != " SIZE_FORMAT, reserved()->word_size(),
1729            _rs.size() / BytesPerWord);
1730   }
1731 
1732   // Initialize Occupancy Map.
1733   const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
1734   _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
1735 
1736   return result;
1737 }
1738 
1739 void VirtualSpaceNode::print_on(outputStream* st) const {
1740   size_t used = used_words_in_vs();
1741   size_t capacity = capacity_words_in_vs();
1742   VirtualSpace* vs = virtual_space();
1743   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
1744            "[" PTR_FORMAT ", " PTR_FORMAT ", "
1745            PTR_FORMAT ", " PTR_FORMAT ")",
1746            p2i(vs), capacity / K,
1747            capacity == 0 ? 0 : used * 100 / capacity,
1748            p2i(bottom()), p2i(top()), p2i(end()),
1749            p2i(vs->high_boundary()));
1750 }
1751 
1752 #ifdef ASSERT
1753 void VirtualSpaceNode::mangle() {
1754   size_t word_size = capacity_words_in_vs();
1755   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1756 }
1757 #endif // ASSERT
1758 
1759 // VirtualSpaceList methods
1760 // Space allocated from the VirtualSpace
1761 
1762 VirtualSpaceList::~VirtualSpaceList() {
1763   VirtualSpaceListIterator iter(virtual_space_list());
1764   while (iter.repeat()) {
1765     VirtualSpaceNode* vsl = iter.get_next();
1766     delete vsl;
1767   }
1768 }
1769 
1770 void VirtualSpaceList::inc_reserved_words(size_t v) {
1771   assert_lock_strong(SpaceManager::expand_lock());
1772   _reserved_words = _reserved_words + v;
1773 }
1774 void VirtualSpaceList::dec_reserved_words(size_t v) {
1775   assert_lock_strong(SpaceManager::expand_lock());
1776   _reserved_words = _reserved_words - v;
1777 }
1778 
1779 #define assert_committed_below_limit()                        \
1780   assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
1781          "Too much committed memory. Committed: " SIZE_FORMAT \
1782          " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1783          MetaspaceAux::committed_bytes(), MaxMetaspaceSize);
1784 
1785 void VirtualSpaceList::inc_committed_words(size_t v) {
1786   assert_lock_strong(SpaceManager::expand_lock());
1787   _committed_words = _committed_words + v;
1788 
1789   assert_committed_below_limit();
1790 }
1791 void VirtualSpaceList::dec_committed_words(size_t v) {
1792   assert_lock_strong(SpaceManager::expand_lock());
1793   _committed_words = _committed_words - v;
1794 
1795   assert_committed_below_limit();
1796 }
1797 
1798 void VirtualSpaceList::inc_virtual_space_count() {
1799   assert_lock_strong(SpaceManager::expand_lock());
1800   _virtual_space_count++;
1801 }
1802 void VirtualSpaceList::dec_virtual_space_count() {
1803   assert_lock_strong(SpaceManager::expand_lock());
1804   _virtual_space_count--;
1805 }
1806 
1807 void ChunkManager::remove_chunk(Metachunk* chunk) {
1808   size_t word_size = chunk->word_size();
1809   ChunkIndex index = list_index(word_size);
1810   if (index != HumongousIndex) {
1811     free_chunks(index)->remove_chunk(chunk);
1812   } else {
1813     humongous_dictionary()->remove_chunk(chunk);
1814   }
1815 
1816   // Chunk has been removed from the chunks free list, update counters.
1817   account_for_removed_chunk(chunk);
1818 }
1819 
1820 bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
1821   assert_lock_strong(SpaceManager::expand_lock());
1822   assert(chunk != NULL, "invalid chunk pointer");
1823   // Check for valid coalescation combinations.
1824   assert((chunk->get_chunk_type() == SpecializedIndex &&
1825           (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) ||
1826          (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex),
1827         "Invalid chunk coalescation combination.");
1828 
1829   const size_t target_chunk_word_size =
1830     get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class());
1831 
1832   MetaWord* const p_coalescation_start =
1833     (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord));
1834   MetaWord* const p_coalescation_end =
1835     p_coalescation_start + target_chunk_word_size;
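
  // Illustrative example: for a free chunk at address A and a target type of
  // MediumIndex, the candidate range computed above is the naturally aligned,
  // medium-chunk-sized region containing A:
  //   [align_down(A, medium_bytes), align_down(A, medium_bytes) + medium_bytes)
  // where medium_bytes is the medium chunk size in bytes. Coalescing succeeds
  // only if that whole region lies between the node's bottom() and top(),
  // starts and ends on chunk boundaries, and contains no in-use words (all
  // checked below).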
1836 
1837   // We need the VirtualSpaceNode containing this chunk and its occupancy map.
1838   VirtualSpaceNode* const vsn = chunk->container();
1839   OccupancyMap* const ocmap = vsn->occupancy_map();
1840 
1841   // The potential coalescation range shall be completely contained by the
1842   // committed range of the virtual space node.
1843   if (p_coalescation_start < vsn->bottom() || p_coalescation_end > vsn->top()) {
1844     return false;
1845   }
1846 
1847   // Only attempt to coalesce if at the start of the potential
1848   // coalescation range a chunk starts and at the end of the potential
1849   // coalescation range a chunk ends. If that is not the case - so, if
1850   // a chunk straddles either start or end of the coalescation range,
1851   // we cannot coalesce.  Note that this should only happen with
1852   // humongous chunks.
1853   if (!ocmap->chunk_starts_at_address(p_coalescation_start)) {
1854     return false;
1855   }
1856 
1857   // (A chunk ends at the coalescation range end either if this is the
1858   // end of the used area or if a new chunk starts right away.)
1859   if (p_coalescation_end < vsn->top()) {
1860     if (!ocmap->chunk_starts_at_address(p_coalescation_end)) {
1861       return false;
1862     }
1863   }
1864 
  // Now check whether there are still any live chunks in the coalescation area.
1866   if (ocmap->is_region_in_use(p_coalescation_start, target_chunk_word_size)) {
1867     return false;
1868   }
1869 
1870   // Success! Remove all chunks in this region...
1871   log_trace(gc, metaspace, freelist)("Coalescation (%s): coalescing chunks in area [%p-%p)...",
1872     (is_class() ? "class space" : "metaspace"),
1873     p_coalescation_start, p_coalescation_end);
1874 
1875   const int num_chunks_removed =
1876     remove_chunks_in_area(p_coalescation_start, target_chunk_word_size);
1877 
1878   // ... and create a single new bigger chunk.
1879   Metachunk* const p_new_chunk =
1880       ::new (p_coalescation_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn);
1881   assert(p_new_chunk == (Metachunk*)p_coalescation_start, "Sanity");
1882   DEBUG_ONLY(p_new_chunk->set_origin(origin_coalescation);)
1883 
1884   log_trace(gc, metaspace, freelist)("Coalescation (%s): created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".",
1885     (is_class() ? "class space" : "metaspace"),
1886     p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord));
1887 
1888   // Fix occupancy map: remove old start bits of the small chunks and set new start bit.
1889   ocmap->wipe_chunk_start_bits_in_region(p_coalescation_start, target_chunk_word_size);
1890   ocmap->set_chunk_starts_at_address(p_coalescation_start, true);
1891 
  // Mark chunk as free. Note: it is not necessary to update the in-use
  // information in the occupancy map, because the old chunks were also
  // free, so nothing should have changed.
1895   p_new_chunk->set_is_tagged_free(true);
1896 
1897   // Add new chunk to its freelist.
1898   ChunkList* const list = free_chunks(target_chunk_type);
1899   list->return_chunk_at_head(p_new_chunk);
1900 
  // And adjust ChunkManager::_free_chunks_count (_free_chunks_total
  // should not have changed, because the size of the space should be the same).
1903   _free_chunks_count -= num_chunks_removed;
1904   _free_chunks_count ++;
1905 
1906   // VirtualSpaceNode::container_count does not have to be modified:
1907   // it means "number of active (non-free) chunks", so coalescation of
1908   // free chunks should not affect that count.
1909 
1910   // At the end of a coalescation, run verification tests.
1911   DEBUG_ONLY(this->locked_verify());
1912   DEBUG_ONLY(vsn->verify());
1913 
1914   return true;
1915 }
1916 
1917 // Remove all chunks in the given area - the chunks are supposed to be free -
1918 // from their corresponding freelists. Mark them as invalid.
1919 // - This does not correct the occupancy map.
1920 // - This does not adjust the counters in ChunkManager.
// - This does not adjust the container count in the containing VirtualSpaceNode.
1922 // Returns number of chunks removed.
1923 int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
1924   assert(p != NULL && word_size > 0, "Invalid range.");
1925   const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
1926   assert_is_aligned(word_size, smallest_chunk_size);
1927 
1928   Metachunk* const start = (Metachunk*) p;
1929   const Metachunk* const end = (Metachunk*)(p + word_size);
1930   Metachunk* cur = start;
1931   int num_removed = 0;
1932   while (cur < end) {
1933     Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
1934     DEBUG_ONLY(do_verify_chunk(cur));
1935     assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
1936     assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
1937     log_trace(gc, metaspace, freelist)("Coalescation (%s): removing chunk %p, size " SIZE_FORMAT_HEX ".",
1938       (is_class() ? "class space" : "metaspace"),
1939       cur, cur->word_size() * sizeof(MetaWord));
1940     DEBUG_ONLY(cur->remove_sentinel();)
1941     // Note: cannot call ChunkManager::remove_chunk, because that
1942     // modifies the counters in ChunkManager, which we do not want. So
1943     // we call remove_chunk on the freelist directly (see also the
1944     // splitting function which does the same).
1945     ChunkList* const list = free_chunks(list_index(cur->word_size()));
1946     list->remove_chunk(cur);
1947     num_removed ++;
1948     cur = next;
1949   }
1950   return num_removed;
1951 }
1952 
1953 // Walk the list of VirtualSpaceNodes and delete
1954 // nodes with a 0 container_count.  Remove Metachunks in
1955 // the node from their respective freelists.
1956 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1957   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
1958   assert_lock_strong(SpaceManager::expand_lock());
1959   // Don't use a VirtualSpaceListIterator because this
1960   // list is being changed and a straightforward use of an iterator is not safe.
1961   VirtualSpaceNode* purged_vsl = NULL;
1962   VirtualSpaceNode* prev_vsl = virtual_space_list();
1963   VirtualSpaceNode* next_vsl = prev_vsl;
1964   while (next_vsl != NULL) {
1965     VirtualSpaceNode* vsl = next_vsl;
1966     DEBUG_ONLY(vsl->verify_container_count();)
1967     next_vsl = vsl->next();
1968     // Don't free the current virtual space since it will likely
1969     // be needed soon.
1970     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1971       log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
1972                                          ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
1973       // Unlink it from the list
1974       if (prev_vsl == vsl) {
1975         // This is the case of the current node being the first node.
1976         assert(vsl == virtual_space_list(), "Expected to be the first node");
1977         set_virtual_space_list(vsl->next());
1978       } else {
1979         prev_vsl->set_next(vsl->next());
1980       }
1981 
1982       vsl->purge(chunk_manager);
1983       dec_reserved_words(vsl->reserved_words());
1984       dec_committed_words(vsl->committed_words());
1985       dec_virtual_space_count();
1986       purged_vsl = vsl;
1987       delete vsl;
1988     } else {
1989       prev_vsl = vsl;
1990     }
1991   }
1992 #ifdef ASSERT
1993   if (purged_vsl != NULL) {
1994     // List should be stable enough to use an iterator here.
1995     VirtualSpaceListIterator iter(virtual_space_list());
1996     while (iter.repeat()) {
1997       VirtualSpaceNode* vsl = iter.get_next();
1998       assert(vsl != purged_vsl, "Purge of vsl failed");
1999     }
2000   }
2001 #endif
2002 }
2003 
2004 
2005 // This function looks at the mmap regions in the metaspace without locking.
2006 // The chunks are added with store ordering and not deleted except for at
2007 // unloading time during a safepoint.
2008 bool VirtualSpaceList::contains(const void* ptr) {
2009   // List should be stable enough to use an iterator here because removing virtual
2010   // space nodes is only allowed at a safepoint.
2011   VirtualSpaceListIterator iter(virtual_space_list());
2012   while (iter.repeat()) {
2013     VirtualSpaceNode* vsn = iter.get_next();
2014     if (vsn->contains(ptr)) {
2015       return true;
2016     }
2017   }
2018   return false;
2019 }
2020 
2021 void VirtualSpaceList::retire_current_virtual_space() {
2022   assert_lock_strong(SpaceManager::expand_lock());
2023 
2024   VirtualSpaceNode* vsn = current_virtual_space();
2025 
2026   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
2027                                   Metaspace::chunk_manager_metadata();
2028 
2029   vsn->retire(cm);
2030 }
2031 
2032 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
2033   DEBUG_ONLY(verify_container_count();)
2034   assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
2035   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
2036     ChunkIndex index = (ChunkIndex)i;
2037     size_t chunk_size = chunk_manager->size_by_index(index);
2038 
2039     while (free_words_in_vs() >= chunk_size) {
2040       Metachunk* chunk = get_chunk_vs(chunk_size);
2041       // Chunk will be allocated aligned, so allocation may require
      // additional padding chunks. That may cause the above allocation to
      // fail. Just ignore the failed allocation and continue with the
      // next smaller chunk size. As the VirtualSpaceNode committed
2045       // size should be a multiple of the smallest chunk size, we
2046       // should always be able to fill the VirtualSpace completely.
2047       if (chunk == NULL) {
2048         break;
2049       }
2050       chunk_manager->return_single_chunk(index, chunk);
2051     }
2052     DEBUG_ONLY(verify_container_count();)
2053   }
2054   assert(free_words_in_vs() == 0, "should be empty now");
2055 }
2056 
2057 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
2058                                    _is_class(false),
2059                                    _virtual_space_list(NULL),
2060                                    _current_virtual_space(NULL),
2061                                    _reserved_words(0),
2062                                    _committed_words(0),
2063                                    _virtual_space_count(0) {
2064   MutexLockerEx cl(SpaceManager::expand_lock(),
2065                    Mutex::_no_safepoint_check_flag);
2066   create_new_virtual_space(word_size);
2067 }
2068 
2069 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
2070                                    _is_class(true),
2071                                    _virtual_space_list(NULL),
2072                                    _current_virtual_space(NULL),
2073                                    _reserved_words(0),
2074                                    _committed_words(0),
2075                                    _virtual_space_count(0) {
2076   MutexLockerEx cl(SpaceManager::expand_lock(),
2077                    Mutex::_no_safepoint_check_flag);
2078   VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
2079   bool succeeded = class_entry->initialize();
2080   if (succeeded) {
2081     link_vs(class_entry);
2082   }
2083 }
2084 
2085 size_t VirtualSpaceList::free_bytes() {
2086   return current_virtual_space()->free_words_in_vs() * BytesPerWord;
2087 }
2088 
2089 // Allocate another meta virtual space and add it to the list.
2090 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
2091   assert_lock_strong(SpaceManager::expand_lock());
2092 
2093   if (is_class()) {
2094     assert(false, "We currently don't support more than one VirtualSpace for"
2095                   " the compressed class space. The initialization of the"
2096                   " CCS uses another code path and should not hit this path.");
2097     return false;
2098   }
2099 
2100   if (vs_word_size == 0) {
2101     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
2102     return false;
2103   }
2104 
2105   // Reserve the space
2106   size_t vs_byte_size = vs_word_size * BytesPerWord;
2107   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
2108 
2109   // Allocate the meta virtual space and initialize it.
2110   VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
2111   if (!new_entry->initialize()) {
2112     delete new_entry;
2113     return false;
2114   } else {
2115     assert(new_entry->reserved_words() == vs_word_size,
2116         "Reserved memory size differs from requested memory size");
    // Ensure that lock-free iteration sees a fully initialized node.
2118     OrderAccess::storestore();
2119     link_vs(new_entry);
2120     return true;
2121   }
2122 }
2123 
2124 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
2125   if (virtual_space_list() == NULL) {
2126       set_virtual_space_list(new_entry);
2127   } else {
2128     current_virtual_space()->set_next(new_entry);
2129   }
2130   set_current_virtual_space(new_entry);
2131   inc_reserved_words(new_entry->reserved_words());
2132   inc_committed_words(new_entry->committed_words());
2133   inc_virtual_space_count();
2134 #ifdef ASSERT
2135   new_entry->mangle();
2136 #endif
2137   LogTarget(Trace, gc, metaspace) lt;
2138   if (lt.is_enabled()) {
2139     LogStream ls(lt);
2140     VirtualSpaceNode* vsl = current_virtual_space();
2141     ResourceMark rm;
2142     vsl->print_on(&ls);
2143   }
2144 }
2145 
2146 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
2147                                       size_t min_words,
2148                                       size_t preferred_words) {
2149   size_t before = node->committed_words();
2150 
2151   bool result = node->expand_by(min_words, preferred_words);
2152 
2153   size_t after = node->committed_words();
2154 
2155   // after and before can be the same if the memory was pre-committed.
2156   assert(after >= before, "Inconsistency");
2157   inc_committed_words(after - before);
2158 
2159   return result;
2160 }
2161 
2162 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
2163   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
2164   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
2165   assert(min_words <= preferred_words, "Invalid arguments");
2166 
2167   const char* const class_or_not = (is_class() ? "class" : "non-class");
2168 
2169   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
2170     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
2171               class_or_not);
2172     return  false;
2173   }
2174 
2175   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
2176   if (allowed_expansion_words < min_words) {
2177     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
2178               class_or_not);
2179     return false;
2180   }
2181 
2182   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
2183 
  // Commit more memory from the current virtual space.
2185   bool vs_expanded = expand_node_by(current_virtual_space(),
2186                                     min_words,
2187                                     max_expansion_words);
2188   if (vs_expanded) {
2189      log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
2190                class_or_not);
2191      return true;
2192   }
2193   log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
2194             class_or_not);
2195   retire_current_virtual_space();
2196 
2197   // Get another virtual space.
2198   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
2199   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
2200 
2201   if (create_new_virtual_space(grow_vs_words)) {
2202     if (current_virtual_space()->is_pre_committed()) {
2203       // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
          "The new VirtualSpace was pre-committed, so it "
          "should be large enough to fit the alloc request.");
2207       return true;
2208     }
2209 
2210     return expand_node_by(current_virtual_space(),
2211                           min_words,
2212                           max_expansion_words);
2213   }
2214 
2215   return false;
2216 }
2217 
2218 // Given a chunk, calculate the largest possible padding space which
2219 // could be required when allocating it.
2220 static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
2221   const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
2222   if (chunk_type != HumongousIndex) {
    // Normal, non-humongous chunks are allocated at chunk size
    // boundaries, so the largest padding space required would be the
    // chunk size minus the smallest chunk size.
2226     const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
2227     return chunk_word_size - smallest_chunk_size;
2228   } else {
2229     // Humongous chunks are allocated at smallest-chunksize
2230     // boundaries, so there is no padding required.
2231     return 0;
2232   }
2233 }
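
// Illustrative example for the calculation above: when a medium chunk is
// requested, the worst case is a top() that sits exactly one smallest
// (specialized) chunk past a medium-chunk boundary; the allocator then has to
// emit padding chunks covering (medium chunk size - specialized chunk size)
// words before the requested chunk can be placed, which is the value returned
// for the non-humongous case.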
2234 
2235 
2236 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
2237 
2238   // Allocate a chunk out of the current virtual space.
2239   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2240 
2241   if (next != NULL) {
2242     return next;
2243   }
2244 
2245   // The expand amount is currently only determined by the requested sizes
2246   // and not how much committed memory is left in the current virtual space.
2247 
2248   // We must have enough space for the requested size and any
  // additional required padding chunks.
2250   const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
2251 
2252   size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
2253   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
2254   if (min_word_size >= preferred_word_size) {
2255     // Can happen when humongous chunks are allocated.
2256     preferred_word_size = min_word_size;
2257   }
2258 
2259   bool expanded = expand_by(min_word_size, preferred_word_size);
2260   if (expanded) {
2261     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2262     assert(next != NULL, "The allocation was expected to succeed after the expansion");
2263   }
2264 
  return next;
2266 }
2267 
2268 void VirtualSpaceList::print_on(outputStream* st) const {
2269   VirtualSpaceListIterator iter(virtual_space_list());
2270   while (iter.repeat()) {
2271     VirtualSpaceNode* node = iter.get_next();
2272     node->print_on(st);
2273   }
2274 }
2275 
2276 void VirtualSpaceList::print_map(outputStream* st) const {
2277   VirtualSpaceNode* list = virtual_space_list();
2278   VirtualSpaceListIterator iter(list);
2279   unsigned i = 0;
2280   while (iter.repeat()) {
2281     st->print_cr("Node %u:", i);
2282     VirtualSpaceNode* node = iter.get_next();
2283     node->print_map(st, this->is_class());
2284     i ++;
2285   }
2286 }
2287 
2288 // MetaspaceGC methods
2289 
2290 // VM_CollectForMetadataAllocation is the vm operation used to GC.
2291 // Within the VM operation after the GC the attempt to allocate the metadata
2292 // should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtual space will be
2294 // allocated for the metadata.  With perm gen the increase in the perm
2295 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
2296 // metaspace policy uses those as the small and large steps for the HWM.
2297 //
2298 // After the GC the compute_new_size() for MetaspaceGC is called to
2299 // resize the capacity of the metaspaces.  The current implementation
2300 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
2301 // to resize the Java heap by some GC's.  New flags can be implemented
2302 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
2303 // free space is desirable in the metaspace capacity to decide how much
2304 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
2305 // free space is desirable in the metaspace capacity before decreasing
2306 // the HWM.
2307 
2308 // Calculate the amount to increase the high water mark (HWM).
2309 // Increase by a minimum amount (MinMetaspaceExpansion) so that
2310 // another expansion is not requested too soon.  If that is not
2311 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
2312 // If that is still not enough, expand by the size of the allocation
2313 // plus some.
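//
// Worked example (illustrative, using hypothetical flag values): with
// MinMetaspaceExpansion = 256K and MaxMetaspaceExpansion = 4M, a 100K request
// raises the HWM by 256K, a 1M request raises it by 4M, and an 8M request
// raises it by roughly 8M plus 256K (after aligning up to the commit
// alignment).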
2314 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
2315   size_t min_delta = MinMetaspaceExpansion;
2316   size_t max_delta = MaxMetaspaceExpansion;
2317   size_t delta = align_up(bytes, Metaspace::commit_alignment());
2318 
2319   if (delta <= min_delta) {
2320     delta = min_delta;
2321   } else if (delta <= max_delta) {
2322     // Don't want to hit the high water mark on the next
2323     // allocation so make the delta greater than just enough
2324     // for this allocation.
2325     delta = max_delta;
2326   } else {
2327     // This allocation is large but the next ones are probably not
2328     // so increase by the minimum.
2329     delta = delta + min_delta;
2330   }
2331 
2332   assert_is_aligned(delta, Metaspace::commit_alignment());
2333 
2334   return delta;
2335 }
2336 
2337 size_t MetaspaceGC::capacity_until_GC() {
2338   size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
2339   assert(value >= MetaspaceSize, "Not initialized properly?");
2340   return value;
2341 }
2342 
2343 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
2344   assert_is_aligned(v, Metaspace::commit_alignment());
2345 
2346   intptr_t capacity_until_GC = _capacity_until_GC;
2347   intptr_t new_value = capacity_until_GC + v;
2348 
2349   if (new_value < capacity_until_GC) {
2350     // The addition wrapped around, set new_value to aligned max value.
2351     new_value = align_down(max_uintx, Metaspace::commit_alignment());
2352   }
2353 
2354   intptr_t expected = _capacity_until_GC;
2355   intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);
2356 
2357   if (expected != actual) {
2358     return false;
2359   }
2360 
2361   if (new_cap_until_GC != NULL) {
2362     *new_cap_until_GC = new_value;
2363   }
2364   if (old_cap_until_GC != NULL) {
2365     *old_cap_until_GC = capacity_until_GC;
2366   }
2367   return true;
2368 }
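
// A typical caller of inc_capacity_until_GC() retries the CAS in a loop
// (sketch only, with hypothetical local names, not an actual call site):
//
//   size_t new_cap = 0;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, &new_cap, NULL)) {
//     // Lost the race; another thread may already have raised the HWM enough.
//     if (MetaspaceGC::capacity_until_GC() >= required_capacity) {
//       break;
//     }
//   }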
2369 
2370 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
2371   assert_is_aligned(v, Metaspace::commit_alignment());
2372 
2373   return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
2374 }
2375 
2376 void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
2378   // we can't do a GC during initialization.
2379   _capacity_until_GC = MaxMetaspaceSize;
2380 }
2381 
2382 void MetaspaceGC::post_initialize() {
2383   // Reset the high-water mark once the VM initialization is done.
2384   _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
2385 }
2386 
2387 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
2388   // Check if the compressed class space is full.
2389   if (is_class && Metaspace::using_class_space()) {
2390     size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
2391     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
2392       log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
2393                 (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
2394       return false;
2395     }
2396   }
2397 
2398   // Check if the user has imposed a limit on the metaspace memory.
2399   size_t committed_bytes = MetaspaceAux::committed_bytes();
2400   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
2401     log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
2402               (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
2403     return false;
2404   }
2405 
2406   return true;
2407 }
2408 
2409 size_t MetaspaceGC::allowed_expansion() {
2410   size_t committed_bytes = MetaspaceAux::committed_bytes();
2411   size_t capacity_until_gc = capacity_until_GC();
2412 
2413   assert(capacity_until_gc >= committed_bytes,
2414          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
2415          capacity_until_gc, committed_bytes);
2416 
2417   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
2418   size_t left_until_GC = capacity_until_gc - committed_bytes;
2419   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
2420   log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
            " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ").",
2422             left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);
2423 
2424   return left_to_commit / BytesPerWord;
2425 }
2426 
2427 void MetaspaceGC::compute_new_size() {
2428   assert(_shrink_factor <= 100, "invalid shrink factor");
2429   uint current_shrink_factor = _shrink_factor;
2430   _shrink_factor = 0;
2431 
2432   // Using committed_bytes() for used_after_gc is an overestimation, since the
2433   // chunk free lists are included in committed_bytes() and the memory in an
2434   // un-fragmented chunk free list is available for future allocations.
2435   // However, if the chunk free lists becomes fragmented, then the memory may
2436   // not be available for future allocations and the memory is therefore "in use".
2437   // Including the chunk free lists in the definition of "in use" is therefore
2438   // necessary. Not including the chunk free lists can cause capacity_until_GC to
2439   // shrink below committed_bytes() and this has caused serious bugs in the past.
2440   const size_t used_after_gc = MetaspaceAux::committed_bytes();
2441   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
2442 
2443   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
2444   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
2445 
2446   const double min_tmp = used_after_gc / maximum_used_percentage;
2447   size_t minimum_desired_capacity =
2448     (size_t)MIN2(min_tmp, double(max_uintx));
2449   // Don't shrink less than the initial generation size
2450   minimum_desired_capacity = MAX2(minimum_desired_capacity,
2451                                   MetaspaceSize);
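  // Worked example (illustrative): with MinMetaspaceFreeRatio = 40 and
  // used_after_gc = 60MB, maximum_used_percentage is 0.6, so
  // minimum_desired_capacity becomes 60MB / 0.6 = 100MB (but never less than
  // MetaspaceSize).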
2452 
2453   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
2454   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
2455                            minimum_free_percentage, maximum_used_percentage);
2456   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
2457 
2458 
2459   size_t shrink_bytes = 0;
2460   if (capacity_until_GC < minimum_desired_capacity) {
2461     // If we have less capacity below the metaspace HWM, then
2462     // increment the HWM.
2463     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
2464     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
2465     // Don't expand unless it's significant
2466     if (expand_bytes >= MinMetaspaceExpansion) {
2467       size_t new_capacity_until_GC = 0;
2468       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");
2470 
2471       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2472                                                new_capacity_until_GC,
2473                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
2474       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
2475                                minimum_desired_capacity / (double) K,
2476                                expand_bytes / (double) K,
2477                                MinMetaspaceExpansion / (double) K,
2478                                new_capacity_until_GC / (double) K);
2479     }
2480     return;
2481   }
2482 
2483   // No expansion, now see if we want to shrink
2484   // We would never want to shrink more than this
2485   assert(capacity_until_GC >= minimum_desired_capacity,
2486          SIZE_FORMAT " >= " SIZE_FORMAT,
2487          capacity_until_GC, minimum_desired_capacity);
2488   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
2489 
2490   // Should shrinking be considered?
2491   if (MaxMetaspaceFreeRatio < 100) {
2492     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
2493     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
2494     const double max_tmp = used_after_gc / minimum_used_percentage;
2495     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
2496     maximum_desired_capacity = MAX2(maximum_desired_capacity,
2497                                     MetaspaceSize);
2498     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
2499                              maximum_free_percentage, minimum_used_percentage);
2500     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
2501                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
2502 
2503     assert(minimum_desired_capacity <= maximum_desired_capacity,
2504            "sanity check");
2505 
2506     if (capacity_until_GC > maximum_desired_capacity) {
2507       // Capacity too large, compute shrinking size
2508       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the metaspace again for the next phase.  So we
2512       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
2513       // on the third call, and 100% by the fourth call.  But if we recompute
2514       // size without shrinking, it goes back to 0%.
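      // For instance, with a constant excess of 100MB over the desired
      // capacity across consecutive recomputations, the damped shrink amounts
      // would be 0MB, 10MB, 40MB and then 100MB, as the factor progresses
      // 0 -> 10 -> 40 -> 100 (each step being min(previous * 4, 100), starting
      // from 10).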
2515       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
2516 
2517       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
2518 
2519       assert(shrink_bytes <= max_shrink_bytes,
2520              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
2521              shrink_bytes, max_shrink_bytes);
2522       if (current_shrink_factor == 0) {
2523         _shrink_factor = 10;
2524       } else {
2525         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
2526       }
2527       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
2528                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
2529       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
2530                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
2531     }
2532   }
2533 
2534   // Don't shrink unless it's significant
2535   if (shrink_bytes >= MinMetaspaceExpansion &&
2536       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
2537     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
2538     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2539                                              new_capacity_until_GC,
2540                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
2541   }
2542 }
2543 
2544 // Metadebug methods
2545 
2546 void Metadebug::init_allocation_fail_alot_count() {
2547   if (MetadataAllocationFailALot) {
2548     _allocation_fail_alot_count =
2549       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
2550   }
2551 }
2552 
2553 #ifdef ASSERT
2554 bool Metadebug::test_metadata_failure() {
2555   if (MetadataAllocationFailALot &&
2556       Threads::is_vm_complete()) {
2557     if (_allocation_fail_alot_count > 0) {
2558       _allocation_fail_alot_count--;
2559     } else {
2560       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
2561       init_allocation_fail_alot_count();
2562       return true;
2563     }
2564   }
2565   return false;
2566 }
2567 #endif
2568 
2569 // ChunkManager methods
2570 size_t ChunkManager::free_chunks_total_words() {
2571   return _free_chunks_total;
2572 }
2573 
2574 size_t ChunkManager::free_chunks_total_bytes() {
2575   return free_chunks_total_words() * BytesPerWord;
2576 }
2577 
2578 // Update internal accounting after a chunk was added
2579 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
2580   assert_lock_strong(SpaceManager::expand_lock());
2581   _free_chunks_count ++;
2582   _free_chunks_total += c->word_size();
2583 }
2584 
2585 // Update internal accounting after a chunk was removed
2586 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
2587   assert_lock_strong(SpaceManager::expand_lock());
2588   assert(_free_chunks_count >= 1,
2589     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
2590   assert(_free_chunks_total >= c->word_size(),
2591     "ChunkManager::_free_chunks_total: about to go negative"
2592      "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
2593   _free_chunks_count --;
2594   _free_chunks_total -= c->word_size();
2595 }
2596 
2597 size_t ChunkManager::free_chunks_count() {
2598 #ifdef ASSERT
2599   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
2600     MutexLockerEx cl(SpaceManager::expand_lock(),
2601                      Mutex::_no_safepoint_check_flag);
    // This lock is only needed in debug builds because the verification
    // of the free chunk counts walks the list of free chunks.
2604     slow_locked_verify_free_chunks_count();
2605   }
2606 #endif
2607   return _free_chunks_count;
2608 }
2609 
2610 ChunkIndex ChunkManager::list_index(size_t size) {
2611   if (size_by_index(SpecializedIndex) == size) {
2612     return SpecializedIndex;
2613   }
2614   if (size_by_index(SmallIndex) == size) {
2615     return SmallIndex;
2616   }
2617   const size_t med_size = size_by_index(MediumIndex);
2618   if (med_size == size) {
2619     return MediumIndex;
2620   }
2621 
2622   assert(size > med_size, "Not a humongous chunk");
2623   return HumongousIndex;
2624 }
2625 
2626 size_t ChunkManager::size_by_index(ChunkIndex index) const {
2627   index_bounds_check(index);
2628   assert(index != HumongousIndex, "Do not call for humongous chunks.");
2629   return _free_chunks[index].size();
2630 }
2631 
2632 void ChunkManager::locked_verify_free_chunks_total() {
2633   assert_lock_strong(SpaceManager::expand_lock());
2634   assert(sum_free_chunks() == _free_chunks_total,
2635          "_free_chunks_total " SIZE_FORMAT " is not the"
2636          " same as sum " SIZE_FORMAT, _free_chunks_total,
2637          sum_free_chunks());
2638 }
2639 
2640 void ChunkManager::verify_free_chunks_total() {
2641   MutexLockerEx cl(SpaceManager::expand_lock(),
2642                      Mutex::_no_safepoint_check_flag);
2643   locked_verify_free_chunks_total();
2644 }
2645 
2646 void ChunkManager::locked_verify_free_chunks_count() {
2647   assert_lock_strong(SpaceManager::expand_lock());
2648   assert(sum_free_chunks_count() == _free_chunks_count,
2649          "_free_chunks_count " SIZE_FORMAT " is not the"
2650          " same as sum " SIZE_FORMAT, _free_chunks_count,
2651          sum_free_chunks_count());
2652 }
2653 
2654 void ChunkManager::verify_free_chunks_count() {
2655 #ifdef ASSERT
2656   MutexLockerEx cl(SpaceManager::expand_lock(),
2657                      Mutex::_no_safepoint_check_flag);
2658   locked_verify_free_chunks_count();
2659 #endif
2660 }
2661 
2662 void ChunkManager::verify() {
2663   MutexLockerEx cl(SpaceManager::expand_lock(),
2664                      Mutex::_no_safepoint_check_flag);
2665   locked_verify();
2666 }
2667 
2668 void ChunkManager::locked_verify() {
2669   locked_verify_free_chunks_count();
2670   locked_verify_free_chunks_total();
2671   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2672     ChunkList* list = free_chunks(i);
2673     if (list != NULL) {
2674       Metachunk* chunk = list->head();
2675       while (chunk) {
2676         DEBUG_ONLY(do_verify_chunk(chunk);)
2677         assert(chunk->is_tagged_free(), "Chunk should be tagged as free.");
2678         chunk = chunk->next();
2679       }
2680     }
2681   }
2682 }
2683 
2684 void ChunkManager::locked_print_free_chunks(outputStream* st) {
2685   assert_lock_strong(SpaceManager::expand_lock());
2686   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2687                 _free_chunks_total, _free_chunks_count);
2688 }
2689 
2690 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
2691   assert_lock_strong(SpaceManager::expand_lock());
2692   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2693                 sum_free_chunks(), sum_free_chunks_count());
2694 }
2695 
2696 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
2697   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
2698          "Bad index: %d", (int)index);
2699 
2700   return &_free_chunks[index];
2701 }
2702 
2703 // These methods, which sum over the free chunk lists, are used by the
2704 // printing methods that run in product builds.
2705 size_t ChunkManager::sum_free_chunks() {
2706   assert_lock_strong(SpaceManager::expand_lock());
2707   size_t result = 0;
2708   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2709     ChunkList* list = free_chunks(i);
2710 
2711     if (list == NULL) {
2712       continue;
2713     }
2714 
2715     result = result + list->count() * list->size();
2716   }
2717   result = result + humongous_dictionary()->total_size();
2718   return result;
2719 }
2720 
2721 size_t ChunkManager::sum_free_chunks_count() {
2722   assert_lock_strong(SpaceManager::expand_lock());
2723   size_t count = 0;
2724   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2725     ChunkList* list = free_chunks(i);
2726     if (list == NULL) {
2727       continue;
2728     }
2729     count = count + list->count();
2730   }
2731   count = count + humongous_dictionary()->total_free_blocks();
2732   return count;
2733 }
2734 
2735 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
2736   ChunkIndex index = list_index(word_size);
2737   assert(index < HumongousIndex, "No humongous list");
2738   return free_chunks(index);
2739 }
2740 
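     // Hand out a free chunk of the given word size, or NULL if none is
     // available. Non-humongous requests are served from the matching free
     // list, splitting a larger free chunk if that list is empty; humongous
     // requests are served from the humongous dictionary.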
2741 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
2742   assert_lock_strong(SpaceManager::expand_lock());
2743 
2744   slow_locked_verify();
2745 
2746   Metachunk* chunk = NULL;
2747   if (list_index(word_size) != HumongousIndex) {
2748     ChunkList* free_list = find_free_chunks_list(word_size);
2749     assert(free_list != NULL, "Sanity check");
2750 
2751     chunk = free_list->head();
2752 
2753     // Chunk splitting: if there are no free chunks of the requested size,
2754     // but only larger ones, split a larger chunk into smaller chunks. This
2755     // is the counterpart of the coalescing done upon chunk return.
2756     if (chunk == NULL) {
2757 
2758       ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());
2759 
2760       // Is there a larger chunk we could split?
2761       Metachunk* larger_chunk = NULL;
2762       ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
2763       while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
2764         larger_chunk = free_chunks(larger_chunk_index)->head();
2765         if (larger_chunk == NULL) {
2766           larger_chunk_index = next_chunk_index(larger_chunk_index);
2767         }
2768       }
2769 
2770       if (larger_chunk != NULL) {
2771         assert(larger_chunk->word_size() > word_size, "Sanity");
2772         assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");
2773 
2774         // We found a larger chunk. Let's split it up:
2775         // - remove old chunk
2776         // - in its place, create new smaller chunks, with at least one chunk
2777         //   being of target size, the others sized as large as possible. This
2778         //   is to make sure the resulting chunks are "as coalesced as possible"
2779         //   (similar to VirtualSpaceNode::retire()).
2780         // Note: during this operation both ChunkManager and VirtualSpaceNode
2781         //  are temporarily invalid, so be careful with asserts.
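             // Example: if a specialized chunk is requested and only a small
             // chunk is free, the small chunk is removed and re-created in place
             // as several specialized chunks; one is returned to the caller and
             // the rest are put back on the specialized free list.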
2782 
2783         log_trace(gc, metaspace, freelist)("Chunk split (%s): splitting chunk " PTR_FORMAT
2784            ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
2785           (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
2786           chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));
2787 
2788         MetaWord* const region_start = (MetaWord*)larger_chunk;
2789         const size_t region_word_len = larger_chunk->word_size();
2790         MetaWord* const region_end = region_start + region_word_len;
2791         VirtualSpaceNode* const vsn = larger_chunk->container();
2792         OccupancyMap* const ocmap = vsn->occupancy_map();
2793 
2794         // Remove old chunk.
2795         free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
2796         DEBUG_ONLY(larger_chunk->remove_sentinel();)
2797 
2798         DEBUG_ONLY(larger_chunk = NULL); // Prevent access from here on and wipe area.
2799         DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));
2800 
2801         // In its place create first the target chunk...
2802         MetaWord* p = region_start;
2803         chunk = ::new (p) Metachunk(target_chunk_index, is_class(), word_size, vsn);
2804         assert(chunk == (Metachunk*)p, "Sanity");
2805         DEBUG_ONLY(chunk->set_origin(origin_split);)
2806 
2807         // Note: we do not need to mark its start in the occupancy map
2808         // because it coincides with the old chunk start.
2809 
2810         // We are about to return it, so mark it in use and update vsn count.
2811         do_update_in_use_info_for_chunk(chunk, true);
2812         account_for_removed_chunk(chunk);
2813         vsn->inc_container_count();
2814 
2815         // This chunk should now be valid and can be verified.
2816         DEBUG_ONLY(do_verify_chunk(chunk));
2817 
2818         // In the remaining space create the remainder chunks.
2819         p += chunk->word_size();
2820         assert(p < region_end, "Sanity");
2821 
2822         while (p < region_end) {
2823 
2824           // Find the largest chunk size which fits the alignment requirements at address p.
2825           ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index);
2826           size_t this_chunk_word_size = 0;
2827           for(;;) {
2828             this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class());
2829             if (is_aligned(p, this_chunk_word_size * BytesPerWord)) {
2830               break;
2831             } else {
2832               this_chunk_index = prev_chunk_index(this_chunk_index);
2833               assert(this_chunk_index >= target_chunk_index, "Sanity");
2834             }
2835           }
2836 
2837           assert(this_chunk_word_size >= word_size, "Sanity");
2838           assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
2839           assert(p + this_chunk_word_size <= region_end, "Sanity");
2840 
2841           // Create the split-off chunk.
2842           Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
2843           assert(this_chunk == (Metachunk*)p, "Sanity");
2844           DEBUG_ONLY(this_chunk->set_origin(origin_split);)
2845           ocmap->set_chunk_starts_at_address(p, true);
2846           do_update_in_use_info_for_chunk(this_chunk, false);
2847 
2848           // This chunk should be valid and can be verified.
2849           DEBUG_ONLY(do_verify_chunk(this_chunk));
2850 
2851           // Return this chunk to freelist and correct counter.
2852           free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
2853           _free_chunks_count ++;
2854 
2855           log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
2856             SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
2857             p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
2858             p2i(region_start), p2i(region_end));
2859 
2860           p += this_chunk_word_size;
2861 
2862         }
2863 
2864         // ChunkManager and VirtualSpaceNode should be valid at this point.
2865         DEBUG_ONLY(this->locked_verify());
2866 
2867         // This will also walk the chunks in the address range and
2868         // verify that we left no "holes".
2869         DEBUG_ONLY(vsn->verify());
2870         DEBUG_ONLY(chunk->container()->verify_container_count());
2871 
2872         log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get (%s): Returning chunk "
2873           PTR_FORMAT ", word size " SIZE_FORMAT_HEX " (%s).",
2874           (is_class() ? "class space" : "metaspace"), p2i(chunk), chunk->word_size(), chunk_size_name(chunk->get_chunk_type()));
2875 
2876         DEBUG_ONLY(chunk->inc_use_count();)
2877         return chunk;
2878       }
2879 
2880     }
2881 
2882     if (chunk == NULL) {
2883       return NULL;
2884     }
2885 
2886     // Remove the chunk as the head of the list.
2887     free_list->remove_chunk(chunk);
2888 
2889     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
2890                                        p2i(free_list), p2i(chunk), chunk->word_size());
2891   } else {
2892     chunk = humongous_dictionary()->get_chunk(word_size);
2893 
2894     if (chunk == NULL) {
2895       return NULL;
2896     }
2897 
2898     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
2899                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
2900   }
2901 
2902   // Chunk has been removed from the chunk manager; update counters.
2903   account_for_removed_chunk(chunk);
2904 
2905   // Remove it from the links to this freelist
2906   chunk->set_next(NULL);
2907   chunk->set_prev(NULL);
2908 
2909   // Chunk is no longer on any freelist. Marking it as in use (not tagged
2910   // free) makes container_count_slow() work.
2911   do_update_in_use_info_for_chunk(chunk, true);
2912   chunk->container()->inc_container_count();
2913   DEBUG_ONLY(chunk->container()->verify_container_count());
2914 
2915   DEBUG_ONLY(slow_locked_verify());
2916   DEBUG_ONLY(chunk->inc_use_count();)
2917   return chunk;
2918 }
2919 
2920 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
2921   assert_lock_strong(SpaceManager::expand_lock());
2922   slow_locked_verify();
2923 
2924   // Take from the beginning of the list
2925   Metachunk* chunk = free_chunks_get(word_size);
2926   if (chunk == NULL) {
2927     return NULL;
2928   }
2929 
2930   assert((word_size <= chunk->word_size()) ||
2931          (list_index(chunk->word_size()) == HumongousIndex),
2932          "Non-humongous variable sized chunk");
2933   LogTarget(Debug, gc, metaspace, freelist) lt;
2934   if (lt.is_enabled()) {
2935     size_t list_count;
2936     if (list_index(word_size) < HumongousIndex) {
2937       ChunkList* list = find_free_chunks_list(word_size);
2938       list_count = list->count();
2939     } else {
2940       list_count = humongous_dictionary()->total_count();
2941     }
2942     LogStream ls(lt);
2943     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
2944              p2i(this), p2i(chunk), chunk->word_size(), list_count);
2945     ResourceMark rm;
2946     locked_print_free_chunks(&ls);
2947   }
2948 
2949   return chunk;
2950 }
2951 
2952 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
2953   assert_lock_strong(SpaceManager::expand_lock());
2954   assert(chunk != NULL, "Expected chunk.");
2955   DEBUG_ONLY(do_verify_chunk(chunk);)
2956   assert(chunk->get_chunk_type() == index, "Chunk does not match expected index.");
2957   assert(chunk->container() != NULL, "Container should have been set.");
2958   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
2959   index_bounds_check(index);
2960 
2961   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
2962   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
2963   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
2964   DEBUG_ONLY(chunk->mangle(badMetaWordVal);)
2965 
2966   if (index != HumongousIndex) {
2967     // Return non-humongous chunk to freelist.
2968     ChunkList* list = free_chunks(index);
2969     assert(list->size() == chunk->word_size(), "Wrong chunk type.");
2970     list->return_chunk_at_head(chunk);
2971     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
2972         chunk_size_name(index), p2i(chunk));
2973   } else {
2974     // Return humongous chunk to dictionary.
2975     assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
2976     assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
2977            "Humongous chunk has wrong alignment.");
2978     _humongous_dictionary.return_chunk(chunk);
2979     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
2980         chunk_size_name(index), p2i(chunk), chunk->word_size());
2981   }
2982   chunk->container()->dec_container_count();
2983   do_update_in_use_info_for_chunk(chunk, false);
2984 
2985   // Chunk has been added; update counters.
2986   account_for_added_chunk(chunk);
2987 
2988   // Attempt to coalesce the returned chunk with its neighboring chunks:
2989   // if this chunk is small or specialized, attempt to coalesce into a medium chunk.
2990   if (index == SmallIndex || index == SpecializedIndex) {
2991     if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
2992       // That did not work. If this chunk is specialized, we may still be able to form a small chunk.
2993       if (index == SpecializedIndex) {
2994         if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
2995           // give up.
2996         }
2997       }
2998     }
2999   }
3000 
3001 }
3002 
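     // Return an entire NULL-terminated list of chunks of the given type to
     // this ChunkManager, one chunk at a time.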
3003 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
3004   index_bounds_check(index);
3005   if (chunks == NULL) {
3006     return;
3007   }
3008   LogTarget(Trace, gc, metaspace, freelist) log;
3009   if (log.is_enabled()) { // tracing
3010     log.print("returning list of %s chunks...", chunk_size_name(index));
3011   }
3012   unsigned num_chunks_returned = 0;
3013   size_t size_chunks_returned = 0;
3014   Metachunk* cur = chunks;
3015   while (cur != NULL) {
3016     // Capture the next link before it is changed
3017     // by the call to return_chunk_at_head();
3018     Metachunk* next = cur->next();
3019     if (log.is_enabled()) { // tracing
3020       num_chunks_returned ++;
3021       size_chunks_returned += cur->word_size();
3022     }
3023     return_single_chunk(index, cur);
3024     cur = next;
3025   }
3026   if (log.is_enabled()) { // tracing
3027     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
3028         num_chunks_returned, chunk_size_name(index), size_chunks_returned);
3029     if (index != HumongousIndex) {
3030       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
3031     } else {
3032       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
3033     }
3034   }
3035 }
3036 
3037 void ChunkManager::print_on(outputStream* out) const {
3038   _humongous_dictionary.report_statistics(out);
3039 }
3040 
3041 void ChunkManager::locked_get_statistics(ChunkManagerStatistics* stat) const {
3042   assert_lock_strong(SpaceManager::expand_lock());
3043   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3044     stat->num_by_type[i] = num_free_chunks(i);
3045     stat->single_size_by_type[i] = size_by_index(i);
3046     stat->total_size_by_type[i] = size_free_chunks_in_bytes(i);
3047   }
3048   stat->num_humongous_chunks = num_free_chunks(HumongousIndex);
3049   stat->total_size_humongous_chunks = size_free_chunks_in_bytes(HumongousIndex);
3050 }
3051 
3052 void ChunkManager::get_statistics(ChunkManagerStatistics* stat) const {
3053   MutexLockerEx cl(SpaceManager::expand_lock(),
3054                    Mutex::_no_safepoint_check_flag);
3055   locked_get_statistics(stat);
3056 }
3057 
3058 void ChunkManager::print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale) {
3059   size_t total = 0;
3060   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3061 
3062   const char* unit = scale_unit(scale);
3063   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3064     out->print("  " SIZE_FORMAT " %s (" SIZE_FORMAT " bytes) chunks, total ",
3065                    stat->num_by_type[i], chunk_size_name(i),
3066                    stat->single_size_by_type[i]);
3067     if (scale == 1) {
3068       out->print_cr(SIZE_FORMAT " bytes", stat->total_size_by_type[i]);
3069     } else {
3070       out->print_cr("%.2f%s", (float)stat->total_size_by_type[i] / scale, unit);
3071     }
3072 
3073     total += stat->total_size_by_type[i];
3074   }
3075 
3076 
3077   total += stat->total_size_humongous_chunks;
3078 
3079   if (scale == 1) {
3080     out->print_cr("  " SIZE_FORMAT " humongous chunks, total " SIZE_FORMAT " bytes",
3081     stat->num_humongous_chunks, stat->total_size_humongous_chunks);
3082 
3083     out->print_cr("  total size: " SIZE_FORMAT " bytes.", total);
3084   } else {
3085     out->print_cr("  " SIZE_FORMAT " humongous chunks, total %.2f%s",
3086     stat->num_humongous_chunks,
3087     (float)stat->total_size_humongous_chunks / scale, unit);
3088 
3089     out->print_cr("  total size: %.2f%s.", (float)total / scale, unit);
3090   }
3091 
3092 }
3093 
3094 void ChunkManager::print_all_chunkmanagers(outputStream* out, size_t scale) {
3095   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3096 
3097   // Note: keep lock protection only to retrieving statistics; keep printing
3098   // out of lock protection
3099   ChunkManagerStatistics stat;
3100   out->print_cr("Chunkmanager (non-class):");
3101   const ChunkManager* const non_class_cm = Metaspace::chunk_manager_metadata();
3102   if (non_class_cm != NULL) {
3103     non_class_cm->get_statistics(&stat);
3104     ChunkManager::print_statistics(&stat, out, scale);
3105   } else {
3106     out->print_cr("unavailable.");
3107   }
3108   out->print_cr("Chunkmanager (class):");
3109   const ChunkManager* const class_cm = Metaspace::chunk_manager_class();
3110   if (class_cm != NULL) {
3111     class_cm->get_statistics(&stat);
3112     ChunkManager::print_statistics(&stat, out, scale);
3113   } else {
3114     out->print_cr("unavailable.");
3115   }
3116 }
3117 
3118 // SpaceManager methods
3119 
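     // Round a requested initial chunk size up to the smallest fixed chunk size
     // (specialized, small or medium) that can hold it; larger requests are
     // returned unchanged and will be treated as humongous chunks.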
3120 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
3121   size_t chunk_sizes[] = {
3122       specialized_chunk_size(is_class_space),
3123       small_chunk_size(is_class_space),
3124       medium_chunk_size(is_class_space)
3125   };
3126 
3127   // Adjust up to one of the fixed chunk sizes ...
3128   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
3129     if (requested <= chunk_sizes[i]) {
3130       return chunk_sizes[i];
3131     }
3132   }
3133 
3134   // ... or return the size as a humongous chunk.
3135   return requested;
3136 }
3137 
3138 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
3139   return adjust_initial_chunk_size(requested, is_class());
3140 }
3141 
3142 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
3143   size_t requested;
3144 
3145   if (is_class()) {
3146     switch (type) {
3147     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
3148     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
3149     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
3150     default:                                 requested = ClassSmallChunk; break;
3151     }
3152   } else {
3153     switch (type) {
3154     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
3155     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
3156     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
3157     default:                                 requested = SmallChunk; break;
3158     }
3159   }
3160 
3161   // Adjust to one of the fixed chunk sizes (unless humongous)
3162   const size_t adjusted = adjust_initial_chunk_size(requested);
3163 
3164   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
3165          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
3166 
3167   return adjusted;
3168 }
3169 
3170 size_t SpaceManager::sum_free_in_chunks_in_use() const {
3171   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3172   size_t free = 0;
3173   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3174     Metachunk* chunk = chunks_in_use(i);
3175     while (chunk != NULL) {
3176       free += chunk->free_word_size();
3177       chunk = chunk->next();
3178     }
3179   }
3180   return free;
3181 }
3182 
3183 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
3184   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3185   size_t result = 0;
3186   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3187    result += sum_waste_in_chunks_in_use(i);
3188   }
3189 
3190   return result;
3191 }
3192 
3193 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
3194   size_t result = 0;
3195   Metachunk* chunk = chunks_in_use(index);
3196   // Count the free space in all the chunks except the
3197   // current chunk, from which allocations are still being made.
3198   while (chunk != NULL) {
3199     if (chunk != current_chunk()) {
3200       result += chunk->free_word_size();
3201     }
3202     chunk = chunk->next();
3203   }
3204   return result;
3205 }
3206 
3207 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
3208   // For CMS use allocated_chunks_words(), which does not need the
3209   // Metaspace lock.  For the other collectors sum over the in-use
3210   // chunk lists.  Walking the lists (sum_capacity_in_chunks_in_use())
3211   // is too expensive to use in product builds, so
3212   // allocated_chunks_words() should normally be used; keeping both
3213   // paths allows checking that allocated_chunks_words() returns the
3214   // same value as sum_capacity_in_chunks_in_use(), which is the
3215   // definitive answer.
3216   if (UseConcMarkSweepGC) {
3217     return allocated_chunks_words();
3218   } else {
3219     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3220     size_t sum = 0;
3221     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3222       Metachunk* chunk = chunks_in_use(i);
3223       while (chunk != NULL) {
3224         sum += chunk->word_size();
3225         chunk = chunk->next();
3226       }
3227     }
3228     return sum;
3229   }
3230 }
3231 
3232 size_t SpaceManager::sum_count_in_chunks_in_use() {
3233   size_t count = 0;
3234   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3235     count = count + sum_count_in_chunks_in_use(i);
3236   }
3237 
3238   return count;
3239 }
3240 
3241 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
3242   size_t count = 0;
3243   Metachunk* chunk = chunks_in_use(i);
3244   while (chunk != NULL) {
3245     count++;
3246     chunk = chunk->next();
3247   }
3248   return count;
3249 }
3250 
3251 
3252 size_t SpaceManager::sum_used_in_chunks_in_use() const {
3253   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3254   size_t used = 0;
3255   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3256     Metachunk* chunk = chunks_in_use(i);
3257     while (chunk != NULL) {
3258       used += chunk->used_word_size();
3259       chunk = chunk->next();
3260     }
3261   }
3262   return used;
3263 }
3264 
3265 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
3266 
3267   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3268     Metachunk* chunk = chunks_in_use(i);
3269     st->print("SpaceManager: %s " PTR_FORMAT,
3270                  chunk_size_name(i), p2i(chunk));
3271     if (chunk != NULL) {
3272       st->print_cr(" free " SIZE_FORMAT,
3273                    chunk->free_word_size());
3274     } else {
3275       st->cr();
3276     }
3277   }
3278 
3279   chunk_manager()->locked_print_free_chunks(st);
3280   chunk_manager()->locked_print_sum_free_chunks(st);
3281 }
3282 
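     // Decide how large the next chunk for this space manager should be:
     // specialized chunks for small anonymous-class metaspaces, small chunks up
     // to _small_chunk_limit, then medium chunks; requests that do not even fit
     // a medium chunk become humongous, aligned up to the smallest chunk size.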
3283 size_t SpaceManager::calc_chunk_size(size_t word_size) {
3284 
3285   // Decide between a small chunk and a medium chunk.  Up to
3286   // _small_chunk_limit small chunks can be allocated.
3287   // After that a medium chunk is preferred.
3288   size_t chunk_word_size;
3289 
3290   // Special case for anonymous metadata space.
3291   // Anonymous metadata space is usually small, mostly within the 1K - 2K range and
3292   // rarely around 4K (64-bit JVM).
3293   // Instead of jumping to SmallChunk after the initial chunk is exhausted, keeping
3294   // allocations in SpecializedChunks up to _anon_metadata_specialize_chunk_limit (4)
3295   // reduces space waste from 60+% to around 30%.
3296   if (_space_type == Metaspace::AnonymousMetaspaceType &&
3297       _mdtype == Metaspace::NonClassType &&
3298       sum_count_in_chunks_in_use(SpecializedIndex) < _anon_metadata_specialize_chunk_limit &&
3299       word_size + Metachunk::overhead() <= SpecializedChunk) {
3300     return SpecializedChunk;
3301   }
3302 
3303   if (chunks_in_use(MediumIndex) == NULL &&
3304       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
3305     chunk_word_size = (size_t) small_chunk_size();
3306     if (word_size + Metachunk::overhead() > small_chunk_size()) {
3307       chunk_word_size = medium_chunk_size();
3308     }
3309   } else {
3310     chunk_word_size = medium_chunk_size();
3311   }
3312 
3313   // Might still need a humongous chunk.  Enforce that
3314   // humongous allocation sizes are aligned up to
3315   // the smallest chunk size.
3316   size_t if_humongous_sized_chunk =
3317     align_up(word_size + Metachunk::overhead(),
3318                   smallest_chunk_size());
3319   chunk_word_size =
3320     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
3321 
3322   assert(!SpaceManager::is_humongous(word_size) ||
3323          chunk_word_size == if_humongous_sized_chunk,
3324          "Size calculation is wrong, word_size " SIZE_FORMAT
3325          " chunk_word_size " SIZE_FORMAT,
3326          word_size, chunk_word_size);
3327   Log(gc, metaspace, alloc) log;
3328   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
3329     log.debug("Metadata humongous allocation:");
3330     log.debug("  word_size " PTR_FORMAT, word_size);
3331     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
3332     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
3333   }
3334   return chunk_word_size;
3335 }
3336 
3337 void SpaceManager::track_metaspace_memory_usage() {
3338   if (is_init_completed()) {
3339     if (is_class()) {
3340       MemoryService::track_compressed_class_memory_usage();
3341     }
3342     MemoryService::track_metaspace_memory_usage();
3343   }
3344 }
3345 
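     // Called when the current chunk cannot satisfy the allocation: obtain a
     // new chunk (from the ChunkManager free lists or, failing that, from the
     // virtual space list), add it to the in-use chunk lists and allocate from
     // it. Returns NULL if no new chunk could be obtained.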
3346 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
3347   assert(vs_list()->current_virtual_space() != NULL,
3348          "Should have been set");
3349   assert(current_chunk() == NULL ||
3350          current_chunk()->allocate(word_size) == NULL,
3351          "Don't need to expand");
3352   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3353 
3354   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3355     size_t words_left = 0;
3356     size_t words_used = 0;
3357     if (current_chunk() != NULL) {
3358       words_left = current_chunk()->free_word_size();
3359       words_used = current_chunk()->used_word_size();
3360     }
3361     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
3362                                        word_size, words_used, words_left);
3363   }
3364 
3365   // Get another chunk
3366   size_t chunk_word_size = calc_chunk_size(word_size);
3367   Metachunk* next = get_new_chunk(chunk_word_size);
3368 
3369   MetaWord* mem = NULL;
3370 
3371   // If a chunk was available, add it to the in-use chunk list
3372   // and do an allocation from it.
3373   if (next != NULL) {
3374     // Add to this manager's list of chunks in use.
3375     add_chunk(next, false);
3376     mem = next->allocate(word_size);
3377   }
3378 
3379   // Track metaspace memory usage statistic.
3380   track_metaspace_memory_usage();
3381 
3382   return mem;
3383 }
3384 
3385 void SpaceManager::print_on(outputStream* st) const {
3386 
3387   for (ChunkIndex i = ZeroIndex;
3388        i < NumberOfInUseLists ;
3389        i = next_chunk_index(i) ) {
3390     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
3391                  p2i(chunks_in_use(i)),
3392                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
3393   }
3394   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
3395                " Humongous " SIZE_FORMAT,
3396                sum_waste_in_chunks_in_use(SmallIndex),
3397                sum_waste_in_chunks_in_use(MediumIndex),
3398                sum_waste_in_chunks_in_use(HumongousIndex));
3399   // block free lists
3400   if (block_freelists() != NULL) {
3401     st->print_cr("total in block free lists " SIZE_FORMAT,
3402       block_freelists()->total_size());
3403   }
3404 }
3405 
3406 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
3407                            Metaspace::MetaspaceType space_type,
3408                            Mutex* lock) :
3409   _mdtype(mdtype),
3410   _space_type(space_type),
3411   _allocated_blocks_words(0),
3412   _allocated_chunks_words(0),
3413   _allocated_chunks_count(0),
3414   _block_freelists(NULL),
3415   _lock(lock)
3416 {
3417   initialize();
3418 }
3419 
3420 void SpaceManager::inc_size_metrics(size_t words) {
3421   assert_lock_strong(SpaceManager::expand_lock());
3422   // Update the totals of allocated Metachunk words and allocated
3423   // Metachunk count for this SpaceManager
3424   _allocated_chunks_words = _allocated_chunks_words + words;
3425   _allocated_chunks_count++;
3426   // Global total of capacity in allocated Metachunks
3427   MetaspaceAux::inc_capacity(mdtype(), words);
3428   // Global total of allocated Metablocks.
3429   // used_words_slow() includes the overhead in each
3430   // Metachunk so include it in the used when the
3431   // Metachunk is first added (so only added once per
3432   // Metachunk).
3433   MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
3434 }
3435 
3436 void SpaceManager::inc_used_metrics(size_t words) {
3437   // Add to the per SpaceManager total
3438   Atomic::add(words, &_allocated_blocks_words);
3439   // Add to the global total
3440   MetaspaceAux::inc_used(mdtype(), words);
3441 }
3442 
3443 void SpaceManager::dec_total_from_size_metrics() {
3444   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
3445   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
3446   // Also deduct the overhead per Metachunk
3447   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
3448 }
3449 
3450 void SpaceManager::initialize() {
3451   Metadebug::init_allocation_fail_alot_count();
3452   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3453     _chunks_in_use[i] = NULL;
3454   }
3455   _current_chunk = NULL;
3456   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
3457 }
3458 
3459 SpaceManager::~SpaceManager() {
3460   // This acquires this->_lock, which can't be done while holding expand_lock()
3461   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
3462          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
3463          " allocated_chunks_words() " SIZE_FORMAT,
3464          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
3465 
3466   MutexLockerEx fcl(SpaceManager::expand_lock(),
3467                     Mutex::_no_safepoint_check_flag);
3468 
3469   assert(sum_count_in_chunks_in_use() == allocated_chunks_count(),
3470          "sum_count_in_chunks_in_use() " SIZE_FORMAT
3471          " allocated_chunks_count() " SIZE_FORMAT,
3472          sum_count_in_chunks_in_use(), allocated_chunks_count());
3473 
3474   chunk_manager()->slow_locked_verify();
3475 
3476   dec_total_from_size_metrics();
3477 
3478   Log(gc, metaspace, freelist) log;
3479   if (log.is_trace()) {
3480     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
3481     ResourceMark rm;
3482     LogStream ls(log.trace());
3483     locked_print_chunks_in_use_on(&ls);
3484     if (block_freelists() != NULL) {
3485       block_freelists()->print_on(&ls);
3486     }
3487   }
3488 
3489   // Add all the chunks in use by this space manager
3490   // to the global list of free chunks.
3491 
3492   // Follow each list of chunks-in-use and add them to the
3493   // free lists.  Each list is NULL terminated.
3494 
3495   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
3496     Metachunk* chunks = chunks_in_use(i);
3497     chunk_manager()->return_chunk_list(i, chunks);
3498     set_chunks_in_use(i, NULL);
3499   }
3500 
3501   chunk_manager()->slow_locked_verify();
3502 
3503   if (_block_freelists != NULL) {
3504     delete _block_freelists;
3505   }
3506 }
3507 
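     // Return a block to this space manager's block freelist so that its space
     // can be reused by later allocations; the freelist is created lazily on
     // the first deallocation.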
3508 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
3509   assert_lock_strong(_lock);
3510   // Allocations and deallocations are in raw_word_size
3511   size_t raw_word_size = get_allocation_word_size(word_size);
3512   // Lazily create a block_freelist
3513   if (block_freelists() == NULL) {
3514     _block_freelists = new BlockFreelist();
3515   }
3516   block_freelists()->return_block(p, raw_word_size);
3517 }
3518 
3519 // Adds a chunk to the list of chunks in use.
3520 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
3521 
3522   assert(new_chunk != NULL, "Should not be NULL");
3523   assert(new_chunk->next() == NULL, "Should not be on a list");
3524 
3525   new_chunk->reset_empty();
3526 
3527   // Find the correct list and set the current
3528   // chunk for that list.
3529   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
3530 
3531   if (index != HumongousIndex) {
3532     retire_current_chunk();
3533     set_current_chunk(new_chunk);
3534     new_chunk->set_next(chunks_in_use(index));
3535     set_chunks_in_use(index, new_chunk);
3536   } else {
3537     // For null class loader data and DumpSharedSpaces, the first chunk isn't
3538     // small, so small will be null.  Link this first chunk as the current
3539     // chunk.
3540     if (make_current) {
3541       // Set as the current chunk but otherwise treat as a humongous chunk.
3542       set_current_chunk(new_chunk);
3543     }
3544     // Link at head.  The _current_chunk only points to a humongous chunk for
3545     // the null class loader metaspace (class and data virtual space managers);
3546     // since humongous chunks are linked at the head, _current_chunk will not
3547     // point to the tail of the humongous chunks list.
3548     new_chunk->set_next(chunks_in_use(HumongousIndex));
3549     set_chunks_in_use(HumongousIndex, new_chunk);
3550 
3551     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
3552   }
3553 
3554   // Add to the running sum of capacity
3555   inc_size_metrics(new_chunk->word_size());
3556 
3557   assert(new_chunk->is_empty(), "Not ready for reuse");
3558   Log(gc, metaspace, freelist) log;
3559   if (log.is_trace()) {
3560     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT, sum_count_in_chunks_in_use());
3561     ResourceMark rm;
3562     LogStream ls(log.trace());
3563     new_chunk->print_on(&ls);
3564     chunk_manager()->locked_print_free_chunks(&ls);
3565   }
3566 }
3567 
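     // If the current chunk still has enough free space to be useful to the
     // block freelist, allocate the remainder and immediately return it to the
     // block freelist so it can satisfy future small allocations.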
3568 void SpaceManager::retire_current_chunk() {
3569   if (current_chunk() != NULL) {
3570     size_t remaining_words = current_chunk()->free_word_size();
3571     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
3572       MetaWord* ptr = current_chunk()->allocate(remaining_words);
3573       deallocate(ptr, remaining_words);
3574       inc_used_metrics(remaining_words);
3575     }
3576   }
3577 }
3578 
3579 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
3580   // Get a chunk from the chunk freelist
3581   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
3582 
3583   if (next == NULL) {
3584     next = vs_list()->get_new_chunk(chunk_word_size,
3585                                     medium_chunk_bunch());
3586   }
3587 
3588   Log(gc, metaspace, alloc) log;
3589   if (log.is_debug() && next != NULL &&
3590       SpaceManager::is_humongous(next->word_size())) {
3591     log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());
3592   }
3593 
3594   return next;
3595 }
3596 
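     // Allocation entry point for this SpaceManager: try the block freelist
     // first (once it has grown past allocation_from_dictionary_limit), then
     // fall back to allocating from the current or a new chunk.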
3597 MetaWord* SpaceManager::allocate(size_t word_size) {
3598   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3599   size_t raw_word_size = get_allocation_word_size(word_size);
3600   BlockFreelist* fl =  block_freelists();
3601   MetaWord* p = NULL;
3602   // Allocation from the dictionary is expensive in the sense that
3603   // the dictionary has to be searched for a size.  Don't allocate
3604   // from the dictionary until it starts to get fat.  Is this
3605   // a reasonable policy?  Maybe a skinny dictionary is fast enough
3606   // for allocations.  Do some profiling.  JJJ
3607   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
3608     p = fl->get_block(raw_word_size);
3609   }
3610   if (p == NULL) {
3611     p = allocate_work(raw_word_size);
3612   }
3613 
3614   return p;
3615 }
3616 
3617 // Returns the address of space allocated for "word_size".
3618 // This method does not know about blocks (Metablocks).
3619 MetaWord* SpaceManager::allocate_work(size_t word_size) {
3620   assert_lock_strong(_lock);
3621 #ifdef ASSERT
3622   if (Metadebug::test_metadata_failure()) {
3623     return NULL;
3624   }
3625 #endif
3626   // Is there space in the current chunk?
3627   MetaWord* result = NULL;
3628 
3629   if (current_chunk() != NULL) {
3630     result = current_chunk()->allocate(word_size);
3631   }
3632 
3633   if (result == NULL) {
3634     result = grow_and_allocate(word_size);
3635   }
3636 
3637   if (result != NULL) {
3638     inc_used_metrics(word_size);
3639     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
3640            "Head of the list is being allocated");
3641   }
3642 
3643   return result;
3644 }
3645 
3646 void SpaceManager::verify() {
3647   // If there are blocks in the dictionary, then
3648   // verification of chunks does not work since
3649   // being in the dictionary alters a chunk.
3650   if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
3651     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3652       Metachunk* curr = chunks_in_use(i);
3653       while (curr != NULL) {
3654         DEBUG_ONLY(do_verify_chunk(curr);)
3655         assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
3656         curr = curr->next();
3657       }
3658     }
3659   }
3660 }
3661 
3662 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
3663   assert(is_humongous(chunk->word_size()) ||
3664          chunk->word_size() == medium_chunk_size() ||
3665          chunk->word_size() == small_chunk_size() ||
3666          chunk->word_size() == specialized_chunk_size(),
3667          "Chunk size is wrong");
3668   return;
3669 }
3670 
3671 #ifdef ASSERT
3672 void SpaceManager::verify_allocated_blocks_words() {
3673   // Verification is only guaranteed at a safepoint.
3674   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
3675     "Verification can fail if the applications is running");
3676   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
3677          "allocation total is not consistent " SIZE_FORMAT
3678          " vs " SIZE_FORMAT,
3679          allocated_blocks_words(), sum_used_in_chunks_in_use());
3680 }
3681 
3682 #endif
3683 
3684 void SpaceManager::dump(outputStream* const out) const {
3685   size_t curr_total = 0;
3686   size_t waste = 0;
3687   uint i = 0;
3688   size_t used = 0;
3689   size_t capacity = 0;
3690 
3691   // Add up statistics for all chunks in this SpaceManager.
3692   for (ChunkIndex index = ZeroIndex;
3693        index < NumberOfInUseLists;
3694        index = next_chunk_index(index)) {
3695     for (Metachunk* curr = chunks_in_use(index);
3696          curr != NULL;
3697          curr = curr->next()) {
3698       out->print("%d) ", i++);
3699       curr->print_on(out);
3700       curr_total += curr->word_size();
3701       used += curr->used_word_size();
3702       capacity += curr->word_size();
3703       waste += curr->free_word_size() + curr->overhead();
3704     }
3705   }
3706 
3707   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3708     if (block_freelists() != NULL) block_freelists()->print_on(out);
3709   }
3710 
3711   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
3712   // Free space isn't wasted.
3713   waste -= free;
3714 
3715   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
3716                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
3717                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
3718 }
3719 
3720 // MetaspaceAux
3721 
3722 
3723 size_t MetaspaceAux::_capacity_words[] = {0, 0};
3724 volatile size_t MetaspaceAux::_used_words[] = {0, 0};
3725 
3726 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
3727   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3728   return list == NULL ? 0 : list->free_bytes();
3729 }
3730 
3731 size_t MetaspaceAux::free_bytes() {
3732   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
3733 }
3734 
3735 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
3736   assert_lock_strong(SpaceManager::expand_lock());
3737   assert(words <= capacity_words(mdtype),
3738          "About to decrement below 0: words " SIZE_FORMAT
3739          " is greater than _capacity_words[%u] " SIZE_FORMAT,
3740          words, mdtype, capacity_words(mdtype));
3741   _capacity_words[mdtype] -= words;
3742 }
3743 
3744 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
3745   assert_lock_strong(SpaceManager::expand_lock());
3746   // Needs to be atomic
3747   _capacity_words[mdtype] += words;
3748 }
3749 
3750 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
3751   assert(words <= used_words(mdtype),
3752          "About to decrement below 0: words " SIZE_FORMAT
3753          " is greater than _used_words[%u] " SIZE_FORMAT,
3754          words, mdtype, used_words(mdtype));
3755   // For CMS deallocation of the Metaspaces occurs during the
3756   // sweep which is a concurrent phase.  Protection by the expand_lock()
3757   // is not enough since allocation is on a per Metaspace basis
3758   // and protected by the Metaspace lock.
3759   Atomic::sub(words, &_used_words[mdtype]);
3760 }
3761 
3762 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
3763   // _used_words tracks allocations for
3764   // each piece of metadata.  Those allocations are
3765   // generally done concurrently by different application
3766   // threads so must be done atomically.
3767   Atomic::add(words, &_used_words[mdtype]);
3768 }
3769 
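     // The *_slow variants below walk every class loader metaspace and sum the
     // per-metaspace values; they are meant for verification and printing, not
     // for frequently executed paths.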
3770 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
3771   size_t used = 0;
3772   ClassLoaderDataGraphMetaspaceIterator iter;
3773   while (iter.repeat()) {
3774     Metaspace* msp = iter.get_next();
3775     // Sum allocated_blocks_words for each metaspace
3776     if (msp != NULL) {
3777       used += msp->used_words_slow(mdtype);
3778     }
3779   }
3780   return used * BytesPerWord;
3781 }
3782 
3783 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
3784   size_t free = 0;
3785   ClassLoaderDataGraphMetaspaceIterator iter;
3786   while (iter.repeat()) {
3787     Metaspace* msp = iter.get_next();
3788     if (msp != NULL) {
3789       free += msp->free_words_slow(mdtype);
3790     }
3791   }
3792   return free * BytesPerWord;
3793 }
3794 
3795 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
3796   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
3797     return 0;
3798   }
3799   // Don't count the space in the freelists.  That space will be
3800   // added to the capacity calculation as needed.
3801   size_t capacity = 0;
3802   ClassLoaderDataGraphMetaspaceIterator iter;
3803   while (iter.repeat()) {
3804     Metaspace* msp = iter.get_next();
3805     if (msp != NULL) {
3806       capacity += msp->capacity_words_slow(mdtype);
3807     }
3808   }
3809   return capacity * BytesPerWord;
3810 }
3811 
3812 size_t MetaspaceAux::capacity_bytes_slow() {
3813 #ifdef PRODUCT
3814   // Use capacity_bytes() in PRODUCT instead of this function.
3815   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
3816 #endif
3817   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
3818   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
3819   assert(capacity_bytes() == class_capacity + non_class_capacity,
3820          "bad accounting: capacity_bytes() " SIZE_FORMAT
3821          " class_capacity + non_class_capacity " SIZE_FORMAT
3822          " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
3823          capacity_bytes(), class_capacity + non_class_capacity,
3824          class_capacity, non_class_capacity);
3825 
3826   return class_capacity + non_class_capacity;
3827 }
3828 
3829 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
3830   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3831   return list == NULL ? 0 : list->reserved_bytes();
3832 }
3833 
3834 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
3835   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3836   return list == NULL ? 0 : list->committed_bytes();
3837 }
3838 
3839 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
3840 
3841 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
3842   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
3843   if (chunk_manager == NULL) {
3844     return 0;
3845   }
3846   chunk_manager->slow_verify();
3847   return chunk_manager->free_chunks_total_words();
3848 }
3849 
3850 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
3851   return free_chunks_total_words(mdtype) * BytesPerWord;
3852 }
3853 
3854 size_t MetaspaceAux::free_chunks_total_words() {
3855   return free_chunks_total_words(Metaspace::ClassType) +
3856          free_chunks_total_words(Metaspace::NonClassType);
3857 }
3858 
3859 size_t MetaspaceAux::free_chunks_total_bytes() {
3860   return free_chunks_total_words() * BytesPerWord;
3861 }
3862 
3863 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
3864   return Metaspace::get_chunk_manager(mdtype) != NULL;
3865 }
3866 
3867 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
3868   if (!has_chunk_free_list(mdtype)) {
3869     return MetaspaceChunkFreeListSummary();
3870   }
3871 
3872   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
3873   return cm->chunk_free_list_summary();
3874 }
3875 
3876 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
3877   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
3878                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
3879 }
3880 
3881 void MetaspaceAux::print_on(outputStream* out) {
3882   Metaspace::MetadataType nct = Metaspace::NonClassType;
3883 
3884   out->print_cr(" Metaspace       "
3885                 "used "      SIZE_FORMAT "K, "
3886                 "capacity "  SIZE_FORMAT "K, "
3887                 "committed " SIZE_FORMAT "K, "
3888                 "reserved "  SIZE_FORMAT "K",
3889                 used_bytes()/K,
3890                 capacity_bytes()/K,
3891                 committed_bytes()/K,
3892                 reserved_bytes()/K);
3893 
3894   if (Metaspace::using_class_space()) {
3895     Metaspace::MetadataType ct = Metaspace::ClassType;
3896     out->print_cr("  class space    "
3897                   "used "      SIZE_FORMAT "K, "
3898                   "capacity "  SIZE_FORMAT "K, "
3899                   "committed " SIZE_FORMAT "K, "
3900                   "reserved "  SIZE_FORMAT "K",
3901                   used_bytes(ct)/K,
3902                   capacity_bytes(ct)/K,
3903                   committed_bytes(ct)/K,
3904                   reserved_bytes(ct)/K);
3905   }
3906 }
3907 
3908 // Print information for class space and data space separately.
3909 // This is almost the same as above.
3910 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
3911   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
3912   size_t capacity_bytes = capacity_bytes_slow(mdtype);
3913   size_t used_bytes = used_bytes_slow(mdtype);
3914   size_t free_bytes = free_bytes_slow(mdtype);
3915   size_t used_and_free = used_bytes + free_bytes +
3916                            free_chunks_capacity_bytes;
3917   out->print_cr("  Chunk accounting: (used in chunks " SIZE_FORMAT
3918              "K + unused in chunks " SIZE_FORMAT "K  + "
3919              " capacity in free chunks " SIZE_FORMAT "K) = " SIZE_FORMAT
3920              "K  capacity in allocated chunks " SIZE_FORMAT "K",
3921              used_bytes / K,
3922              free_bytes / K,
3923              free_chunks_capacity_bytes / K,
3924              used_and_free / K,
3925              capacity_bytes / K);
3926   // Accounting can only be correct if we got the values during a safepoint
3927   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
3928 }
3929 
3930 // Print total fragmentation for class metaspaces
3931 void MetaspaceAux::print_class_waste(outputStream* out) {
3932   assert(Metaspace::using_class_space(), "class metaspace not used");
3933   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
3934   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
3935   ClassLoaderDataGraphMetaspaceIterator iter;
3936   while (iter.repeat()) {
3937     Metaspace* msp = iter.get_next();
3938     if (msp != NULL) {
3939       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
3940       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
3941       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
3942       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
3943       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
3944       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
3945       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
3946     }
3947   }
3948   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
3949                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
3950                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
3951                 "large count " SIZE_FORMAT,
3952                 cls_specialized_count, cls_specialized_waste,
3953                 cls_small_count, cls_small_waste,
3954                 cls_medium_count, cls_medium_waste, cls_humongous_count);
3955 }
3956 
3957 // Print total fragmentation for data and class metaspaces separately
3958 void MetaspaceAux::print_waste(outputStream* out) {
3959   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
3960   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
3961 
3962   ClassLoaderDataGraphMetaspaceIterator iter;
3963   while (iter.repeat()) {
3964     Metaspace* msp = iter.get_next();
3965     if (msp != NULL) {
3966       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
3967       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
3968       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
3969       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
3970       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
3971       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
3972       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
3973     }
3974   }
3975   out->print_cr("Total fragmentation waste (words) doesn't count free space");
3976   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
3977                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
3978                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
3979                         "large count " SIZE_FORMAT,
3980              specialized_count, specialized_waste, small_count,
3981              small_waste, medium_count, medium_waste, humongous_count);
3982   if (Metaspace::using_class_space()) {
3983     print_class_waste(out);
3984   }
3985 }
3986 
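     // Simple holder for capacity/used/free/waste counters that can be
     // accumulated with add() and printed at a given scale.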
3987 class MetadataStats VALUE_OBJ_CLASS_SPEC {
3988 private:
3989   size_t _capacity;
3990   size_t _used;
3991   size_t _free;
3992   size_t _waste;
3993 
3994 public:
3995   MetadataStats() : _capacity(0), _used(0), _free(0), _waste(0) { }
3996   MetadataStats(size_t capacity, size_t used, size_t free, size_t waste)
3997   : _capacity(capacity), _used(used), _free(free), _waste(waste) { }
3998 
3999   void add(const MetadataStats& stats) {
4000     _capacity += stats.capacity();
4001     _used += stats.used();
4002     _free += stats.free();
4003     _waste += stats.waste();
4004   }
4005 
4006   size_t capacity() const { return _capacity; }
4007   size_t used() const     { return _used; }
4008   size_t free() const     { return _free; }
4009   size_t waste() const    { return _waste; }
4010 
4011   void print_on(outputStream* out, size_t scale) const;
4012 };
4013 
4014 
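     // Illustrative example only (the exact unit string comes from scale_unit(),
     // defined earlier in this file): with scale == K, a stats object holding
     // capacity=8192, used=6144, free=1536 and waste=512 bytes would print roughly
     //   capacity=      8.00KB used=      6.00KB free=      1.50KB waste=      0.50KB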
4015 void MetadataStats::print_on(outputStream* out, size_t scale) const {
4016   const char* unit = scale_unit(scale);
4017   out->print_cr("capacity=%10.2f%s used=%10.2f%s free=%10.2f%s waste=%10.2f%s",
4018     (float)capacity() / scale, unit,
4019     (float)used() / scale, unit,
4020     (float)free() / scale, unit,
4021     (float)waste() / scale, unit);
4022 }
4023 
4024 class PrintCLDMetaspaceInfoClosure : public CLDClosure {
4025 private:
4026   outputStream*  _out;
4027   size_t         _scale;
4028 
4029   size_t         _total_count;
4030   MetadataStats  _total_metadata;
4031   MetadataStats  _total_class;
4032 
4033   size_t         _total_anon_count;
4034   MetadataStats  _total_anon_metadata;
4035   MetadataStats  _total_anon_class;
4036 
4037 public:
4038   PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale = K)
4039   : _out(out), _scale(scale), _total_count(0), _total_anon_count(0) { }
4040 
4041   ~PrintCLDMetaspaceInfoClosure() {
4042     print_summary();
4043   }
4044 
4045   void do_cld(ClassLoaderData* cld) {
4046     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
4047 
4048     if (cld->is_unloading()) return;
4049     Metaspace* msp = cld->metaspace_or_null();
4050     if (msp == NULL) {
4051       return;
4052     }
4053 
4054     bool anonymous = false;
4055     if (cld->is_anonymous()) {
4056       _out->print_cr("ClassLoader: for anonymous class");
4057       anonymous = true;
4058     } else {
4059       ResourceMark rm;
4060       _out->print_cr("ClassLoader: %s", cld->loader_name());
4061     }
4062 
4063     print_metaspace(msp, anonymous);
4064     _out->cr();
4065   }
4066 
4067 private:
4068   void print_metaspace(Metaspace* msp, bool anonymous);
4069   void print_summary() const;
4070 };
4071 
4072 void PrintCLDMetaspaceInfoClosure::print_metaspace(Metaspace* msp, bool anonymous) {
4073   assert(msp != NULL, "Sanity");
4074   SpaceManager* vsm = msp->vsm();
4076 
4077   size_t capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4078   size_t used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4079   size_t free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4080   size_t waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4081 
4082   _total_count ++;
4083   MetadataStats metadata_stats(capacity, used, free, waste);
4084   _total_metadata.add(metadata_stats);
4085 
4086   if (anonymous) {
4087     _total_anon_count ++;
4088     _total_anon_metadata.add(metadata_stats);
4089   }
4090 
4091   _out->print("  Metadata   ");
4092   metadata_stats.print_on(_out, _scale);
4093 
4094   if (Metaspace::using_class_space()) {
4095     vsm = msp->class_vsm();
4096 
4097     capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4098     used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4099     free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4100     waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4101 
4102     MetadataStats class_stats(capacity, used, free, waste);
4103     _total_class.add(class_stats);
4104 
4105     if (anonymous) {
4106       _total_anon_class.add(class_stats);
4107     }
4108 
4109     _out->print("  Class data ");
4110     class_stats.print_on(_out, _scale);
4111   }
4112 }
4113 
4114 void PrintCLDMetaspaceInfoClosure::print_summary() const {
4116   _out->cr();
4117   _out->print_cr("Summary:");
4118 
4119   MetadataStats total;
4120   total.add(_total_metadata);
4121   total.add(_total_class);
4122 
4123   _out->print("  Total class loaders=" SIZE_FORMAT_W(6) " ", _total_count);
4124   total.print_on(_out, _scale);
4125 
4126   _out->print("                    Metadata ");
4127   _total_metadata.print_on(_out, _scale);
4128 
4129   if (Metaspace::using_class_space()) {
4130     _out->print("                  Class data ");
4131     _total_class.print_on(_out, _scale);
4132   }
4133   _out->cr();
4134 
4135   MetadataStats total_anon;
4136   total_anon.add(_total_anon_metadata);
4137   total_anon.add(_total_anon_class);
4138 
4139   _out->print("For anonymous classes=" SIZE_FORMAT_W(6) " ", _total_anon_count);
4140   total_anon.print_on(_out, _scale);
4141 
4142   _out->print("                    Metadata ");
4143   _total_anon_metadata.print_on(_out, _scale);
4144 
4145   if (Metaspace::using_class_space()) {
4146     _out->print("                  Class data ");
4147     _total_anon_class.print_on(_out, _scale);
4148   }
4149 }
4150 
4151 void MetaspaceAux::print_metadata_for_nmt(outputStream* out, size_t scale) {
4152   const char* unit = scale_unit(scale);
4153   out->print_cr("Metaspaces:");
4154   out->print_cr("  Metadata space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
4155     reserved_bytes(Metaspace::NonClassType) / scale, unit,
4156     committed_bytes(Metaspace::NonClassType) / scale, unit);
4157   if (Metaspace::using_class_space()) {
4158     out->print_cr("  Class    space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
4159     reserved_bytes(Metaspace::ClassType) / scale, unit,
4160     committed_bytes(Metaspace::ClassType) / scale, unit);
4161   }
4162 
4163   out->cr();
4164   ChunkManager::print_all_chunkmanagers(out, scale);
4165 
4166   out->cr();
4167   out->print_cr("Per-classloader metadata:");
4168   out->cr();
4169 
4170   PrintCLDMetaspaceInfoClosure cl(out, scale);
4171   ClassLoaderDataGraph::cld_do(&cl);
4172 }
4173 
4174 
4175 // Dump global metaspace things from the end of ClassLoaderDataGraph
4176 void MetaspaceAux::dump(outputStream* out) {
4177   out->print_cr("All Metaspace:");
4178   out->print("data space: "); print_on(out, Metaspace::NonClassType);
4179   out->print("class space: "); print_on(out, Metaspace::ClassType);
4180   print_waste(out);
4181 }
4182 
4183 // Prints an ASCII representation of the given space.
4184 void MetaspaceAux::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
4185   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4186   const bool for_class = (mdtype == Metaspace::ClassType);
4187   VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
4188   if (vsl != NULL) {
4189     if (for_class) {
4190       if (!Metaspace::using_class_space()) {
4191         out->print_cr("No Class Space.");
4192         return;
4193       }
4194       out->print_raw("---- Metaspace Map (Class Space) ----");
4195     } else {
4196       out->print_raw("---- Metaspace Map (Non-Class Space) ----");
4197     }
4198     // Print legend:
4199     out->cr();
4200     out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
4201     out->cr();
4203     vsl->print_map(out);
4204     out->cr();
4205   }
4206 }
4207 
4208 void MetaspaceAux::verify_free_chunks() {
4209   Metaspace::chunk_manager_metadata()->verify();
4210   if (Metaspace::using_class_space()) {
4211     Metaspace::chunk_manager_class()->verify();
4212   }
4213 }
4214 
4215 void MetaspaceAux::verify_capacity() {
4216 #ifdef ASSERT
4217   size_t running_sum_capacity_bytes = capacity_bytes();
4218   // For purposes of the running sum of capacity, verify against capacity
4219   size_t capacity_in_use_bytes = capacity_bytes_slow();
4220   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
4221          "capacity_bytes() " SIZE_FORMAT
4222          " capacity_bytes_slow() " SIZE_FORMAT,
4223          running_sum_capacity_bytes, capacity_in_use_bytes);
4224   for (Metaspace::MetadataType i = Metaspace::ClassType;
4225        i < Metaspace::MetadataTypeCount;
4226        i = (Metaspace::MetadataType)(i + 1)) {
4227     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
4228     assert(capacity_bytes(i) == capacity_in_use_bytes,
4229            "capacity_bytes(%u) " SIZE_FORMAT
4230            " capacity_bytes_slow(%u) " SIZE_FORMAT,
4231            i, capacity_bytes(i), i, capacity_in_use_bytes);
4232   }
4233 #endif
4234 }
4235 
4236 void MetaspaceAux::verify_used() {
4237 #ifdef ASSERT
4238   size_t running_sum_used_bytes = used_bytes();
4239   // For purposes of the running sum of used, verify against used
4240   size_t used_in_use_bytes = used_bytes_slow();
4241   assert(running_sum_used_bytes == used_in_use_bytes,
4242          "used_bytes() " SIZE_FORMAT
4243          " used_bytes_slow() " SIZE_FORMAT,
4244          running_sum_used_bytes, used_in_use_bytes);
4245   for (Metaspace::MetadataType i = Metaspace::ClassType;
4246        i < Metaspace::MetadataTypeCount;
4247        i = (Metaspace::MetadataType)(i + 1)) {
4248     size_t used_in_use_bytes = used_bytes_slow(i);
4249     assert(used_bytes(i) == used_in_use_bytes,
4250            "used_bytes(%u) " SIZE_FORMAT
4251            " used_bytes_slow(%u) " SIZE_FORMAT,
4252            i, used_bytes(i), i, used_in_use_bytes);
4253   }
4254 #endif
4255 }
4256 
4257 void MetaspaceAux::verify_metrics() {
4258   verify_capacity();
4259   verify_used();
4260 }
4261 
4262 
4263 // Metaspace methods
4264 
4265 size_t Metaspace::_first_chunk_word_size = 0;
4266 size_t Metaspace::_first_class_chunk_word_size = 0;
4267 
4268 size_t Metaspace::_commit_alignment = 0;
4269 size_t Metaspace::_reserve_alignment = 0;
4270 
4271 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
4272   initialize(lock, type);
4273 }
4274 
4275 Metaspace::~Metaspace() {
4276   delete _vsm;
4277   if (using_class_space()) {
4278     delete _class_vsm;
4279   }
4280 }
4281 
4282 VirtualSpaceList* Metaspace::_space_list = NULL;
4283 VirtualSpaceList* Metaspace::_class_space_list = NULL;
4284 
4285 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
4286 ChunkManager* Metaspace::_chunk_manager_class = NULL;
4287 
4288 #define VIRTUALSPACEMULTIPLIER 2
4289 
4290 #ifdef _LP64
4291 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
4292 
4293 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
4294   assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
4295   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
4296   // narrow_klass_base is the lower of the metaspace base and the cds base
4297   // (if cds is enabled).  The narrow_klass_shift depends on the distance
4298   // between the lower base and higher address.
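       // Worked example (illustrative numbers, assuming the usual 8-byte klass
       // alignment, i.e. LogKlassAlignmentInBytes == 3, so a shifted encoding can
       // reach 32G): a 1G class space reserved at 8G with no CDS gives
       // higher_address == 9G <= 32G, so the base becomes zero; 9G exceeds the 4G
       // unscaled range, so a shift of 3 is needed. The same space reserved at 40G
       // keeps lower_base == 40G and, spanning only 1G, can use a zero shift.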
4299   address lower_base;
4300   address higher_address;
4301 #if INCLUDE_CDS
4302   if (UseSharedSpaces) {
4303     higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4304                           (address)(metaspace_base + compressed_class_space_size()));
4305     lower_base = MIN2(metaspace_base, cds_base);
4306   } else
4307 #endif
4308   {
4309     higher_address = metaspace_base + compressed_class_space_size();
4310     lower_base = metaspace_base;
4311 
4312     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
4313     // If compressed class space fits in lower 32G, we don't need a base.
4314     if (higher_address <= (address)klass_encoding_max) {
4315       lower_base = 0; // Effectively lower base is zero.
4316     }
4317   }
4318 
4319   Universe::set_narrow_klass_base(lower_base);
4320 
4321   // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
4322   // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
4323   // how the dump time narrow_klass_shift is set. Although CDS can also work
4324   // in zero-shift mode, it uses LogKlassAlignmentInBytes for the klass shift
4325   // to stay consistent with AOT, so archived java heap objects can be used
4326   // at the same time as AOT code.
4327   if (!UseSharedSpaces
4328       && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
4329     Universe::set_narrow_klass_shift(0);
4330   } else {
4331     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
4332   }
4333   AOTLoader::set_narrow_klass_shift();
4334 }
4335 
4336 #if INCLUDE_CDS
4337 // Return TRUE if the specified metaspace_base and cds_base are close enough
4338 // to work with compressed klass pointers.
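     // Illustrative example (assuming the archive's core spaces are small): with
     // cds_base at 32G and a 1G class space reserved at 33G, the combined range
     // spans about 2G, which fits the 4G unscaled limit, so this returns true;
     // a metaspace_base placed far above the archive (say at 100G) would not.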
4339 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
4340   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
4341   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4342   address lower_base = MIN2((address)metaspace_base, cds_base);
4343   address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4344                                 (address)(metaspace_base + compressed_class_space_size()));
4345   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
4346 }
4347 #endif
4348 
4349 // Try to allocate the metaspace at the requested addr.
4350 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
4351   assert(!DumpSharedSpaces, "compressed klass space is allocated by MetaspaceShared class.");
4352   assert(using_class_space(), "called improperly");
4353   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4354   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
4355          "Metaspace size is too big");
4356   assert_is_aligned(requested_addr, _reserve_alignment);
4357   assert_is_aligned(cds_base, _reserve_alignment);
4358   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
4359 
4360   // Don't use large pages for the class space.
4361   bool large_pages = false;
4362 
4363 #if !(defined(AARCH64) || defined(AIX))
4364   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
4365                                              _reserve_alignment,
4366                                              large_pages,
4367                                              requested_addr);
4368 #else // AARCH64 || AIX
4369   ReservedSpace metaspace_rs;
4370 
4371   // Our compressed klass pointers may fit nicely into the lower 32
4372   // bits.
4373   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
4374     metaspace_rs = ReservedSpace(compressed_class_space_size(),
4375                                  _reserve_alignment,
4376                                  large_pages,
4377                                  requested_addr);
4378   }
4379 
4380   if (!metaspace_rs.is_reserved()) {
4381     // Aarch64: Try to align metaspace so that we can decode a compressed
4382     // klass with a single MOVK instruction.  We can do this iff the
4383     // compressed class base is a multiple of 4G.
4384     // Aix: Search for a place where we can find memory. If we need to load
4385     // the base, 4G alignment is helpful, too.
4386     size_t increment = AARCH64_ONLY(4*)G;
4387     for (char *a = align_up(requested_addr, increment);
4388          a < (char*)(1024*G);
4389          a += increment) {
4390       if (a == (char *)(32*G)) {
4391         // Go faster from here on. Zero-based is no longer possible.
4392         increment = 4*G;
4393       }
4394 
4395 #if INCLUDE_CDS
4396       if (UseSharedSpaces
4397           && !can_use_cds_with_metaspace_addr(a, cds_base)) {
4398         // We failed to find an aligned base that will reach.  Fall
4399         // back to using our requested addr.
4400         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4401                                      _reserve_alignment,
4402                                      large_pages,
4403                                      requested_addr);
4404         break;
4405       }
4406 #endif
4407 
4408       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4409                                    _reserve_alignment,
4410                                    large_pages,
4411                                    a);
4412       if (metaspace_rs.is_reserved())
4413         break;
4414     }
4415   }
4416 
4417 #endif // AARCH64 || AIX
4418 
4419   if (!metaspace_rs.is_reserved()) {
4420 #if INCLUDE_CDS
4421     if (UseSharedSpaces) {
4422       size_t increment = align_up(1*G, _reserve_alignment);
4423 
4424       // Keep trying to allocate the metaspace, increasing the requested_addr
4425       // by 1GB each time, until we reach an address that will no longer allow
4426       // use of CDS with compressed klass pointers.
4427       char *addr = requested_addr;
4428       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
4429              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
4430         addr = addr + increment;
4431         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4432                                      _reserve_alignment, large_pages, addr);
4433       }
4434     }
4435 #endif
4436     // If no successful allocation then try to allocate the space anywhere.  If
4437     // that fails then OOM doom.  At this point we cannot try allocating the
4438     // metaspace as if UseCompressedClassPointers is off because too much
4439     // initialization has happened that depends on UseCompressedClassPointers.
4440     // So, UseCompressedClassPointers cannot be turned off at this point.
4441     if (!metaspace_rs.is_reserved()) {
4442       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4443                                    _reserve_alignment, large_pages);
4444       if (!metaspace_rs.is_reserved()) {
4445         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
4446                                               compressed_class_space_size()));
4447       }
4448     }
4449   }
4450 
4451   // If we got here then the metaspace got allocated.
4452   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
4453 
4454 #if INCLUDE_CDS
4455   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
4456   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
4457     FileMapInfo::stop_sharing_and_unmap(
4458         "Could not allocate metaspace at a compatible address");
4459   }
4460 #endif
4461   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
4462                                   UseSharedSpaces ? (address)cds_base : 0);
4463 
4464   initialize_class_space(metaspace_rs);
4465 
4466   LogTarget(Trace, gc, metaspace) lt;
4467   if (lt.is_enabled()) {
4468     ResourceMark rm;
4469     LogStream ls(lt);
4470     print_compressed_class_space(&ls, requested_addr);
4471   }
4472 }
4473 
4474 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
4475   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
4476                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
4477   if (_class_space_list != NULL) {
4478     address base = (address)_class_space_list->current_virtual_space()->bottom();
4479     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
4480                  compressed_class_space_size(), p2i(base));
4481     if (requested_addr != 0) {
4482       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
4483     }
4484     st->cr();
4485   }
4486 }
4487 
4488 // For UseCompressedClassPointers the class space is reserved above the top of
4489 // the Java heap.  The argument passed in is at the base of the compressed space.
4490 void Metaspace::initialize_class_space(ReservedSpace rs) {
4491   // The reserved space size may be bigger because of alignment, esp with UseLargePages
4492   assert(rs.size() >= CompressedClassSpaceSize,
4493          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
4494   assert(using_class_space(), "Must be using class space");
4495   _class_space_list = new VirtualSpaceList(rs);
4496   _chunk_manager_class = new ChunkManager(true/*is_class*/);
4497 
4498   if (!_class_space_list->initialization_succeeded()) {
4499     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
4500   }
4501 }
4502 
4503 #endif
4504 
4505 void Metaspace::ergo_initialize() {
4506   if (DumpSharedSpaces) {
4507     // Using large pages when dumping the shared archive is currently not implemented.
4508     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
4509   }
4510 
4511   size_t page_size = os::vm_page_size();
4512   if (UseLargePages && UseLargePagesInMetaspace) {
4513     page_size = os::large_page_size();
4514   }
4515 
4516   _commit_alignment  = page_size;
4517   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
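       // For example: with 4K pages and large pages disabled, the commit alignment
       // is 4K; on a platform whose allocation granularity is 64K (e.g. Windows),
       // the reserve alignment then becomes 64K.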
4518 
4519   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this would
4520   // obscure whether MaxMetaspaceSize was set on the command line or not.
4521   // This information is needed later to conform to the specification of the
4522   // java.lang.management.MemoryUsage API.
4523   //
4524   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
4525   // globals.hpp to the aligned value, but this is not possible, since the
4526   // alignment depends on other flags being parsed.
4527   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
4528 
4529   if (MetaspaceSize > MaxMetaspaceSize) {
4530     MetaspaceSize = MaxMetaspaceSize;
4531   }
4532 
4533   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
4534 
4535   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
4536 
4537   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
4538   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
4539 
4540   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
4541 
4542   // Initial virtual space size will be calculated at global_initialize()
4543   size_t min_metaspace_sz =
4544       VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
4545   if (UseCompressedClassPointers) {
4546     if ((min_metaspace_sz + CompressedClassSpaceSize) >  MaxMetaspaceSize) {
4547       if (min_metaspace_sz >= MaxMetaspaceSize) {
4548         vm_exit_during_initialization("MaxMetaspaceSize is too small.");
4549       } else {
4550         FLAG_SET_ERGO(size_t, CompressedClassSpaceSize,
4551                       MaxMetaspaceSize - min_metaspace_sz);
4552       }
4553     }
4554   } else if (min_metaspace_sz >= MaxMetaspaceSize) {
4555     FLAG_SET_ERGO(size_t, InitialBootClassLoaderMetaspaceSize,
4556                   min_metaspace_sz);
4557   }
4558 
4559   set_compressed_class_space_size(CompressedClassSpaceSize);
4560 }
4561 
4562 void Metaspace::global_initialize() {
4563   MetaspaceGC::initialize();
4564 
4565 #if INCLUDE_CDS
4566   if (DumpSharedSpaces) {
4567     MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
4568   } else if (UseSharedSpaces) {
4569     // If any of the archived space fails to map, UseSharedSpaces
4570     // is reset to false. Fall through to the
4571     // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
4572     // metaspace.
4573     MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
4574   }
4575 
4576   if (!DumpSharedSpaces && !UseSharedSpaces)
4577 #endif // INCLUDE_CDS
4578   {
4579 #ifdef _LP64
4580     if (using_class_space()) {
4581       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
4582       allocate_metaspace_compressed_klass_ptrs(base, 0);
4583     }
4584 #endif // _LP64
4585   }
4586 
4587   // Initialize these before initializing the VirtualSpaceList
4588   _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
4589   _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
4590   // Make the first class chunk bigger than a medium chunk so it's not put
4591   // on the medium chunk list. The next chunk will be small and progress
4592   // from there. This size was calculated by running with -version.
4593   _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
4594                                      (CompressedClassSpaceSize/BytesPerWord)*2);
4595   _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
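       // Rough example with illustrative values on a 64-bit VM: a 4M
       // InitialBootClassLoaderMetaspaceSize gives a first non-class chunk of 512K
       // words, and with a 1G CompressedClassSpaceSize the first class chunk is
       // capped at MediumChunk * 6 = 48K words by the MIN2 above.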
4596   // Arbitrarily set the initial virtual space to a multiple
4597   // of the boot class loader size.
4598   size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
4599   word_size = align_up(word_size, Metaspace::reserve_alignment_words());
4600 
4601   // Initialize the list of virtual spaces.
4602   _space_list = new VirtualSpaceList(word_size);
4603   _chunk_manager_metadata = new ChunkManager(false/*is_class*/);
4604 
4605   if (!_space_list->initialization_succeeded()) {
4606     vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
4607   }
4608 
4609   _tracer = new MetaspaceTracer();
4610 }
4611 
4612 void Metaspace::post_initialize() {
4613   MetaspaceGC::post_initialize();
4614 }
4615 
4616 void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) {
4617   Metachunk* chunk = get_initialization_chunk(type, mdtype);
4618   if (chunk != NULL) {
4619     // Add to this manager's list of chunks in use and current_chunk().
4620     get_space_manager(mdtype)->add_chunk(chunk, true);
4621   }
4622 }
4623 
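     // Returns the initial chunk for a new SpaceManager of the given type. The chunk
     // is taken from the free list of the matching ChunkManager when possible, and
     // otherwise carved out of the matching VirtualSpaceList.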
4624 Metachunk* Metaspace::get_initialization_chunk(MetaspaceType type, MetadataType mdtype) {
4625   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
4626 
4627   // Get a chunk from the chunk freelist
4628   Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
4629 
4630   if (chunk == NULL) {
4631     chunk = get_space_list(mdtype)->get_new_chunk(chunk_word_size,
4632                                                   get_space_manager(mdtype)->medium_chunk_bunch());
4633   }
4634 
4635   return chunk;
4636 }
4637 
4638 void Metaspace::verify_global_initialization() {
4639   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
4640   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
4641 
4642   if (using_class_space()) {
4643     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
4644     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
4645   }
4646 }
4647 
4648 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
4649   verify_global_initialization();
4650 
4651   // Allocate SpaceManager for metadata objects.
4652   _vsm = new SpaceManager(NonClassType, type, lock);
4653 
4654   if (using_class_space()) {
4655     // Allocate SpaceManager for classes.
4656     _class_vsm = new SpaceManager(ClassType, type, lock);
4657   }
4658 
4659   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4660 
4661   // Allocate chunk for metadata objects
4662   initialize_first_chunk(type, NonClassType);
4663 
4664   // Allocate chunk for class metadata objects
4665   if (using_class_space()) {
4666     initialize_first_chunk(type, ClassType);
4667   }
4668 }
4669 
4670 size_t Metaspace::align_word_size_up(size_t word_size) {
4671   size_t byte_size = word_size * wordSize;
4672   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
4673 }
4674 
4675 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
4676   assert(!_frozen, "sanity");
4677   // Don't use class_vsm() unless UseCompressedClassPointers is true.
4678   if (is_class_space_allocation(mdtype)) {
4679     return  class_vsm()->allocate(word_size);
4680   } else {
4681     return  vsm()->allocate(word_size);
4682   }
4683 }
4684 
4685 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
4686   assert(!_frozen, "sanity");
4687   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
4688   assert(delta_bytes > 0, "Must be");
4689 
4690   size_t before = 0;
4691   size_t after = 0;
4692   MetaWord* res;
4693   bool incremented;
4694 
4695   // Each thread increments the HWM at most once. Even if the thread fails to increment
4696   // the HWM, an allocation is still attempted. This is because another thread must then
4697   // have incremented the HWM and therefore the allocation might still succeed.
4698   do {
4699     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
4700     res = allocate(word_size, mdtype);
4701   } while (!incremented && res == NULL);
4702 
4703   if (incremented) {
4704     tracer()->report_gc_threshold(before, after,
4705                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
4706     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
4707   }
4708 
4709   return res;
4710 }
4711 
4712 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
4713   if (mdtype == ClassType) {
4714     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
4715   } else {
4716     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
4717   }
4718 }
4719 
4720 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
4721   assert(!_frozen, "sanity");
4722   if (mdtype == ClassType) {
4723     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
4724   } else {
4725     return vsm()->sum_free_in_chunks_in_use();
4726   }
4727 }
4728 
4729 // Space capacity in the Metaspace.  It includes
4730 // space in the list of chunks from which allocations
4731 // have been made. It does not include space in the global freelist,
4732 // nor the space available in the dictionary, since that space
4733 // is already counted in some chunk.
4734 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
4735   if (mdtype == ClassType) {
4736     return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
4737   } else {
4738     return vsm()->sum_capacity_in_chunks_in_use();
4739   }
4740 }
4741 
4742 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
4743   return used_words_slow(mdtype) * BytesPerWord;
4744 }
4745 
4746 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
4747   return capacity_words_slow(mdtype) * BytesPerWord;
4748 }
4749 
4750 size_t Metaspace::allocated_blocks_bytes() const {
4751   return vsm()->allocated_blocks_bytes() +
4752       (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
4753 }
4754 
4755 size_t Metaspace::allocated_chunks_bytes() const {
4756   return vsm()->allocated_chunks_bytes() +
4757       (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
4758 }
4759 
4760 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
4761   assert(!_frozen, "sanity");
4762   assert(!SafepointSynchronize::is_at_safepoint()
4763          || Thread::current()->is_VM_thread(), "should be the VM thread");
4764 
4765   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
4766 
4767   if (is_class && using_class_space()) {
4768     class_vsm()->deallocate(ptr, word_size);
4769   } else {
4770     vsm()->deallocate(ptr, word_size);
4771   }
4772 }
4773 
4774 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
4775                               MetaspaceObj::Type type, TRAPS) {
4776   assert(!_frozen, "sanity");
4777   if (HAS_PENDING_EXCEPTION) {
4778     assert(false, "Should not allocate with exception pending");
4779     return NULL;  // caller does a CHECK_NULL too
4780   }
4781 
4782   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
4783         "ClassLoaderData::the_null_class_loader_data() should have been used.");
4784 
4785   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
4786 
4787   // Try to allocate metadata.
4788   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
4789 
4790   if (result == NULL) {
4791     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
4792 
4793     // Allocation failed.
4794     if (is_init_completed()) {
4795       // Only start a GC if the bootstrapping has completed.
4796 
4797       // Try to clean out some memory and retry.
4798       result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
4799     }
4800   }
4801 
4802   if (result == NULL) {
4803     report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
4804   }
4805 
4806   // Zero initialize.
4807   Copy::fill_to_words((HeapWord*)result, word_size, 0);
4808 
4809   return result;
4810 }
4811 
4812 size_t Metaspace::class_chunk_size(size_t word_size) {
4813   assert(using_class_space(), "Has to use class space");
4814   return class_vsm()->calc_chunk_size(word_size);
4815 }
4816 
4817 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
4818   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
4819 
4820   // If result is still null, we are out of memory.
4821   Log(gc, metaspace, freelist) log;
4822   if (log.is_info()) {
4823     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
4824              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
4825     ResourceMark rm;
4826     if (log.is_debug()) {
4827       if (loader_data->metaspace_or_null() != NULL) {
4828         LogStream ls(log.debug());
4829         loader_data->dump(&ls);
4830       }
4831     }
4832     LogStream ls(log.info());
4833     MetaspaceAux::dump(&ls);
4834     MetaspaceAux::print_metaspace_map(&ls, mdtype);
4835     ChunkManager::print_all_chunkmanagers(&ls);
4836   }
4837 
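       // Decide whether this failure should be reported against the compressed class
       // space rather than the metaspace as a whole. Illustrative numbers only: with
       // CompressedClassSpaceSize == 1G, 1000M already committed for class metadata
       // and a failed request whose chunk would add another 50M, the sum exceeds 1G
       // and the OOM is attributed to the compressed class space.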
4838   bool out_of_compressed_class_space = false;
4839   if (is_class_space_allocation(mdtype)) {
4840     Metaspace* metaspace = loader_data->metaspace_non_null();
4841     out_of_compressed_class_space =
4842       MetaspaceAux::committed_bytes(Metaspace::ClassType) +
4843       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
4844       CompressedClassSpaceSize;
4845   }
4846 
4847   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
4848   const char* space_string = out_of_compressed_class_space ?
4849     "Compressed class space" : "Metaspace";
4850 
4851   report_java_out_of_memory(space_string);
4852 
4853   if (JvmtiExport::should_post_resource_exhausted()) {
4854     JvmtiExport::post_resource_exhausted(
4855         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
4856         space_string);
4857   }
4858 
4859   if (!is_init_completed()) {
4860     vm_exit_during_initialization("OutOfMemoryError", space_string);
4861   }
4862 
4863   if (out_of_compressed_class_space) {
4864     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
4865   } else {
4866     THROW_OOP(Universe::out_of_memory_error_metaspace());
4867   }
4868 }
4869 
4870 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
4871   switch (mdtype) {
4872     case Metaspace::ClassType: return "Class";
4873     case Metaspace::NonClassType: return "Metadata";
4874     default:
4875       assert(false, "Got bad mdtype: %d", (int) mdtype);
4876       return NULL;
4877   }
4878 }
4879 
4880 void Metaspace::purge(MetadataType mdtype) {
4881   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
4882 }
4883 
4884 void Metaspace::purge() {
4885   MutexLockerEx cl(SpaceManager::expand_lock(),
4886                    Mutex::_no_safepoint_check_flag);
4887   purge(NonClassType);
4888   if (using_class_space()) {
4889     purge(ClassType);
4890   }
4891 }
4892 
4893 void Metaspace::print_on(outputStream* out) const {
4894   // Print both class virtual space counts and metaspace.
4895   if (Verbose) {
4896     vsm()->print_on(out);
4897     if (using_class_space()) {
4898       class_vsm()->print_on(out);
4899     }
4900   }
4901 }
4902 
4903 bool Metaspace::contains(const void* ptr) {
4904   if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
4905     return true;
4906   }
4907   return contains_non_shared(ptr);
4908 }
4909 
4910 bool Metaspace::contains_non_shared(const void* ptr) {
4911   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
4912      return true;
4913   }
4914 
4915   return get_space_list(NonClassType)->contains(ptr);
4916 }
4917 
4918 void Metaspace::verify() {
4919   vsm()->verify();
4920   if (using_class_space()) {
4921     class_vsm()->verify();
4922   }
4923 }
4924 
4925 void Metaspace::dump(outputStream* const out) const {
4926   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
4927   vsm()->dump(out);
4928   if (using_class_space()) {
4929     out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
4930     class_vsm()->dump(out);
4931   }
4932 }
4933 
4934 #ifdef ASSERT
4935 static void do_verify_chunk(Metachunk* chunk) {
4936   guarantee(chunk != NULL, "Sanity");
4937   // Verify chunk itself; then verify that it is consistent with the
4938   // occupancy map of its containing node.
4939   chunk->verify();
4940   VirtualSpaceNode* const vsn = chunk->container();
4941   OccupancyMap* const ocmap = vsn->occupancy_map();
4942   ocmap->verify_for_chunk(chunk);
4943 }
4944 #endif
4945 
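     // Marks a chunk as in-use or free, both in the chunk itself (its "tagged free"
     // flag) and in the occupancy map of the VirtualSpaceNode containing it.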
4946 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
4947   chunk->set_is_tagged_free(!inuse);
4948   OccupancyMap* const ocmap = chunk->container()->occupancy_map();
4949   ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
4950 }
4951 
4952 /////////////// Unit tests ///////////////
4953 
4954 #ifndef PRODUCT
4955 
4956 class TestMetaspaceAuxTest : AllStatic {
4957  public:
4958   static void test_reserved() {
4959     size_t reserved = MetaspaceAux::reserved_bytes();
4960 
4961     assert(reserved > 0, "assert");
4962 
4963     size_t committed  = MetaspaceAux::committed_bytes();
4964     assert(committed <= reserved, "assert");
4965 
4966     size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
4967     assert(reserved_metadata > 0, "assert");
4968     assert(reserved_metadata <= reserved, "assert");
4969 
4970     if (UseCompressedClassPointers) {
4971       size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
4972       assert(reserved_class > 0, "assert");
4973       assert(reserved_class < reserved, "assert");
4974     }
4975   }
4976 
4977   static void test_committed() {
4978     size_t committed = MetaspaceAux::committed_bytes();
4979 
4980     assert(committed > 0, "assert");
4981 
4982     size_t reserved  = MetaspaceAux::reserved_bytes();
4983     assert(committed <= reserved, "assert");
4984 
4985     size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
4986     assert(committed_metadata > 0, "assert");
4987     assert(committed_metadata <= committed, "assert");
4988 
4989     if (UseCompressedClassPointers) {
4990       size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
4991       assert(committed_class > 0, "assert");
4992       assert(committed_class < committed, "assert");
4993     }
4994   }
4995 
4996   static void test_virtual_space_list_large_chunk() {
4997     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
4998     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4999     // Use a size larger than VirtualSpaceSize (256k) and add one page to make it
5000     // _not_ be vm_allocation_granularity aligned on Windows.
5001     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
5002     large_size += (os::vm_page_size()/BytesPerWord);
5003     vs_list->get_new_chunk(large_size, 0);
5004   }
5005 
5006   static void test() {
5007     test_reserved();
5008     test_committed();
5009     test_virtual_space_list_large_chunk();
5010   }
5011 };
5012 
5013 void TestMetaspaceAux_test() {
5014   TestMetaspaceAuxTest::test();
5015 }
5016 
5017 class TestVirtualSpaceNodeTest {
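       // chunk_up() greedily splits a word count into medium, small and specialized
       // chunks. For example, assuming the non-class chunk sizes used in this file
       // (MediumChunk == 8 * K, SmallChunk == 512, SpecializedChunk == 128 words),
       // 9 * K words split into 1 medium chunk, 2 small chunks and 0 specialized ones.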
5018   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
5019                                           size_t& num_small_chunks,
5020                                           size_t& num_specialized_chunks) {
5021     num_medium_chunks = words_left / MediumChunk;
5022     words_left = words_left % MediumChunk;
5023 
5024     num_small_chunks = words_left / SmallChunk;
5025     words_left = words_left % SmallChunk;
5026     // how many specialized chunks can we get?
5027     num_specialized_chunks = words_left / SpecializedChunk;
5028     assert(words_left % SpecializedChunk == 0, "should be nothing left");
5029   }
5030 
5031  public:
5032   static void test() {
5033     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
5034     const size_t vsn_test_size_words = MediumChunk  * 4;
5035     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
5036 
5037     // The chunk sizes must be multiples of each other, or this will fail
5038     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
5039     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
5040 
5041     { // No committed memory in VSN
5042       ChunkManager cm(false);
5043       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5044       vsn.initialize();
5045       vsn.retire(&cm);
5046       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
5047     }
5048 
5049     { // All of VSN is committed, half is used by chunks
5050       ChunkManager cm(false);
5051       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5052       vsn.initialize();
5053       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
5054       vsn.get_chunk_vs(MediumChunk);
5055       vsn.get_chunk_vs(MediumChunk);
5056       vsn.retire(&cm);
5057       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
5058       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
5059     }
5060 
5061     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
5062     // This doesn't work for systems with vm_page_size >= 16K.
5063     if (page_chunks < MediumChunk) {
5064       // 4 pages of VSN is committed, some is used by chunks
5065       ChunkManager cm(false);
5066       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5067 
5068       vsn.initialize();
5069       vsn.expand_by(page_chunks, page_chunks);
5070       vsn.get_chunk_vs(SmallChunk);
5071       vsn.get_chunk_vs(SpecializedChunk);
5072       vsn.retire(&cm);
5073 
5074       // committed - used = words left to retire
5075       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
5076 
5077       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5078       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5079 
5080       assert(num_medium_chunks == 0, "should not get any medium chunks");
5081       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5082       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5083     }
5084 
5085     { // Half of VSN is committed, a humongous chunk is used
5086       ChunkManager cm(false);
5087       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5088       vsn.initialize();
5089       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
5090       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
5091       vsn.retire(&cm);
5092 
5093       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
5094       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5095       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5096 
5097       assert(num_medium_chunks == 0, "should not get any medium chunks");
5098       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5099       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5100     }
5101 
5102   }
5103 
5104 #define assert_is_available_positive(word_size) \
5105   assert(vsn.is_available(word_size), \
5106          #word_size ": " PTR_FORMAT " bytes were not available in " \
5107          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5108          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5109 
5110 #define assert_is_available_negative(word_size) \
5111   assert(!vsn.is_available(word_size), \
5112          #word_size ": " PTR_FORMAT " bytes should not be available in " \
5113          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5114          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5115 
5116   static void test_is_available_positive() {
5117     // Reserve some memory.
5118     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5119     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5120 
5121     // Commit some memory.
5122     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5123     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5124     assert(expanded, "Failed to commit");
5125 
5126     // Check that is_available accepts the committed size.
5127     assert_is_available_positive(commit_word_size);
5128 
5129     // Check that is_available accepts half the committed size.
5130     size_t expand_word_size = commit_word_size / 2;
5131     assert_is_available_positive(expand_word_size);
5132   }
5133 
5134   static void test_is_available_negative() {
5135     // Reserve some memory.
5136     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5137     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5138 
5139     // Commit some memory.
5140     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5141     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5142     assert(expanded, "Failed to commit");
5143 
5144     // Check that is_available doesn't accept a too large size.
5145     size_t two_times_commit_word_size = commit_word_size * 2;
5146     assert_is_available_negative(two_times_commit_word_size);
5147   }
5148 
5149   static void test_is_available_overflow() {
5150     // Reserve some memory.
5151     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5152     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5153 
5154     // Commit some memory.
5155     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5156     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5157     assert(expanded, "Failed to commit");
5158 
5159     // Calculate a size that will overflow the virtual space size.
5160     void* virtual_space_max = (void*)(uintptr_t)-1;
5161     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
5162     size_t overflow_size = bottom_to_max + BytesPerWord;
5163     size_t overflow_word_size = overflow_size / BytesPerWord;
5164 
5165     // Check that is_available can handle the overflow.
5166     assert_is_available_negative(overflow_word_size);
5167   }
5168 
5169   static void test_is_available() {
5170     TestVirtualSpaceNodeTest::test_is_available_positive();
5171     TestVirtualSpaceNodeTest::test_is_available_negative();
5172     TestVirtualSpaceNodeTest::test_is_available_overflow();
5173   }
5174 };
5175 
5176 // The following test is placed here instead of a gtest / unittest file
5177 // because the ChunkManager class is only available in this file.
5178 void ChunkManager_test_list_index() {
5179   ChunkManager manager(true);
5180 
5181   // Test previous bug where a query for a humongous class metachunk
5182   // incorrectly matched the non-class medium metachunk size.
5183   {
5184     assert(MediumChunk > ClassMediumChunk, "Precondition for test");
5185 
5186     ChunkIndex index = manager.list_index(MediumChunk);
5187 
5188     assert(index == HumongousIndex,
5189            "Requested size is larger than ClassMediumChunk,"
5190            " so should return HumongousIndex. Got index: %d", (int)index);
5191   }
5192 
5193   // Check the specified sizes as well.
5194   {
5195     ChunkIndex index = manager.list_index(ClassSpecializedChunk);
5196     assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
5197   }
5198   {
5199     ChunkIndex index = manager.list_index(ClassSmallChunk);
5200     assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
5201   }
5202   {
5203     ChunkIndex index = manager.list_index(ClassMediumChunk);
5204     assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
5205   }
5206   {
5207     ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
5208     assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index);
5209   }
5210 }
5211 
5212 #endif // !PRODUCT
5213 
5214 #ifdef ASSERT
5215 
5216 // The following test is placed here instead of a gtest / unittest file
5217 // because the ChunkManager class is only available in this file.
5218 class SpaceManagerTest : AllStatic {
5219   friend void SpaceManager_test_adjust_initial_chunk_size();
5220 
5221   static void test_adjust_initial_chunk_size(bool is_class) {
5222     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
5223     const size_t normal   = SpaceManager::small_chunk_size(is_class);
5224     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
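         // Expected behaviour mirrored by the cases below: requests round up to the
         // next canonical chunk size (specialized, small, medium), while anything
         // larger than a medium chunk is passed through unchanged (humongous).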
5225 
5226 #define test_adjust_initial_chunk_size(value, expected, is_class_value)          \
5227     do {                                                                         \
5228       size_t v = value;                                                          \
5229       size_t e = expected;                                                       \
5230       assert(SpaceManager::adjust_initial_chunk_size(v, (is_class_value)) == e,  \
5231              "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, v);               \
5232     } while (0)
5233 
5234     // Smallest (specialized)
5235     test_adjust_initial_chunk_size(1,            smallest, is_class);
5236     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
5237     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
5238 
5239     // Small
5240     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
5241     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
5242     test_adjust_initial_chunk_size(normal,       normal, is_class);
5243 
5244     // Medium
5245     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
5246     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
5247     test_adjust_initial_chunk_size(medium,     medium, is_class);
5248 
5249     // Humongous
5250     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
5251 
5252 #undef test_adjust_initial_chunk_size
5253   }
5254 
5255   static void test_adjust_initial_chunk_size() {
5256     test_adjust_initial_chunk_size(false);
5257     test_adjust_initial_chunk_size(true);
5258   }
5259 };
5260 
5261 void SpaceManager_test_adjust_initial_chunk_size() {
5262   SpaceManagerTest::test_adjust_initial_chunk_size();
5263 }
5264 
5265 #endif // ASSERT
5266 
5267 struct chunkmanager_statistics_t {
5268   int num_specialized_chunks;
5269   int num_small_chunks;
5270   int num_medium_chunks;
5271   int num_humongous_chunks;
5272 };
5273 
5274 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
5275   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
5276   ChunkManager::ChunkManagerStatistics stat;
5277   chunk_manager->get_statistics(&stat);
5278   out->num_specialized_chunks = (int)stat.num_by_type[SpecializedIndex];
5279   out->num_small_chunks = (int)stat.num_by_type[SmallIndex];
5280   out->num_medium_chunks = (int)stat.num_by_type[MediumIndex];
5281   out->num_humongous_chunks = (int)stat.num_humongous_chunks;
5282 }
5283 
5284 struct chunk_geometry_t {
5285   size_t specialized_chunk_word_size;
5286   size_t small_chunk_word_size;
5287   size_t medium_chunk_word_size;
5288 };
5289 
5290 extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
5291   if (mdType == Metaspace::NonClassType) {
5292     out->specialized_chunk_word_size = SpecializedChunk;
5293     out->small_chunk_word_size = SmallChunk;
5294     out->medium_chunk_word_size = MediumChunk;
5295   } else {
5296     out->specialized_chunk_word_size = ClassSpecializedChunk;
5297     out->small_chunk_word_size = ClassSmallChunk;
5298     out->medium_chunk_word_size = ClassMediumChunk;
5299   }
5300 }
5301