1 /*
   2  * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "gc/shared/gcLocker.hpp"
  29 #include "logging/log.hpp"
  30 #include "logging/logStream.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/binaryTreeDictionary.hpp"
  33 #include "memory/filemap.hpp"
  34 #include "memory/freeList.hpp"
  35 #include "memory/metachunk.hpp"
  36 #include "memory/metaspace.hpp"
  37 #include "memory/metaspaceGCThresholdUpdater.hpp"
  38 #include "memory/metaspaceShared.hpp"
  39 #include "memory/metaspaceTracer.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "memory/universe.hpp"
  42 #include "runtime/atomic.hpp"
  43 #include "runtime/globals.hpp"
  44 #include "runtime/init.hpp"
  45 #include "runtime/java.hpp"
  46 #include "runtime/mutex.hpp"
  47 #include "runtime/orderAccess.inline.hpp"
  48 #include "services/memTracker.hpp"
  49 #include "services/memoryService.hpp"
  50 #include "utilities/align.hpp"
  51 #include "utilities/copy.hpp"
  52 #include "utilities/debug.hpp"
  53 #include "utilities/macros.hpp"
  54 
  55 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  56 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
  57 
  58 // Set this constant to enable slow integrity checking of the free chunk lists
  59 const bool metaspace_slow_verify = false;
  60 
  61 // Helper function that does a bunch of checks for a chunk.
  62 DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
  63 
  64 // Given a Metachunk, update its in-use information (both in the
  65 // chunk and the occupancy map).
  66 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
  67 
  68 size_t const allocation_from_dictionary_limit = 4 * K;
  69 
   70 MetaWord* last_allocated = NULL;
  71 
  72 size_t Metaspace::_compressed_class_space_size;
  73 const MetaspaceTracer* Metaspace::_tracer = NULL;
  74 
  75 DEBUG_ONLY(bool Metaspace::_frozen = false;)
  76 
  77 enum ChunkSizes {    // in words.
  78   ClassSpecializedChunk = 128,
  79   SpecializedChunk = 128,
  80   ClassSmallChunk = 256,
  81   SmallChunk = 512,
  82   ClassMediumChunk = 4 * K,
  83   MediumChunk = 8 * K
  84 };
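
// For orientation (assuming a 64-bit VM with an 8-byte MetaWord; byte values are
// illustrative only): SpecializedChunk = 128 words = 1 KB, SmallChunk = 512 words = 4 KB,
// MediumChunk = 8 K words = 64 KB; the class-space variants are ClassSpecializedChunk = 1 KB,
// ClassSmallChunk = 2 KB and ClassMediumChunk = 32 KB.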
  85 
  86 // Returns size of this chunk type.
  87 size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
  88   assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
  89   size_t size = 0;
  90   if (is_class) {
  91     switch(chunktype) {
  92       case SpecializedIndex: size = ClassSpecializedChunk; break;
  93       case SmallIndex: size = ClassSmallChunk; break;
  94       case MediumIndex: size = ClassMediumChunk; break;
  95       default:
  96         ShouldNotReachHere();
  97     }
  98   } else {
  99     switch(chunktype) {
 100       case SpecializedIndex: size = SpecializedChunk; break;
 101       case SmallIndex: size = SmallChunk; break;
 102       case MediumIndex: size = MediumChunk; break;
 103       default:
 104         ShouldNotReachHere();
 105     }
 106   }
 107   return size;
 108 }
 109 
 110 ChunkIndex get_chunk_type_by_size(size_t size, bool is_class) {
 111   if (is_class) {
 112     if (size == ClassSpecializedChunk) {
 113       return SpecializedIndex;
 114     } else if (size == ClassSmallChunk) {
 115       return SmallIndex;
 116     } else if (size == ClassMediumChunk) {
 117       return MediumIndex;
 118     } else if (size > ClassMediumChunk) {
 119       assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
 120       return HumongousIndex;
 121     }
 122   } else {
 123     if (size == SpecializedChunk) {
 124       return SpecializedIndex;
 125     } else if (size == SmallChunk) {
 126       return SmallIndex;
 127     } else if (size == MediumChunk) {
 128       return MediumIndex;
 129     } else if (size > MediumChunk) {
 130       assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
 131       return HumongousIndex;
 132     }
 133   }
 134   ShouldNotReachHere();
 135   return (ChunkIndex)-1;
 136 }
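
// Worked example for the mapping above (non-class space, word sizes as defined in
// ChunkSizes): 128 -> SpecializedIndex, 512 -> SmallIndex, 8 K -> MediumIndex; any larger
// size that is a multiple of SpecializedChunk, e.g. 16 K, maps to HumongousIndex.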
 137 
 138 
 139 static ChunkIndex next_chunk_index(ChunkIndex i) {
  140   assert(i < NumberOfInUseLists, "Out of bounds");
 141   return (ChunkIndex) (i+1);
 142 }
 143 
 144 static ChunkIndex prev_chunk_index(ChunkIndex i) {
  145   assert(i > ZeroIndex, "Out of bounds");
 146   return (ChunkIndex) (i-1);
 147 }
 148 
 149 static const char* scale_unit(size_t scale) {
 150   switch(scale) {
 151     case 1: return "BYTES";
 152     case K: return "KB";
 153     case M: return "MB";
 154     case G: return "GB";
 155     default:
 156       ShouldNotReachHere();
 157       return NULL;
 158   }
 159 }
 160 
 161 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 162 uint MetaspaceGC::_shrink_factor = 0;
 163 bool MetaspaceGC::_should_concurrent_collect = false;
 164 
 165 typedef class FreeList<Metachunk> ChunkList;
 166 
 167 // Manages the global free lists of chunks.
 168 class ChunkManager : public CHeapObj<mtInternal> {
 169   friend class TestVirtualSpaceNodeTest;
 170 
 171   // Free list of chunks of different sizes.
 172   //   SpecializedChunk
 173   //   SmallChunk
 174   //   MediumChunk
 175   ChunkList _free_chunks[NumberOfFreeLists];
 176 
 177   // Whether or not this is the class chunkmanager.
 178   const bool _is_class;
 179 
  180   // Returns the non-humongous chunk list for the given index.
 181   ChunkList* free_chunks(ChunkIndex index);
 182 
 183   // Returns non-humongous chunk list for the given chunk word size.
 184   ChunkList* find_free_chunks_list(size_t word_size);
 185 
 186   //   HumongousChunk
 187   ChunkTreeDictionary _humongous_dictionary;
 188 
 189   // Returns the humongous chunk dictionary.
 190   ChunkTreeDictionary* humongous_dictionary() {
 191     return &_humongous_dictionary;
 192   }
 193 
  194   // Size, in metaspace words, of all free chunks managed by this ChunkManager
  195   size_t _free_chunks_total;
  196   // Number of free chunks in this ChunkManager
 197   size_t _free_chunks_count;
 198 
  199   // Update counters after a chunk was added or removed.
 200   void account_for_added_chunk(const Metachunk* c);
 201   void account_for_removed_chunk(const Metachunk* c);
 202 
 203   // Debug support
 204 
 205   size_t sum_free_chunks();
 206   size_t sum_free_chunks_count();
 207 
 208   void locked_verify_free_chunks_total();
 209   void slow_locked_verify_free_chunks_total() {
 210     if (metaspace_slow_verify) {
 211       locked_verify_free_chunks_total();
 212     }
 213   }
 214   void locked_verify_free_chunks_count();
 215   void slow_locked_verify_free_chunks_count() {
 216     if (metaspace_slow_verify) {
 217       locked_verify_free_chunks_count();
 218     }
 219   }
 220   void verify_free_chunks_count();
 221 
 222   // Given a pointer to a chunk, attempts to merge it with neighboring
 223   // free chunks to form a bigger chunk. Returns true if successful.
 224   bool attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type);
 225 
 226   // Helper for chunk merging:
 227   //  Given an address range with 1-n chunks which are all supposed to be
 228   //  free and hence currently managed by this ChunkManager, remove them
 229   //  from this ChunkManager and mark them as invalid.
 230   // - This does not correct the occupancy map.
 231   // - This does not adjust the counters in ChunkManager.
 232   // - Does not adjust container count counter in containing VirtualSpaceNode.
 233   // Returns number of chunks removed.
 234   int remove_chunks_in_area(MetaWord* p, size_t word_size);
 235 
 236   // Helper for chunk splitting: given a target chunk size and a larger free chunk,
 237   // split up the larger chunk into n smaller chunks, at least one of which should be
 238   // the target chunk of target chunk size. The smaller chunks, including the target
 239   // chunk, are returned to the freelist. The pointer to the target chunk is returned.
 240   // Note that this chunk is supposed to be removed from the freelist right away.
 241   Metachunk* split_chunk(size_t target_chunk_word_size, Metachunk* chunk);
 242 
 243  public:
 244 
 245   struct ChunkManagerStatistics {
 246     size_t num_by_type[NumberOfFreeLists];
 247     size_t single_size_by_type[NumberOfFreeLists];
 248     size_t total_size_by_type[NumberOfFreeLists];
 249     size_t num_humongous_chunks;
 250     size_t total_size_humongous_chunks;
 251   };
 252 
 253   void locked_get_statistics(ChunkManagerStatistics* stat) const;
 254   void get_statistics(ChunkManagerStatistics* stat) const;
 255   static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale);
 256 
 257 
 258   ChunkManager(bool is_class)
 259       : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
 260     _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
 261     _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
 262     _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
 263   }
 264 
  265   // Remove (allocate) a chunk of the given word size from the global freelist and return it.
 266   Metachunk* chunk_freelist_allocate(size_t word_size);
 267 
 268   // Map a size to a list index assuming that there are lists
  269   // for specialized, small, medium, and humongous chunks.
 270   ChunkIndex list_index(size_t size);
 271 
 272   // Map a given index to the chunk size.
 273   size_t size_by_index(ChunkIndex index) const;
 274 
 275   bool is_class() const { return _is_class; }
 276 
 277   // Convenience accessors.
 278   size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); }
 279   size_t small_chunk_word_size() const { return size_by_index(SmallIndex); }
 280   size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); }
 281 
 282   // Take a chunk from the ChunkManager. The chunk is expected to be in
 283   // the chunk manager (the freelist if non-humongous, the dictionary if
 284   // humongous).
 285   void remove_chunk(Metachunk* chunk);
 286 
 287   // Return a single chunk of type index to the ChunkManager.
 288   void return_single_chunk(ChunkIndex index, Metachunk* chunk);
 289 
 290   // Add the simple linked list of chunks to the freelist of chunks
 291   // of type index.
 292   void return_chunk_list(ChunkIndex index, Metachunk* chunk);
 293 
 294   // Total of the space in the free chunks list
 295   size_t free_chunks_total_words();
 296   size_t free_chunks_total_bytes();
 297 
 298   // Number of chunks in the free chunks list
 299   size_t free_chunks_count();
 300 
 301   // Remove from a list by size.  Selects list based on size of chunk.
 302   Metachunk* free_chunks_get(size_t chunk_word_size);
 303 
 304 #define index_bounds_check(index)                                         \
 305   assert(index == SpecializedIndex ||                                     \
 306          index == SmallIndex ||                                           \
 307          index == MediumIndex ||                                          \
 308          index == HumongousIndex, "Bad index: %d", (int) index)
 309 
 310   size_t num_free_chunks(ChunkIndex index) const {
 311     index_bounds_check(index);
 312 
 313     if (index == HumongousIndex) {
 314       return _humongous_dictionary.total_free_blocks();
 315     }
 316 
 317     ssize_t count = _free_chunks[index].count();
 318     return count == -1 ? 0 : (size_t) count;
 319   }
 320 
 321   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 322     index_bounds_check(index);
 323 
 324     size_t word_size = 0;
 325     if (index == HumongousIndex) {
 326       word_size = _humongous_dictionary.total_size();
 327     } else {
 328       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 329       word_size = size_per_chunk_in_words * num_free_chunks(index);
 330     }
 331 
 332     return word_size * BytesPerWord;
 333   }
 334 
 335   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 336     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 337                                          num_free_chunks(SmallIndex),
 338                                          num_free_chunks(MediumIndex),
 339                                          num_free_chunks(HumongousIndex),
 340                                          size_free_chunks_in_bytes(SpecializedIndex),
 341                                          size_free_chunks_in_bytes(SmallIndex),
 342                                          size_free_chunks_in_bytes(MediumIndex),
 343                                          size_free_chunks_in_bytes(HumongousIndex));
 344   }
 345 
 346   // Debug support
 347   void verify();
 348   void slow_verify() {
 349     if (metaspace_slow_verify) {
 350       verify();
 351     }
 352   }
 353   void locked_verify();
 354   void slow_locked_verify() {
 355     if (metaspace_slow_verify) {
 356       locked_verify();
 357     }
 358   }
 359   void verify_free_chunks_total();
 360 
 361   void locked_print_free_chunks(outputStream* st);
 362   void locked_print_sum_free_chunks(outputStream* st);
 363 
 364   void print_on(outputStream* st) const;
 365 
 366   // Prints composition for both non-class and (if available)
  367   // class chunk managers.
 368   static void print_all_chunkmanagers(outputStream* out, size_t scale = 1);
 369 };
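
// Minimal sketch of how a client of the ChunkManager API above might obtain and later
// give back a chunk (illustrative only; the actual call sites appear further down in
// this file, with the appropriate locking):
//
//   Metachunk* chunk = chunk_manager->chunk_freelist_allocate(word_size);
//   if (chunk == NULL) {
//     // Freelist empty for this size: carve a new chunk out of a VirtualSpaceNode.
//   }
//   ...
//   chunk_manager->return_single_chunk(chunk_manager->list_index(chunk->word_size()), chunk);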
 370 
 371 class SmallBlocks : public CHeapObj<mtClass> {
 372   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 373   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 374 
 375  private:
 376   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 377 
 378   FreeList<Metablock>& list_at(size_t word_size) {
 379     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 380     return _small_lists[word_size - _small_block_min_size];
 381   }
 382 
 383  public:
 384   SmallBlocks() {
 385     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 386       uint k = i - _small_block_min_size;
 387       _small_lists[k].set_size(i);
 388     }
 389   }
 390 
 391   size_t total_size() const {
 392     size_t result = 0;
 393     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 394       uint k = i - _small_block_min_size;
 395       result = result + _small_lists[k].count() * _small_lists[k].size();
 396     }
 397     return result;
 398   }
 399 
 400   static uint small_block_max_size() { return _small_block_max_size; }
 401   static uint small_block_min_size() { return _small_block_min_size; }
 402 
 403   MetaWord* get_block(size_t word_size) {
 404     if (list_at(word_size).count() > 0) {
 405       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 406       return new_block;
 407     } else {
 408       return NULL;
 409     }
 410   }
 411   void return_block(Metablock* free_chunk, size_t word_size) {
 412     list_at(word_size).return_chunk_at_head(free_chunk, false);
 413     assert(list_at(word_size).count() > 0, "Should have a chunk");
 414   }
 415 
 416   void print_on(outputStream* st) const {
 417     st->print_cr("SmallBlocks:");
 418     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 419       uint k = i - _small_block_min_size;
 420       st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
 421     }
 422   }
 423 };
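
// Note on the indexing scheme above: a request of word_size W (with
// _small_block_min_size <= W < _small_block_max_size) is served from
// _small_lists[W - _small_block_min_size]. Assuming the 3-word minimum block size
// mentioned later in this file, a 5-word block would live in _small_lists[2].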
 424 
 425 // Used to manage the free list of Metablocks (a block corresponds
 426 // to the allocation of a quantum of metadata).
 427 class BlockFreelist : public CHeapObj<mtClass> {
 428   BlockTreeDictionary* const _dictionary;
 429   SmallBlocks* _small_blocks;
 430 
 431   // Only allocate and split from freelist if the size of the allocation
 432   // is at least 1/4th the size of the available block.
 433   const static int WasteMultiplier = 4;
 434 
 435   // Accessors
 436   BlockTreeDictionary* dictionary() const { return _dictionary; }
 437   SmallBlocks* small_blocks() {
 438     if (_small_blocks == NULL) {
 439       _small_blocks = new SmallBlocks();
 440     }
 441     return _small_blocks;
 442   }
 443 
 444  public:
 445   BlockFreelist();
 446   ~BlockFreelist();
 447 
  448   // Get a block from, and return a block to, the free list
 449   MetaWord* get_block(size_t word_size);
 450   void return_block(MetaWord* p, size_t word_size);
 451 
 452   size_t total_size() const  {
 453     size_t result = dictionary()->total_size();
 454     if (_small_blocks != NULL) {
 455       result = result + _small_blocks->total_size();
 456     }
 457     return result;
 458   }
 459 
 460   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 461   void print_on(outputStream* st) const;
 462 };
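
// Example of the WasteMultiplier rule above (a sketch of the policy only; the exact
// splitting logic lives in BlockFreelist::get_block()): a free 100-word block in the
// dictionary would not be split for a 10-word request (10 < 100 / 4), but it could be
// used for a 30-word request.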
 463 
  464 // Helper for the OccupancyMap. A type trait that yields an all-bits-set constant of an unsigned type.
 465 template <typename T> struct all_ones  { static const T value; };
 466 template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
 467 template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
 468 
 469 // The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
 470 // keeps information about
 471 // - where a chunk starts
 472 // - whether a chunk is in-use or free
 473 // A bit in this bitmap represents one range of memory in the smallest
 474 // chunk size (SpecializedChunk or ClassSpecializedChunk).
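// For example (sizes as defined in this file; illustrative only): for a non-class node
// of the default 256 K-word size and a SpecializedChunk of 128 words, each bit layer
// holds 256 K / 128 = 2048 bits, i.e. 256 bytes of map data per layer.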
 475 class OccupancyMap : public CHeapObj<mtInternal> {
 476 
 477   // The address range this map covers.
 478   const MetaWord* const _reference_address;
 479   const size_t _word_size;
 480 
 481   // The word size of a specialized chunk, aka the number of words one
 482   // bit in this map represents.
 483   const size_t _smallest_chunk_word_size;
 484 
 485   // map data
 486   // Data are organized in two bit layers:
 487   // The first layer is the chunk-start-map. Here, a bit is set to mark
 488   // the corresponding region as the head of a chunk.
 489   // The second layer is the in-use-map. Here, a set bit indicates that
  490   // the corresponding region belongs to a chunk which is in use.
 491   uint8_t* _map[2];
 492 
 493   enum { layer_chunk_start_map = 0, layer_in_use_map = 1 };
 494 
 495   // length, in bytes, of bitmap data
 496   size_t _map_size;
 497 
 498   // Returns true if bit at position pos at bit-layer layer is set.
 499   bool get_bit_at_position(unsigned pos, unsigned layer) const {
 500     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 501     const unsigned byteoffset = pos / 8;
 502     assert(byteoffset < _map_size,
 503            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 504     const unsigned mask = 1 << (pos % 8);
 505     return (_map[layer][byteoffset] & mask) > 0;
 506   }
 507 
 508   // Changes bit at position pos at bit-layer layer to value v.
 509   void set_bit_at_position(unsigned pos, unsigned layer, bool v) {
 510     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 511     const unsigned byteoffset = pos / 8;
 512     assert(byteoffset < _map_size,
 513            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 514     const unsigned mask = 1 << (pos % 8);
 515     if (v) {
 516       _map[layer][byteoffset] |= mask;
 517     } else {
 518       _map[layer][byteoffset] &= ~mask;
 519     }
 520   }
 521 
 522   // Optimized case of is_any_bit_set_in_region for 32/64bit aligned access:
 523   // pos is 32/64 aligned and num_bits is 32/64.
 524   // This is the typical case when coalescing to medium chunks, whose size is
 525   // 32 or 64 times the specialized chunk size (depending on class or non class
  526   // case), so they occupy 32 or 64 bits, which should be 32/64-bit aligned, because
 527   // chunks are chunk-size aligned.
 528   template <typename T>
 529   bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const {
 530     assert(_map_size > 0, "not initialized");
 531     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 532     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos);
 533     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits);
 534     const size_t byteoffset = pos / 8;
 535     assert(byteoffset <= (_map_size - sizeof(T)),
 536            "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 537     const T w = *(T*)(_map[layer] + byteoffset);
  538     return w != 0;
 539   }
 540 
 541   // Returns true if any bit in region [pos1, pos1 + num_bits) is set in bit-layer layer.
 542   bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const {
 543     if (pos % 32 == 0 && num_bits == 32) {
 544       return is_any_bit_set_in_region_3264<uint32_t>(pos, num_bits, layer);
 545     } else if (pos % 64 == 0 && num_bits == 64) {
 546       return is_any_bit_set_in_region_3264<uint64_t>(pos, num_bits, layer);
 547     } else {
 548       for (unsigned n = 0; n < num_bits; n ++) {
 549         if (get_bit_at_position(pos + n, layer)) {
 550           return true;
 551         }
 552       }
 553     }
 554     return false;
 555   }
 556 
 557   // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer.
 558   bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const {
 559     assert(word_size % _smallest_chunk_word_size == 0,
 560         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 561     const unsigned pos = get_bitpos_for_address(p);
 562     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 563     return is_any_bit_set_in_region(pos, num_bits, layer);
 564   }
 565 
 566   // Optimized case of set_bits_of_region for 32/64bit aligned access:
 567   // pos is 32/64 aligned and num_bits is 32/64.
 568   // This is the typical case when coalescing to medium chunks, whose size
 569   // is 32 or 64 times the specialized chunk size (depending on class or non
  570   // class case), so they occupy 32 or 64 bits, which should be 32/64-bit aligned,
 571   // because chunks are chunk-size aligned.
 572   template <typename T>
 573   void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 574     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).",
 575            (unsigned)(sizeof(T) * 8), pos);
 576     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.",
 577            num_bits, (unsigned)(sizeof(T) * 8));
 578     const size_t byteoffset = pos / 8;
 579     assert(byteoffset <= (_map_size - sizeof(T)),
 580            "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 581     T* const pw = (T*)(_map[layer] + byteoffset);
 582     *pw = v ? all_ones<T>::value : (T) 0;
 583   }
 584 
 585   // Set all bits in a region starting at pos to a value.
 586   void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 587     assert(_map_size > 0, "not initialized");
 588     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 589     if (pos % 32 == 0 && num_bits == 32) {
 590       set_bits_of_region_T<uint32_t>(pos, num_bits, layer, v);
 591     } else if (pos % 64 == 0 && num_bits == 64) {
 592       set_bits_of_region_T<uint64_t>(pos, num_bits, layer, v);
 593     } else {
 594       for (unsigned n = 0; n < num_bits; n ++) {
 595         set_bit_at_position(pos + n, layer, v);
 596       }
 597     }
 598   }
 599 
 600   // Helper: sets all bits in a region [p, p+word_size).
 601   void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) {
 602     assert(word_size % _smallest_chunk_word_size == 0,
 603         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 604     const unsigned pos = get_bitpos_for_address(p);
 605     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 606     set_bits_of_region(pos, num_bits, layer, v);
 607   }
 608 
 609   // Helper: given an address, return the bit position representing that address.
 610   unsigned get_bitpos_for_address(const MetaWord* p) const {
 611     assert(_reference_address != NULL, "not initialized");
 612     assert(p >= _reference_address && p < _reference_address + _word_size,
 613            "Address %p out of range for occupancy map [%p..%p).",
 614             p, _reference_address, _reference_address + _word_size);
 615     assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)),
 616            "Address not aligned (%p).", p);
 617     const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size;
 618     assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity.");
 619     return (unsigned) d;
 620   }
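
  // Example (illustrative): with _smallest_chunk_word_size == 128, an address equal to
  // _reference_address + 256 words maps to bit position 2.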
 621 
 622  public:
 623 
 624   OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) :
 625     _reference_address(reference_address), _word_size(word_size),
 626     _smallest_chunk_word_size(smallest_chunk_word_size) {
 627     assert(reference_address != NULL, "invalid reference address");
 628     assert(is_aligned(reference_address, smallest_chunk_word_size),
 629            "Reference address not aligned to smallest chunk size.");
 630     assert(is_aligned(word_size, smallest_chunk_word_size),
 631            "Word_size shall be a multiple of the smallest chunk size.");
 632     // Calculate bitmap size: one bit per smallest_chunk_word_size'd area.
 633     size_t num_bits = word_size / smallest_chunk_word_size;
 634     _map_size = (num_bits + 7) / 8;
 635     assert(_map_size * 8 >= num_bits, "sanity");
 636     _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal);
 637     _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal);
 638     assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed.");
 639     memset(_map[1], 0, _map_size);
 640     memset(_map[0], 0, _map_size);
  641     // Sanity test: the first and last possible chunk start addresses in
  642     // the covered range shall map to the first and last bits in the bitmap.
  643     assert(get_bitpos_for_address(reference_address) == 0,
  644       "First chunk address in range must map to first bit in bitmap.");
 645     assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1,
 646       "Last chunk address in range must map to last bit in bitmap.");
 647   }
 648 
 649   ~OccupancyMap() {
 650     os::free(_map[0]);
 651     os::free(_map[1]);
 652   }
 653 
  654   // Returns true if a chunk starts at address p.
 655   bool chunk_starts_at_address(MetaWord* p) const {
 656     const unsigned pos = get_bitpos_for_address(p);
 657     return get_bit_at_position(pos, layer_chunk_start_map);
 658   }
 659 
 660   void set_chunk_starts_at_address(MetaWord* p, bool v) {
 661     const unsigned pos = get_bitpos_for_address(p);
 662     set_bit_at_position(pos, layer_chunk_start_map, v);
 663   }
 664 
 665   // Removes all chunk-start-bits inside a region, typically as a
 666   // result of a chunk merge.
 667   void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) {
 668     set_bits_of_region(p, word_size, layer_chunk_start_map, false);
 669   }
 670 
  671   // Returns true if there are live (in use) chunks in the region limited
 672   // by [p, p+word_size).
 673   bool is_region_in_use(MetaWord* p, size_t word_size) const {
 674     return is_any_bit_set_in_region(p, word_size, layer_in_use_map);
 675   }
 676 
 677   // Marks the region starting at p with the size word_size as in use
 678   // or free, depending on v.
 679   void set_region_in_use(MetaWord* p, size_t word_size, bool v) {
 680     set_bits_of_region(p, word_size, layer_in_use_map, v);
 681   }
 682 
 683 #ifdef ASSERT
 684   // Verify occupancy map for the address range [from, to).
 685   // We need to tell it the address range, because the memory the
  686   // occupancy map is covering may not be fully committed yet.
 687   void verify(MetaWord* from, MetaWord* to) {
 688     Metachunk* chunk = NULL;
 689     int nth_bit_for_chunk = 0;
 690     MetaWord* chunk_end = NULL;
 691     for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) {
 692       const unsigned pos = get_bitpos_for_address(p);
 693       // Check the chunk-starts-info:
 694       if (get_bit_at_position(pos, layer_chunk_start_map)) {
 695         // Chunk start marked in bitmap.
 696         chunk = (Metachunk*) p;
 697         if (chunk_end != NULL) {
 698           assert(chunk_end == p, "Unexpected chunk start found at %p (expected "
 699                  "the next chunk to start at %p).", p, chunk_end);
 700         }
 701         assert(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p);
 702         if (chunk->get_chunk_type() != HumongousIndex) {
 703           guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p);
 704         }
 705         chunk_end = p + chunk->word_size();
 706         nth_bit_for_chunk = 0;
 707         assert(chunk_end <= to, "Chunk end overlaps test address range.");
 708       } else {
 709         // No chunk start marked in bitmap.
 710         assert(chunk != NULL, "Chunk should start at start of address range.");
 711         assert(p < chunk_end, "Did not find expected chunk start at %p.", p);
 712         nth_bit_for_chunk ++;
 713       }
 714       // Check the in-use-info:
 715       const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map);
 716       if (in_use_bit) {
 717         assert(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u).",
 718                chunk, nth_bit_for_chunk);
 719       } else {
 720         assert(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u).",
 721                chunk, nth_bit_for_chunk);
 722       }
 723     }
 724   }
 725 
 726   // Verify that a given chunk is correctly accounted for in the bitmap.
 727   void verify_for_chunk(Metachunk* chunk) {
 728     assert(chunk_starts_at_address((MetaWord*) chunk),
 729            "No chunk start marked in map for chunk %p.", chunk);
 730     // For chunks larger than the minimal chunk size, no other chunk
 731     // must start in its area.
 732     if (chunk->word_size() > _smallest_chunk_word_size) {
 733       assert(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size,
 734                                        chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map),
 735              "No chunk must start within another chunk.");
 736     }
 737     if (!chunk->is_tagged_free()) {
 738       assert(is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 739              "Chunk %p is in use but marked as free in map (%d %d).",
 740              chunk, chunk->get_chunk_type(), chunk->get_origin());
 741     } else {
 742       assert(!is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 743              "Chunk %p is free but marked as in-use in map (%d %d).",
 744              chunk, chunk->get_chunk_type(), chunk->get_origin());
 745     }
 746   }
 747 
 748 #endif // ASSERT
 749 
 750 };
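
// Illustrative update pattern for the OccupancyMap public API above (a sketch only;
// the real call sites in this file follow the same idea, e.g. via
// do_update_in_use_info_for_chunk() declared at the top of this file):
//
//   OccupancyMap* const ocmap = node->occupancy_map();
//   ocmap->set_chunk_starts_at_address(chunk_base, true);          // a chunk now starts here
//   ocmap->set_region_in_use(chunk_base, chunk_word_size, true);   // ... and it is in use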
 751 
 752 // A VirtualSpaceList node.
 753 class VirtualSpaceNode : public CHeapObj<mtClass> {
 754   friend class VirtualSpaceList;
 755 
 756   // Link to next VirtualSpaceNode
 757   VirtualSpaceNode* _next;
 758 
 759   // Whether this node is contained in class or metaspace.
 760   const bool _is_class;
 761 
 762   // total in the VirtualSpace
 763   MemRegion _reserved;
 764   ReservedSpace _rs;
 765   VirtualSpace _virtual_space;
 766   MetaWord* _top;
 767   // count of chunks contained in this VirtualSpace
 768   uintx _container_count;
 769 
 770   OccupancyMap* _occupancy_map;
 771 
 772   // Convenience functions to access the _virtual_space
 773   char* low()  const { return virtual_space()->low(); }
 774   char* high() const { return virtual_space()->high(); }
 775 
 776   // The first Metachunk will be allocated at the bottom of the
 777   // VirtualSpace
 778   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 779 
 780   // Committed but unused space in the virtual space
 781   size_t free_words_in_vs() const;
 782 
 783   // True if this node belongs to class metaspace.
 784   bool is_class() const { return _is_class; }
 785 
 786   // Helper function for take_from_committed: allocate padding chunks
 787   // until top is at the given address.
 788   void allocate_padding_chunks_until_top_is_at(MetaWord* target_top);
 789 
 790  public:
 791 
 792   VirtualSpaceNode(bool is_class, size_t byte_size);
 793   VirtualSpaceNode(bool is_class, ReservedSpace rs) :
 794     _is_class(is_class), _top(NULL), _next(NULL), _rs(rs), _container_count(0), _occupancy_map(NULL) {}
 795   ~VirtualSpaceNode();
 796 
 797   // Convenience functions for logical bottom and end
 798   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 799   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 800 
 801   const OccupancyMap* occupancy_map() const { return _occupancy_map; }
 802   OccupancyMap* occupancy_map() { return _occupancy_map; }
 803 
 804   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 805 
 806   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 807   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 808 
 809   bool is_pre_committed() const { return _virtual_space.special(); }
 810 
 811   // address of next available space in _virtual_space;
 812   // Accessors
 813   VirtualSpaceNode* next() { return _next; }
 814   void set_next(VirtualSpaceNode* v) { _next = v; }
 815 
 816   void set_reserved(MemRegion const v) { _reserved = v; }
 817   void set_top(MetaWord* v) { _top = v; }
 818 
 819   // Accessors
 820   MemRegion* reserved() { return &_reserved; }
 821   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 822 
 823   // Returns true if "word_size" is available in the VirtualSpace
 824   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 825 
 826   MetaWord* top() const { return _top; }
 827   void inc_top(size_t word_size) { _top += word_size; }
 828 
 829   uintx container_count() { return _container_count; }
 830   void inc_container_count();
 831   void dec_container_count();
 832 #ifdef ASSERT
 833   uintx container_count_slow();
 834   void verify_container_count();
 835 #endif
 836 
 837   // used and capacity in this single entry in the list
 838   size_t used_words_in_vs() const;
 839   size_t capacity_words_in_vs() const;
 840 
 841   bool initialize();
 842 
 843   // get space from the virtual space
 844   Metachunk* take_from_committed(size_t chunk_word_size);
 845 
 846   // Allocate a chunk from the virtual space and return it.
 847   Metachunk* get_chunk_vs(size_t chunk_word_size);
 848 
 849   // Expands/shrinks the committed space in a virtual space.  Delegates
  850   // to VirtualSpace.
 851   bool expand_by(size_t min_words, size_t preferred_words);
 852 
 853   // In preparation for deleting this node, remove all the chunks
 854   // in the node from any freelist.
 855   void purge(ChunkManager* chunk_manager);
 856 
 857   // If an allocation doesn't fit in the current node a new node is created.
 858   // Allocate chunks out of the remaining committed space in this node
 859   // to avoid wasting that memory.
  860   // The remaining committed space can always be carved up exactly because all
  861   // chunk sizes are multiples of the smallest chunk size.
 862   void retire(ChunkManager* chunk_manager);
 863 
 864 
 865   void print_on(outputStream* st) const;
 866   void print_map(outputStream* st, bool is_class) const;
 867 
 868   // Debug support
 869   DEBUG_ONLY(void mangle();)
 870   // Verify counters, all chunks in this list node and the occupancy map.
 871   DEBUG_ONLY(void verify();)
 872   // Verify that all free chunks in this node are ideally merged
  873   // (there should not be multiple small chunks where a large chunk could exist.)
 874   DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
 875 
 876 };
 877 
 878 #define assert_is_aligned(value, alignment)                  \
 879   assert(is_aligned((value), (alignment)),                   \
 880          SIZE_FORMAT_HEX " is not aligned to "               \
 881          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 882 
 883 // Decide if large pages should be committed when the memory is reserved.
 884 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 885   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 886     size_t words = bytes / BytesPerWord;
 887     bool is_class = false; // We never reserve large pages for the class space.
 888     if (MetaspaceGC::can_expand(words, is_class) &&
 889         MetaspaceGC::allowed_expansion() >= words) {
 890       return true;
 891     }
 892   }
 893 
 894   return false;
 895 }
 896 
  897 // bytes is the size of the associated VirtualSpace.
 898 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
 899   _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
 900   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 901   bool large_pages = should_commit_large_pages_when_reserving(bytes);
 902   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 903 
 904   if (_rs.is_reserved()) {
 905     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 906     assert(_rs.size() != 0, "Catch if we get a 0 size");
 907     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
 908     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
 909 
 910     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 911   }
 912 }
 913 
 914 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 915   DEBUG_ONLY(this->verify();)
 916   Metachunk* chunk = first_chunk();
 917   Metachunk* invalid_chunk = (Metachunk*) top();
 918   while (chunk < invalid_chunk ) {
 919     assert(chunk->is_tagged_free(), "Should be tagged free");
 920     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 921     chunk_manager->remove_chunk(chunk);
 922     chunk->remove_sentinel();
 923     assert(chunk->next() == NULL &&
 924            chunk->prev() == NULL,
 925            "Was not removed from its list");
 926     chunk = (Metachunk*) next;
 927   }
 928 }
 929 
 930 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
 931 
 932   if (bottom() == top()) {
 933     return;
 934   }
 935 
 936   const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
 937   const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
 938   const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
 939 
 940   int line_len = 100;
 941   const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
 942   line_len = (int)(section_len / spec_chunk_size);
 943 
 944   static const int NUM_LINES = 4;
 945 
 946   char* lines[NUM_LINES];
 947   for (int i = 0; i < NUM_LINES; i ++) {
 948     lines[i] = (char*)os::malloc(line_len, mtInternal);
 949   }
 950   int pos = 0;
 951   const MetaWord* p = bottom();
 952   const Metachunk* chunk = (const Metachunk*)p;
 953   const MetaWord* chunk_end = p + chunk->word_size();
 954   while (p < top()) {
 955     if (pos == line_len) {
 956       pos = 0;
 957       for (int i = 0; i < NUM_LINES; i ++) {
 958         st->fill_to(22);
 959         st->print_raw(lines[i], line_len);
 960         st->cr();
 961       }
 962     }
 963     if (pos == 0) {
 964       st->print(PTR_FORMAT ":", p2i(p));
 965     }
 966     if (p == chunk_end) {
 967       chunk = (Metachunk*)p;
 968       chunk_end = p + chunk->word_size();
 969     }
 970     // line 1: chunk starting points (a dot if that area is a chunk start).
 971     lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
 972 
 973     // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
 974     // chunk is in use.
 975     const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
 976     if (chunk->word_size() == spec_chunk_size) {
 977       lines[1][pos] = chunk_is_free ? 'x' : 'X';
 978     } else if (chunk->word_size() == small_chunk_size) {
 979       lines[1][pos] = chunk_is_free ? 's' : 'S';
 980     } else if (chunk->word_size() == med_chunk_size) {
 981       lines[1][pos] = chunk_is_free ? 'm' : 'M';
 982     } else if (chunk->word_size() > med_chunk_size) {
 983       lines[1][pos] = chunk_is_free ? 'h' : 'H';
 984     } else {
 985       ShouldNotReachHere();
 986     }
 987 
 988     // Line 3: chunk origin
 989     const ChunkOrigin origin = chunk->get_origin();
 990     lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
 991 
 992     // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
 993     //         but were never used.
 994     lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
 995 
 996     p += spec_chunk_size;
 997     pos ++;
 998   }
 999   if (pos > 0) {
1000     for (int i = 0; i < NUM_LINES; i ++) {
1001       st->fill_to(22);
1002       st->print_raw(lines[i], line_len);
1003       st->cr();
1004     }
1005   }
1006   for (int i = 0; i < NUM_LINES; i ++) {
1007     os::free(lines[i]);
1008   }
1009 }
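
// Legend for the map printed by print_map() above (as produced by the code):
//   line 1: '.'  marks a position where a chunk starts;
//   line 2: chunk type ('x' specialized, 's' small, 'm' medium, 'h' humongous),
//           uppercase if the chunk is in use;
//   line 3: chunk origin as a digit, blank for origin_normal;
//   line 4: 'v' marks a virgin (never used) chunk.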
1010 
1011 
1012 #ifdef ASSERT
1013 uintx VirtualSpaceNode::container_count_slow() {
1014   uintx count = 0;
1015   Metachunk* chunk = first_chunk();
1016   Metachunk* invalid_chunk = (Metachunk*) top();
1017   while (chunk < invalid_chunk ) {
1018     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1019     do_verify_chunk(chunk);
1020     // Don't count the chunks on the free lists.  Those are
1021     // still part of the VirtualSpaceNode but not currently
1022     // counted.
1023     if (!chunk->is_tagged_free()) {
1024       count++;
1025     }
1026     chunk = (Metachunk*) next;
1027   }
1028   return count;
1029 }
1030 #endif
1031 
1032 #ifdef ASSERT
1033 // Verify counters, all chunks in this list node and the occupancy map.
1034 void VirtualSpaceNode::verify() {
1035   uintx num_in_use_chunks = 0;
1036   Metachunk* chunk = first_chunk();
1037   Metachunk* invalid_chunk = (Metachunk*) top();
1038 
1039   // Iterate the chunks in this node and verify each chunk.
1040   while (chunk < invalid_chunk ) {
1041     DEBUG_ONLY(do_verify_chunk(chunk);)
1042     if (!chunk->is_tagged_free()) {
1043       num_in_use_chunks ++;
1044     }
1045     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1046     chunk = (Metachunk*) next;
1047   }
1048   assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
 1049          ", counter: " UINTX_FORMAT ").", num_in_use_chunks, _container_count);
1050   // Also verify the occupancy map.
1051   occupancy_map()->verify(this->bottom(), this->top());
1052 }
1053 #endif // ASSERT
1054 
1055 #ifdef ASSERT
1056 // Verify that all free chunks in this node are ideally merged
 1057 // (there should not be multiple small chunks where a large chunk could exist.)
1058 void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() {
1059   Metachunk* chunk = first_chunk();
1060   Metachunk* invalid_chunk = (Metachunk*) top();
1061   // Shorthands.
1062   const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
1063   const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
1064   int num_free_chunks_since_last_med_boundary = -1;
1065   int num_free_chunks_since_last_small_boundary = -1;
1066   while (chunk < invalid_chunk ) {
1067     // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary.
1068     // Reset the counter when encountering a non-free chunk.
1069     if (chunk->get_chunk_type() != HumongousIndex) {
1070       if (chunk->is_tagged_free()) {
1071         // Count successive free, non-humongous chunks.
1072         if (is_aligned(chunk, size_small)) {
1073           assert(num_free_chunks_since_last_small_boundary <= 1,
1074                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_small, size_small);
1075           num_free_chunks_since_last_small_boundary = 0;
1076         } else if (num_free_chunks_since_last_small_boundary != -1) {
1077           num_free_chunks_since_last_small_boundary ++;
1078         }
1079         if (is_aligned(chunk, size_med)) {
1080           assert(num_free_chunks_since_last_med_boundary <= 1,
1081                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_med, size_med);
1082           num_free_chunks_since_last_med_boundary = 0;
1083         } else if (num_free_chunks_since_last_med_boundary != -1) {
1084           num_free_chunks_since_last_med_boundary ++;
1085         }
1086       } else {
1087         // Encountering a non-free chunk, reset counters.
1088         num_free_chunks_since_last_med_boundary = -1;
1089         num_free_chunks_since_last_small_boundary = -1;
1090       }
1091     } else {
1092       // One cannot merge areas with a humongous chunk in the middle. Reset counters.
1093       num_free_chunks_since_last_med_boundary = -1;
1094       num_free_chunks_since_last_small_boundary = -1;
1095     }
1096 
1097     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1098     chunk = (Metachunk*) next;
1099   }
1100 }
1101 #endif // ASSERT
1102 
1103 // List of VirtualSpaces for metadata allocation.
1104 class VirtualSpaceList : public CHeapObj<mtClass> {
1105   friend class VirtualSpaceNode;
1106 
1107   enum VirtualSpaceSizes {
1108     VirtualSpaceSize = 256 * K
1109   };
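  // Note: 256 K words correspond to 2 MB per node on a 64-bit VM (8-byte MetaWord);
  // given as orientation only.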
1110 
1111   // Head of the list
1112   VirtualSpaceNode* _virtual_space_list;
1113   // virtual space currently being used for allocations
1114   VirtualSpaceNode* _current_virtual_space;
1115 
1116   // Is this VirtualSpaceList used for the compressed class space
1117   bool _is_class;
1118 
1119   // Sum of reserved and committed memory in the virtual spaces
1120   size_t _reserved_words;
1121   size_t _committed_words;
1122 
1123   // Number of virtual spaces
1124   size_t _virtual_space_count;
1125 
1126   ~VirtualSpaceList();
1127 
1128   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
1129 
1130   void set_virtual_space_list(VirtualSpaceNode* v) {
1131     _virtual_space_list = v;
1132   }
1133   void set_current_virtual_space(VirtualSpaceNode* v) {
1134     _current_virtual_space = v;
1135   }
1136 
1137   void link_vs(VirtualSpaceNode* new_entry);
1138 
1139   // Get another virtual space and add it to the list.  This
1140   // is typically prompted by a failed attempt to allocate a chunk
1141   // and is typically followed by the allocation of a chunk.
1142   bool create_new_virtual_space(size_t vs_word_size);
1143 
1144   // Chunk up the unused committed space in the current
1145   // virtual space and add the chunks to the free list.
1146   void retire_current_virtual_space();
1147 
1148  public:
1149   VirtualSpaceList(size_t word_size);
1150   VirtualSpaceList(ReservedSpace rs);
1151 
1152   size_t free_bytes();
1153 
1154   Metachunk* get_new_chunk(size_t chunk_word_size,
1155                            size_t suggested_commit_granularity);
1156 
1157   bool expand_node_by(VirtualSpaceNode* node,
1158                       size_t min_words,
1159                       size_t preferred_words);
1160 
1161   bool expand_by(size_t min_words,
1162                  size_t preferred_words);
1163 
1164   VirtualSpaceNode* current_virtual_space() {
1165     return _current_virtual_space;
1166   }
1167 
1168   bool is_class() const { return _is_class; }
1169 
1170   bool initialization_succeeded() { return _virtual_space_list != NULL; }
1171 
1172   size_t reserved_words()  { return _reserved_words; }
1173   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
1174   size_t committed_words() { return _committed_words; }
1175   size_t committed_bytes() { return committed_words() * BytesPerWord; }
1176 
1177   void inc_reserved_words(size_t v);
1178   void dec_reserved_words(size_t v);
1179   void inc_committed_words(size_t v);
1180   void dec_committed_words(size_t v);
1181   void inc_virtual_space_count();
1182   void dec_virtual_space_count();
1183 
1184   bool contains(const void* ptr);
1185 
 1186   // Unlink empty VirtualSpaceNodes and free them.
1187   void purge(ChunkManager* chunk_manager);
1188 
1189   void print_on(outputStream* st) const;
1190   void print_map(outputStream* st) const;
1191 
1192   class VirtualSpaceListIterator : public StackObj {
1193     VirtualSpaceNode* _virtual_spaces;
1194    public:
1195     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
1196       _virtual_spaces(virtual_spaces) {}
1197 
1198     bool repeat() {
1199       return _virtual_spaces != NULL;
1200     }
1201 
1202     VirtualSpaceNode* get_next() {
1203       VirtualSpaceNode* result = _virtual_spaces;
1204       if (_virtual_spaces != NULL) {
1205         _virtual_spaces = _virtual_spaces->next();
1206       }
1207       return result;
1208     }
1209   };
1210 };
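
// Illustrative use of VirtualSpaceListIterator above (a sketch; traversals in this
// file follow the same pattern):
//
//   VirtualSpaceListIterator iter(virtual_space_list());
//   while (iter.repeat()) {
//     VirtualSpaceNode* vsn = iter.get_next();
//     // ... visit vsn ...
//   }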
1211 
1212 class Metadebug : AllStatic {
1213   // Debugging support for Metaspaces
1214   static int _allocation_fail_alot_count;
1215 
1216  public:
1217 
1218   static void init_allocation_fail_alot_count();
1219 #ifdef ASSERT
1220   static bool test_metadata_failure();
1221 #endif
1222 };
1223 
1224 int Metadebug::_allocation_fail_alot_count = 0;
1225 
1226 //  SpaceManager - used by Metaspace to handle allocations
1227 class SpaceManager : public CHeapObj<mtClass> {
1228   friend class ClassLoaderMetaspace;
1229   friend class Metadebug;
1230 
1231  private:
1232 
1233   // protects allocations
1234   Mutex* const _lock;
1235 
1236   // Type of metadata allocated.
1237   const Metaspace::MetadataType   _mdtype;
1238 
1239   // Type of metaspace
1240   const Metaspace::MetaspaceType  _space_type;
1241 
1242   // List of chunks in use by this SpaceManager.  Allocations
1243   // are done from the current chunk.  The list is used for deallocating
1244   // chunks when the SpaceManager is freed.
1245   Metachunk* _chunks_in_use[NumberOfInUseLists];
1246   Metachunk* _current_chunk;
1247 
1248   // Maximum number of small chunks to allocate to a SpaceManager
1249   static uint const _small_chunk_limit;
1250 
 1251   // Maximum number of specialized chunks to allocate for anonymous and delegating
1252   // metadata space to a SpaceManager
1253   static uint const _anon_and_delegating_metadata_specialize_chunk_limit;
1254 
1255   // Sum of all space in allocated chunks
1256   size_t _allocated_blocks_words;
1257 
1258   // Sum of all allocated chunks
1259   size_t _allocated_chunks_words;
1260   size_t _allocated_chunks_count;
1261 
1262   // Free lists of blocks are per SpaceManager since they
1263   // are assumed to be in chunks in use by the SpaceManager
1264   // and all chunks in use by a SpaceManager are freed when
1265   // the class loader using the SpaceManager is collected.
1266   BlockFreelist* _block_freelists;
1267 
1268   // protects virtualspace and chunk expansions
1269   static const char*  _expand_lock_name;
1270   static const int    _expand_lock_rank;
1271   static Mutex* const _expand_lock;
1272 
1273  private:
1274   // Accessors
1275   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
1276   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
1277     _chunks_in_use[index] = v;
1278   }
1279 
1280   BlockFreelist* block_freelists() const { return _block_freelists; }
1281 
1282   Metaspace::MetadataType mdtype() { return _mdtype; }
1283 
1284   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
1285   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
1286 
1287   Metachunk* current_chunk() const { return _current_chunk; }
1288   void set_current_chunk(Metachunk* v) {
1289     _current_chunk = v;
1290   }
1291 
1292   Metachunk* find_current_chunk(size_t word_size);
1293 
1294   // Add chunk to the list of chunks in use
1295   void add_chunk(Metachunk* v, bool make_current);
1296   void retire_current_chunk();
1297 
1298   Mutex* lock() const { return _lock; }
1299 
1300  protected:
1301   void initialize();
1302 
1303  public:
1304   SpaceManager(Metaspace::MetadataType mdtype,
1305                Metaspace::MetaspaceType space_type,
1306                Mutex* lock);
1307   ~SpaceManager();
1308 
1309   enum ChunkMultiples {
1310     MediumChunkMultiple = 4
1311   };
1312 
1313   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
1314   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
1315   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
1316 
1317   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
1318 
1319   // Accessors
1320   bool is_class() const { return _mdtype == Metaspace::ClassType; }
1321 
1322   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
1323   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
1324   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
1325 
1326   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
1327 
1328   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
1329 
1330   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
1331   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
1332   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
1333   size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
1334   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
1335 
1336   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
1337 
1338   static Mutex* expand_lock() { return _expand_lock; }
1339 
1340   // Increment the per Metaspace and global running sums for Metachunks
 1341   // by the given size.  This is used when a Metachunk is added to
1342   // the in-use list.
1343   void inc_size_metrics(size_t words);
 1344   // Increment the per Metaspace and global running sums for Metablocks by the given
1345   // size.  This is used when a Metablock is allocated.
1346   void inc_used_metrics(size_t words);
1347   // Delete the portion of the running sums for this SpaceManager. That is,
1348   // the global running sums for the Metachunks and Metablocks are
1349   // decremented for all the Metachunks in-use by this SpaceManager.
1350   void dec_total_from_size_metrics();
1351 
1352   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
1353   // or return the unadjusted size if the requested size is humongous.
1354   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
1355   size_t adjust_initial_chunk_size(size_t requested) const;
1356 
1357   // Get the initial chunk size for this metaspace type.
1358   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
1359 
1360   size_t sum_capacity_in_chunks_in_use() const;
1361   size_t sum_used_in_chunks_in_use() const;
1362   size_t sum_free_in_chunks_in_use() const;
1363   size_t sum_waste_in_chunks_in_use() const;
1364   size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;
1365 
1366   size_t sum_count_in_chunks_in_use();
1367   size_t sum_count_in_chunks_in_use(ChunkIndex i);
1368 
1369   Metachunk* get_new_chunk(size_t chunk_word_size);
1370 
1371   // Block allocation and deallocation.
1372   // Allocates a block from the current chunk
1373   MetaWord* allocate(size_t word_size);
1374 
1375   // Helper for allocations
1376   MetaWord* allocate_work(size_t word_size);
1377 
1378   // Returns a block to the per manager freelist
1379   void deallocate(MetaWord* p, size_t word_size);
1380 
1381   // Based on the allocation size and a minimum chunk size, returns
1382   // the chunk size to use (for expanding space for chunk allocation).
1383   size_t calc_chunk_size(size_t allocation_word_size);
1384 
1385   // Called when an allocation from the current chunk fails.
1386   // Gets a new chunk (may require getting a new virtual space),
1387   // and allocates from that chunk.
1388   MetaWord* grow_and_allocate(size_t word_size);
1389 
1390   // Report memory usage to MemoryService.
1391   void track_metaspace_memory_usage();
1392 
1393   // debugging support.
1394 
1395   void dump(outputStream* const out) const;
1396   void print_on(outputStream* st) const;
1397   void locked_print_chunks_in_use_on(outputStream* st) const;
1398 
1399   void verify();
1400   void verify_chunk_size(Metachunk* chunk);
1401 #ifdef ASSERT
1402   void verify_allocated_blocks_words();
1403 #endif
1404 
1405   // This adjusts the given size so that it is at least the minimum allocation size in
1406   // words for data in metaspace.  Essentially the minimum size is currently 3 words.
1407   size_t get_allocation_word_size(size_t word_size) {
1408     size_t byte_size = word_size * BytesPerWord;
1409 
1410     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
1411     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
1412 
1413     size_t raw_word_size = raw_bytes_size / BytesPerWord;
1414     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
1415 
1416     return raw_word_size;
1417   }
1418 };
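
// Illustrative sketch (not part of the original source): how get_allocation_word_size()
// rounds a raw request. Assuming a 64-bit VM (BytesPerWord == 8) and, hypothetically,
// sizeof(Metablock) == 24 and Metachunk::object_alignment() == 8:
//
//   word_size = 1 -> byte_size = 8  -> MAX2(8, 24)  = 24 -> align_up(24, 8) = 24 -> 3 words
//   word_size = 5 -> byte_size = 40 -> MAX2(40, 24) = 40 -> align_up(40, 8) = 40 -> 5 words
//
// which matches the "minimum size is currently 3 words" note above.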
1419 
1420 uint const SpaceManager::_small_chunk_limit = 4;
1421 uint const SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit = 4;
1422 
1423 const char* SpaceManager::_expand_lock_name =
1424   "SpaceManager chunk allocation lock";
1425 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
1426 Mutex* const SpaceManager::_expand_lock =
1427   new Mutex(SpaceManager::_expand_lock_rank,
1428             SpaceManager::_expand_lock_name,
1429             Mutex::_allow_vm_block_flag,
1430             Monitor::_safepoint_check_never);
1431 
1432 void VirtualSpaceNode::inc_container_count() {
1433   assert_lock_strong(SpaceManager::expand_lock());
1434   _container_count++;
1435 }
1436 
1437 void VirtualSpaceNode::dec_container_count() {
1438   assert_lock_strong(SpaceManager::expand_lock());
1439   _container_count--;
1440 }
1441 
1442 #ifdef ASSERT
1443 void VirtualSpaceNode::verify_container_count() {
1444   assert(_container_count == container_count_slow(),
1445          "Inconsistency in container_count _container_count " UINTX_FORMAT
1446          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
1447 }
1448 #endif
1449 
1450 // BlockFreelist methods
1451 
1452 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
1453 
1454 BlockFreelist::~BlockFreelist() {
1455   delete _dictionary;
1456   if (_small_blocks != NULL) {
1457     delete _small_blocks;
1458   }
1459 }
1460 
1461 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
1462   assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
1463 
1464   Metablock* free_chunk = ::new (p) Metablock(word_size);
1465   if (word_size < SmallBlocks::small_block_max_size()) {
1466     small_blocks()->return_block(free_chunk, word_size);
1467   } else {
1468     dictionary()->return_chunk(free_chunk);
1469   }
1470   log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
1471             SIZE_FORMAT, p2i(free_chunk), word_size);
1472 }
1473 
1474 MetaWord* BlockFreelist::get_block(size_t word_size) {
1475   assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
1476 
1477   // Try small_blocks first.
1478   if (word_size < SmallBlocks::small_block_max_size()) {
1479     // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
1480     // this space manager.
1481     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
1482     if (new_block != NULL) {
1483       log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1484               p2i(new_block), word_size);
1485       return new_block;
1486     }
1487   }
1488 
1489   if (word_size < BlockFreelist::min_dictionary_size()) {
1490     // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
1491     return NULL;
1492   }
1493 
1494   Metablock* free_block = dictionary()->get_chunk(word_size);
1495   if (free_block == NULL) {
1496     return NULL;
1497   }
1498 
1499   const size_t block_size = free_block->size();
1500   if (block_size > WasteMultiplier * word_size) {
1501     return_block((MetaWord*)free_block, block_size);
1502     return NULL;
1503   }
1504 
1505   MetaWord* new_block = (MetaWord*)free_block;
1506   assert(block_size >= word_size, "Incorrect size of block from freelist");
1507   const size_t unused = block_size - word_size;
1508   if (unused >= SmallBlocks::small_block_min_size()) {
1509     return_block(new_block + word_size, unused);
1510   }
1511 
1512   log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1513             p2i(new_block), word_size);
1514   return new_block;
1515 }
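
// Illustrative sketch (not part of the original source): the WasteMultiplier check above
// rejects free blocks that are far larger than the request. Assuming, for illustration,
// WasteMultiplier == 4 and a single 100-word block in the dictionary:
//
//   get_block(10): 100 > 4 * 10, so the block is put back and NULL is returned.
//   get_block(30): 100 <= 4 * 30, so the block is used; its unused 70-word tail is
//                  handed back to the freelist via return_block().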
1516 
1517 void BlockFreelist::print_on(outputStream* st) const {
1518   dictionary()->print_free_lists(st);
1519   if (_small_blocks != NULL) {
1520     _small_blocks->print_on(st);
1521   }
1522 }
1523 
1524 // VirtualSpaceNode methods
1525 
1526 VirtualSpaceNode::~VirtualSpaceNode() {
1527   _rs.release();
1528   if (_occupancy_map != NULL) {
1529     delete _occupancy_map;
1530   }
1531 #ifdef ASSERT
1532   size_t word_size = sizeof(*this) / BytesPerWord;
1533   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
1534 #endif
1535 }
1536 
1537 size_t VirtualSpaceNode::used_words_in_vs() const {
1538   return pointer_delta(top(), bottom(), sizeof(MetaWord));
1539 }
1540 
1541 // Space committed in the VirtualSpace
1542 size_t VirtualSpaceNode::capacity_words_in_vs() const {
1543   return pointer_delta(end(), bottom(), sizeof(MetaWord));
1544 }
1545 
1546 size_t VirtualSpaceNode::free_words_in_vs() const {
1547   return pointer_delta(end(), top(), sizeof(MetaWord));
1548 }
1549 
1550 // Given an address larger than top(), allocate padding chunks until top is at the given address.
1551 void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) {
1552 
1553   assert(target_top > top(), "Sanity");
1554 
1555   // Padding chunks are added to the freelist.
1556   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1557 
1558   // shorthands
1559   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1560   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1561   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1562 
1563   while (top() < target_top) {
1564 
1565     // We could make this coding more generic, but right now we only deal with two possible chunk sizes
1566     // for padding chunks, so it is not worth it.
1567     size_t padding_chunk_word_size = small_word_size;
1568     if (!is_aligned(top(), small_word_size * sizeof(MetaWord))) {
1569       assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
1570       padding_chunk_word_size = spec_word_size;
1571     }
1572     MetaWord* here = top();
1573     assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
1574     inc_top(padding_chunk_word_size);
1575 
1576     // Create new padding chunk.
1577     ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
1578     assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
1579 
1580     Metachunk* const padding_chunk =
1581       ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
1582     assert(padding_chunk == (Metachunk*)here, "Sanity");
1583     DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
1584     log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
1585                                        PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
1586                                        (is_class() ? "class space " : "metaspace"),
1587                                        p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
1588 
1589     // Mark chunk start in occupancy map.
1590     occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
1591 
1592     // Chunks are born as in-use (see Metachunk ctor). So, before returning
1593     // the padding chunk to its chunk manager, mark it as in use (ChunkManager
1594     // will assert that).
1595     do_update_in_use_info_for_chunk(padding_chunk, true);
1596 
1597     // Return Chunk to freelist.
1598     inc_container_count();
1599     chunk_manager->return_single_chunk(padding_chunk_type, padding_chunk);
1600     // Please note: at this point, ChunkManager::return_single_chunk()
1601     // may already have merged the padding chunk with neighboring chunks, so
1602     // it may have vanished at this point. Do not reference the padding
1603     // chunk beyond this point.
1604   }
1605 
1606   assert(top() == target_top, "Sanity");
1607 
1608 } // allocate_padding_chunks_until_top_is_at()
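
// Illustrative sketch (not part of the original source): assuming, for illustration,
// specialized chunks of 128 words and small chunks of 512 words, with top() 384 words
// past an aligned base and target_top 1024 words past it:
//
//   iteration 1: 384 is not 512-word aligned -> lay down a 128-word specialized
//                padding chunk, top becomes 512.
//   iteration 2: 512 is 512-word aligned     -> lay down a 512-word small padding
//                chunk, top becomes 1024 == target_top, loop ends.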
1609 
1610 // Allocates the chunk from the virtual space only.
1611 // This interface is also used internally for debugging.  Not all
1612 // chunks removed here are necessarily used for allocation.
1613 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1614   // Non-humongous chunks are to be allocated aligned to their chunk
1615   // size. So, start addresses of medium chunks are aligned to medium
1616   // chunk size, those of small chunks to small chunk size and so
1617   // forth. This facilitates merging of free chunks and reduces
1618   // fragmentation. Chunk sizes are spec < small < medium, with each
1619   // larger chunk size being a multiple of the next smaller chunk
1620   // size.
1621   // Because of this alignment, we may need to create a number of padding
1622   // chunks. These chunks are created and added to the freelist.
1623 
1624   // The chunk manager to which we will give our padding chunks.
1625   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1626 
1627   // shorthands
1628   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1629   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1630   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1631 
1632   assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
1633          chunk_word_size >= med_word_size, "Invalid chunk size requested.");
1634 
1635   // Chunk alignment (in bytes) == chunk size unless humongous.
1636   // Humongous chunks are aligned to the smallest chunk size (spec).
1637   const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
1638                                            spec_word_size : chunk_word_size) * sizeof(MetaWord);
1639 
1640   // Do we have enough space to create the requested chunk plus
1641   // any padding chunks needed?
1642   MetaWord* const next_aligned =
1643     static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
1644   if (!is_available((next_aligned - top()) + chunk_word_size)) {
1645     return NULL;
1646   }
1647 
1648   // Before allocating the requested chunk, allocate padding chunks if necessary.
1649   // We only need to do this for small or medium chunks: specialized chunks are the
1650   // smallest size, hence always aligned. Humongous chunks are allocated unaligned
1651   // (implicitly, also aligned to smallest chunk size).
1652   if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top())  {
1653     log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
1654         (is_class() ? "class space " : "metaspace"),
1655         top(), next_aligned);
1656     allocate_padding_chunks_until_top_is_at(next_aligned);
1657     // Now, top should be aligned correctly.
1658     assert_is_aligned(top(), required_chunk_alignment);
1659   }
1660 
1661   // Now, top should be aligned correctly.
1662   assert_is_aligned(top(), required_chunk_alignment);
1663 
1664   // Bottom of the new chunk
1665   MetaWord* chunk_limit = top();
1666   assert(chunk_limit != NULL, "Not safe to call this method");
1667 
1668   // The virtual spaces are always expanded by the
1669   // commit granularity to enforce the following condition.
1670   // Without this the is_available check will not work correctly.
1671   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1672       "The committed memory doesn't match the expanded memory.");
1673 
1674   if (!is_available(chunk_word_size)) {
1675     LogTarget(Debug, gc, metaspace, freelist) lt;
1676     if (lt.is_enabled()) {
1677       LogStream ls(lt);
1678       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1679       // Dump some information about the virtual space that is nearly full
1680       print_on(&ls);
1681     }
1682     return NULL;
1683   }
1684 
1685   // Take the space  (bump top on the current virtual space).
1686   inc_top(chunk_word_size);
1687 
1688   // Initialize the chunk
1689   ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
1690   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
1691   assert(result == (Metachunk*)chunk_limit, "Sanity");
1692   occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
1693   do_update_in_use_info_for_chunk(result, true);
1694 
1695   inc_container_count();
1696 
1697   if (metaspace_slow_verify) {
1698     DEBUG_ONLY(chunk_manager->locked_verify());
1699     DEBUG_ONLY(this->verify());
1700   }
1701 
1702   DEBUG_ONLY(do_verify_chunk(result));
1703 
1704   result->inc_use_count();
1705 
1706   return result;
1707 }
1708 
1709 
1710 // Expand the virtual space (commit more of the reserved space)
1711 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1712   size_t min_bytes = min_words * BytesPerWord;
1713   size_t preferred_bytes = preferred_words * BytesPerWord;
1714 
1715   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1716 
1717   if (uncommitted < min_bytes) {
1718     return false;
1719   }
1720 
1721   size_t commit = MIN2(preferred_bytes, uncommitted);
1722   bool result = virtual_space()->expand_by(commit, false);
1723 
1724   if (result) {
1725     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
1726               (is_class() ? "class" : "non-class"), commit);
1727   } else {
1728     log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
1729               (is_class() ? "class" : "non-class"), commit);
1730   }
1731 
1732   assert(result, "Failed to commit memory");
1733 
1734   return result;
1735 }
1736 
1737 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1738   assert_lock_strong(SpaceManager::expand_lock());
1739   Metachunk* result = take_from_committed(chunk_word_size);
1740   return result;
1741 }
1742 
1743 bool VirtualSpaceNode::initialize() {
1744 
1745   if (!_rs.is_reserved()) {
1746     return false;
1747   }
1748 
1749   // These are necessary restrictions to make sure that the virtual space always
1750   // grows in steps of Metaspace::commit_alignment(). If both base and size are
1751   // aligned only the middle alignment of the VirtualSpace is used.
1752   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
1753   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
1754 
1755   // ReservedSpaces marked as special will have the entire memory
1756   // pre-committed. Setting a committed size will make sure that
1757   // committed_size and actual_committed_size agree.
1758   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1759 
1760   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1761                                             Metaspace::commit_alignment());
1762   if (result) {
1763     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1764         "Checking that the pre-committed memory was registered by the VirtualSpace");
1765 
1766     set_top((MetaWord*)virtual_space()->low());
1767     set_reserved(MemRegion((HeapWord*)_rs.base(),
1768                  (HeapWord*)(_rs.base() + _rs.size())));
1769 
1770     assert(reserved()->start() == (HeapWord*) _rs.base(),
1771            "Reserved start was not set properly " PTR_FORMAT
1772            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1773     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1774            "Reserved size was not set properly " SIZE_FORMAT
1775            " != " SIZE_FORMAT, reserved()->word_size(),
1776            _rs.size() / BytesPerWord);
1777   }
1778 
1779   // Initialize Occupancy Map.
1780   const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
1781   _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
1782 
1783   return result;
1784 }
1785 
1786 void VirtualSpaceNode::print_on(outputStream* st) const {
1787   size_t used = used_words_in_vs();
1788   size_t capacity = capacity_words_in_vs();
1789   VirtualSpace* vs = virtual_space();
1790   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
1791            "[" PTR_FORMAT ", " PTR_FORMAT ", "
1792            PTR_FORMAT ", " PTR_FORMAT ")",
1793            p2i(vs), capacity / K,
1794            capacity == 0 ? 0 : used * 100 / capacity,
1795            p2i(bottom()), p2i(top()), p2i(end()),
1796            p2i(vs->high_boundary()));
1797 }
1798 
1799 #ifdef ASSERT
1800 void VirtualSpaceNode::mangle() {
1801   size_t word_size = capacity_words_in_vs();
1802   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1803 }
1804 #endif // ASSERT
1805 
1806 // VirtualSpaceList methods
1807 // Space allocated from the VirtualSpace
1808 
1809 VirtualSpaceList::~VirtualSpaceList() {
1810   VirtualSpaceListIterator iter(virtual_space_list());
1811   while (iter.repeat()) {
1812     VirtualSpaceNode* vsl = iter.get_next();
1813     delete vsl;
1814   }
1815 }
1816 
1817 void VirtualSpaceList::inc_reserved_words(size_t v) {
1818   assert_lock_strong(SpaceManager::expand_lock());
1819   _reserved_words = _reserved_words + v;
1820 }
1821 void VirtualSpaceList::dec_reserved_words(size_t v) {
1822   assert_lock_strong(SpaceManager::expand_lock());
1823   _reserved_words = _reserved_words - v;
1824 }
1825 
1826 #define assert_committed_below_limit()                        \
1827   assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
1828          "Too much committed memory. Committed: " SIZE_FORMAT \
1829          " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1830           MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);
1831 
1832 void VirtualSpaceList::inc_committed_words(size_t v) {
1833   assert_lock_strong(SpaceManager::expand_lock());
1834   _committed_words = _committed_words + v;
1835 
1836   assert_committed_below_limit();
1837 }
1838 void VirtualSpaceList::dec_committed_words(size_t v) {
1839   assert_lock_strong(SpaceManager::expand_lock());
1840   _committed_words = _committed_words - v;
1841 
1842   assert_committed_below_limit();
1843 }
1844 
1845 void VirtualSpaceList::inc_virtual_space_count() {
1846   assert_lock_strong(SpaceManager::expand_lock());
1847   _virtual_space_count++;
1848 }
1849 void VirtualSpaceList::dec_virtual_space_count() {
1850   assert_lock_strong(SpaceManager::expand_lock());
1851   _virtual_space_count--;
1852 }
1853 
1854 void ChunkManager::remove_chunk(Metachunk* chunk) {
1855   size_t word_size = chunk->word_size();
1856   ChunkIndex index = list_index(word_size);
1857   if (index != HumongousIndex) {
1858     free_chunks(index)->remove_chunk(chunk);
1859   } else {
1860     humongous_dictionary()->remove_chunk(chunk);
1861   }
1862 
1863   // Chunk has been removed from the chunks free list, update counters.
1864   account_for_removed_chunk(chunk);
1865 }
1866 
1867 bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
1868   assert_lock_strong(SpaceManager::expand_lock());
1869   assert(chunk != NULL, "invalid chunk pointer");
1870   // Check for valid merge combinations.
1871   assert((chunk->get_chunk_type() == SpecializedIndex &&
1872           (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) ||
1873          (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex),
1874         "Invalid chunk merge combination.");
1875 
1876   const size_t target_chunk_word_size =
1877     get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class());
1878 
1879   // [ prospective merge region )
1880   MetaWord* const p_merge_region_start =
1881     (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord));
1882   MetaWord* const p_merge_region_end =
1883     p_merge_region_start + target_chunk_word_size;
1884 
1885   // We need the VirtualSpaceNode containing this chunk and its occupancy map.
1886   VirtualSpaceNode* const vsn = chunk->container();
1887   OccupancyMap* const ocmap = vsn->occupancy_map();
1888 
1889   // The prospective chunk merge range must be completely contained by the
1890   // committed range of the virtual space node.
1891   if (p_merge_region_start < vsn->bottom() || p_merge_region_end > vsn->top()) {
1892     return false;
1893   }
1894 
1895   // Only attempt to merge this range if at its start a chunk starts and at its end
1896   // a chunk ends. If a chunk (can only be humongous) straddles either start or end
1897   // of that range, we cannot merge.
1898   if (!ocmap->chunk_starts_at_address(p_merge_region_start)) {
1899     return false;
1900   }
1901   if (p_merge_region_end < vsn->top() &&
1902       !ocmap->chunk_starts_at_address(p_merge_region_end)) {
1903     return false;
1904   }
1905 
1906   // Now check if the prospective merge area contains live chunks. If it does we cannot merge.
1907   if (ocmap->is_region_in_use(p_merge_region_start, target_chunk_word_size)) {
1908     return false;
1909   }
1910 
1911   // Success! Remove all chunks in this region...
1912   log_trace(gc, metaspace, freelist)("%s: coalescing chunks in area [%p-%p)...",
1913     (is_class() ? "class space" : "metaspace"),
1914     p_merge_region_start, p_merge_region_end);
1915 
1916   const int num_chunks_removed =
1917     remove_chunks_in_area(p_merge_region_start, target_chunk_word_size);
1918 
1919   // ... and create a single new bigger chunk.
1920   Metachunk* const p_new_chunk =
1921       ::new (p_merge_region_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn);
1922   assert(p_new_chunk == (Metachunk*)p_merge_region_start, "Sanity");
1923   p_new_chunk->set_origin(origin_merge);
1924 
1925   log_trace(gc, metaspace, freelist)("%s: created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".",
1926     (is_class() ? "class space" : "metaspace"),
1927     p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord));
1928 
1929   // Fix occupancy map: remove old start bits of the small chunks and set new start bit.
1930   ocmap->wipe_chunk_start_bits_in_region(p_merge_region_start, target_chunk_word_size);
1931   ocmap->set_chunk_starts_at_address(p_merge_region_start, true);
1932 
1933   // Mark chunk as free. Note: it is not necessary to update the in-use
1934   // information in the occupancy map, because the old chunks were also free,
1935   // so nothing should have changed.
1936   p_new_chunk->set_is_tagged_free(true);
1937 
1938   // Add new chunk to its freelist.
1939   ChunkList* const list = free_chunks(target_chunk_type);
1940   list->return_chunk_at_head(p_new_chunk);
1941 
1942   // And adjust ChunkManager::_free_chunks_count (_free_chunks_total
1943   // should not have changed, because the size of the space should be the same)
1944   _free_chunks_count -= num_chunks_removed;
1945   _free_chunks_count ++;
1946 
1947   // VirtualSpaceNode::container_count does not have to be modified:
1948   // it means "number of active (non-free) chunks", so merging free chunks
1949   // should not affect that count.
1950 
1951   // At the end of a chunk merge, run verification tests.
1952   if (metaspace_slow_verify) {
1953     DEBUG_ONLY(this->locked_verify());
1954     DEBUG_ONLY(vsn->verify());
1955   }
1956 
1957   return true;
1958 }
1959 
1960 // Remove all chunks in the given area - the chunks are supposed to be free -
1961 // from their corresponding freelists. Mark them as invalid.
1962 // - This does not correct the occupancy map.
1963 // - This does not adjust the counters in ChunkManager.
1964 // - Does not adjust container count counter in containing VirtualSpaceNode
1965 // Returns number of chunks removed.
1966 int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
1967   assert(p != NULL && word_size > 0, "Invalid range.");
1968   const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
1969   assert_is_aligned(word_size, smallest_chunk_size);
1970 
1971   Metachunk* const start = (Metachunk*) p;
1972   const Metachunk* const end = (Metachunk*)(p + word_size);
1973   Metachunk* cur = start;
1974   int num_removed = 0;
1975   while (cur < end) {
1976     Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
1977     DEBUG_ONLY(do_verify_chunk(cur));
1978     assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
1979     assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
1980     log_trace(gc, metaspace, freelist)("%s: removing chunk %p, size " SIZE_FORMAT_HEX ".",
1981       (is_class() ? "class space" : "metaspace"),
1982       cur, cur->word_size() * sizeof(MetaWord));
1983     cur->remove_sentinel();
1984     // Note: cannot call ChunkManager::remove_chunk, because that
1985     // modifies the counters in ChunkManager, which we do not want. So
1986     // we call remove_chunk on the freelist directly (see also the
1987     // splitting function which does the same).
1988     ChunkList* const list = free_chunks(list_index(cur->word_size()));
1989     list->remove_chunk(cur);
1990     num_removed ++;
1991     cur = next;
1992   }
1993   return num_removed;
1994 }
1995 
1996 // Walk the list of VirtualSpaceNodes and delete
1997 // nodes with a 0 container_count.  Remove Metachunks in
1998 // the node from their respective freelists.
1999 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
2000   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
2001   assert_lock_strong(SpaceManager::expand_lock());
2002   // Don't use a VirtualSpaceListIterator because this
2003   // list is being changed and a straightforward use of an iterator is not safe.
2004   VirtualSpaceNode* purged_vsl = NULL;
2005   VirtualSpaceNode* prev_vsl = virtual_space_list();
2006   VirtualSpaceNode* next_vsl = prev_vsl;
2007   while (next_vsl != NULL) {
2008     VirtualSpaceNode* vsl = next_vsl;
2009     DEBUG_ONLY(vsl->verify_container_count();)
2010     next_vsl = vsl->next();
2011     // Don't free the current virtual space since it will likely
2012     // be needed soon.
2013     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
2014       log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
2015                                          ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
2016       // Unlink it from the list
2017       if (prev_vsl == vsl) {
2018         // This is the case of the current node being the first node.
2019         assert(vsl == virtual_space_list(), "Expected to be the first node");
2020         set_virtual_space_list(vsl->next());
2021       } else {
2022         prev_vsl->set_next(vsl->next());
2023       }
2024 
2025       vsl->purge(chunk_manager);
2026       dec_reserved_words(vsl->reserved_words());
2027       dec_committed_words(vsl->committed_words());
2028       dec_virtual_space_count();
2029       purged_vsl = vsl;
2030       delete vsl;
2031     } else {
2032       prev_vsl = vsl;
2033     }
2034   }
2035 #ifdef ASSERT
2036   if (purged_vsl != NULL) {
2037     // List should be stable enough to use an iterator here.
2038     VirtualSpaceListIterator iter(virtual_space_list());
2039     while (iter.repeat()) {
2040       VirtualSpaceNode* vsl = iter.get_next();
2041       assert(vsl != purged_vsl, "Purge of vsl failed");
2042     }
2043   }
2044 #endif
2045 }
2046 
2047 
2048 // This function looks at the mmap regions in the metaspace without locking.
2049 // The chunks are added with store ordering and not deleted except at
2050 // unloading time during a safepoint.
2051 bool VirtualSpaceList::contains(const void* ptr) {
2052   // List should be stable enough to use an iterator here because removing virtual
2053   // space nodes is only allowed at a safepoint.
2054   VirtualSpaceListIterator iter(virtual_space_list());
2055   while (iter.repeat()) {
2056     VirtualSpaceNode* vsn = iter.get_next();
2057     if (vsn->contains(ptr)) {
2058       return true;
2059     }
2060   }
2061   return false;
2062 }
2063 
2064 void VirtualSpaceList::retire_current_virtual_space() {
2065   assert_lock_strong(SpaceManager::expand_lock());
2066 
2067   VirtualSpaceNode* vsn = current_virtual_space();
2068 
2069   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
2070                                   Metaspace::chunk_manager_metadata();
2071 
2072   vsn->retire(cm);
2073 }
2074 
2075 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
2076   DEBUG_ONLY(verify_container_count();)
2077   assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
2078   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
2079     ChunkIndex index = (ChunkIndex)i;
2080     size_t chunk_size = chunk_manager->size_by_index(index);
2081 
2082     while (free_words_in_vs() >= chunk_size) {
2083       Metachunk* chunk = get_chunk_vs(chunk_size);
2084       // Chunk will be allocated aligned, so allocation may require
2085       // additional padding chunks. That may cause above allocation to
2086       // fail. Just ignore the failed allocation and continue with the
2087       // next smaller chunk size. As the VirtualSpaceNode committed
2088       // size should be a multiple of the smallest chunk size, we
2089       // should always be able to fill the VirtualSpace completely.
2090       if (chunk == NULL) {
2091         break;
2092       }
2093       chunk_manager->return_single_chunk(index, chunk);
2094     }
2095     DEBUG_ONLY(verify_container_count();)
2096   }
2097   assert(free_words_in_vs() == 0, "should be empty now");
2098 }
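
// Illustrative sketch (not part of the original source): assuming, for illustration,
// non-class chunk sizes of 8192 (medium), 512 (small) and 128 (specialized) words and
// a node with 9216 free words, retire() would hand back roughly one medium chunk
// (8192 words) and two small chunks (2 * 512 words), modulo any padding chunks needed
// for alignment, leaving no free words in the node.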
2099 
2100 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
2101                                    _is_class(false),
2102                                    _virtual_space_list(NULL),
2103                                    _current_virtual_space(NULL),
2104                                    _reserved_words(0),
2105                                    _committed_words(0),
2106                                    _virtual_space_count(0) {
2107   MutexLockerEx cl(SpaceManager::expand_lock(),
2108                    Mutex::_no_safepoint_check_flag);
2109   create_new_virtual_space(word_size);
2110 }
2111 
2112 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
2113                                    _is_class(true),
2114                                    _virtual_space_list(NULL),
2115                                    _current_virtual_space(NULL),
2116                                    _reserved_words(0),
2117                                    _committed_words(0),
2118                                    _virtual_space_count(0) {
2119   MutexLockerEx cl(SpaceManager::expand_lock(),
2120                    Mutex::_no_safepoint_check_flag);
2121   VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
2122   bool succeeded = class_entry->initialize();
2123   if (succeeded) {
2124     link_vs(class_entry);
2125   }
2126 }
2127 
2128 size_t VirtualSpaceList::free_bytes() {
2129   return current_virtual_space()->free_words_in_vs() * BytesPerWord;
2130 }
2131 
2132 // Allocate another meta virtual space and add it to the list.
2133 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
2134   assert_lock_strong(SpaceManager::expand_lock());
2135 
2136   if (is_class()) {
2137     assert(false, "We currently don't support more than one VirtualSpace for"
2138                   " the compressed class space. The initialization of the"
2139                   " CCS uses another code path and should not hit this path.");
2140     return false;
2141   }
2142 
2143   if (vs_word_size == 0) {
2144     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
2145     return false;
2146   }
2147 
2148   // Reserve the space
2149   size_t vs_byte_size = vs_word_size * BytesPerWord;
2150   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
2151 
2152   // Allocate the meta virtual space and initialize it.
2153   VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
2154   if (!new_entry->initialize()) {
2155     delete new_entry;
2156     return false;
2157   } else {
2158     assert(new_entry->reserved_words() == vs_word_size,
2159         "Reserved memory size differs from requested memory size");
2160     // ensure lock-free iteration sees fully initialized node
2161     OrderAccess::storestore();
2162     link_vs(new_entry);
2163     return true;
2164   }
2165 }
2166 
2167 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
2168   if (virtual_space_list() == NULL) {
2169       set_virtual_space_list(new_entry);
2170   } else {
2171     current_virtual_space()->set_next(new_entry);
2172   }
2173   set_current_virtual_space(new_entry);
2174   inc_reserved_words(new_entry->reserved_words());
2175   inc_committed_words(new_entry->committed_words());
2176   inc_virtual_space_count();
2177 #ifdef ASSERT
2178   new_entry->mangle();
2179 #endif
2180   LogTarget(Trace, gc, metaspace) lt;
2181   if (lt.is_enabled()) {
2182     LogStream ls(lt);
2183     VirtualSpaceNode* vsl = current_virtual_space();
2184     ResourceMark rm;
2185     vsl->print_on(&ls);
2186   }
2187 }
2188 
2189 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
2190                                       size_t min_words,
2191                                       size_t preferred_words) {
2192   size_t before = node->committed_words();
2193 
2194   bool result = node->expand_by(min_words, preferred_words);
2195 
2196   size_t after = node->committed_words();
2197 
2198   // after and before can be the same if the memory was pre-committed.
2199   assert(after >= before, "Inconsistency");
2200   inc_committed_words(after - before);
2201 
2202   return result;
2203 }
2204 
2205 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
2206   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
2207   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
2208   assert(min_words <= preferred_words, "Invalid arguments");
2209 
2210   const char* const class_or_not = (is_class() ? "class" : "non-class");
2211 
2212   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
2213     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
2214               class_or_not);
2215     return  false;
2216   }
2217 
2218   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
2219   if (allowed_expansion_words < min_words) {
2220     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
2221               class_or_not);
2222     return false;
2223   }
2224 
2225   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
2226 
2227   // Commit more memory from the current virtual space.
2228   bool vs_expanded = expand_node_by(current_virtual_space(),
2229                                     min_words,
2230                                     max_expansion_words);
2231   if (vs_expanded) {
2232      log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
2233                class_or_not);
2234      return true;
2235   }
2236   log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
2237             class_or_not);
2238   retire_current_virtual_space();
2239 
2240   // Get another virtual space.
2241   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
2242   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
2243 
2244   if (create_new_virtual_space(grow_vs_words)) {
2245     if (current_virtual_space()->is_pre_committed()) {
2246       // The memory was pre-committed, so we are done here.
2247       assert(min_words <= current_virtual_space()->committed_words(),
2248           "The new VirtualSpace was pre-committed, so it"
2249           "should be large enough to fit the alloc request.");
2250       return true;
2251     }
2252 
2253     return expand_node_by(current_virtual_space(),
2254                           min_words,
2255                           max_expansion_words);
2256   }
2257 
2258   return false;
2259 }
2260 
2261 // Given a chunk, calculate the largest possible padding space which
2262 // could be required when allocating it.
2263 static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
2264   const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
2265   if (chunk_type != HumongousIndex) {
2266     // Normal, non-humongous chunks are allocated at chunk size
2267     // boundaries, so the largest padding space required would be that
2268     // minus the smallest chunk size.
2269     const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
2270     return chunk_word_size - smallest_chunk_size;
2271   } else {
2272     // Humongous chunks are allocated at smallest-chunksize
2273     // boundaries, so there is no padding required.
2274     return 0;
2275   }
2276 }
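
// Illustrative sketch (not part of the original source): assuming, for illustration,
// a non-class medium chunk of 8192 words and a specialized chunk of 128 words, a
// medium chunk allocation may need up to 8192 - 128 = 8064 words of padding chunks;
// a humongous chunk needs none, because it is only aligned to the specialized size.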
2277 
2278 
2279 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
2280 
2281   // Allocate a chunk out of the current virtual space.
2282   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2283 
2284   if (next != NULL) {
2285     return next;
2286   }
2287 
2288   // The expand amount is currently only determined by the requested sizes
2289   // and not how much committed memory is left in the current virtual space.
2290 
2291   // We must have enough space for the requested size and any
2292   // additional required padding chunks.
2293   const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
2294 
2295   size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
2296   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
2297   if (min_word_size >= preferred_word_size) {
2298     // Can happen when humongous chunks are allocated.
2299     preferred_word_size = min_word_size;
2300   }
2301 
2302   bool expanded = expand_by(min_word_size, preferred_word_size);
2303   if (expanded) {
2304     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2305     assert(next != NULL, "The allocation was expected to succeed after the expansion");
2306   }
2307 
2308   return next;
2309 }
2310 
2311 void VirtualSpaceList::print_on(outputStream* st) const {
2312   VirtualSpaceListIterator iter(virtual_space_list());
2313   while (iter.repeat()) {
2314     VirtualSpaceNode* node = iter.get_next();
2315     node->print_on(st);
2316   }
2317 }
2318 
2319 void VirtualSpaceList::print_map(outputStream* st) const {
2320   VirtualSpaceNode* list = virtual_space_list();
2321   VirtualSpaceListIterator iter(list);
2322   unsigned i = 0;
2323   while (iter.repeat()) {
2324     st->print_cr("Node %u:", i);
2325     VirtualSpaceNode* node = iter.get_next();
2326     node->print_map(st, this->is_class());
2327     i ++;
2328   }
2329 }
2330 
2331 // MetaspaceGC methods
2332 
2333 // VM_CollectForMetadataAllocation is the vm operation used to GC.
2334 // Within the VM operation after the GC the attempt to allocate the metadata
2335 // should succeed.  If the GC did not free enough space for the metaspace
2336 // allocation, the HWM is increased so that another virtualspace will be
2337 // allocated for the metadata.  With the perm gen, the increase in the perm
2338 // gen size was bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
2339 // metaspace policy uses those as the small and large steps for the HWM.
2340 //
2341 // After the GC the compute_new_size() for MetaspaceGC is called to
2342 // resize the capacity of the metaspaces.  The current implementation
2343 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
2344 // to resize the Java heap by some GC's.  New flags can be implemented
2345 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
2346 // free space is desirable in the metaspace capacity to decide how much
2347 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
2348 // free space is desirable in the metaspace capacity before decreasing
2349 // the HWM.
2350 
2351 // Calculate the amount to increase the high water mark (HWM).
2352 // Increase by a minimum amount (MinMetaspaceExpansion) so that
2353 // another expansion is not requested too soon.  If that is not
2354 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
2355 // If that is still not enough, expand by the size of the allocation
2356 // plus some.
2357 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
2358   size_t min_delta = MinMetaspaceExpansion;
2359   size_t max_delta = MaxMetaspaceExpansion;
2360   size_t delta = align_up(bytes, Metaspace::commit_alignment());
2361 
2362   if (delta <= min_delta) {
2363     delta = min_delta;
2364   } else if (delta <= max_delta) {
2365     // Don't want to hit the high water mark on the next
2366     // allocation so make the delta greater than just enough
2367     // for this allocation.
2368     delta = max_delta;
2369   } else {
2370     // This allocation is large but the next ones are probably not
2371     // so increase by the minimum.
2372     delta = delta + min_delta;
2373   }
2374 
2375   assert_is_aligned(delta, Metaspace::commit_alignment());
2376 
2377   return delta;
2378 }
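
// Illustrative sketch (not part of the original source): assuming, for illustration,
// MinMetaspaceExpansion = 256K, MaxMetaspaceExpansion = 4M and a commit alignment of 64K:
//
//   bytes = 100K -> align_up = 128K <= 256K        -> delta = 256K
//   bytes = 1M   -> align_up = 1M, 256K < 1M <= 4M -> delta = 4M
//   bytes = 6M   -> align_up = 6M > 4M             -> delta = 6M + 256K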
2379 
2380 size_t MetaspaceGC::capacity_until_GC() {
2381   size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
2382   assert(value >= MetaspaceSize, "Not initialized properly?");
2383   return value;
2384 }
2385 
2386 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
2387   assert_is_aligned(v, Metaspace::commit_alignment());
2388 
2389   intptr_t capacity_until_GC = _capacity_until_GC;
2390   intptr_t new_value = capacity_until_GC + v;
2391 
2392   if (new_value < capacity_until_GC) {
2393     // The addition wrapped around, set new_value to aligned max value.
2394     new_value = align_down(max_uintx, Metaspace::commit_alignment());
2395   }
2396 
2397   intptr_t expected = _capacity_until_GC;
2398   intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);
2399 
2400   if (expected != actual) {
2401     return false;
2402   }
2403 
2404   if (new_cap_until_GC != NULL) {
2405     *new_cap_until_GC = new_value;
2406   }
2407   if (old_cap_until_GC != NULL) {
2408     *old_cap_until_GC = capacity_until_GC;
2409   }
2410   return true;
2411 }
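
// Illustrative caller sketch (not part of the original source; hypothetical code):
// the CAS above is not retried here, so a caller that must raise the HWM typically
// loops until it succeeds or decides the raise is no longer needed, e.g.:
//
//   size_t new_cap, old_cap;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, &new_cap, &old_cap)) {
//     // another thread won the race; re-evaluate before retrying
//   }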
2412 
2413 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
2414   assert_is_aligned(v, Metaspace::commit_alignment());
2415 
2416   return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
2417 }
2418 
2419 void MetaspaceGC::initialize() {
2420   // Set the high-water mark to MaxMetaspaceSize during VM initialization since
2421   // we can't do a GC during initialization.
2422   _capacity_until_GC = MaxMetaspaceSize;
2423 }
2424 
2425 void MetaspaceGC::post_initialize() {
2426   // Reset the high-water mark once the VM initialization is done.
2427   _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
2428 }
2429 
2430 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
2431   // Check if the compressed class space is full.
2432   if (is_class && Metaspace::using_class_space()) {
2433     size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
2434     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
2435       log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
2436                 (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
2437       return false;
2438     }
2439   }
2440 
2441   // Check if the user has imposed a limit on the metaspace memory.
2442   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2443   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
2444     log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
2445               (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
2446     return false;
2447   }
2448 
2449   return true;
2450 }
2451 
2452 size_t MetaspaceGC::allowed_expansion() {
2453   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2454   size_t capacity_until_gc = capacity_until_GC();
2455 
2456   assert(capacity_until_gc >= committed_bytes,
2457          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
2458          capacity_until_gc, committed_bytes);
2459 
2460   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
2461   size_t left_until_GC = capacity_until_gc - committed_bytes;
2462   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
2463   log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
2464             " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ".",
2465             left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);
2466 
2467   return left_to_commit / BytesPerWord;
2468 }
2469 
2470 void MetaspaceGC::compute_new_size() {
2471   assert(_shrink_factor <= 100, "invalid shrink factor");
2472   uint current_shrink_factor = _shrink_factor;
2473   _shrink_factor = 0;
2474 
2475   // Using committed_bytes() for used_after_gc is an overestimation, since the
2476   // chunk free lists are included in committed_bytes() and the memory in an
2477   // un-fragmented chunk free list is available for future allocations.
2478   // However, if the chunk free lists becomes fragmented, then the memory may
2479   // not be available for future allocations and the memory is therefore "in use".
2480   // Including the chunk free lists in the definition of "in use" is therefore
2481   // necessary. Not including the chunk free lists can cause capacity_until_GC to
2482   // shrink below committed_bytes() and this has caused serious bugs in the past.
2483   const size_t used_after_gc = MetaspaceUtils::committed_bytes();
2484   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
2485 
2486   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
2487   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
2488 
2489   const double min_tmp = used_after_gc / maximum_used_percentage;
2490   size_t minimum_desired_capacity =
2491     (size_t)MIN2(min_tmp, double(max_uintx));
2492   // Don't shrink less than the initial generation size
2493   minimum_desired_capacity = MAX2(minimum_desired_capacity,
2494                                   MetaspaceSize);
2495 
2496   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
2497   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
2498                            minimum_free_percentage, maximum_used_percentage);
2499   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
2500 
2501 
2502   size_t shrink_bytes = 0;
2503   if (capacity_until_GC < minimum_desired_capacity) {
2504     // If we have less capacity below the metaspace HWM, then
2505     // increment the HWM.
2506     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
2507     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
2508     // Don't expand unless it's significant
2509     if (expand_bytes >= MinMetaspaceExpansion) {
2510       size_t new_capacity_until_GC = 0;
2511       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
2512       assert(succeeded, "Should always succesfully increment HWM when at safepoint");
2513 
2514       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2515                                                new_capacity_until_GC,
2516                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
2517       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
2518                                minimum_desired_capacity / (double) K,
2519                                expand_bytes / (double) K,
2520                                MinMetaspaceExpansion / (double) K,
2521                                new_capacity_until_GC / (double) K);
2522     }
2523     return;
2524   }
2525 
2526   // No expansion, now see if we want to shrink
2527   // We would never want to shrink more than this
2528   assert(capacity_until_GC >= minimum_desired_capacity,
2529          SIZE_FORMAT " >= " SIZE_FORMAT,
2530          capacity_until_GC, minimum_desired_capacity);
2531   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
2532 
2533   // Should shrinking be considered?
2534   if (MaxMetaspaceFreeRatio < 100) {
2535     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
2536     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
2537     const double max_tmp = used_after_gc / minimum_used_percentage;
2538     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
2539     maximum_desired_capacity = MAX2(maximum_desired_capacity,
2540                                     MetaspaceSize);
2541     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
2542                              maximum_free_percentage, minimum_used_percentage);
2543     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
2544                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
2545 
2546     assert(minimum_desired_capacity <= maximum_desired_capacity,
2547            "sanity check");
2548 
2549     if (capacity_until_GC > maximum_desired_capacity) {
2550       // Capacity too large, compute shrinking size
2551       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
2552       // We don't want to shrink all the way back to initSize if people call
2553       // System.gc(), because some programs do that between "phases" and then
2554       // we'd just have to grow the heap up again for the next phase.  So we
2555       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
2556       // on the third call, and 100% by the fourth call.  But if we recompute
2557       // size without shrinking, it goes back to 0%.
2558       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
2559 
2560       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
2561 
2562       assert(shrink_bytes <= max_shrink_bytes,
2563              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
2564              shrink_bytes, max_shrink_bytes);
2565       if (current_shrink_factor == 0) {
2566         _shrink_factor = 10;
2567       } else {
2568         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
2569       }
2570       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
2571                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
2572       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
2573                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
2574     }
2575   }
2576 
2577   // Don't shrink unless it's significant
2578   if (shrink_bytes >= MinMetaspaceExpansion &&
2579       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
2580     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
2581     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2582                                              new_capacity_until_GC,
2583                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
2584   }
2585 }
2586 
2587 // Metadebug methods
2588 
2589 void Metadebug::init_allocation_fail_alot_count() {
2590   if (MetadataAllocationFailALot) {
2591     _allocation_fail_alot_count =
2592       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
2593   }
2594 }
2595 
2596 #ifdef ASSERT
2597 bool Metadebug::test_metadata_failure() {
2598   if (MetadataAllocationFailALot &&
2599       Threads::is_vm_complete()) {
2600     if (_allocation_fail_alot_count > 0) {
2601       _allocation_fail_alot_count--;
2602     } else {
2603       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
2604       init_allocation_fail_alot_count();
2605       return true;
2606     }
2607   }
2608   return false;
2609 }
2610 #endif
2611 
2612 // ChunkManager methods
2613 size_t ChunkManager::free_chunks_total_words() {
2614   return _free_chunks_total;
2615 }
2616 
2617 size_t ChunkManager::free_chunks_total_bytes() {
2618   return free_chunks_total_words() * BytesPerWord;
2619 }
2620 
2621 // Update internal accounting after a chunk was added
2622 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
2623   assert_lock_strong(SpaceManager::expand_lock());
2624   _free_chunks_count ++;
2625   _free_chunks_total += c->word_size();
2626 }
2627 
2628 // Update internal accounting after a chunk was removed
2629 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
2630   assert_lock_strong(SpaceManager::expand_lock());
2631   assert(_free_chunks_count >= 1,
2632     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
2633   assert(_free_chunks_total >= c->word_size(),
2634     "ChunkManager::_free_chunks_total: about to go negative "
2635      "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
2636   _free_chunks_count --;
2637   _free_chunks_total -= c->word_size();
2638 }
2639 
2640 size_t ChunkManager::free_chunks_count() {
2641 #ifdef ASSERT
2642   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
2643     MutexLockerEx cl(SpaceManager::expand_lock(),
2644                      Mutex::_no_safepoint_check_flag);
2645     // This lock is only needed in debug because the verification
2646     // of the _free_chunks_totals walks the list of free chunks
2647     slow_locked_verify_free_chunks_count();
2648   }
2649 #endif
2650   return _free_chunks_count;
2651 }
2652 
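     // Map a chunk word size to its free list index. The exact specialized, small and
     // medium sizes map to their respective lists; any larger size is treated as humongous.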
2653 ChunkIndex ChunkManager::list_index(size_t size) {
2654   if (size_by_index(SpecializedIndex) == size) {
2655     return SpecializedIndex;
2656   }
2657   if (size_by_index(SmallIndex) == size) {
2658     return SmallIndex;
2659   }
2660   const size_t med_size = size_by_index(MediumIndex);
2661   if (med_size == size) {
2662     return MediumIndex;
2663   }
2664 
2665   assert(size > med_size, "Not a humongous chunk");
2666   return HumongousIndex;
2667 }
2668 
2669 size_t ChunkManager::size_by_index(ChunkIndex index) const {
2670   index_bounds_check(index);
2671   assert(index != HumongousIndex, "Do not call for humongous chunks.");
2672   return _free_chunks[index].size();
2673 }
2674 
2675 void ChunkManager::locked_verify_free_chunks_total() {
2676   assert_lock_strong(SpaceManager::expand_lock());
2677   assert(sum_free_chunks() == _free_chunks_total,
2678          "_free_chunks_total " SIZE_FORMAT " is not the"
2679          " same as sum " SIZE_FORMAT, _free_chunks_total,
2680          sum_free_chunks());
2681 }
2682 
2683 void ChunkManager::verify_free_chunks_total() {
2684   MutexLockerEx cl(SpaceManager::expand_lock(),
2685                      Mutex::_no_safepoint_check_flag);
2686   locked_verify_free_chunks_total();
2687 }
2688 
2689 void ChunkManager::locked_verify_free_chunks_count() {
2690   assert_lock_strong(SpaceManager::expand_lock());
2691   assert(sum_free_chunks_count() == _free_chunks_count,
2692          "_free_chunks_count " SIZE_FORMAT " is not the"
2693          " same as sum " SIZE_FORMAT, _free_chunks_count,
2694          sum_free_chunks_count());
2695 }
2696 
2697 void ChunkManager::verify_free_chunks_count() {
2698 #ifdef ASSERT
2699   MutexLockerEx cl(SpaceManager::expand_lock(),
2700                      Mutex::_no_safepoint_check_flag);
2701   locked_verify_free_chunks_count();
2702 #endif
2703 }
2704 
2705 void ChunkManager::verify() {
2706   MutexLockerEx cl(SpaceManager::expand_lock(),
2707                      Mutex::_no_safepoint_check_flag);
2708   locked_verify();
2709 }
2710 
2711 void ChunkManager::locked_verify() {
2712   locked_verify_free_chunks_count();
2713   locked_verify_free_chunks_total();
2714   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2715     ChunkList* list = free_chunks(i);
2716     if (list != NULL) {
2717       Metachunk* chunk = list->head();
2718       while (chunk) {
2719         DEBUG_ONLY(do_verify_chunk(chunk);)
2720         assert(chunk->is_tagged_free(), "Chunk should be tagged as free.");
2721         chunk = chunk->next();
2722       }
2723     }
2724   }
2725 }
2726 
2727 void ChunkManager::locked_print_free_chunks(outputStream* st) {
2728   assert_lock_strong(SpaceManager::expand_lock());
2729   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2730                 _free_chunks_total, _free_chunks_count);
2731 }
2732 
2733 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
2734   assert_lock_strong(SpaceManager::expand_lock());
2735   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2736                 sum_free_chunks(), sum_free_chunks_count());
2737 }
2738 
2739 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
2740   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
2741          "Bad index: %d", (int)index);
2742 
2743   return &_free_chunks[index];
2744 }
2745 
2746 // These methods, which sum the free chunk lists, are used by printing
2747 // methods that run in product builds.
2748 size_t ChunkManager::sum_free_chunks() {
2749   assert_lock_strong(SpaceManager::expand_lock());
2750   size_t result = 0;
2751   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2752     ChunkList* list = free_chunks(i);
2753 
2754     if (list == NULL) {
2755       continue;
2756     }
2757 
2758     result = result + list->count() * list->size();
2759   }
2760   result = result + humongous_dictionary()->total_size();
2761   return result;
2762 }
2763 
2764 size_t ChunkManager::sum_free_chunks_count() {
2765   assert_lock_strong(SpaceManager::expand_lock());
2766   size_t count = 0;
2767   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2768     ChunkList* list = free_chunks(i);
2769     if (list == NULL) {
2770       continue;
2771     }
2772     count = count + list->count();
2773   }
2774   count = count + humongous_dictionary()->total_free_blocks();
2775   return count;
2776 }
2777 
2778 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
2779   ChunkIndex index = list_index(word_size);
2780   assert(index < HumongousIndex, "No humongous list");
2781   return free_chunks(index);
2782 }
2783 
2784 // Helper for chunk splitting: given a target chunk size and a larger free chunk,
2785 // split up the larger chunk into n smaller chunks, at least one of which should be
2786 // the target chunk of the target chunk size. The smaller chunks, including the target
2787 // chunk, are returned to the freelist. The pointer to the target chunk is returned.
2788 // Note that this chunk is supposed to be removed from the freelist right away.
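     // A worked example (non-class space, assuming the standard chunk sizes defined
     // above): splitting a free SmallChunk (512 words) to obtain a SpecializedChunk
     // (128 words) produces the 128-word target chunk at the start of the region,
     // followed by three more 128-word remainder chunks returned to the freelist.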
2789 Metachunk* ChunkManager::split_chunk(size_t target_chunk_word_size, Metachunk* larger_chunk) {
2790   assert(larger_chunk->word_size() > target_chunk_word_size, "Sanity");
2791 
2792   const ChunkIndex larger_chunk_index = larger_chunk->get_chunk_type();
2793   const ChunkIndex target_chunk_index = get_chunk_type_by_size(target_chunk_word_size, is_class());
2794 
2795   MetaWord* const region_start = (MetaWord*)larger_chunk;
2796   const size_t region_word_len = larger_chunk->word_size();
2797   MetaWord* const region_end = region_start + region_word_len;
2798   VirtualSpaceNode* const vsn = larger_chunk->container();
2799   OccupancyMap* const ocmap = vsn->occupancy_map();
2800 
2801   // Any larger non-humongous chunk size is a multiple of any smaller chunk size.
2802   // Since non-humongous chunks are aligned to their chunk size, the larger chunk should start
2803   // at an address suitable to place the smaller target chunk.
2804   assert_is_aligned(region_start, target_chunk_word_size);
2805 
2806   // Remove old chunk.
2807   free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
2808   larger_chunk->remove_sentinel();
2809 
2810   // Prevent access to the old chunk from here on.
2811   larger_chunk = NULL;
2812   // ... and wipe it.
2813   DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));
2814 
2815   // In its place, first create the target chunk...
2816   MetaWord* p = region_start;
2817   Metachunk* target_chunk = ::new (p) Metachunk(target_chunk_index, is_class(), target_chunk_word_size, vsn);
2818   assert(target_chunk == (Metachunk*)p, "Sanity");
2819   target_chunk->set_origin(origin_split);
2820 
2821   // Note: we do not need to mark its start in the occupancy map
2822   // because it coincides with the old chunk start.
2823 
2824   // Mark chunk as free and return to the freelist.
2825   do_update_in_use_info_for_chunk(target_chunk, false);
2826   free_chunks(target_chunk_index)->return_chunk_at_head(target_chunk);
2827 
2828   // This chunk should now be valid and can be verified.
2829   DEBUG_ONLY(do_verify_chunk(target_chunk));
2830 
2831   // In the remaining space create the remainder chunks.
2832   p += target_chunk->word_size();
2833   assert(p < region_end, "Sanity");
2834 
2835   while (p < region_end) {
2836 
2837     // Find the largest chunk size which fits the alignment requirements at address p.
2838     ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index);
2839     size_t this_chunk_word_size = 0;
2840     for(;;) {
2841       this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class());
2842       if (is_aligned(p, this_chunk_word_size * BytesPerWord)) {
2843         break;
2844       } else {
2845         this_chunk_index = prev_chunk_index(this_chunk_index);
2846         assert(this_chunk_index >= target_chunk_index, "Sanity");
2847       }
2848     }
2849 
2850     assert(this_chunk_word_size >= target_chunk_word_size, "Sanity");
2851     assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
2852     assert(p + this_chunk_word_size <= region_end, "Sanity");
2853 
2854     // Create splitting chunk.
2855     Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
2856     assert(this_chunk == (Metachunk*)p, "Sanity");
2857     this_chunk->set_origin(origin_split);
2858     ocmap->set_chunk_starts_at_address(p, true);
2859     do_update_in_use_info_for_chunk(this_chunk, false);
2860 
2861     // This chunk should be valid and can be verified.
2862     DEBUG_ONLY(do_verify_chunk(this_chunk));
2863 
2864     // Return this chunk to freelist and correct counter.
2865     free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
2866     _free_chunks_count ++;
2867 
2868     log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
2869       SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
2870       p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
2871       p2i(region_start), p2i(region_end));
2872 
2873     p += this_chunk_word_size;
2874 
2875   }
2876 
2877   return target_chunk;
2878 }
2879 
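     // Takes a chunk of the requested word size off the free lists (or, for humongous
     // sizes, out of the humongous dictionary). If no free chunk of exactly that size
     // exists, a larger free chunk is split. Returns NULL if nothing suitable is
     // available. Expects the expand lock to be held.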
2880 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
2881   assert_lock_strong(SpaceManager::expand_lock());
2882 
2883   slow_locked_verify();
2884 
2885   Metachunk* chunk = NULL;
2886   bool we_did_split_a_chunk = false;
2887 
2888   if (list_index(word_size) != HumongousIndex) {
2889 
2890     ChunkList* free_list = find_free_chunks_list(word_size);
2891     assert(free_list != NULL, "Sanity check");
2892 
2893     chunk = free_list->head();
2894 
2895     if (chunk == NULL) {
2896       // Split large chunks into smaller chunks if there are no smaller chunks, just large chunks.
2897       // This is the counterpart of the coalescing-upon-chunk-return.
2898 
2899       ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());
2900 
2901       // Is there a larger chunk we could split?
2902       Metachunk* larger_chunk = NULL;
2903       ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
2904       while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
2905         larger_chunk = free_chunks(larger_chunk_index)->head();
2906         if (larger_chunk == NULL) {
2907           larger_chunk_index = next_chunk_index(larger_chunk_index);
2908         }
2909       }
2910 
2911       if (larger_chunk != NULL) {
2912         assert(larger_chunk->word_size() > word_size, "Sanity");
2913         assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");
2914 
2915         // We found a larger chunk. Let's split it up:
2916         // - remove old chunk
2917         // - in its place, create new smaller chunks, with at least one chunk
2918         //   being of target size, the others sized as large as possible. This
2919         //   is to make sure the resulting chunks are "as coalesced as possible"
2920         //   (similar to VirtualSpaceNode::retire()).
2921         // Note: during this operation both ChunkManager and VirtualSpaceNode
2922         //  are temporarily invalid, so be careful with asserts.
2923 
2924         log_trace(gc, metaspace, freelist)("%s: splitting chunk " PTR_FORMAT
2925            ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
2926           (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
2927           chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));
2928 
2929         chunk = split_chunk(word_size, larger_chunk);
2930 
2931         // This should have worked.
2932         assert(chunk != NULL, "Sanity");
2933         assert(chunk->word_size() == word_size, "Sanity");
2934         assert(chunk->is_tagged_free(), "Sanity");
2935 
2936         we_did_split_a_chunk = true;
2937 
2938       }
2939     }
2940 
2941     if (chunk == NULL) {
2942       return NULL;
2943     }
2944 
2945     // Remove the chunk as the head of the list.
2946     free_list->remove_chunk(chunk);
2947 
2948     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list: " PTR_FORMAT " chunks left: " SSIZE_FORMAT ".",
2949                                        p2i(free_list), free_list->count());
2950 
2951   } else {
2952     chunk = humongous_dictionary()->get_chunk(word_size);
2953 
2954     if (chunk == NULL) {
2955       return NULL;
2956     }
2957 
2958     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
2959                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
2960   }
2961 
2962   // Chunk has been removed from the chunk manager; update counters.
2963   account_for_removed_chunk(chunk);
2964   do_update_in_use_info_for_chunk(chunk, true);
2965   chunk->container()->inc_container_count();
2966   chunk->inc_use_count();
2967 
2968   // Remove it from the links to this freelist
2969   chunk->set_next(NULL);
2970   chunk->set_prev(NULL);
2971 
2972   // Run some verifications (some more if we did a chunk split)
2973 #ifdef ASSERT
2974   if (metaspace_slow_verify) {
2975     locked_verify();
2976     VirtualSpaceNode* const vsn = chunk->container();
2977     vsn->verify();
2978     if (we_did_split_a_chunk) {
2979       vsn->verify_free_chunks_are_ideally_merged();
2980     }
2981   }
2982 #endif
2983 
2984   return chunk;
2985 }
2986 
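     // Thin wrapper around free_chunks_get() that additionally logs the allocated
     // chunk and the resulting freelist state when freelist debug logging is enabled.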
2987 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
2988   assert_lock_strong(SpaceManager::expand_lock());
2989   slow_locked_verify();
2990 
2991   // Take from the beginning of the list
2992   Metachunk* chunk = free_chunks_get(word_size);
2993   if (chunk == NULL) {
2994     return NULL;
2995   }
2996 
2997   assert((word_size <= chunk->word_size()) ||
2998          (list_index(chunk->word_size()) == HumongousIndex),
2999          "Non-humongous variable sized chunk");
3000   LogTarget(Debug, gc, metaspace, freelist) lt;
3001   if (lt.is_enabled()) {
3002     size_t list_count;
3003     if (list_index(word_size) < HumongousIndex) {
3004       ChunkList* list = find_free_chunks_list(word_size);
3005       list_count = list->count();
3006     } else {
3007       list_count = humongous_dictionary()->total_count();
3008     }
3009     LogStream ls(lt);
3010     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
3011              p2i(this), p2i(chunk), chunk->word_size(), list_count);
3012     ResourceMark rm;
3013     locked_print_free_chunks(&ls);
3014   }
3015 
3016   return chunk;
3017 }
3018 
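     // Give a single chunk back to this ChunkManager: non-humongous chunks go back to
     // their size-specific freelist, humongous chunks to the dictionary. Afterwards an
     // attempt is made to coalesce the chunk with free neighbors into a larger chunk.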
3019 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
3020   assert_lock_strong(SpaceManager::expand_lock());
3021   assert(chunk != NULL, "Expected chunk.");
3022   DEBUG_ONLY(do_verify_chunk(chunk);)
3023   assert(chunk->get_chunk_type() == index, "Chunk does not match expected index.");
3024   assert(chunk->container() != NULL, "Container should have been set.");
3025   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
3026   index_bounds_check(index);
3027 
3028   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
3029   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
3030   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
3031   DEBUG_ONLY(chunk->mangle(badMetaWordVal);)
3032 
3033   if (index != HumongousIndex) {
3034     // Return non-humongous chunk to freelist.
3035     ChunkList* list = free_chunks(index);
3036     assert(list->size() == chunk->word_size(), "Wrong chunk type.");
3037     list->return_chunk_at_head(chunk);
3038     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
3039         chunk_size_name(index), p2i(chunk));
3040   } else {
3041     // Return humongous chunk to dictionary.
3042     assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
3043     assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
3044            "Humongous chunk has wrong alignment.");
3045     _humongous_dictionary.return_chunk(chunk);
3046     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
3047         chunk_size_name(index), p2i(chunk), chunk->word_size());
3048   }
3049   chunk->container()->dec_container_count();
3050   do_update_in_use_info_for_chunk(chunk, false);
3051 
3052   // Chunk has been added; update counters.
3053   account_for_added_chunk(chunk);
3054 
3055   // Attempt to coalesce the returned chunk with its neighboring chunks:
3056   // if this chunk is small or specialized, attempt to coalesce to a medium chunk.
3057   if (index == SmallIndex || index == SpecializedIndex) {
3058     if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
3059       // That did not work. But if this chunk is specialized, we may still be able to form a small chunk.
3060       if (index == SpecializedIndex) {
3061         if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
3062           // give up.
3063         }
3064       }
3065     }
3066   }
3067 
3068 }
3069 
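     // Return a NULL-terminated list of chunks (all of the given index) to this
     // ChunkManager, one chunk at a time via return_single_chunk().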
3070 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
3071   index_bounds_check(index);
3072   if (chunks == NULL) {
3073     return;
3074   }
3075   LogTarget(Trace, gc, metaspace, freelist) log;
3076   if (log.is_enabled()) { // tracing
3077     log.print("returning list of %s chunks...", chunk_size_name(index));
3078   }
3079   unsigned num_chunks_returned = 0;
3080   size_t size_chunks_returned = 0;
3081   Metachunk* cur = chunks;
3082   while (cur != NULL) {
3083     // Capture the next link before it is changed
3084     // by the call to return_chunk_at_head();
3085     Metachunk* next = cur->next();
3086     if (log.is_enabled()) { // tracing
3087       num_chunks_returned ++;
3088       size_chunks_returned += cur->word_size();
3089     }
3090     return_single_chunk(index, cur);
3091     cur = next;
3092   }
3093   if (log.is_enabled()) { // tracing
3094     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
3095         num_chunks_returned, chunk_size_name(index), size_chunks_returned);
3096     if (index != HumongousIndex) {
3097       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
3098     } else {
3099       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
3100     }
3101   }
3102 }
3103 
3104 void ChunkManager::print_on(outputStream* out) const {
3105   _humongous_dictionary.report_statistics(out);
3106 }
3107 
3108 void ChunkManager::locked_get_statistics(ChunkManagerStatistics* stat) const {
3109   assert_lock_strong(SpaceManager::expand_lock());
3110   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3111     stat->num_by_type[i] = num_free_chunks(i);
3112     stat->single_size_by_type[i] = size_by_index(i);
3113     stat->total_size_by_type[i] = size_free_chunks_in_bytes(i);
3114   }
3115   stat->num_humongous_chunks = num_free_chunks(HumongousIndex);
3116   stat->total_size_humongous_chunks = size_free_chunks_in_bytes(HumongousIndex);
3117 }
3118 
3119 void ChunkManager::get_statistics(ChunkManagerStatistics* stat) const {
3120   MutexLockerEx cl(SpaceManager::expand_lock(),
3121                    Mutex::_no_safepoint_check_flag);
3122   locked_get_statistics(stat);
3123 }
3124 
3125 void ChunkManager::print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale) {
3126   size_t total = 0;
3127   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3128 
3129   const char* unit = scale_unit(scale);
3130   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3131     out->print("  " SIZE_FORMAT " %s (" SIZE_FORMAT " bytes) chunks, total ",
3132                    stat->num_by_type[i], chunk_size_name(i),
3133                    stat->single_size_by_type[i]);
3134     if (scale == 1) {
3135       out->print_cr(SIZE_FORMAT " bytes", stat->total_size_by_type[i]);
3136     } else {
3137       out->print_cr("%.2f%s", (float)stat->total_size_by_type[i] / scale, unit);
3138     }
3139 
3140     total += stat->total_size_by_type[i];
3141   }
3142 
3143 
3144   total += stat->total_size_humongous_chunks;
3145 
3146   if (scale == 1) {
3147     out->print_cr("  " SIZE_FORMAT " humongous chunks, total " SIZE_FORMAT " bytes",
3148     stat->num_humongous_chunks, stat->total_size_humongous_chunks);
3149 
3150     out->print_cr("  total size: " SIZE_FORMAT " bytes.", total);
3151   } else {
3152     out->print_cr("  " SIZE_FORMAT " humongous chunks, total %.2f%s",
3153     stat->num_humongous_chunks,
3154     (float)stat->total_size_humongous_chunks / scale, unit);
3155 
3156     out->print_cr("  total size: %.2f%s.", (float)total / scale, unit);
3157   }
3158 
3159 }
3160 
3161 void ChunkManager::print_all_chunkmanagers(outputStream* out, size_t scale) {
3162   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3163 
3164   // Note: keep lock protection only while retrieving the statistics; do the
3165   // printing outside of lock protection.
3166   ChunkManagerStatistics stat;
3167   out->print_cr("Chunkmanager (non-class):");
3168   const ChunkManager* const non_class_cm = Metaspace::chunk_manager_metadata();
3169   if (non_class_cm != NULL) {
3170     non_class_cm->get_statistics(&stat);
3171     ChunkManager::print_statistics(&stat, out, scale);
3172   } else {
3173     out->print_cr("unavailable.");
3174   }
3175   out->print_cr("Chunkmanager (class):");
3176   const ChunkManager* const class_cm = Metaspace::chunk_manager_class();
3177   if (class_cm != NULL) {
3178     class_cm->get_statistics(&stat);
3179     ChunkManager::print_statistics(&stat, out, scale);
3180   } else {
3181     out->print_cr("unavailable.");
3182   }
3183 }
3184 
3185 // SpaceManager methods
3186 
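     // Adjusts a requested chunk size up to the next fixed chunk size (specialized,
     // small or medium); anything larger than a medium chunk is returned unchanged
     // and treated as humongous. For example, in the non-class space a request of
     // 200 words is adjusted up to a SmallChunk (512 words).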
3187 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
3188   size_t chunk_sizes[] = {
3189       specialized_chunk_size(is_class_space),
3190       small_chunk_size(is_class_space),
3191       medium_chunk_size(is_class_space)
3192   };
3193 
3194   // Adjust up to one of the fixed chunk sizes ...
3195   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
3196     if (requested <= chunk_sizes[i]) {
3197       return chunk_sizes[i];
3198     }
3199   }
3200 
3201   // ... or return the size as a humongous chunk.
3202   return requested;
3203 }
3204 
3205 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
3206   return adjust_initial_chunk_size(requested, is_class());
3207 }
3208 
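     // Picks the word size of the first chunk handed to a new SpaceManager, based on
     // the kind of metaspace (boot, anonymous, reflection or regular) and on whether
     // this manager serves the class or the non-class part.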
3209 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
3210   size_t requested;
3211 
3212   if (is_class()) {
3213     switch (type) {
3214     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
3215     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
3216     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
3217     default:                                 requested = ClassSmallChunk; break;
3218     }
3219   } else {
3220     switch (type) {
3221     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
3222     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
3223     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
3224     default:                                 requested = SmallChunk; break;
3225     }
3226   }
3227 
3228   // Adjust to one of the fixed chunk sizes (unless humongous)
3229   const size_t adjusted = adjust_initial_chunk_size(requested);
3230 
3231   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
3232          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
3233 
3234   return adjusted;
3235 }
3236 
3237 size_t SpaceManager::sum_free_in_chunks_in_use() const {
3238   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3239   size_t free = 0;
3240   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3241     Metachunk* chunk = chunks_in_use(i);
3242     while (chunk != NULL) {
3243       free += chunk->free_word_size();
3244       chunk = chunk->next();
3245     }
3246   }
3247   return free;
3248 }
3249 
3250 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
3251   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3252   size_t result = 0;
3253   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3254    result += sum_waste_in_chunks_in_use(i);
3255   }
3256 
3257   return result;
3258 }
3259 
3260 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
3261   size_t result = 0;
3262   Metachunk* chunk = chunks_in_use(index);
3263   // Count the free space in all the chunks but not in the
3264   // current chunk, from which allocations are still being done.
3265   while (chunk != NULL) {
3266     if (chunk != current_chunk()) {
3267       result += chunk->free_word_size();
3268     }
3269     chunk = chunk->next();
3270   }
3271   return result;
3272 }
3273 
3274 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
3275   // For CMS use "allocated_chunks_words()", which does not need the
3276   // Metaspace lock.  For the other collectors sum over the chunk
3277   // lists.  Using both methods acts as a check that "allocated_chunks_words()"
3278   // is correct.  That is, sum_capacity_in_chunks_in_use() is too expensive
3279   // to use in the product, so allocated_chunks_words() should be used there,
3280   // but this allows checking that allocated_chunks_words() returns the same
3281   // value as sum_capacity_in_chunks_in_use(), which is the definitive
3282   // answer.
3283   if (UseConcMarkSweepGC) {
3284     return allocated_chunks_words();
3285   } else {
3286     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3287     size_t sum = 0;
3288     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3289       Metachunk* chunk = chunks_in_use(i);
3290       while (chunk != NULL) {
3291         sum += chunk->word_size();
3292         chunk = chunk->next();
3293       }
3294     }
3295     return sum;
3296   }
3297 }
3298 
3299 size_t SpaceManager::sum_count_in_chunks_in_use() {
3300   size_t count = 0;
3301   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3302     count = count + sum_count_in_chunks_in_use(i);
3303   }
3304 
3305   return count;
3306 }
3307 
3308 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
3309   size_t count = 0;
3310   Metachunk* chunk = chunks_in_use(i);
3311   while (chunk != NULL) {
3312     count++;
3313     chunk = chunk->next();
3314   }
3315   return count;
3316 }
3317 
3318 
3319 size_t SpaceManager::sum_used_in_chunks_in_use() const {
3320   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3321   size_t used = 0;
3322   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3323     Metachunk* chunk = chunks_in_use(i);
3324     while (chunk != NULL) {
3325       used += chunk->used_word_size();
3326       chunk = chunk->next();
3327     }
3328   }
3329   return used;
3330 }
3331 
3332 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
3333 
3334   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3335     Metachunk* chunk = chunks_in_use(i);
3336     st->print("SpaceManager: %s " PTR_FORMAT,
3337                  chunk_size_name(i), p2i(chunk));
3338     if (chunk != NULL) {
3339       st->print_cr(" free " SIZE_FORMAT,
3340                    chunk->free_word_size());
3341     } else {
3342       st->cr();
3343     }
3344   }
3345 
3346   chunk_manager()->locked_print_free_chunks(st);
3347   chunk_manager()->locked_print_sum_free_chunks(st);
3348 }
3349 
3350 size_t SpaceManager::calc_chunk_size(size_t word_size) {
3351 
3352   // Decide between a small chunk and a medium chunk.  Up to
3353   // _small_chunk_limit small chunks can be allocated.
3354   // After that a medium chunk is preferred.
3355   size_t chunk_word_size;
3356 
3357   // Special case for anonymous metadata space.
3358   // Anonymous metadata space is usually small, with the majority within the 1K - 2K range and
3359   // rarely above 4K (64-bit JVM).
3360   // Instead of jumping to SmallChunk after the initial chunk is exhausted, keeping allocations
3361   // at SpecializedChunk up to _anon_and_delegating_metadata_specialize_chunk_limit (4)
3362   // reduces space waste from 60+% to around 30%.
3363   if ((_space_type == Metaspace::AnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
3364       _mdtype == Metaspace::NonClassType &&
3365       sum_count_in_chunks_in_use(SpecializedIndex) < _anon_and_delegating_metadata_specialize_chunk_limit &&
3366       word_size + Metachunk::overhead() <= SpecializedChunk) {
3367     return SpecializedChunk;
3368   }
3369 
3370   if (chunks_in_use(MediumIndex) == NULL &&
3371       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
3372     chunk_word_size = (size_t) small_chunk_size();
3373     if (word_size + Metachunk::overhead() > small_chunk_size()) {
3374       chunk_word_size = medium_chunk_size();
3375     }
3376   } else {
3377     chunk_word_size = medium_chunk_size();
3378   }
3379 
3380   // Might still need a humongous chunk.  Enforce
3381   // humongous allocation sizes to be aligned up to
3382   // the smallest chunk size.
3383   size_t if_humongous_sized_chunk =
3384     align_up(word_size + Metachunk::overhead(),
3385                   smallest_chunk_size());
3386   chunk_word_size =
3387     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
3388 
3389   assert(!SpaceManager::is_humongous(word_size) ||
3390          chunk_word_size == if_humongous_sized_chunk,
3391          "Size calculation is wrong, word_size " SIZE_FORMAT
3392          " chunk_word_size " SIZE_FORMAT,
3393          word_size, chunk_word_size);
3394   Log(gc, metaspace, alloc) log;
3395   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
3396     log.debug("Metadata humongous allocation:");
3397     log.debug("  word_size " PTR_FORMAT, word_size);
3398     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
3399     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
3400   }
3401   return chunk_word_size;
3402 }
3403 
3404 void SpaceManager::track_metaspace_memory_usage() {
3405   if (is_init_completed()) {
3406     if (is_class()) {
3407       MemoryService::track_compressed_class_memory_usage();
3408     }
3409     MemoryService::track_metaspace_memory_usage();
3410   }
3411 }
3412 
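     // Called when the current chunk cannot satisfy the allocation: computes a new
     // chunk size, obtains a new chunk (from the chunk freelist or by expanding the
     // virtual space list), adds it to the in-use chunks and allocates from it.
     // Returns NULL if no new chunk could be obtained.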
3413 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
3414   assert(vs_list()->current_virtual_space() != NULL,
3415          "Should have been set");
3416   assert(current_chunk() == NULL ||
3417          current_chunk()->allocate(word_size) == NULL,
3418          "Don't need to expand");
3419   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3420 
3421   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3422     size_t words_left = 0;
3423     size_t words_used = 0;
3424     if (current_chunk() != NULL) {
3425       words_left = current_chunk()->free_word_size();
3426       words_used = current_chunk()->used_word_size();
3427     }
3428     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
3429                                        word_size, words_used, words_left);
3430   }
3431 
3432   // Get another chunk
3433   size_t chunk_word_size = calc_chunk_size(word_size);
3434   Metachunk* next = get_new_chunk(chunk_word_size);
3435 
3436   MetaWord* mem = NULL;
3437 
3438   // If a chunk was available, add it to the in-use chunk list
3439   // and do an allocation from it.
3440   if (next != NULL) {
3441     // Add to this manager's list of chunks in use.
3442     add_chunk(next, false);
3443     mem = next->allocate(word_size);
3444   }
3445 
3446   // Track metaspace memory usage statistic.
3447   track_metaspace_memory_usage();
3448 
3449   return mem;
3450 }
3451 
3452 void SpaceManager::print_on(outputStream* st) const {
3453 
3454   for (ChunkIndex i = ZeroIndex;
3455        i < NumberOfInUseLists ;
3456        i = next_chunk_index(i) ) {
3457     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
3458                  p2i(chunks_in_use(i)),
3459                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
3460   }
3461   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
3462                " Humongous " SIZE_FORMAT,
3463                sum_waste_in_chunks_in_use(SmallIndex),
3464                sum_waste_in_chunks_in_use(MediumIndex),
3465                sum_waste_in_chunks_in_use(HumongousIndex));
3466   // block free lists
3467   if (block_freelists() != NULL) {
3468     st->print_cr("total in block free lists " SIZE_FORMAT,
3469       block_freelists()->total_size());
3470   }
3471 }
3472 
3473 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
3474                            Metaspace::MetaspaceType space_type,
3475                            Mutex* lock) :
3476   _mdtype(mdtype),
3477   _space_type(space_type),
3478   _allocated_blocks_words(0),
3479   _allocated_chunks_words(0),
3480   _allocated_chunks_count(0),
3481   _block_freelists(NULL),
3482   _lock(lock)
3483 {
3484   initialize();
3485 }
3486 
3487 void SpaceManager::inc_size_metrics(size_t words) {
3488   assert_lock_strong(SpaceManager::expand_lock());
3489   // Running total of words in allocated Metachunks and the count of
3490   // allocated Metachunks for this SpaceManager
3491   _allocated_chunks_words = _allocated_chunks_words + words;
3492   _allocated_chunks_count++;
3493   // Global total of capacity in allocated Metachunks
3494   MetaspaceUtils::inc_capacity(mdtype(), words);
3495   // Global total of allocated Metablocks.
3496   // used_words_slow() includes the overhead of each
3497   // Metachunk, so account for that overhead in the used total when the
3498   // Metachunk is first added (it is only added once per
3499   // Metachunk).
3500   MetaspaceUtils::inc_used(mdtype(), Metachunk::overhead());
3501 }
3502 
3503 void SpaceManager::inc_used_metrics(size_t words) {
3504   // Add to the per SpaceManager total
3505   Atomic::add(words, &_allocated_blocks_words);
3506   // Add to the global total
3507   MetaspaceUtils::inc_used(mdtype(), words);
3508 }
3509 
3510 void SpaceManager::dec_total_from_size_metrics() {
3511   MetaspaceUtils::dec_capacity(mdtype(), allocated_chunks_words());
3512   MetaspaceUtils::dec_used(mdtype(), allocated_blocks_words());
3513   // Also deduct the overhead per Metachunk
3514   MetaspaceUtils::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
3515 }
3516 
3517 void SpaceManager::initialize() {
3518   Metadebug::init_allocation_fail_alot_count();
3519   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3520     _chunks_in_use[i] = NULL;
3521   }
3522   _current_chunk = NULL;
3523   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
3524 }
3525 
3526 SpaceManager::~SpaceManager() {
3527   // The assert below takes this->_lock, which can't be done while holding expand_lock()
3528   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
3529          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
3530          " allocated_chunks_words() " SIZE_FORMAT,
3531          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
3532 
3533   MutexLockerEx fcl(SpaceManager::expand_lock(),
3534                     Mutex::_no_safepoint_check_flag);
3535 
3536   assert(sum_count_in_chunks_in_use() == allocated_chunks_count(),
3537          "sum_count_in_chunks_in_use() " SIZE_FORMAT
3538          " allocated_chunks_count() " SIZE_FORMAT,
3539          sum_count_in_chunks_in_use(), allocated_chunks_count());
3540 
3541   chunk_manager()->slow_locked_verify();
3542 
3543   dec_total_from_size_metrics();
3544 
3545   Log(gc, metaspace, freelist) log;
3546   if (log.is_trace()) {
3547     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
3548     ResourceMark rm;
3549     LogStream ls(log.trace());
3550     locked_print_chunks_in_use_on(&ls);
3551     if (block_freelists() != NULL) {
3552       block_freelists()->print_on(&ls);
3553     }
3554   }
3555 
3556   // Add all the chunks in use by this space manager
3557   // to the global list of free chunks.
3558 
3559   // Follow each list of chunks-in-use and add them to the
3560   // free lists.  Each list is NULL terminated.
3561 
3562   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
3563     Metachunk* chunks = chunks_in_use(i);
3564     chunk_manager()->return_chunk_list(i, chunks);
3565     set_chunks_in_use(i, NULL);
3566   }
3567 
3568   chunk_manager()->slow_locked_verify();
3569 
3570   if (_block_freelists != NULL) {
3571     delete _block_freelists;
3572   }
3573 }
3574 
3575 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
3576   assert_lock_strong(_lock);
3577   // Allocations and deallocations are in raw_word_size
3578   size_t raw_word_size = get_allocation_word_size(word_size);
3579   // Lazily create a block_freelist
3580   if (block_freelists() == NULL) {
3581     _block_freelists = new BlockFreelist();
3582   }
3583   block_freelists()->return_block(p, raw_word_size);
3584 }
3585 
3586 // Adds a chunk to the list of chunks in use.
3587 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
3588 
3589   assert(new_chunk != NULL, "Should not be NULL");
3590   assert(new_chunk->next() == NULL, "Should not be on a list");
3591 
3592   new_chunk->reset_empty();
3593 
3594   // Find the correct list and set the current
3595   // chunk for that list.
3596   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
3597 
3598   if (index != HumongousIndex) {
3599     retire_current_chunk();
3600     set_current_chunk(new_chunk);
3601     new_chunk->set_next(chunks_in_use(index));
3602     set_chunks_in_use(index, new_chunk);
3603   } else {
3604     // For null class loader data and DumpSharedSpaces, the first chunk isn't
3605     // small, so small will be null.  Link this first chunk as the current
3606     // chunk.
3607     if (make_current) {
3608       // Set as the current chunk but otherwise treat as a humongous chunk.
3609       set_current_chunk(new_chunk);
3610     }
3611     // Link at head.  The _current_chunk only points to a humongous chunk for
3612     // the null class loader metaspace (class and data virtual space managers)
3613     // when it allocates any humongous chunks, so it will not point to the tail
3614     // of the humongous chunks list.
3615     new_chunk->set_next(chunks_in_use(HumongousIndex));
3616     set_chunks_in_use(HumongousIndex, new_chunk);
3617 
3618     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
3619   }
3620 
3621   // Add to the running sum of capacity
3622   inc_size_metrics(new_chunk->word_size());
3623 
3624   assert(new_chunk->is_empty(), "Not ready for reuse");
3625   Log(gc, metaspace, freelist) log;
3626   if (log.is_trace()) {
3627     log.trace("SpaceManager::add_chunk: (" SIZE_FORMAT ") ", sum_count_in_chunks_in_use());
3628     ResourceMark rm;
3629     LogStream ls(log.trace());
3630     new_chunk->print_on(&ls);
3631     chunk_manager()->locked_print_free_chunks(&ls);
3632   }
3633 }
3634 
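     // If the current chunk still has a usefully large free remainder, carve it off
     // and hand it to the block freelist so it can serve future small allocations.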
3635 void SpaceManager::retire_current_chunk() {
3636   if (current_chunk() != NULL) {
3637     size_t remaining_words = current_chunk()->free_word_size();
3638     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
3639       MetaWord* ptr = current_chunk()->allocate(remaining_words);
3640       deallocate(ptr, remaining_words);
3641       inc_used_metrics(remaining_words);
3642     }
3643   }
3644 }
3645 
3646 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
3647   // Get a chunk from the chunk freelist
3648   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
3649 
3650   if (next == NULL) {
3651     next = vs_list()->get_new_chunk(chunk_word_size,
3652                                     medium_chunk_bunch());
3653   }
3654 
3655   Log(gc, metaspace, alloc) log;
3656   if (log.is_debug() && next != NULL &&
3657       SpaceManager::is_humongous(next->word_size())) {
3658     log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());
3659   }
3660 
3661   return next;
3662 }
3663 
3664 MetaWord* SpaceManager::allocate(size_t word_size) {
3665   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3666   size_t raw_word_size = get_allocation_word_size(word_size);
3667   BlockFreelist* fl =  block_freelists();
3668   MetaWord* p = NULL;
3669   // Allocation from the dictionary is expensive in the sense that
3670   // the dictionary has to be searched for a size.  Don't allocate
3671   // from the dictionary until it starts to get fat.  Is this
3672   // a reasonable policy?  Maybe a skinny dictionary is fast enough
3673   // for allocations.  Do some profiling.  JJJ
3674   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
3675     p = fl->get_block(raw_word_size);
3676   }
3677   if (p == NULL) {
3678     p = allocate_work(raw_word_size);
3679   }
3680 
3681   return p;
3682 }
3683 
3684 // Returns the address of space allocated for "word_size".
3685 // This method does not know about blocks (Metablocks)
3686 MetaWord* SpaceManager::allocate_work(size_t word_size) {
3687   assert_lock_strong(_lock);
3688 #ifdef ASSERT
3689   if (Metadebug::test_metadata_failure()) {
3690     return NULL;
3691   }
3692 #endif
3693   // Is there space in the current chunk?
3694   MetaWord* result = NULL;
3695 
3696   if (current_chunk() != NULL) {
3697     result = current_chunk()->allocate(word_size);
3698   }
3699 
3700   if (result == NULL) {
3701     result = grow_and_allocate(word_size);
3702   }
3703 
3704   if (result != NULL) {
3705     inc_used_metrics(word_size);
3706     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
3707            "Head of the list is being allocated");
3708   }
3709 
3710   return result;
3711 }
3712 
3713 void SpaceManager::verify() {
3714   // If there are blocks in the dictionary, then
3715   // verification of chunks does not work since
3716   // being in the dictionary alters a chunk.
3717   if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
3718     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3719       Metachunk* curr = chunks_in_use(i);
3720       while (curr != NULL) {
3721         DEBUG_ONLY(do_verify_chunk(curr);)
3722         assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
3723         curr = curr->next();
3724       }
3725     }
3726   }
3727 }
3728 
3729 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
3730   assert(is_humongous(chunk->word_size()) ||
3731          chunk->word_size() == medium_chunk_size() ||
3732          chunk->word_size() == small_chunk_size() ||
3733          chunk->word_size() == specialized_chunk_size(),
3734          "Chunk size is wrong");
3735   return;
3736 }
3737 
3738 #ifdef ASSERT
3739 void SpaceManager::verify_allocated_blocks_words() {
3740   // Verification is only guaranteed at a safepoint.
3741   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
3742     "Verification can fail if the application is running");
3743   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
3744          "allocation total is not consistent " SIZE_FORMAT
3745          " vs " SIZE_FORMAT,
3746          allocated_blocks_words(), sum_used_in_chunks_in_use());
3747 }
3748 
3749 #endif
3750 
3751 void SpaceManager::dump(outputStream* const out) const {
3752   size_t curr_total = 0;
3753   size_t waste = 0;
3754   uint i = 0;
3755   size_t used = 0;
3756   size_t capacity = 0;
3757 
3758   // Add up statistics for all chunks in this SpaceManager.
3759   for (ChunkIndex index = ZeroIndex;
3760        index < NumberOfInUseLists;
3761        index = next_chunk_index(index)) {
3762     for (Metachunk* curr = chunks_in_use(index);
3763          curr != NULL;
3764          curr = curr->next()) {
3765       out->print("%d) ", i++);
3766       curr->print_on(out);
3767       curr_total += curr->word_size();
3768       used += curr->used_word_size();
3769       capacity += curr->word_size();
3770       waste += curr->free_word_size() + curr->overhead();
3771     }
3772   }
3773 
3774   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3775     if (block_freelists() != NULL) block_freelists()->print_on(out);
3776   }
3777 
3778   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
3779   // Free space isn't wasted.
3780   waste -= free;
3781 
3782   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
3783                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
3784                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
3785 }
3786 
3787 // MetaspaceUtils
3788 
3789 
3790 size_t MetaspaceUtils::_capacity_words[] = {0, 0};
3791 volatile size_t MetaspaceUtils::_used_words[] = {0, 0};
3792 
3793 size_t MetaspaceUtils::free_bytes(Metaspace::MetadataType mdtype) {
3794   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3795   return list == NULL ? 0 : list->free_bytes();
3796 }
3797 
3798 size_t MetaspaceUtils::free_bytes() {
3799   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
3800 }
3801 
3802 void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
3803   assert_lock_strong(SpaceManager::expand_lock());
3804   assert(words <= capacity_words(mdtype),
3805          "About to decrement below 0: words " SIZE_FORMAT
3806          " is greater than _capacity_words[%u] " SIZE_FORMAT,
3807          words, mdtype, capacity_words(mdtype));
3808   _capacity_words[mdtype] -= words;
3809 }
3810 
3811 void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
3812   assert_lock_strong(SpaceManager::expand_lock());
3813   // Needs to be atomic
3814   _capacity_words[mdtype] += words;
3815 }
3816 
3817 void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
3818   assert(words <= used_words(mdtype),
3819          "About to decrement below 0: words " SIZE_FORMAT
3820          " is greater than _used_words[%u] " SIZE_FORMAT,
3821          words, mdtype, used_words(mdtype));
3822   // For CMS deallocation of the Metaspaces occurs during the
3823   // sweep which is a concurrent phase.  Protection by the expand_lock()
3824   // is not enough since allocation is on a per Metaspace basis
3825   // and protected by the Metaspace lock.
3826   Atomic::sub(words, &_used_words[mdtype]);
3827 }
3828 
3829 void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
3830   // _used_words tracks allocations for
3831   // each piece of metadata.  Those allocations are
3832   // generally done concurrently by different application
3833   // threads so must be done atomically.
3834   Atomic::add(words, &_used_words[mdtype]);
3835 }
3836 
3837 size_t MetaspaceUtils::used_bytes_slow(Metaspace::MetadataType mdtype) {
3838   size_t used = 0;
3839   ClassLoaderDataGraphMetaspaceIterator iter;
3840   while (iter.repeat()) {
3841     ClassLoaderMetaspace* msp = iter.get_next();
3842     // Sum allocated_blocks_words for each metaspace
3843     if (msp != NULL) {
3844       used += msp->used_words_slow(mdtype);
3845     }
3846   }
3847   return used * BytesPerWord;
3848 }
3849 
3850 size_t MetaspaceUtils::free_bytes_slow(Metaspace::MetadataType mdtype) {
3851   size_t free = 0;
3852   ClassLoaderDataGraphMetaspaceIterator iter;
3853   while (iter.repeat()) {
3854     ClassLoaderMetaspace* msp = iter.get_next();
3855     if (msp != NULL) {
3856       free += msp->free_words_slow(mdtype);
3857     }
3858   }
3859   return free * BytesPerWord;
3860 }
3861 
3862 size_t MetaspaceUtils::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
3863   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
3864     return 0;
3865   }
3866   // Don't count the space in the freelists.  That space will be
3867   // added to the capacity calculation as needed.
3868   size_t capacity = 0;
3869   ClassLoaderDataGraphMetaspaceIterator iter;
3870   while (iter.repeat()) {
3871     ClassLoaderMetaspace* msp = iter.get_next();
3872     if (msp != NULL) {
3873       capacity += msp->capacity_words_slow(mdtype);
3874     }
3875   }
3876   return capacity * BytesPerWord;
3877 }
3878 
3879 size_t MetaspaceUtils::capacity_bytes_slow() {
3880 #ifdef PRODUCT
3881   // Use capacity_bytes() in PRODUCT instead of this function.
3882   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
3883 #endif
3884   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
3885   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
3886   assert(capacity_bytes() == class_capacity + non_class_capacity,
3887          "bad accounting: capacity_bytes() " SIZE_FORMAT
3888          " class_capacity + non_class_capacity " SIZE_FORMAT
3889          " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
3890          capacity_bytes(), class_capacity + non_class_capacity,
3891          class_capacity, non_class_capacity);
3892 
3893   return class_capacity + non_class_capacity;
3894 }
3895 
3896 size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
3897   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3898   return list == NULL ? 0 : list->reserved_bytes();
3899 }
3900 
3901 size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
3902   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3903   return list == NULL ? 0 : list->committed_bytes();
3904 }
3905 
3906 size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
3907 
3908 size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
3909   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
3910   if (chunk_manager == NULL) {
3911     return 0;
3912   }
3913   chunk_manager->slow_verify();
3914   return chunk_manager->free_chunks_total_words();
3915 }
3916 
3917 size_t MetaspaceUtils::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
3918   return free_chunks_total_words(mdtype) * BytesPerWord;
3919 }
3920 
3921 size_t MetaspaceUtils::free_chunks_total_words() {
3922   return free_chunks_total_words(Metaspace::ClassType) +
3923          free_chunks_total_words(Metaspace::NonClassType);
3924 }
3925 
3926 size_t MetaspaceUtils::free_chunks_total_bytes() {
3927   return free_chunks_total_words() * BytesPerWord;
3928 }
3929 
3930 bool MetaspaceUtils::has_chunk_free_list(Metaspace::MetadataType mdtype) {
3931   return Metaspace::get_chunk_manager(mdtype) != NULL;
3932 }
3933 
3934 MetaspaceChunkFreeListSummary MetaspaceUtils::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
3935   if (!has_chunk_free_list(mdtype)) {
3936     return MetaspaceChunkFreeListSummary();
3937   }
3938 
3939   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
3940   return cm->chunk_free_list_summary();
3941 }
3942 
3943 void MetaspaceUtils::print_metaspace_change(size_t prev_metadata_used) {
3944   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
3945                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
3946 }
3947 
3948 void MetaspaceUtils::print_on(outputStream* out) {
3949   Metaspace::MetadataType nct = Metaspace::NonClassType;
3950 
3951   out->print_cr(" Metaspace       "
3952                 "used "      SIZE_FORMAT "K, "
3953                 "capacity "  SIZE_FORMAT "K, "
3954                 "committed " SIZE_FORMAT "K, "
3955                 "reserved "  SIZE_FORMAT "K",
3956                 used_bytes()/K,
3957                 capacity_bytes()/K,
3958                 committed_bytes()/K,
3959                 reserved_bytes()/K);
3960 
3961   if (Metaspace::using_class_space()) {
3962     Metaspace::MetadataType ct = Metaspace::ClassType;
3963     out->print_cr("  class space    "
3964                   "used "      SIZE_FORMAT "K, "
3965                   "capacity "  SIZE_FORMAT "K, "
3966                   "committed " SIZE_FORMAT "K, "
3967                   "reserved "  SIZE_FORMAT "K",
3968                   used_bytes(ct)/K,
3969                   capacity_bytes(ct)/K,
3970                   committed_bytes(ct)/K,
3971                   reserved_bytes(ct)/K);
3972   }
3973 }
3974 
3975 // Print information for class space and data space separately.
3976 // This is almost the same as above.
3977 void MetaspaceUtils::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
3978   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
3979   size_t capacity_bytes = capacity_bytes_slow(mdtype);
3980   size_t used_bytes = used_bytes_slow(mdtype);
3981   size_t free_bytes = free_bytes_slow(mdtype);
3982   size_t used_and_free = used_bytes + free_bytes +
3983                            free_chunks_capacity_bytes;
3984   out->print_cr("  Chunk accounting: (used in chunks " SIZE_FORMAT
3985              "K + unused in chunks " SIZE_FORMAT "K  + "
3986              " capacity in free chunks " SIZE_FORMAT "K) = " SIZE_FORMAT
3987              "K  capacity in allocated chunks " SIZE_FORMAT "K",
3988              used_bytes / K,
3989              free_bytes / K,
3990              free_chunks_capacity_bytes / K,
3991              used_and_free / K,
3992              capacity_bytes / K);
3993   // Accounting can only be correct if we got the values during a safepoint
3994   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
3995 }
3996 
3997 // Print total fragmentation for class metaspaces
3998 void MetaspaceUtils::print_class_waste(outputStream* out) {
3999   assert(Metaspace::using_class_space(), "class metaspace not used");
4000   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
4001   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
4002   ClassLoaderDataGraphMetaspaceIterator iter;
4003   while (iter.repeat()) {
4004     ClassLoaderMetaspace* msp = iter.get_next();
4005     if (msp != NULL) {
4006       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
4007       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
4008       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
4009       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
4010       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
4011       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
4012       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
4013     }
4014   }
4015   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
4016                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
4017                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
4018                 "large count " SIZE_FORMAT,
4019                 cls_specialized_count, cls_specialized_waste,
4020                 cls_small_count, cls_small_waste,
4021                 cls_medium_count, cls_medium_waste, cls_humongous_count);
4022 }
4023 
4024 // Print total fragmentation for data and class metaspaces separately
4025 void MetaspaceUtils::print_waste(outputStream* out) {
4026   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
4027   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
4028 
4029   ClassLoaderDataGraphMetaspaceIterator iter;
4030   while (iter.repeat()) {
4031     ClassLoaderMetaspace* msp = iter.get_next();
4032     if (msp != NULL) {
4033       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
4034       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
4035       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
4036       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
4037       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
4038       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
4039       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
4040     }
4041   }
4042   out->print_cr("Total fragmentation waste (words) doesn't count free space");
4043   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
4044                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
4045                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
4046                         "large count " SIZE_FORMAT,
4047              specialized_count, specialized_waste, small_count,
4048              small_waste, medium_count, medium_waste, humongous_count);
4049   if (Metaspace::using_class_space()) {
4050     print_class_waste(out);
4051   }
4052 }
4053 
4054 class MetadataStats {
4055 private:
4056   size_t _capacity;
4057   size_t _used;
4058   size_t _free;
4059   size_t _waste;
4060 
4061 public:
4062   MetadataStats() : _capacity(0), _used(0), _free(0), _waste(0) { }
4063   MetadataStats(size_t capacity, size_t used, size_t free, size_t waste)
4064   : _capacity(capacity), _used(used), _free(free), _waste(waste) { }
4065 
4066   void add(const MetadataStats& stats) {
4067     _capacity += stats.capacity();
4068     _used += stats.used();
4069     _free += stats.free();
4070     _waste += stats.waste();
4071   }
4072 
4073   size_t capacity() const { return _capacity; }
4074   size_t used() const     { return _used; }
4075   size_t free() const     { return _free; }
4076   size_t waste() const    { return _waste; }
4077 
4078   void print_on(outputStream* out, size_t scale) const;
4079 };
4080 
4081 
4082 void MetadataStats::print_on(outputStream* out, size_t scale) const {
4083   const char* unit = scale_unit(scale);
4084   out->print_cr("capacity=%10.2f%s used=%10.2f%s free=%10.2f%s waste=%10.2f%s",
4085     (float)capacity() / scale, unit,
4086     (float)used() / scale, unit,
4087     (float)free() / scale, unit,
4088     (float)waste() / scale, unit);
4089 }
4090 
4091 class PrintCLDMetaspaceInfoClosure : public CLDClosure {
4092 private:
4093   outputStream*  _out;
4094   size_t         _scale;
4095 
4096   size_t         _total_count;
4097   MetadataStats  _total_metadata;
4098   MetadataStats  _total_class;
4099 
4100   size_t         _total_anon_count;
4101   MetadataStats  _total_anon_metadata;
4102   MetadataStats  _total_anon_class;
4103 
4104 public:
4105   PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale = K)
4106   : _out(out), _scale(scale), _total_count(0), _total_anon_count(0) { }
4107 
4108   ~PrintCLDMetaspaceInfoClosure() {
4109     print_summary();
4110   }
4111 
4112   void do_cld(ClassLoaderData* cld) {
4113     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
4114 
4115     if (cld->is_unloading()) return;
4116     ClassLoaderMetaspace* msp = cld->metaspace_or_null();
4117     if (msp == NULL) {
4118       return;
4119     }
4120 
4121     bool anonymous = false;
4122     if (cld->is_anonymous()) {
4123       _out->print_cr("ClassLoader: for anonymous class");
4124       anonymous = true;
4125     } else {
4126       ResourceMark rm;
4127       _out->print_cr("ClassLoader: %s", cld->loader_name());
4128     }
4129 
4130     print_metaspace(msp, anonymous);
4131     _out->cr();
4132   }
4133 
4134 private:
4135   void print_metaspace(ClassLoaderMetaspace* msp, bool anonymous);
4136   void print_summary() const;
4137 };
4138 
4139 void PrintCLDMetaspaceInfoClosure::print_metaspace(ClassLoaderMetaspace* msp, bool anonymous){
4140   assert(msp != NULL, "Sanity");
4141   SpaceManager* vsm = msp->vsm();
4142   const char* unit = scale_unit(_scale);
4143 
4144   size_t capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4145   size_t used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4146   size_t free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4147   size_t waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4148 
4149   _total_count ++;
4150   MetadataStats metadata_stats(capacity, used, free, waste);
4151   _total_metadata.add(metadata_stats);
4152 
4153   if (anonymous) {
4154     _total_anon_count ++;
4155     _total_anon_metadata.add(metadata_stats);
4156   }
4157 
4158   _out->print("  Metadata   ");
4159   metadata_stats.print_on(_out, _scale);
4160 
4161   if (Metaspace::using_class_space()) {
4162     vsm = msp->class_vsm();
4163 
4164     capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4165     used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4166     free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4167     waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4168 
4169     MetadataStats class_stats(capacity, used, free, waste);
4170     _total_class.add(class_stats);
4171 
4172     if (anonymous) {
4173       _total_anon_class.add(class_stats);
4174     }
4175 
4176     _out->print("  Class data ");
4177     class_stats.print_on(_out, _scale);
4178   }
4179 }
4180 
4181 void PrintCLDMetaspaceInfoClosure::print_summary() const {
4182   const char* unit = scale_unit(_scale);
4183   _out->cr();
4184   _out->print_cr("Summary:");
4185 
4186   MetadataStats total;
4187   total.add(_total_metadata);
4188   total.add(_total_class);
4189 
4190   _out->print("  Total class loaders=" SIZE_FORMAT_W(6) " ", _total_count);
4191   total.print_on(_out, _scale);
4192 
4193   _out->print("                    Metadata ");
4194   _total_metadata.print_on(_out, _scale);
4195 
4196   if (Metaspace::using_class_space()) {
4197     _out->print("                  Class data ");
4198     _total_class.print_on(_out, _scale);
4199   }
4200   _out->cr();
4201 
4202   MetadataStats total_anon;
4203   total_anon.add(_total_anon_metadata);
4204   total_anon.add(_total_anon_class);
4205 
4206   _out->print("For anonymous classes=" SIZE_FORMAT_W(6) " ", _total_anon_count);
4207   total_anon.print_on(_out, _scale);
4208 
4209   _out->print("                    Metadata ");
4210   _total_anon_metadata.print_on(_out, _scale);
4211 
4212   if (Metaspace::using_class_space()) {
4213     _out->print("                  Class data ");
4214     _total_anon_class.print_on(_out, _scale);
4215   }
4216 }
4217 
4218 void MetaspaceUtils::print_metadata_for_nmt(outputStream* out, size_t scale) {
4219   const char* unit = scale_unit(scale);
4220   out->print_cr("Metaspaces:");
4221   out->print_cr("  Metadata space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
4222     reserved_bytes(Metaspace::NonClassType) / scale, unit,
4223     committed_bytes(Metaspace::NonClassType) / scale, unit);
4224   if (Metaspace::using_class_space()) {
4225     out->print_cr("  Class    space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
4226     reserved_bytes(Metaspace::ClassType) / scale, unit,
4227     committed_bytes(Metaspace::ClassType) / scale, unit);
4228   }
4229 
4230   out->cr();
4231   ChunkManager::print_all_chunkmanagers(out, scale);
4232 
4233   out->cr();
4234   out->print_cr("Per-classloader metadata:");
4235   out->cr();
4236 
4237   PrintCLDMetaspaceInfoClosure cl(out, scale);
4238   ClassLoaderDataGraph::cld_do(&cl);
4239 }
4240 
4241 
4242 // Dump global metaspace things from the end of ClassLoaderDataGraph
4243 void MetaspaceUtils::dump(outputStream* out) {
4244   out->print_cr("All Metaspace:");
4245   out->print("data space: "); print_on(out, Metaspace::NonClassType);
4246   out->print("class space: "); print_on(out, Metaspace::ClassType);
4247   print_waste(out);
4248 }
4249 
4250 // Prints an ASCII representation of the given space.
4251 void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
4252   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4253   const bool for_class = (mdtype == Metaspace::ClassType);
4254   VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
4255   if (vsl != NULL) {
4256     if (for_class) {
4257       if (!Metaspace::using_class_space()) {
4258         out->print_cr("No Class Space.");
4259         return;
4260       }
4261       out->print_raw("---- Metaspace Map (Class Space) ----");
4262     } else {
4263       out->print_raw("---- Metaspace Map (Non-Class Space) ----");
4264     }
4265     // Print legend:
4266     out->cr();
4267     out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
4268     out->cr();
4270     vsl->print_map(out);
4271     out->cr();
4272   }
4273 }
4274 
4275 void MetaspaceUtils::verify_free_chunks() {
4276   Metaspace::chunk_manager_metadata()->verify();
4277   if (Metaspace::using_class_space()) {
4278     Metaspace::chunk_manager_class()->verify();
4279   }
4280 }
4281 
4282 void MetaspaceUtils::verify_capacity() {
4283 #ifdef ASSERT
4284   size_t running_sum_capacity_bytes = capacity_bytes();
4285   // For purposes of the running sum of capacity, verify against capacity
4286   size_t capacity_in_use_bytes = capacity_bytes_slow();
4287   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
4288          "capacity_words() * BytesPerWord " SIZE_FORMAT
4289          " capacity_bytes_slow() " SIZE_FORMAT,
4290          running_sum_capacity_bytes, capacity_in_use_bytes);
4291   for (Metaspace::MetadataType i = Metaspace::ClassType;
4292        i < Metaspace::MetadataTypeCount;
4293        i = (Metaspace::MetadataType)(i + 1)) {
4294     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
4295     assert(capacity_bytes(i) == capacity_in_use_bytes,
4296            "capacity_bytes(%u) " SIZE_FORMAT
4297            " capacity_bytes_slow(%u) " SIZE_FORMAT,
4298            i, capacity_bytes(i), i, capacity_in_use_bytes);
4299   }
4300 #endif
4301 }
4302 
4303 void MetaspaceUtils::verify_used() {
4304 #ifdef ASSERT
4305   size_t running_sum_used_bytes = used_bytes();
4306   // For purposes of the running sum of used, verify against used
4307   size_t used_in_use_bytes = used_bytes_slow();
4308   assert(running_sum_used_bytes == used_in_use_bytes,
4309          "used_bytes() " SIZE_FORMAT
4310          " used_bytes_slow() " SIZE_FORMAT,
4311          running_sum_used_bytes, used_in_use_bytes);
4312   for (Metaspace::MetadataType i = Metaspace::ClassType;
4313        i < Metaspace::MetadataTypeCount;
4314        i = (Metaspace::MetadataType)(i + 1)) {
4315     size_t used_in_use_bytes = used_bytes_slow(i);
4316     assert(used_bytes(i) == used_in_use_bytes,
4317            "used_bytes(%u) " SIZE_FORMAT
4318            " used_bytes_slow(%u) " SIZE_FORMAT,
4319            i, used_bytes(i), i, used_in_use_bytes);
4320   }
4321 #endif
4322 }
4323 
4324 void MetaspaceUtils::verify_metrics() {
4325   verify_capacity();
4326   verify_used();
4327 }
4328 
4329 
4330 // Metaspace methods
4331 
4332 size_t Metaspace::_first_chunk_word_size = 0;
4333 size_t Metaspace::_first_class_chunk_word_size = 0;
4334 
4335 size_t Metaspace::_commit_alignment = 0;
4336 size_t Metaspace::_reserve_alignment = 0;
4337 
4338 VirtualSpaceList* Metaspace::_space_list = NULL;
4339 VirtualSpaceList* Metaspace::_class_space_list = NULL;
4340 
4341 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
4342 ChunkManager* Metaspace::_chunk_manager_class = NULL;
4343 
4344 #define VIRTUALSPACEMULTIPLIER 2
4345 
4346 #ifdef _LP64
4347 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
4348 
4349 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
4350   assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
4351   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
4352   // narrow_klass_base is the lower of the metaspace base and the cds base
4353   // (if cds is enabled).  The narrow_klass_shift depends on the distance
4354   // between the lower base and higher address.
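  // Illustrative example (hypothetical addresses, CDS disabled): a 1G
  // compressed class space reserved at 28G gives higher_address = 29G, which
  // is below klass_encoding_max (UnscaledClassSpaceMax << LogKlassAlignmentInBytes,
  // i.e. 32G with the usual 8-byte Klass alignment), so lower_base becomes 0;
  // but since 29G - 0 exceeds UnscaledClassSpaceMax (4G), the shift is set to
  // LogKlassAlignmentInBytes. Had the space ended below 4G, both the base and
  // the shift would be zero.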
4355   address lower_base;
4356   address higher_address;
4357 #if INCLUDE_CDS
4358   if (UseSharedSpaces) {
4359     higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4360                           (address)(metaspace_base + compressed_class_space_size()));
4361     lower_base = MIN2(metaspace_base, cds_base);
4362   } else
4363 #endif
4364   {
4365     higher_address = metaspace_base + compressed_class_space_size();
4366     lower_base = metaspace_base;
4367 
4368     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
4369     // If compressed class space fits in lower 32G, we don't need a base.
4370     if (higher_address <= (address)klass_encoding_max) {
4371       lower_base = 0; // Effectively lower base is zero.
4372     }
4373   }
4374 
4375   Universe::set_narrow_klass_base(lower_base);
4376 
4377   // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
4378   // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
4379   // how the dump-time narrow_klass_shift is set. Although CDS can also
4380   // work in zero-shift mode, it uses LogKlassAlignmentInBytes for the
4381   // klass shift to stay consistent with AOT, so that archived Java heap
4382   // objects can be used at the same time as AOT code.
4383   if (!UseSharedSpaces
4384       && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
4385     Universe::set_narrow_klass_shift(0);
4386   } else {
4387     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
4388   }
4389   AOTLoader::set_narrow_klass_shift();
4390 }
4391 
4392 #if INCLUDE_CDS
4393 // Return TRUE if the specified metaspace_base and cds_base are close enough
4394 // to work with compressed klass pointers.
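// Illustrative example (hypothetical layout): if cds_base sits at 31G, the CDS
// core spaces span ~100M, and a 1G class space is requested just above them,
// then lower_base = 31G and higher_address is a little over 32G; the spanned
// ~1.1G fits within UnscaledClassSpaceMax (4G), so the function returns true.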
4395 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
4396   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
4397   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4398   address lower_base = MIN2((address)metaspace_base, cds_base);
4399   address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4400                                 (address)(metaspace_base + compressed_class_space_size()));
4401   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
4402 }
4403 #endif
4404 
4405 // Try to allocate the metaspace at the requested addr.
4406 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
4407   assert(!DumpSharedSpaces, "compressed class space is allocated by the MetaspaceShared class.");
4408   assert(using_class_space(), "called improperly");
4409   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4410   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
4411          "Metaspace size is too big");
4412   assert_is_aligned(requested_addr, _reserve_alignment);
4413   assert_is_aligned(cds_base, _reserve_alignment);
4414   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
4415 
4416   // Don't use large pages for the class space.
4417   bool large_pages = false;
4418 
4419 #if !(defined(AARCH64) || defined(AIX))
4420   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
4421                                              _reserve_alignment,
4422                                              large_pages,
4423                                              requested_addr);
4424 #else // AARCH64 || AIX
4425   ReservedSpace metaspace_rs;
4426 
4427   // Our compressed klass pointers may fit nicely into the lower 32
4428   // bits.
4429   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
4430     metaspace_rs = ReservedSpace(compressed_class_space_size(),
4431                                  _reserve_alignment,
4432                                  large_pages,
4433                                  requested_addr);
4434   }
4435 
4436   if (! metaspace_rs.is_reserved()) {
4437     // Aarch64: Try to align metaspace so that we can decode a compressed
4438     // klass with a single MOVK instruction.  We can do this iff the
4439     // compressed class base is a multiple of 4G.
4440     // Aix: Search for a place where we can find memory. If we need to load
4441     // the base, 4G alignment is helpful, too.
4442     size_t increment = AARCH64_ONLY(4*)G;
4443     for (char *a = align_up(requested_addr, increment);
4444          a < (char*)(1024*G);
4445          a += increment) {
4446       if (a == (char *)(32*G)) {
4447         // Go faster from here on. Zero-based is no longer possible.
4448         increment = 4*G;
4449       }
4450 
4451 #if INCLUDE_CDS
4452       if (UseSharedSpaces
4453           && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
4454         // We failed to find an aligned base that will reach.  Fall
4455         // back to using our requested addr.
4456         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4457                                      _reserve_alignment,
4458                                      large_pages,
4459                                      requested_addr);
4460         break;
4461       }
4462 #endif
4463 
4464       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4465                                    _reserve_alignment,
4466                                    large_pages,
4467                                    a);
4468       if (metaspace_rs.is_reserved())
4469         break;
4470     }
4471   }
4472 
4473 #endif // AARCH64 || AIX
4474 
4475   if (!metaspace_rs.is_reserved()) {
4476 #if INCLUDE_CDS
4477     if (UseSharedSpaces) {
4478       size_t increment = align_up(1*G, _reserve_alignment);
4479 
4480       // Keep trying to allocate the metaspace, increasing the requested_addr
4481       // by 1GB each time, until we reach an address that will no longer allow
4482       // use of CDS with compressed klass pointers.
4483       char *addr = requested_addr;
4484       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
4485              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
4486         addr = addr + increment;
4487         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4488                                      _reserve_alignment, large_pages, addr);
4489       }
4490     }
4491 #endif
4492     // If no successful allocation then try to allocate the space anywhere.  If
4493     // that fails then OOM doom.  At this point we cannot try allocating the
4494     // metaspace as if UseCompressedClassPointers is off because too much
4495     // initialization has happened that depends on UseCompressedClassPointers.
4496     // So, UseCompressedClassPointers cannot be turned off at this point.
4497     if (!metaspace_rs.is_reserved()) {
4498       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4499                                    _reserve_alignment, large_pages);
4500       if (!metaspace_rs.is_reserved()) {
4501         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
4502                                               compressed_class_space_size()));
4503       }
4504     }
4505   }
4506 
4507   // If we got here then the metaspace got allocated.
4508   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
4509 
4510 #if INCLUDE_CDS
4511   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
4512   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
4513     FileMapInfo::stop_sharing_and_unmap(
4514         "Could not allocate metaspace at a compatible address");
4515   }
4516 #endif
4517   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
4518                                   UseSharedSpaces ? (address)cds_base : 0);
4519 
4520   initialize_class_space(metaspace_rs);
4521 
4522   LogTarget(Trace, gc, metaspace) lt;
4523   if (lt.is_enabled()) {
4524     ResourceMark rm;
4525     LogStream ls(lt);
4526     print_compressed_class_space(&ls, requested_addr);
4527   }
4528 }
4529 
4530 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
4531   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
4532                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
4533   if (_class_space_list != NULL) {
4534     address base = (address)_class_space_list->current_virtual_space()->bottom();
4535     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
4536                  compressed_class_space_size(), p2i(base));
4537     if (requested_addr != 0) {
4538       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
4539     }
4540     st->cr();
4541   }
4542 }
4543 
4544 // For UseCompressedClassPointers the class space is reserved above the top of
4545 // the Java heap.  The argument passed in is at the base of the compressed space.
4546 void Metaspace::initialize_class_space(ReservedSpace rs) {
4547   // The reserved space size may be bigger because of alignment, especially with UseLargePages.
4548   assert(rs.size() >= CompressedClassSpaceSize,
4549          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
4550   assert(using_class_space(), "Must be using class space");
4551   _class_space_list = new VirtualSpaceList(rs);
4552   _chunk_manager_class = new ChunkManager(true/*is_class*/);
4553 
4554   if (!_class_space_list->initialization_succeeded()) {
4555     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
4556   }
4557 }
4558 
4559 #endif
4560 
4561 void Metaspace::ergo_initialize() {
4562   if (DumpSharedSpaces) {
4563     // Using large pages when dumping the shared archive is currently not implemented.
4564     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
4565   }
4566 
4567   size_t page_size = os::vm_page_size();
4568   if (UseLargePages && UseLargePagesInMetaspace) {
4569     page_size = os::large_page_size();
4570   }
4571 
4572   _commit_alignment  = page_size;
4573   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
4574 
4575   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since doing so
4576   // would overwrite the record of whether MaxMetaspaceSize was set on the
4577   // command line. This information is needed later to conform to the
4578   // specification of the java.lang.management.MemoryUsage API.
4579   //
4580   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
4581   // globals.hpp to the aligned value, but this is not possible, since the
4582   // alignment depends on other flags being parsed.
4583   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
4584 
4585   if (MetaspaceSize > MaxMetaspaceSize) {
4586     MetaspaceSize = MaxMetaspaceSize;
4587   }
4588 
4589   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
4590 
4591   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
4592 
4593   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
4594   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
4595 
4596   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
4597 
4598   // Initial virtual space size will be calculated at global_initialize()
4599   size_t min_metaspace_sz =
4600       VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
4601   if (UseCompressedClassPointers) {
4602     if ((min_metaspace_sz + CompressedClassSpaceSize) >  MaxMetaspaceSize) {
4603       if (min_metaspace_sz >= MaxMetaspaceSize) {
4604         vm_exit_during_initialization("MaxMetaspaceSize is too small.");
4605       } else {
4606         FLAG_SET_ERGO(size_t, CompressedClassSpaceSize,
4607                       MaxMetaspaceSize - min_metaspace_sz);
4608       }
4609     }
4610   } else if (min_metaspace_sz >= MaxMetaspaceSize) {
4611     FLAG_SET_ERGO(size_t, InitialBootClassLoaderMetaspaceSize,
4612                   min_metaspace_sz);
4613   }
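  // Illustrative example of the adjustment above (hypothetical flag values):
  // with VIRTUALSPACEMULTIPLIER = 2 and InitialBootClassLoaderMetaspaceSize = 4M,
  // min_metaspace_sz = 8M; if MaxMetaspaceSize = 256M while
  // CompressedClassSpaceSize = 1G, the sum exceeds the maximum and
  // CompressedClassSpaceSize is ergonomically reduced to 256M - 8M = 248M.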
4614 
4615   set_compressed_class_space_size(CompressedClassSpaceSize);
4616 }
4617 
4618 void Metaspace::global_initialize() {
4619   MetaspaceGC::initialize();
4620 
4621 #if INCLUDE_CDS
4622   if (DumpSharedSpaces) {
4623     MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
4624   } else if (UseSharedSpaces) {
4625     // If any of the archived spaces fails to map, UseSharedSpaces
4626     // is reset to false. Fall through to the
4627     // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
4628     // metaspace.
4629     MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
4630   }
4631 
4632   if (!DumpSharedSpaces && !UseSharedSpaces)
4633 #endif // INCLUDE_CDS
4634   {
4635 #ifdef _LP64
4636     if (using_class_space()) {
4637       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
4638       allocate_metaspace_compressed_klass_ptrs(base, 0);
4639     }
4640 #endif // _LP64
4641   }
4642 
4643   // Initialize these before initializing the VirtualSpaceList
4644   _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
4645   _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
4646   // Make the first class chunk bigger than a medium chunk so it's not put
4647   // on the medium chunk list. The next chunk will be small and progress
4648   // from there. This size was determined by running -version.
4649   _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
4650                                      (CompressedClassSpaceSize/BytesPerWord)*2);
4651   _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
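  // Illustrative arithmetic (hypothetical defaults): MediumChunk is 8K words,
  // so MediumChunk * 6 = 48K words (384 KB with 8-byte words); with a 1G
  // CompressedClassSpaceSize, (1G / BytesPerWord) * 2 is far larger, so the
  // MIN2 above picks 48K words for the first class chunk.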
4652   // Arbitrarily set the initial virtual space to a multiple
4653   // of the boot class loader size.
4654   size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
4655   word_size = align_up(word_size, Metaspace::reserve_alignment_words());
4656 
4657   // Initialize the list of virtual spaces.
4658   _space_list = new VirtualSpaceList(word_size);
4659   _chunk_manager_metadata = new ChunkManager(false/*is_class*/);
4660 
4661   if (!_space_list->initialization_succeeded()) {
4662     vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
4663   }
4664 
4665   _tracer = new MetaspaceTracer();
4666 }
4667 
4668 void Metaspace::post_initialize() {
4669   MetaspaceGC::post_initialize();
4670 }
4671 
4672 void Metaspace::verify_global_initialization() {
4673   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
4674   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
4675 
4676   if (using_class_space()) {
4677     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
4678     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
4679   }
4680 }
4681 
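// Rounds a word size up so that the corresponding byte size honors the
// ReservedSpace allocation alignment. Illustrative example (hypothetical
// platform with 8-byte words and 64K allocation granularity):
// align_word_size_up(1000) returns 8192 words, i.e. 64K bytes.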
4682 size_t Metaspace::align_word_size_up(size_t word_size) {
4683   size_t byte_size = word_size * wordSize;
4684   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
4685 }
4686 
4687 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
4688                               MetaspaceObj::Type type, TRAPS) {
4689   assert(!_frozen, "sanity");
4690   if (HAS_PENDING_EXCEPTION) {
4691     assert(false, "Should not allocate with exception pending");
4692     return NULL;  // caller does a CHECK_NULL too
4693   }
4694 
4695   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
4696         "ClassLoaderData::the_null_class_loader_data() should have been used.");
4697 
4698   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
4699 
4700   // Try to allocate metadata.
4701   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
4702 
4703   if (result == NULL) {
4704     if (DumpSharedSpaces && THREAD->is_VM_thread()) {
4705       tty->print_cr("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
4706           MetaspaceObj::type_name(type), word_size * BytesPerWord);
4707       vm_exit(1);
4708     }
4709 
4710     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
4711 
4712     // Allocation failed.
4713     if (is_init_completed()) {
4714       // Only start a GC if the bootstrapping has completed.
4715 
4716       // Try to clean out some memory and retry.
4717       result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
4718     }
4719   }
4720 
4721   if (result == NULL) {
4722     report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
4723   }
4724 
4725   // Zero initialize.
4726   Copy::fill_to_words((HeapWord*)result, word_size, 0);
4727 
4728   return result;
4729 }
4730 
4731 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
4732   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
4733 
4734   // If result is still null, we are out of memory.
4735   Log(gc, metaspace, freelist) log;
4736   if (log.is_info()) {
4737     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
4738              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
4739     ResourceMark rm;
4740     if (log.is_debug()) {
4741       if (loader_data->metaspace_or_null() != NULL) {
4742         LogStream ls(log.debug());
4743         loader_data->print_value_on(&ls);
4744       }
4745     }
4746     LogStream ls(log.info());
4747     MetaspaceUtils::dump(&ls);
4748     MetaspaceUtils::print_metaspace_map(&ls, mdtype);
4749     ChunkManager::print_all_chunkmanagers(&ls);
4750   }
4751 
4752   bool out_of_compressed_class_space = false;
4753   if (is_class_space_allocation(mdtype)) {
4754     ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
4755     out_of_compressed_class_space =
4756       MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
4757       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
4758       CompressedClassSpaceSize;
4759   }
4760 
4761   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
4762   const char* space_string = out_of_compressed_class_space ?
4763     "Compressed class space" : "Metaspace";
4764 
4765   report_java_out_of_memory(space_string);
4766 
4767   if (JvmtiExport::should_post_resource_exhausted()) {
4768     JvmtiExport::post_resource_exhausted(
4769         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
4770         space_string);
4771   }
4772 
4773   if (!is_init_completed()) {
4774     vm_exit_during_initialization("OutOfMemoryError", space_string);
4775   }
4776 
4777   if (out_of_compressed_class_space) {
4778     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
4779   } else {
4780     THROW_OOP(Universe::out_of_memory_error_metaspace());
4781   }
4782 }
4783 
4784 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
4785   switch (mdtype) {
4786     case Metaspace::ClassType: return "Class";
4787     case Metaspace::NonClassType: return "Metadata";
4788     default:
4789       assert(false, "Got bad mdtype: %d", (int) mdtype);
4790       return NULL;
4791   }
4792 }
4793 
4794 void Metaspace::purge(MetadataType mdtype) {
4795   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
4796 }
4797 
4798 void Metaspace::purge() {
4799   MutexLockerEx cl(SpaceManager::expand_lock(),
4800                    Mutex::_no_safepoint_check_flag);
4801   purge(NonClassType);
4802   if (using_class_space()) {
4803     purge(ClassType);
4804   }
4805 }
4806 
4807 bool Metaspace::contains(const void* ptr) {
4808   if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
4809     return true;
4810   }
4811   return contains_non_shared(ptr);
4812 }
4813 
4814 bool Metaspace::contains_non_shared(const void* ptr) {
4815   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
4816      return true;
4817   }
4818 
4819   return get_space_list(NonClassType)->contains(ptr);
4820 }
4821 
4822 // ClassLoaderMetaspace
4823 
4824 ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type) {
4825   initialize(lock, type);
4826 }
4827 
4828 ClassLoaderMetaspace::~ClassLoaderMetaspace() {
4829   delete _vsm;
4830   if (Metaspace::using_class_space()) {
4831     delete _class_vsm;
4832   }
4833 }

4834 void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4835   Metachunk* chunk = get_initialization_chunk(type, mdtype);
4836   if (chunk != NULL) {
4837     // Add to this manager's list of chunks in use and current_chunk().
4838     get_space_manager(mdtype)->add_chunk(chunk, true);
4839   }
4840 }
4841 
4842 Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4843   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
4844 
4845   // Get a chunk from the chunk freelist
4846   Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
4847 
4848   if (chunk == NULL) {
4849     chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
4850                                                   get_space_manager(mdtype)->medium_chunk_bunch());
4851   }
4852 
4853   return chunk;
4854 }
4855 
4856 void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
4857   Metaspace::verify_global_initialization();
4858 
4859   // Allocate SpaceManager for metadata objects.
4860   _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);
4861 
4862   if (Metaspace::using_class_space()) {
4863     // Allocate SpaceManager for classes.
4864     _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
4865   }
4866 
4867   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4868 
4869   // Allocate chunk for metadata objects
4870   initialize_first_chunk(type, Metaspace::NonClassType);
4871 
4872   // Allocate chunk for class metadata objects
4873   if (Metaspace::using_class_space()) {
4874     initialize_first_chunk(type, Metaspace::ClassType);
4875   }
4876 }
4877 
4878 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4879   Metaspace::assert_not_frozen();
4880   // Don't use class_vsm() unless UseCompressedClassPointers is true.
4881   if (Metaspace::is_class_space_allocation(mdtype)) {
4882     return  class_vsm()->allocate(word_size);
4883   } else {
4884     return  vsm()->allocate(word_size);
4885   }
4886 }
4887 
4888 MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4889   Metaspace::assert_not_frozen();
4890   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
4891   assert(delta_bytes > 0, "Must be");
4892 
4893   size_t before = 0;
4894   size_t after = 0;
4895   MetaWord* res;
4896   bool incremented;
4897 
4898   // Each thread increments the HWM at most once. Even if the thread fails to increment
4899   // the HWM, an allocation is still attempted. This is because another thread must then
4900   // have incremented the HWM and therefore the allocation might still succeed.
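  // For example (illustrative): thread A wins the race, raises the HWM and
  // allocates; thread B fails to raise the HWM but its allocation is still
  // attempted, and may now succeed within the capacity A just added.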
4901   do {
4902     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
4903     res = allocate(word_size, mdtype);
4904   } while (!incremented && res == NULL);
4905 
4906   if (incremented) {
4907     Metaspace::tracer()->report_gc_threshold(before, after,
4908                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
4909     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
4910   }
4911 
4912   return res;
4913 }
4914 
4915 size_t ClassLoaderMetaspace::used_words_slow(Metaspace::MetadataType mdtype) const {
4916   if (mdtype == Metaspace::ClassType) {
4917     return Metaspace::using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
4918   } else {
4919     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
4920   }
4921 }
4922 
4923 size_t ClassLoaderMetaspace::free_words_slow(Metaspace::MetadataType mdtype) const {
4924   Metaspace::assert_not_frozen();
4925   if (mdtype == Metaspace::ClassType) {
4926     return Metaspace::using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
4927   } else {
4928     return vsm()->sum_free_in_chunks_in_use();
4929   }
4930 }
4931 
4932 // Space capacity in the Metaspace. It includes
4933 // space in the list of chunks from which allocations
4934 // have been made. It does not include space in the global freelist,
4935 // nor space available in the dictionary, since that space
4936 // is already counted in some chunk.
4937 size_t ClassLoaderMetaspace::capacity_words_slow(Metaspace::MetadataType mdtype) const {
4938   if (mdtype == Metaspace::ClassType) {
4939     return Metaspace::using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
4940   } else {
4941     return vsm()->sum_capacity_in_chunks_in_use();
4942   }
4943 }
4944 
4945 size_t ClassLoaderMetaspace::used_bytes_slow(Metaspace::MetadataType mdtype) const {
4946   return used_words_slow(mdtype) * BytesPerWord;
4947 }
4948 
4949 size_t ClassLoaderMetaspace::capacity_bytes_slow(Metaspace::MetadataType mdtype) const {
4950   return capacity_words_slow(mdtype) * BytesPerWord;
4951 }
4952 
4953 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
4954   return vsm()->allocated_blocks_bytes() +
4955       (Metaspace::using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
4956 }
4957 
4958 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
4959   return vsm()->allocated_chunks_bytes() +
4960       (Metaspace::using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
4961 }
4962 
4963 void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
4964   Metaspace::assert_not_frozen();
4965   assert(!SafepointSynchronize::is_at_safepoint()
4966          || Thread::current()->is_VM_thread(), "should be the VM thread");
4967 
4968   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
4969 
4970   if (is_class && Metaspace::using_class_space()) {
4971     class_vsm()->deallocate(ptr, word_size);
4972   } else {
4973     vsm()->deallocate(ptr, word_size);
4974   }
4975 }
4976 
4977 size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
4978   assert(Metaspace::using_class_space(), "Has to use class space");
4979   return class_vsm()->calc_chunk_size(word_size);
4980 }
4981 
4982 void ClassLoaderMetaspace::print_on(outputStream* out) const {
4983   // Print both class virtual space counts and metaspace.
4984   if (Verbose) {
4985     vsm()->print_on(out);
4986     if (Metaspace::using_class_space()) {
4987       class_vsm()->print_on(out);
4988     }
4989   }
4990 }
4991 
4992 void ClassLoaderMetaspace::verify() {
4993   vsm()->verify();
4994   if (Metaspace::using_class_space()) {
4995     class_vsm()->verify();
4996   }
4997 }
4998 
4999 void ClassLoaderMetaspace::dump(outputStream* const out) const {
5000   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
5001   vsm()->dump(out);
5002   if (Metaspace::using_class_space()) {
5003     out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
5004     class_vsm()->dump(out);
5005   }
5006 }
5007 
5008 
5009 
5010 #ifdef ASSERT
5011 static void do_verify_chunk(Metachunk* chunk) {
5012   guarantee(chunk != NULL, "Sanity");
5013   // Verify chunk itself; then verify that it is consistent with the
5014   // occupancy map of its containing node.
5015   chunk->verify();
5016   VirtualSpaceNode* const vsn = chunk->container();
5017   OccupancyMap* const ocmap = vsn->occupancy_map();
5018   ocmap->verify_for_chunk(chunk);
5019 }
5020 #endif
5021 
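// Marks the chunk as in use or free and mirrors that state into the occupancy
// map of the chunk's containing VirtualSpaceNode, keeping the chunk header and
// the map consistent.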
5022 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
5023   chunk->set_is_tagged_free(!inuse);
5024   OccupancyMap* const ocmap = chunk->container()->occupancy_map();
5025   ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
5026 }
5027 
5028 /////////////// Unit tests ///////////////
5029 
5030 #ifndef PRODUCT
5031 
5032 class TestMetaspaceUtilsTest : AllStatic {
5033  public:
5034   static void test_reserved() {
5035     size_t reserved = MetaspaceUtils::reserved_bytes();
5036 
5037     assert(reserved > 0, "assert");
5038 
5039     size_t committed  = MetaspaceUtils::committed_bytes();
5040     assert(committed <= reserved, "assert");
5041 
5042     size_t reserved_metadata = MetaspaceUtils::reserved_bytes(Metaspace::NonClassType);
5043     assert(reserved_metadata > 0, "assert");
5044     assert(reserved_metadata <= reserved, "assert");
5045 
5046     if (UseCompressedClassPointers) {
5047       size_t reserved_class    = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
5048       assert(reserved_class > 0, "assert");
5049       assert(reserved_class < reserved, "assert");
5050     }
5051   }
5052 
5053   static void test_committed() {
5054     size_t committed = MetaspaceUtils::committed_bytes();
5055 
5056     assert(committed > 0, "assert");
5057 
5058     size_t reserved  = MetaspaceUtils::reserved_bytes();
5059     assert(committed <= reserved, "assert");
5060 
5061     size_t committed_metadata = MetaspaceUtils::committed_bytes(Metaspace::NonClassType);
5062     assert(committed_metadata > 0, "assert");
5063     assert(committed_metadata <= committed, "assert");
5064 
5065     if (UseCompressedClassPointers) {
5066       size_t committed_class    = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
5067       assert(committed_class > 0, "assert");
5068       assert(committed_class < committed, "assert");
5069     }
5070   }
5071 
5072   static void test_virtual_space_list_large_chunk() {
5073     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
5074     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
5075     // Use a size larger than VirtualSpaceSize (256K) and add one page so that it is
5076     // _not_ vm_allocation_granularity aligned on Windows.
5077     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
5078     large_size += (os::vm_page_size()/BytesPerWord);
5079     vs_list->get_new_chunk(large_size, 0);
5080   }
5081 
5082   static void test() {
5083     test_reserved();
5084     test_committed();
5085     test_virtual_space_list_large_chunk();
5086   }
5087 };
5088 
5089 void TestMetaspaceUtils_test() {
5090   TestMetaspaceUtilsTest::test();
5091 }
5092 
5093 class TestVirtualSpaceNodeTest {
5094   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
5095                                           size_t& num_small_chunks,
5096                                           size_t& num_specialized_chunks) {
5097     num_medium_chunks = words_left / MediumChunk;
5098     words_left = words_left % MediumChunk;
5099 
5100     num_small_chunks = words_left / SmallChunk;
5101     words_left = words_left % SmallChunk;
5102     // how many specialized chunks can we get?
5103     num_specialized_chunks = words_left / SpecializedChunk;
5104     assert(words_left % SpecializedChunk == 0, "should be nothing left");
5105   }
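  // Illustrative example using the non-class chunk sizes defined above
  // (MediumChunk = 8K words, SmallChunk = 512, SpecializedChunk = 128):
  // chunk_up(9 * K, ...) yields 1 medium chunk, 2 small chunks and
  // 0 specialized chunks, since 9216 = 8192 + 2 * 512.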
5106 
5107  public:
5108   static void test() {
5109     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
5110     const size_t vsn_test_size_words = MediumChunk  * 4;
5111     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
5112 
5113     // The chunk sizes must be multiples of each other, or this will fail
5114     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
5115     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
5116 
5117     { // No committed memory in VSN
5118       ChunkManager cm(false);
5119       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5120       vsn.initialize();
5121       vsn.retire(&cm);
5122       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
5123     }
5124 
5125     { // All of VSN is committed, half is used by chunks
5126       ChunkManager cm(false);
5127       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5128       vsn.initialize();
5129       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
5130       vsn.get_chunk_vs(MediumChunk);
5131       vsn.get_chunk_vs(MediumChunk);
5132       vsn.retire(&cm);
5133       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
5134       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
5135     }
5136 
5137     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
5138     // This doesn't work for systems with vm_page_size >= 16K.
5139     if (page_chunks < MediumChunk) {
5140       // 4 pages of VSN is committed, some is used by chunks
5141       ChunkManager cm(false);
5142       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5143 
5144       vsn.initialize();
5145       vsn.expand_by(page_chunks, page_chunks);
5146       vsn.get_chunk_vs(SmallChunk);
5147       vsn.get_chunk_vs(SpecializedChunk);
5148       vsn.retire(&cm);
5149 
5150       // committed - used = words left to retire
5151       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
5152 
5153       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5154       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5155 
5156       assert(num_medium_chunks == 0, "should not get any medium chunks");
5157       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5158       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5159     }
5160 
5161     { // Half of VSN is committed, a humongous chunk is used
5162       ChunkManager cm(false);
5163       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5164       vsn.initialize();
5165       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
5166       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
5167       vsn.retire(&cm);
5168 
5169       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
5170       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5171       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5172 
5173       assert(num_medium_chunks == 0, "should not get any medium chunks");
5174       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5175       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5176     }
5177 
5178   }
5179 
5180 #define assert_is_available_positive(word_size) \
5181   assert(vsn.is_available(word_size), \
5182          #word_size ": " PTR_FORMAT " bytes were not available in " \
5183          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5184          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5185 
5186 #define assert_is_available_negative(word_size) \
5187   assert(!vsn.is_available(word_size), \
5188          #word_size ": " PTR_FORMAT " bytes should not be available in " \
5189          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5190          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5191 
5192   static void test_is_available_positive() {
5193     // Reserve some memory.
5194     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5195     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5196 
5197     // Commit some memory.
5198     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5199     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5200     assert(expanded, "Failed to commit");
5201 
5202     // Check that is_available accepts the committed size.
5203     assert_is_available_positive(commit_word_size);
5204 
5205     // Check that is_available accepts half the committed size.
5206     size_t expand_word_size = commit_word_size / 2;
5207     assert_is_available_positive(expand_word_size);
5208   }
5209 
5210   static void test_is_available_negative() {
5211     // Reserve some memory.
5212     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5213     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5214 
5215     // Commit some memory.
5216     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5217     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5218     assert(expanded, "Failed to commit");
5219 
5220     // Check that is_available doesn't accept a too large size.
5221     size_t two_times_commit_word_size = commit_word_size * 2;
5222     assert_is_available_negative(two_times_commit_word_size);
5223   }
5224 
5225   static void test_is_available_overflow() {
5226     // Reserve some memory.
5227     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5228     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5229 
5230     // Commit some memory.
5231     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5232     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5233     assert(expanded, "Failed to commit");
5234 
5235     // Calculate a size that will overflow the virtual space size.
5236     void* virtual_space_max = (void*)(uintptr_t)-1;
5237     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
5238     size_t overflow_size = bottom_to_max + BytesPerWord;
5239     size_t overflow_word_size = overflow_size / BytesPerWord;
5240 
5241     // Check that is_available can handle the overflow.
5242     assert_is_available_negative(overflow_word_size);
5243   }
5244 
5245   static void test_is_available() {
5246     TestVirtualSpaceNodeTest::test_is_available_positive();
5247     TestVirtualSpaceNodeTest::test_is_available_negative();
5248     TestVirtualSpaceNodeTest::test_is_available_overflow();
5249   }
5250 };
5251 
5252 // The following test is placed here instead of a gtest / unittest file
5253 // because the ChunkManager class is only available in this file.
5254 void ChunkManager_test_list_index() {
5255   ChunkManager manager(true);
5256 
5257   // Test a previous bug where a query for a humongous class metachunk
5258   // incorrectly matched the non-class medium metachunk size.
5259   {
5260     assert(MediumChunk > ClassMediumChunk, "Precondition for test");
5261 
5262     ChunkIndex index = manager.list_index(MediumChunk);
5263 
5264     assert(index == HumongousIndex,
5265            "Requested size is larger than ClassMediumChunk,"
5266            " so should return HumongousIndex. Got index: %d", (int)index);
5267   }
5268 
5269   // Check the specified sizes as well.
5270   {
5271     ChunkIndex index = manager.list_index(ClassSpecializedChunk);
5272     assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
5273   }
5274   {
5275     ChunkIndex index = manager.list_index(ClassSmallChunk);
5276     assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
5277   }
5278   {
5279     ChunkIndex index = manager.list_index(ClassMediumChunk);
5280     assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
5281   }
5282   {
5283     ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
5284     assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index);
5285   }
5286 }
5287 
5288 #endif // !PRODUCT
5289 
5290 #ifdef ASSERT
5291 
5292 // The following test is placed here instead of a gtest / unittest file
5293 // because the ChunkManager class is only available in this file.
5294 class SpaceManagerTest : AllStatic {
5295   friend void SpaceManager_test_adjust_initial_chunk_size();
5296 
5297   static void test_adjust_initial_chunk_size(bool is_class) {
5298     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
5299     const size_t normal   = SpaceManager::small_chunk_size(is_class);
5300     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
5301 
5302 #define test_adjust_initial_chunk_size(value, expected, is_class_value)          \
5303     do {                                                                         \
5304       size_t v = value;                                                          \
5305       size_t e = expected;                                                       \
5306       assert(SpaceManager::adjust_initial_chunk_size(v, (is_class_value)) == e,  \
5307              "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, v);               \
5308     } while (0)
5309 
5310     // Smallest (specialized)
5311     test_adjust_initial_chunk_size(1,            smallest, is_class);
5312     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
5313     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
5314 
5315     // Small
5316     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
5317     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
5318     test_adjust_initial_chunk_size(normal,       normal, is_class);
5319 
5320     // Medium
5321     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
5322     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
5323     test_adjust_initial_chunk_size(medium,     medium, is_class);
5324 
5325     // Humongous
5326     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
5327 
5328 #undef test_adjust_initial_chunk_size
5329   }
5330 
5331   static void test_adjust_initial_chunk_size() {
5332     test_adjust_initial_chunk_size(false);
5333     test_adjust_initial_chunk_size(true);
5334   }
5335 };
5336 
5337 void SpaceManager_test_adjust_initial_chunk_size() {
5338   SpaceManagerTest::test_adjust_initial_chunk_size();
5339 }
5340 
5341 #endif // ASSERT
5342 
5343 struct chunkmanager_statistics_t {
5344   int num_specialized_chunks;
5345   int num_small_chunks;
5346   int num_medium_chunks;
5347   int num_humongous_chunks;
5348 };
5349 
5350 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
5351   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
5352   ChunkManager::ChunkManagerStatistics stat;
5353   chunk_manager->get_statistics(&stat);
5354   out->num_specialized_chunks = (int)stat.num_by_type[SpecializedIndex];
5355   out->num_small_chunks = (int)stat.num_by_type[SmallIndex];
5356   out->num_medium_chunks = (int)stat.num_by_type[MediumIndex];
5357   out->num_humongous_chunks = (int)stat.num_humongous_chunks;
5358 }
5359 
5360 struct chunk_geometry_t {
5361   size_t specialized_chunk_word_size;
5362   size_t small_chunk_word_size;
5363   size_t medium_chunk_word_size;
5364 };
5365 
5366 extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
5367   if (mdType == Metaspace::NonClassType) {
5368     out->specialized_chunk_word_size = SpecializedChunk;
5369     out->small_chunk_word_size = SmallChunk;
5370     out->medium_chunk_word_size = MediumChunk;
5371   } else {
5372     out->specialized_chunk_word_size = ClassSpecializedChunk;
5373     out->small_chunk_word_size = ClassSmallChunk;
5374     out->medium_chunk_word_size = ClassMediumChunk;
5375   }
5376 }
5377