1 /*
   2  * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "gc/shared/gcLocker.hpp"
  29 #include "logging/log.hpp"
  30 #include "logging/logStream.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/binaryTreeDictionary.hpp"
  33 #include "memory/filemap.hpp"
  34 #include "memory/freeList.hpp"
  35 #include "memory/metachunk.hpp"
  36 #include "memory/metaspace.hpp"
  37 #include "memory/metaspaceGCThresholdUpdater.hpp"
  38 #include "memory/metaspaceShared.hpp"
  39 #include "memory/metaspaceTracer.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "memory/universe.hpp"
  42 #include "runtime/atomic.hpp"
  43 #include "runtime/globals.hpp"
  44 #include "runtime/init.hpp"
  45 #include "runtime/java.hpp"
  46 #include "runtime/mutex.hpp"
  47 #include "runtime/orderAccess.inline.hpp"
  48 #include "services/memTracker.hpp"
  49 #include "services/memoryService.hpp"
  50 #include "utilities/align.hpp"
  51 #include "utilities/copy.hpp"
  52 #include "utilities/debug.hpp"
  53 #include "utilities/macros.hpp"
  54 
  55 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  56 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
  57 
  58 // Set this constant to enable slow integrity checking of the free chunk lists
  59 const bool metaspace_slow_verify = false;
  60 
  61 size_t const allocation_from_dictionary_limit = 4 * K;
  62 
  63 MetaWord* last_allocated = 0;
  64 
  65 size_t Metaspace::_compressed_class_space_size;
  66 const MetaspaceTracer* Metaspace::_tracer = NULL;
  67 
  68 DEBUG_ONLY(bool Metaspace::_frozen = false;)
  69 
  70 // Used in declarations in SpaceManager and ChunkManager
  71 enum ChunkIndex {
  72   ZeroIndex = 0,
  73   SpecializedIndex = ZeroIndex,
  74   SmallIndex = SpecializedIndex + 1,
  75   MediumIndex = SmallIndex + 1,
  76   HumongousIndex = MediumIndex + 1,
  77   NumberOfFreeLists = 3,
  78   NumberOfInUseLists = 4
  79 };
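
      // Note: only the three fixed-size chunk types (specialized, small, medium) are kept
      // on free lists, hence NumberOfFreeLists == 3; humongous chunks live in a dictionary
      // instead, while the per-SpaceManager in-use bookkeeping covers all four types
      // (NumberOfInUseLists == 4).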
  80 
  81 // Helper, returns a descriptive name for the given index.
  82 static const char* chunk_size_name(ChunkIndex index) {
  83   switch (index) {
  84     case SpecializedIndex:
  85       return "specialized";
  86     case SmallIndex:
  87       return "small";
  88     case MediumIndex:
  89       return "medium";
  90     case HumongousIndex:
  91       return "humongous";
  92     default:
  93       return "Invalid index";
  94   }
  95 }
  96 
  97 enum ChunkSizes {    // in words.
  98   ClassSpecializedChunk = 128,
  99   SpecializedChunk = 128,
 100   ClassSmallChunk = 256,
 101   SmallChunk = 512,
 102   ClassMediumChunk = 4 * K,
 103   MediumChunk = 8 * K
 104 };
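
      // For reference, assuming 8-byte words (64-bit VM): SpecializedChunk is 1 KB,
      // SmallChunk 4 KB and MediumChunk 64 KB; the class-space variants are 1 KB, 2 KB
      // and 32 KB respectively.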
 105 
 106 static ChunkIndex next_chunk_index(ChunkIndex i) {
 107   assert(i < NumberOfInUseLists, "Out of bound");
 108   return (ChunkIndex) (i+1);
 109 }
 110 
 111 static const char* scale_unit(size_t scale) {
 112   switch(scale) {
 113     case 1: return "BYTES";
 114     case K: return "KB";
 115     case M: return "MB";
 116     case G: return "GB";
 117     default:
 118       ShouldNotReachHere();
 119       return NULL;
 120   }
 121 }
 122 
 123 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 124 uint MetaspaceGC::_shrink_factor = 0;
 125 bool MetaspaceGC::_should_concurrent_collect = false;
 126 
 127 typedef class FreeList<Metachunk> ChunkList;
 128 
 129 // Manages the global free lists of chunks.
 130 class ChunkManager : public CHeapObj<mtInternal> {
 131   friend class TestVirtualSpaceNodeTest;
 132 
 133   // Free list of chunks of different sizes.
 134   //   SpecializedChunk
 135   //   SmallChunk
 136   //   MediumChunk
 137   ChunkList _free_chunks[NumberOfFreeLists];
 138 
 139   // Return non-humongous chunk list by its index.
 140   ChunkList* free_chunks(ChunkIndex index);
 141 
 142   // Returns non-humongous chunk list for the given chunk word size.
 143   ChunkList* find_free_chunks_list(size_t word_size);
 144 
 145   //   HumongousChunk
 146   ChunkTreeDictionary _humongous_dictionary;
 147 
 148   // Returns the humongous chunk dictionary.
 149   ChunkTreeDictionary* humongous_dictionary() {
 150     return &_humongous_dictionary;
 151   }
 152 
 153   // Size, in metaspace words, of all chunks managed by this ChunkManager
 154   size_t _free_chunks_total;
 155   // Number of chunks in this ChunkManager
 156   size_t _free_chunks_count;
 157 
  158   // Update counters after a chunk was added or removed.
 159   void account_for_added_chunk(const Metachunk* c);
 160   void account_for_removed_chunk(const Metachunk* c);
 161 
 162   // Debug support
 163 
 164   size_t sum_free_chunks();
 165   size_t sum_free_chunks_count();
 166 
 167   void locked_verify_free_chunks_total();
 168   void slow_locked_verify_free_chunks_total() {
 169     if (metaspace_slow_verify) {
 170       locked_verify_free_chunks_total();
 171     }
 172   }
 173   void locked_verify_free_chunks_count();
 174   void slow_locked_verify_free_chunks_count() {
 175     if (metaspace_slow_verify) {
 176       locked_verify_free_chunks_count();
 177     }
 178   }
 179   void verify_free_chunks_count();
 180 
 181   struct ChunkManagerStatistics {
 182     size_t num_by_type[NumberOfFreeLists];
 183     size_t single_size_by_type[NumberOfFreeLists];
 184     size_t total_size_by_type[NumberOfFreeLists];
 185     size_t num_humongous_chunks;
 186     size_t total_size_humongous_chunks;
 187   };
 188 
 189   void locked_get_statistics(ChunkManagerStatistics* stat) const;
 190   void get_statistics(ChunkManagerStatistics* stat) const;
 191   static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale);
 192 
 193  public:
 194 
 195   ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
 196       : _free_chunks_total(0), _free_chunks_count(0) {
 197     _free_chunks[SpecializedIndex].set_size(specialized_size);
 198     _free_chunks[SmallIndex].set_size(small_size);
 199     _free_chunks[MediumIndex].set_size(medium_size);
 200   }
 201 
  202   // Allocate (remove) a chunk from, or return a chunk to, the global freelist.
 203   Metachunk* chunk_freelist_allocate(size_t word_size);
 204 
 205   // Map a size to a list index assuming that there are lists
 206   // for special, small, medium, and humongous chunks.
 207   ChunkIndex list_index(size_t size);
 208 
 209   // Map a given index to the chunk size.
 210   size_t size_by_index(ChunkIndex index) const;
 211 
 212   // Take a chunk from the ChunkManager. The chunk is expected to be in
 213   // the chunk manager (the freelist if non-humongous, the dictionary if
 214   // humongous).
 215   void remove_chunk(Metachunk* chunk);
 216 
 217   // Return a single chunk of type index to the ChunkManager.
 218   void return_single_chunk(ChunkIndex index, Metachunk* chunk);
 219 
 220   // Add the simple linked list of chunks to the freelist of chunks
 221   // of type index.
 222   void return_chunk_list(ChunkIndex index, Metachunk* chunk);
 223 
 224   // Total of the space in the free chunks list
 225   size_t free_chunks_total_words();
 226   size_t free_chunks_total_bytes();
 227 
 228   // Number of chunks in the free chunks list
 229   size_t free_chunks_count();
 230 
 231   // Remove from a list by size.  Selects list based on size of chunk.
 232   Metachunk* free_chunks_get(size_t chunk_word_size);
 233 
 234 #define index_bounds_check(index)                                         \
 235   assert(index == SpecializedIndex ||                                     \
 236          index == SmallIndex ||                                           \
 237          index == MediumIndex ||                                          \
 238          index == HumongousIndex, "Bad index: %d", (int) index)
 239 
 240   size_t num_free_chunks(ChunkIndex index) const {
 241     index_bounds_check(index);
 242 
 243     if (index == HumongousIndex) {
 244       return _humongous_dictionary.total_free_blocks();
 245     }
 246 
 247     ssize_t count = _free_chunks[index].count();
 248     return count == -1 ? 0 : (size_t) count;
 249   }
 250 
 251   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 252     index_bounds_check(index);
 253 
 254     size_t word_size = 0;
 255     if (index == HumongousIndex) {
 256       word_size = _humongous_dictionary.total_size();
 257     } else {
 258       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 259       word_size = size_per_chunk_in_words * num_free_chunks(index);
 260     }
 261 
 262     return word_size * BytesPerWord;
 263   }
 264 
 265   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 266     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 267                                          num_free_chunks(SmallIndex),
 268                                          num_free_chunks(MediumIndex),
 269                                          num_free_chunks(HumongousIndex),
 270                                          size_free_chunks_in_bytes(SpecializedIndex),
 271                                          size_free_chunks_in_bytes(SmallIndex),
 272                                          size_free_chunks_in_bytes(MediumIndex),
 273                                          size_free_chunks_in_bytes(HumongousIndex));
 274   }
 275 
 276   // Debug support
 277   void verify();
 278   void slow_verify() {
 279     if (metaspace_slow_verify) {
 280       verify();
 281     }
 282   }
 283   void locked_verify();
 284   void slow_locked_verify() {
 285     if (metaspace_slow_verify) {
 286       locked_verify();
 287     }
 288   }
 289   void verify_free_chunks_total();
 290 
 291   void locked_print_free_chunks(outputStream* st);
 292   void locked_print_sum_free_chunks(outputStream* st);
 293 
 294   void print_on(outputStream* st) const;
 295 
 296   // Prints composition for both non-class and (if available)
 297   // class chunk manager.
 298   static void print_all_chunkmanagers(outputStream* out, size_t scale = 1);
 299 };
 300 
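      // Per-size free lists for very small deallocated metadata blocks; used by
      // BlockFreelist for block sizes below small_block_max_size(), which are too
      // small for the dictionary.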
 301 class SmallBlocks : public CHeapObj<mtClass> {
 302   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 303   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 304 
 305  private:
 306   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 307 
 308   FreeList<Metablock>& list_at(size_t word_size) {
 309     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 310     return _small_lists[word_size - _small_block_min_size];
 311   }
 312 
 313  public:
 314   SmallBlocks() {
 315     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 316       uint k = i - _small_block_min_size;
 317       _small_lists[k].set_size(i);
 318     }
 319   }
 320 
 321   size_t total_size() const {
 322     size_t result = 0;
 323     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 324       uint k = i - _small_block_min_size;
 325       result = result + _small_lists[k].count() * _small_lists[k].size();
 326     }
 327     return result;
 328   }
 329 
 330   static uint small_block_max_size() { return _small_block_max_size; }
 331   static uint small_block_min_size() { return _small_block_min_size; }
 332 
 333   MetaWord* get_block(size_t word_size) {
 334     if (list_at(word_size).count() > 0) {
 335       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 336       return new_block;
 337     } else {
 338       return NULL;
 339     }
 340   }
 341   void return_block(Metablock* free_chunk, size_t word_size) {
 342     list_at(word_size).return_chunk_at_head(free_chunk, false);
 343     assert(list_at(word_size).count() > 0, "Should have a chunk");
 344   }
 345 
 346   void print_on(outputStream* st) const {
 347     st->print_cr("SmallBlocks:");
 348     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 349       uint k = i - _small_block_min_size;
 350       st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
 351     }
 352   }
 353 };
 354 
 355 // Used to manage the free list of Metablocks (a block corresponds
 356 // to the allocation of a quantum of metadata).
 357 class BlockFreelist : public CHeapObj<mtClass> {
 358   BlockTreeDictionary* const _dictionary;
 359   SmallBlocks* _small_blocks;
 360 
 361   // Only allocate and split from freelist if the size of the allocation
 362   // is at least 1/4th the size of the available block.
 363   const static int WasteMultiplier = 4;
 364 
 365   // Accessors
 366   BlockTreeDictionary* dictionary() const { return _dictionary; }
 367   SmallBlocks* small_blocks() {
 368     if (_small_blocks == NULL) {
 369       _small_blocks = new SmallBlocks();
 370     }
 371     return _small_blocks;
 372   }
 373 
 374  public:
 375   BlockFreelist();
 376   ~BlockFreelist();
 377 
 378   // Get and return a block to the free list
 379   MetaWord* get_block(size_t word_size);
 380   void return_block(MetaWord* p, size_t word_size);
 381 
 382   size_t total_size() const  {
 383     size_t result = dictionary()->total_size();
 384     if (_small_blocks != NULL) {
 385       result = result + _small_blocks->total_size();
 386     }
 387     return result;
 388   }
 389 
 390   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 391   void print_on(outputStream* st) const;
 392 };
 393 
 394 // A VirtualSpaceList node.
 395 class VirtualSpaceNode : public CHeapObj<mtClass> {
 396   friend class VirtualSpaceList;
 397 
 398   // Link to next VirtualSpaceNode
 399   VirtualSpaceNode* _next;
 400 
 401   // total in the VirtualSpace
 402   MemRegion _reserved;
 403   ReservedSpace _rs;
 404   VirtualSpace _virtual_space;
 405   MetaWord* _top;
 406   // count of chunks contained in this VirtualSpace
 407   uintx _container_count;
 408 
 409   // Convenience functions to access the _virtual_space
 410   char* low()  const { return virtual_space()->low(); }
 411   char* high() const { return virtual_space()->high(); }
 412 
 413   // The first Metachunk will be allocated at the bottom of the
 414   // VirtualSpace
 415   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 416 
 417   // Committed but unused space in the virtual space
 418   size_t free_words_in_vs() const;
 419  public:
 420 
 421   VirtualSpaceNode(size_t byte_size);
 422   VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
 423   ~VirtualSpaceNode();
 424 
 425   // Convenience functions for logical bottom and end
 426   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 427   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 428 
 429   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 430 
 431   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 432   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 433 
 434   bool is_pre_committed() const { return _virtual_space.special(); }
 435 
  436   // Note: _top (declared above) is the address of the next available space in _virtual_space.
  437   // Accessors
 438   VirtualSpaceNode* next() { return _next; }
 439   void set_next(VirtualSpaceNode* v) { _next = v; }
 440 
 441   void set_reserved(MemRegion const v) { _reserved = v; }
 442   void set_top(MetaWord* v) { _top = v; }
 443 
 444   // Accessors
 445   MemRegion* reserved() { return &_reserved; }
 446   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 447 
 448   // Returns true if "word_size" is available in the VirtualSpace
 449   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 450 
 451   MetaWord* top() const { return _top; }
 452   void inc_top(size_t word_size) { _top += word_size; }
 453 
 454   uintx container_count() { return _container_count; }
 455   void inc_container_count();
 456   void dec_container_count();
 457 #ifdef ASSERT
 458   uintx container_count_slow();
 459   void verify_container_count();
 460 #endif
 461 
 462   // used and capacity in this single entry in the list
 463   size_t used_words_in_vs() const;
 464   size_t capacity_words_in_vs() const;
 465 
 466   bool initialize();
 467 
 468   // get space from the virtual space
 469   Metachunk* take_from_committed(size_t chunk_word_size);
 470 
 471   // Allocate a chunk from the virtual space and return it.
 472   Metachunk* get_chunk_vs(size_t chunk_word_size);
 473 
  474   // Expands/shrinks the committed space in a virtual space.  Delegates
  475   // to VirtualSpace.
 476   bool expand_by(size_t min_words, size_t preferred_words);
 477 
 478   // In preparation for deleting this node, remove all the chunks
 479   // in the node from any freelist.
 480   void purge(ChunkManager* chunk_manager);
 481 
 482   // If an allocation doesn't fit in the current node a new node is created.
 483   // Allocate chunks out of the remaining committed space in this node
 484   // to avoid wasting that memory.
 485   // This always adds up because all the chunk sizes are multiples of
 486   // the smallest chunk size.
 487   void retire(ChunkManager* chunk_manager);
 488 
 489 #ifdef ASSERT
 490   // Debug support
 491   void mangle();
 492 #endif
 493 
 494   void print_on(outputStream* st) const;
 495   void print_map(outputStream* st, bool is_class) const;
 496 };
 497 
 498 #define assert_is_aligned(value, alignment)                  \
 499   assert(is_aligned((value), (alignment)),                   \
 500          SIZE_FORMAT_HEX " is not aligned to "               \
 501          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
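      // Example use (see VirtualSpaceNode::VirtualSpaceNode below):
      //   assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());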
 502 
 503 // Decide if large pages should be committed when the memory is reserved.
 504 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 505   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 506     size_t words = bytes / BytesPerWord;
 507     bool is_class = false; // We never reserve large pages for the class space.
 508     if (MetaspaceGC::can_expand(words, is_class) &&
 509         MetaspaceGC::allowed_expansion() >= words) {
 510       return true;
 511     }
 512   }
 513 
 514   return false;
 515 }
 516 
  517 // bytes is the size of the associated VirtualSpace.
 518 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
 519   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 520   bool large_pages = should_commit_large_pages_when_reserving(bytes);
 521   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 522 
 523   if (_rs.is_reserved()) {
 524     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 525     assert(_rs.size() != 0, "Catch if we get a 0 size");
 526     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
 527     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
 528 
 529     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 530   }
 531 }
 532 
 533 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 534   Metachunk* chunk = first_chunk();
 535   Metachunk* invalid_chunk = (Metachunk*) top();
 536   while (chunk < invalid_chunk ) {
 537     assert(chunk->is_tagged_free(), "Should be tagged free");
 538     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 539     chunk_manager->remove_chunk(chunk);
 540     assert(chunk->next() == NULL &&
 541            chunk->prev() == NULL,
 542            "Was not removed from its list");
 543     chunk = (Metachunk*) next;
 544   }
 545 }
 546 
 547 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
 548 
 549   // Format:
 550   // <ptr>
 551   // <ptr>  . .. .               .  ..
 552   //        SSxSSMMMMMMMMMMMMMMMMsssXX
 553   //        112114444444444444444
 554   // <ptr>  . .. .               .  ..
 555   //        SSxSSMMMMMMMMMMMMMMMMsssXX
 556   //        112114444444444444444
 557 
 558   if (bottom() == top()) {
 559     return;
 560   }
 561 
 562   // First line: dividers for every med-chunk-sized interval
 563   // Second line: a dot for the start of a chunk
 564   // Third line: a letter per chunk type (x,s,m,h), uppercase if in use.
 565 
 566   const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
 567   const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
 568   const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
 569 
 570   int line_len = 100;
 571   const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
 572   line_len = (int)(section_len / spec_chunk_size);
 573 
 574   char* line1 = (char*)os::malloc(line_len, mtInternal);
 575   char* line2 = (char*)os::malloc(line_len, mtInternal);
 576   char* line3 = (char*)os::malloc(line_len, mtInternal);
 577   int pos = 0;
 578   const MetaWord* p = bottom();
 579   const Metachunk* chunk = (const Metachunk*)p;
 580   const MetaWord* chunk_end = p + chunk->word_size();
 581   while (p < top()) {
 582     if (pos == line_len) {
 583       pos = 0;
 584       st->fill_to(22);
 585       st->print_raw(line1, line_len);
 586       st->cr();
 587       st->fill_to(22);
 588       st->print_raw(line2, line_len);
 589       st->cr();
 590     }
 591     if (pos == 0) {
 592       st->print(PTR_FORMAT ":", p2i(p));
 593     }
 594     if (p == chunk_end) {
 595       chunk = (Metachunk*)p;
 596       chunk_end = p + chunk->word_size();
 597     }
 598     if (p == (const MetaWord*)chunk) {
 599       // chunk starts.
 600       line1[pos] = '.';
 601     } else {
 602       line1[pos] = ' ';
 603     }
 604     // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
 605     // chunk is in use.
 606     const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
 607     if (chunk->word_size() == spec_chunk_size) {
 608       line2[pos] = chunk_is_free ? 'x' : 'X';
 609     } else if (chunk->word_size() == small_chunk_size) {
 610       line2[pos] = chunk_is_free ? 's' : 'S';
 611     } else if (chunk->word_size() == med_chunk_size) {
 612       line2[pos] = chunk_is_free ? 'm' : 'M';
  613     } else if (chunk->word_size() > med_chunk_size) {
 614       line2[pos] = chunk_is_free ? 'h' : 'H';
 615     } else {
 616       ShouldNotReachHere();
 617     }
 618     p += spec_chunk_size;
 619     pos ++;
 620   }
 621   if (pos > 0) {
 622     st->fill_to(22);
 623     st->print_raw(line1, pos);
 624     st->cr();
 625     st->fill_to(22);
 626     st->print_raw(line2, pos);
 627     st->cr();
 628   }
 629   os::free(line1);
 630   os::free(line2);
 631   os::free(line3);
 632 }
 633 
 634 
 635 #ifdef ASSERT
 636 uintx VirtualSpaceNode::container_count_slow() {
 637   uintx count = 0;
 638   Metachunk* chunk = first_chunk();
 639   Metachunk* invalid_chunk = (Metachunk*) top();
 640   while (chunk < invalid_chunk ) {
 641     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 642     // Don't count the chunks on the free lists.  Those are
 643     // still part of the VirtualSpaceNode but not currently
 644     // counted.
 645     if (!chunk->is_tagged_free()) {
 646       count++;
 647     }
 648     chunk = (Metachunk*) next;
 649   }
 650   return count;
 651 }
 652 #endif
 653 
 654 // List of VirtualSpaces for metadata allocation.
 655 class VirtualSpaceList : public CHeapObj<mtClass> {
 656   friend class VirtualSpaceNode;
 657 
 658   enum VirtualSpaceSizes {
 659     VirtualSpaceSize = 256 * K
 660   };
 661 
 662   // Head of the list
 663   VirtualSpaceNode* _virtual_space_list;
 664   // virtual space currently being used for allocations
 665   VirtualSpaceNode* _current_virtual_space;
 666 
 667   // Is this VirtualSpaceList used for the compressed class space
 668   bool _is_class;
 669 
 670   // Sum of reserved and committed memory in the virtual spaces
 671   size_t _reserved_words;
 672   size_t _committed_words;
 673 
 674   // Number of virtual spaces
 675   size_t _virtual_space_count;
 676 
 677   ~VirtualSpaceList();
 678 
 679   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
 680 
 681   void set_virtual_space_list(VirtualSpaceNode* v) {
 682     _virtual_space_list = v;
 683   }
 684   void set_current_virtual_space(VirtualSpaceNode* v) {
 685     _current_virtual_space = v;
 686   }
 687 
 688   void link_vs(VirtualSpaceNode* new_entry);
 689 
 690   // Get another virtual space and add it to the list.  This
 691   // is typically prompted by a failed attempt to allocate a chunk
 692   // and is typically followed by the allocation of a chunk.
 693   bool create_new_virtual_space(size_t vs_word_size);
 694 
 695   // Chunk up the unused committed space in the current
 696   // virtual space and add the chunks to the free list.
 697   void retire_current_virtual_space();
 698 
 699  public:
 700   VirtualSpaceList(size_t word_size);
 701   VirtualSpaceList(ReservedSpace rs);
 702 
 703   size_t free_bytes();
 704 
 705   Metachunk* get_new_chunk(size_t chunk_word_size,
 706                            size_t suggested_commit_granularity);
 707 
 708   bool expand_node_by(VirtualSpaceNode* node,
 709                       size_t min_words,
 710                       size_t preferred_words);
 711 
 712   bool expand_by(size_t min_words,
 713                  size_t preferred_words);
 714 
 715   VirtualSpaceNode* current_virtual_space() {
 716     return _current_virtual_space;
 717   }
 718 
 719   bool is_class() const { return _is_class; }
 720 
 721   bool initialization_succeeded() { return _virtual_space_list != NULL; }
 722 
 723   size_t reserved_words()  { return _reserved_words; }
 724   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
 725   size_t committed_words() { return _committed_words; }
 726   size_t committed_bytes() { return committed_words() * BytesPerWord; }
 727 
 728   void inc_reserved_words(size_t v);
 729   void dec_reserved_words(size_t v);
 730   void inc_committed_words(size_t v);
 731   void dec_committed_words(size_t v);
 732   void inc_virtual_space_count();
 733   void dec_virtual_space_count();
 734 
 735   bool contains(const void* ptr);
 736 
  737   // Unlink empty VirtualSpaceNodes and free them.
 738   void purge(ChunkManager* chunk_manager);
 739 
 740   void print_on(outputStream* st) const;
 741   void print_map(outputStream* st) const;
 742 
 743   class VirtualSpaceListIterator : public StackObj {
 744     VirtualSpaceNode* _virtual_spaces;
 745    public:
 746     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
 747       _virtual_spaces(virtual_spaces) {}
 748 
 749     bool repeat() {
 750       return _virtual_spaces != NULL;
 751     }
 752 
 753     VirtualSpaceNode* get_next() {
 754       VirtualSpaceNode* result = _virtual_spaces;
 755       if (_virtual_spaces != NULL) {
 756         _virtual_spaces = _virtual_spaces->next();
 757       }
 758       return result;
 759     }
 760   };
 761 };
 762 
 763 class Metadebug : AllStatic {
 764   // Debugging support for Metaspaces
 765   static int _allocation_fail_alot_count;
 766 
 767  public:
 768 
 769   static void init_allocation_fail_alot_count();
 770 #ifdef ASSERT
 771   static bool test_metadata_failure();
 772 #endif
 773 };
 774 
 775 int Metadebug::_allocation_fail_alot_count = 0;
 776 
 777 //  SpaceManager - used by Metaspace to handle allocations
 778 class SpaceManager : public CHeapObj<mtClass> {
 779   friend class Metaspace;
 780   friend class Metadebug;
 781 
 782  private:
 783 
 784   // protects allocations
 785   Mutex* const _lock;
 786 
 787   // Type of metadata allocated.
 788   Metaspace::MetadataType _mdtype;
 789 
 790   // List of chunks in use by this SpaceManager.  Allocations
 791   // are done from the current chunk.  The list is used for deallocating
 792   // chunks when the SpaceManager is freed.
 793   Metachunk* _chunks_in_use[NumberOfInUseLists];
 794   Metachunk* _current_chunk;
 795 
 796   // Maximum number of small chunks to allocate to a SpaceManager
 797   static uint const _small_chunk_limit;
 798 
 799   // Sum of all space in allocated chunks
 800   size_t _allocated_blocks_words;
 801 
 802   // Sum of all allocated chunks
 803   size_t _allocated_chunks_words;
 804   size_t _allocated_chunks_count;
 805 
 806   // Free lists of blocks are per SpaceManager since they
 807   // are assumed to be in chunks in use by the SpaceManager
 808   // and all chunks in use by a SpaceManager are freed when
 809   // the class loader using the SpaceManager is collected.
 810   BlockFreelist* _block_freelists;
 811 
 812   // protects virtualspace and chunk expansions
 813   static const char*  _expand_lock_name;
 814   static const int    _expand_lock_rank;
 815   static Mutex* const _expand_lock;
 816 
 817  private:
 818   // Accessors
 819   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
 820   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
 821     _chunks_in_use[index] = v;
 822   }
 823 
 824   BlockFreelist* block_freelists() const { return _block_freelists; }
 825 
 826   Metaspace::MetadataType mdtype() { return _mdtype; }
 827 
 828   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
 829   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
 830 
 831   Metachunk* current_chunk() const { return _current_chunk; }
 832   void set_current_chunk(Metachunk* v) {
 833     _current_chunk = v;
 834   }
 835 
 836   Metachunk* find_current_chunk(size_t word_size);
 837 
 838   // Add chunk to the list of chunks in use
 839   void add_chunk(Metachunk* v, bool make_current);
 840   void retire_current_chunk();
 841 
 842   Mutex* lock() const { return _lock; }
 843 
 844  protected:
 845   void initialize();
 846 
 847  public:
 848   SpaceManager(Metaspace::MetadataType mdtype,
 849                Mutex* lock);
 850   ~SpaceManager();
 851 
 852   enum ChunkMultiples {
 853     MediumChunkMultiple = 4
 854   };
 855 
 856   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
 857   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
 858   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
 859 
 860   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
 861 
 862   // Accessors
 863   bool is_class() const { return _mdtype == Metaspace::ClassType; }
 864 
 865   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
 866   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
 867   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
 868 
 869   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
 870 
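        // A "bunch" is MediumChunkMultiple (4) medium chunks: e.g. 32 K words (256 KB,
        // assuming 8-byte words) for the non-class space.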
 871   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
 872 
 873   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
 874   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
 875   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
 876   size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
 877   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
 878 
 879   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
 880 
 881   static Mutex* expand_lock() { return _expand_lock; }
 882 
 883   // Increment the per Metaspace and global running sums for Metachunks
  884   // by the given size.  This is used when a Metachunk is added to
  885   // the in-use list.
 886   void inc_size_metrics(size_t words);
  887   // Increment the per Metaspace and global running sums for Metablocks
  888   // by the given size.  This is used when a Metablock is allocated.
 889   void inc_used_metrics(size_t words);
  890   // Delete the portion of the running sums for this SpaceManager. That is,
  891   // the global running sums for the Metachunks and Metablocks are
  892   // decremented for all the Metachunks in use by this SpaceManager.
 893   void dec_total_from_size_metrics();
 894 
 895   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
 896   // or return the unadjusted size if the requested size is humongous.
 897   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
 898   size_t adjust_initial_chunk_size(size_t requested) const;
 899 
  900   // Get the initial chunk size for this metaspace type.
 901   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
 902 
 903   size_t sum_capacity_in_chunks_in_use() const;
 904   size_t sum_used_in_chunks_in_use() const;
 905   size_t sum_free_in_chunks_in_use() const;
 906   size_t sum_waste_in_chunks_in_use() const;
 907   size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
 908 
 909   size_t sum_count_in_chunks_in_use();
 910   size_t sum_count_in_chunks_in_use(ChunkIndex i);
 911 
 912   Metachunk* get_new_chunk(size_t chunk_word_size);
 913 
 914   // Block allocation and deallocation.
 915   // Allocates a block from the current chunk
 916   MetaWord* allocate(size_t word_size);
 917   // Allocates a block from a small chunk
 918   MetaWord* get_small_chunk_and_allocate(size_t word_size);
 919 
 920   // Helper for allocations
 921   MetaWord* allocate_work(size_t word_size);
 922 
 923   // Returns a block to the per manager freelist
 924   void deallocate(MetaWord* p, size_t word_size);
 925 
  926   // Based on the allocation size and a minimum chunk size, compute the
  927   // chunk size to request (used when expanding space for chunk allocation).
 928   size_t calc_chunk_size(size_t allocation_word_size);
 929 
 930   // Called when an allocation from the current chunk fails.
 931   // Gets a new chunk (may require getting a new virtual space),
 932   // and allocates from that chunk.
 933   MetaWord* grow_and_allocate(size_t word_size);
 934 
  935   // Report memory usage to MemoryService.
 936   void track_metaspace_memory_usage();
 937 
 938   // debugging support.
 939 
 940   void dump(outputStream* const out) const;
 941   void print_on(outputStream* st) const;
 942   void locked_print_chunks_in_use_on(outputStream* st) const;
 943 
 944   void verify();
 945   void verify_chunk_size(Metachunk* chunk);
 946 #ifdef ASSERT
 947   void verify_allocated_blocks_words();
 948 #endif
 949 
  950   // This adjusts the given size to be at least the minimum allocation size in
  951   // words for data in metaspace.  Essentially the minimum size is currently 3 words.
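        // For example, assuming 8-byte words and the current Metablock layout: a 1-word
        // (8-byte) request is raised to sizeof(Metablock) and then aligned to
        // Metachunk::object_alignment(), so the caller is charged 3 words rather than 1.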
 952   size_t get_allocation_word_size(size_t word_size) {
 953     size_t byte_size = word_size * BytesPerWord;
 954 
 955     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
 956     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
 957 
 958     size_t raw_word_size = raw_bytes_size / BytesPerWord;
 959     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
 960 
 961     return raw_word_size;
 962   }
 963 };
 964 
 965 uint const SpaceManager::_small_chunk_limit = 4;
 966 
 967 const char* SpaceManager::_expand_lock_name =
 968   "SpaceManager chunk allocation lock";
 969 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
 970 Mutex* const SpaceManager::_expand_lock =
 971   new Mutex(SpaceManager::_expand_lock_rank,
 972             SpaceManager::_expand_lock_name,
 973             Mutex::_allow_vm_block_flag,
 974             Monitor::_safepoint_check_never);
 975 
 976 void VirtualSpaceNode::inc_container_count() {
 977   assert_lock_strong(SpaceManager::expand_lock());
 978   _container_count++;
 979 }
 980 
 981 void VirtualSpaceNode::dec_container_count() {
 982   assert_lock_strong(SpaceManager::expand_lock());
 983   _container_count--;
 984 }
 985 
 986 #ifdef ASSERT
 987 void VirtualSpaceNode::verify_container_count() {
 988   assert(_container_count == container_count_slow(),
 989          "Inconsistency in container_count _container_count " UINTX_FORMAT
 990          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
 991 }
 992 #endif
 993 
 994 // BlockFreelist methods
 995 
 996 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
 997 
 998 BlockFreelist::~BlockFreelist() {
 999   delete _dictionary;
1000   if (_small_blocks != NULL) {
1001     delete _small_blocks;
1002   }
1003 }
1004 
1005 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
1006   assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
1007 
1008   Metablock* free_chunk = ::new (p) Metablock(word_size);
1009   if (word_size < SmallBlocks::small_block_max_size()) {
1010     small_blocks()->return_block(free_chunk, word_size);
1011   } else {
 1012     dictionary()->return_chunk(free_chunk);
 1013   }
1014   log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
1015             SIZE_FORMAT, p2i(free_chunk), word_size);
1016 }
1017 
1018 MetaWord* BlockFreelist::get_block(size_t word_size) {
1019   assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
1020 
1021   // Try small_blocks first.
1022   if (word_size < SmallBlocks::small_block_max_size()) {
1023     // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
1024     // this space manager.
1025     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
1026     if (new_block != NULL) {
1027       log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1028               p2i(new_block), word_size);
1029       return new_block;
1030     }
1031   }
1032 
1033   if (word_size < BlockFreelist::min_dictionary_size()) {
1034     // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
1035     return NULL;
1036   }
1037 
1038   Metablock* free_block = dictionary()->get_chunk(word_size);
1039   if (free_block == NULL) {
1040     return NULL;
1041   }
1042 
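        // Reject blocks that would waste too much space: with WasteMultiplier == 4 the
        // block is used only if the request is at least a quarter of the block size
        // (e.g. a 4-word request will not consume a block larger than 16 words);
        // oversized blocks go back to the freelist.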
1043   const size_t block_size = free_block->size();
1044   if (block_size > WasteMultiplier * word_size) {
1045     return_block((MetaWord*)free_block, block_size);
1046     return NULL;
1047   }
1048 
1049   MetaWord* new_block = (MetaWord*)free_block;
1050   assert(block_size >= word_size, "Incorrect size of block from freelist");
1051   const size_t unused = block_size - word_size;
1052   if (unused >= SmallBlocks::small_block_min_size()) {
1053     return_block(new_block + word_size, unused);
1054   }
1055 
1056   log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1057             p2i(new_block), word_size);
1058   return new_block;
1059 }
1060 
1061 void BlockFreelist::print_on(outputStream* st) const {
1062   dictionary()->print_free_lists(st);
1063   if (_small_blocks != NULL) {
1064     _small_blocks->print_on(st);
1065   }
1066 }
1067 
1068 // VirtualSpaceNode methods
1069 
1070 VirtualSpaceNode::~VirtualSpaceNode() {
1071   _rs.release();
1072 #ifdef ASSERT
1073   size_t word_size = sizeof(*this) / BytesPerWord;
1074   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
1075 #endif
1076 }
1077 
1078 size_t VirtualSpaceNode::used_words_in_vs() const {
1079   return pointer_delta(top(), bottom(), sizeof(MetaWord));
1080 }
1081 
1082 // Space committed in the VirtualSpace
1083 size_t VirtualSpaceNode::capacity_words_in_vs() const {
1084   return pointer_delta(end(), bottom(), sizeof(MetaWord));
1085 }
1086 
1087 size_t VirtualSpaceNode::free_words_in_vs() const {
1088   return pointer_delta(end(), top(), sizeof(MetaWord));
1089 }
1090 
1091 // Allocates the chunk from the virtual space only.
1092 // This interface is also used internally for debugging.  Not all
1093 // chunks removed here are necessarily used for allocation.
1094 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1095   // Bottom of the new chunk
1096   MetaWord* chunk_limit = top();
1097   assert(chunk_limit != NULL, "Not safe to call this method");
1098 
1099   // The virtual spaces are always expanded by the
1100   // commit granularity to enforce the following condition.
1101   // Without this the is_available check will not work correctly.
1102   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1103       "The committed memory doesn't match the expanded memory.");
1104 
1105   if (!is_available(chunk_word_size)) {
1106     LogTarget(Debug, gc, metaspace, freelist) lt;
1107     if (lt.is_enabled()) {
1108       LogStream ls(lt);
1109       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1110       // Dump some information about the virtual space that is nearly full
1111       print_on(&ls);
1112     }
1113     return NULL;
1114   }
1115 
 1116   // Take the space (bump top on the current virtual space).
1117   inc_top(chunk_word_size);
1118 
1119   // Initialize the chunk
1120   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
1121   return result;
1122 }
1123 
1124 
1125 // Expand the virtual space (commit more of the reserved space)
1126 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1127   size_t min_bytes = min_words * BytesPerWord;
1128   size_t preferred_bytes = preferred_words * BytesPerWord;
1129 
1130   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1131 
1132   if (uncommitted < min_bytes) {
1133     return false;
1134   }
1135 
1136   size_t commit = MIN2(preferred_bytes, uncommitted);
1137   bool result = virtual_space()->expand_by(commit, false);
1138 
1139   assert(result, "Failed to commit memory");
1140 
1141   return result;
1142 }
1143 
1144 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1145   assert_lock_strong(SpaceManager::expand_lock());
1146   Metachunk* result = take_from_committed(chunk_word_size);
1147   if (result != NULL) {
1148     inc_container_count();
1149   }
1150   return result;
1151 }
1152 
1153 bool VirtualSpaceNode::initialize() {
1154 
1155   if (!_rs.is_reserved()) {
1156     return false;
1157   }
1158 
 1159   // These are necessary restrictions to make sure that the virtual space always
 1160   // grows in steps of Metaspace::commit_alignment(). If both base and size are
 1161   // aligned, only the middle alignment of the VirtualSpace is used.
1162   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
1163   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
1164 
1165   // ReservedSpaces marked as special will have the entire memory
1166   // pre-committed. Setting a committed size will make sure that
 1167   // committed_size and actual_committed_size agree.
1168   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1169 
1170   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1171                                             Metaspace::commit_alignment());
1172   if (result) {
1173     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1174         "Checking that the pre-committed memory was registered by the VirtualSpace");
1175 
1176     set_top((MetaWord*)virtual_space()->low());
1177     set_reserved(MemRegion((HeapWord*)_rs.base(),
1178                  (HeapWord*)(_rs.base() + _rs.size())));
1179 
1180     assert(reserved()->start() == (HeapWord*) _rs.base(),
1181            "Reserved start was not set properly " PTR_FORMAT
1182            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1183     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1184            "Reserved size was not set properly " SIZE_FORMAT
1185            " != " SIZE_FORMAT, reserved()->word_size(),
1186            _rs.size() / BytesPerWord);
1187   }
1188 
1189   return result;
1190 }
1191 
1192 void VirtualSpaceNode::print_on(outputStream* st) const {
1193   size_t used = used_words_in_vs();
1194   size_t capacity = capacity_words_in_vs();
1195   VirtualSpace* vs = virtual_space();
1196   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
1197            "[" PTR_FORMAT ", " PTR_FORMAT ", "
1198            PTR_FORMAT ", " PTR_FORMAT ")",
1199            p2i(vs), capacity / K,
1200            capacity == 0 ? 0 : used * 100 / capacity,
1201            p2i(bottom()), p2i(top()), p2i(end()),
1202            p2i(vs->high_boundary()));
1203 }
1204 
1205 #ifdef ASSERT
1206 void VirtualSpaceNode::mangle() {
1207   size_t word_size = capacity_words_in_vs();
1208   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1209 }
1210 #endif // ASSERT
1211 
1212 // VirtualSpaceList methods
1213 // Space allocated from the VirtualSpace
1214 
1215 VirtualSpaceList::~VirtualSpaceList() {
1216   VirtualSpaceListIterator iter(virtual_space_list());
1217   while (iter.repeat()) {
1218     VirtualSpaceNode* vsl = iter.get_next();
1219     delete vsl;
1220   }
1221 }
1222 
1223 void VirtualSpaceList::inc_reserved_words(size_t v) {
1224   assert_lock_strong(SpaceManager::expand_lock());
1225   _reserved_words = _reserved_words + v;
1226 }
1227 void VirtualSpaceList::dec_reserved_words(size_t v) {
1228   assert_lock_strong(SpaceManager::expand_lock());
1229   _reserved_words = _reserved_words - v;
1230 }
1231 
1232 #define assert_committed_below_limit()                        \
1233   assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
1234          "Too much committed memory. Committed: " SIZE_FORMAT \
1235          " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1236          MetaspaceAux::committed_bytes(), MaxMetaspaceSize);
1237 
1238 void VirtualSpaceList::inc_committed_words(size_t v) {
1239   assert_lock_strong(SpaceManager::expand_lock());
1240   _committed_words = _committed_words + v;
1241 
1242   assert_committed_below_limit();
1243 }
1244 void VirtualSpaceList::dec_committed_words(size_t v) {
1245   assert_lock_strong(SpaceManager::expand_lock());
1246   _committed_words = _committed_words - v;
1247 
1248   assert_committed_below_limit();
1249 }
1250 
1251 void VirtualSpaceList::inc_virtual_space_count() {
1252   assert_lock_strong(SpaceManager::expand_lock());
1253   _virtual_space_count++;
1254 }
1255 void VirtualSpaceList::dec_virtual_space_count() {
1256   assert_lock_strong(SpaceManager::expand_lock());
1257   _virtual_space_count--;
1258 }
1259 
1260 void ChunkManager::remove_chunk(Metachunk* chunk) {
1261   size_t word_size = chunk->word_size();
1262   ChunkIndex index = list_index(word_size);
1263   if (index != HumongousIndex) {
1264     free_chunks(index)->remove_chunk(chunk);
1265   } else {
1266     humongous_dictionary()->remove_chunk(chunk);
1267   }
1268 
1269   // Chunk has been removed from the chunks free list, update counters.
1270   account_for_removed_chunk(chunk);
1271 }
1272 
1273 // Walk the list of VirtualSpaceNodes and delete
1274 // nodes with a 0 container_count.  Remove Metachunks in
1275 // the node from their respective freelists.
1276 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1277   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
1278   assert_lock_strong(SpaceManager::expand_lock());
1279   // Don't use a VirtualSpaceListIterator because this
1280   // list is being changed and a straightforward use of an iterator is not safe.
1281   VirtualSpaceNode* purged_vsl = NULL;
1282   VirtualSpaceNode* prev_vsl = virtual_space_list();
1283   VirtualSpaceNode* next_vsl = prev_vsl;
1284   while (next_vsl != NULL) {
1285     VirtualSpaceNode* vsl = next_vsl;
1286     DEBUG_ONLY(vsl->verify_container_count();)
1287     next_vsl = vsl->next();
1288     // Don't free the current virtual space since it will likely
1289     // be needed soon.
1290     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1291       // Unlink it from the list
1292       if (prev_vsl == vsl) {
1293         // This is the case of the current node being the first node.
1294         assert(vsl == virtual_space_list(), "Expected to be the first node");
1295         set_virtual_space_list(vsl->next());
1296       } else {
1297         prev_vsl->set_next(vsl->next());
1298       }
1299 
1300       vsl->purge(chunk_manager);
1301       dec_reserved_words(vsl->reserved_words());
1302       dec_committed_words(vsl->committed_words());
1303       dec_virtual_space_count();
1304       purged_vsl = vsl;
1305       delete vsl;
1306     } else {
1307       prev_vsl = vsl;
1308     }
1309   }
1310 #ifdef ASSERT
1311   if (purged_vsl != NULL) {
1312     // List should be stable enough to use an iterator here.
1313     VirtualSpaceListIterator iter(virtual_space_list());
1314     while (iter.repeat()) {
1315       VirtualSpaceNode* vsl = iter.get_next();
1316       assert(vsl != purged_vsl, "Purge of vsl failed");
1317     }
1318   }
1319 #endif
1320 }
1321 
1322 
1323 // This function looks at the mmap regions in the metaspace without locking.
 1324 // The chunks are added with store ordering and not deleted except
 1325 // at unloading time during a safepoint.
1326 bool VirtualSpaceList::contains(const void* ptr) {
1327   // List should be stable enough to use an iterator here because removing virtual
1328   // space nodes is only allowed at a safepoint.
1329   VirtualSpaceListIterator iter(virtual_space_list());
1330   while (iter.repeat()) {
1331     VirtualSpaceNode* vsn = iter.get_next();
1332     if (vsn->contains(ptr)) {
1333       return true;
1334     }
1335   }
1336   return false;
1337 }
1338 
1339 void VirtualSpaceList::retire_current_virtual_space() {
1340   assert_lock_strong(SpaceManager::expand_lock());
1341 
1342   VirtualSpaceNode* vsn = current_virtual_space();
1343 
1344   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
1345                                   Metaspace::chunk_manager_metadata();
1346 
1347   vsn->retire(cm);
1348 }
1349 
1350 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
1351   DEBUG_ONLY(verify_container_count();)
1352   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
1353     ChunkIndex index = (ChunkIndex)i;
1354     size_t chunk_size = chunk_manager->size_by_index(index);
1355 
1356     while (free_words_in_vs() >= chunk_size) {
1357       Metachunk* chunk = get_chunk_vs(chunk_size);
1358       assert(chunk != NULL, "allocation should have been successful");
1359 
1360       chunk_manager->return_single_chunk(index, chunk);
1361     }
1362     DEBUG_ONLY(verify_container_count();)
1363   }
1364   assert(free_words_in_vs() == 0, "should be empty now");
1365 }
1366 
1367 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
1368                                    _is_class(false),
1369                                    _virtual_space_list(NULL),
1370                                    _current_virtual_space(NULL),
1371                                    _reserved_words(0),
1372                                    _committed_words(0),
1373                                    _virtual_space_count(0) {
1374   MutexLockerEx cl(SpaceManager::expand_lock(),
1375                    Mutex::_no_safepoint_check_flag);
1376   create_new_virtual_space(word_size);
1377 }
1378 
1379 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
1380                                    _is_class(true),
1381                                    _virtual_space_list(NULL),
1382                                    _current_virtual_space(NULL),
1383                                    _reserved_words(0),
1384                                    _committed_words(0),
1385                                    _virtual_space_count(0) {
1386   MutexLockerEx cl(SpaceManager::expand_lock(),
1387                    Mutex::_no_safepoint_check_flag);
1388   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
1389   bool succeeded = class_entry->initialize();
1390   if (succeeded) {
1391     link_vs(class_entry);
1392   }
1393 }
1394 
1395 size_t VirtualSpaceList::free_bytes() {
1396   return current_virtual_space()->free_words_in_vs() * BytesPerWord;
1397 }
1398 
1399 // Allocate another meta virtual space and add it to the list.
1400 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
1401   assert_lock_strong(SpaceManager::expand_lock());
1402 
1403   if (is_class()) {
1404     assert(false, "We currently don't support more than one VirtualSpace for"
1405                   " the compressed class space. The initialization of the"
1406                   " CCS uses another code path and should not hit this path.");
1407     return false;
1408   }
1409 
1410   if (vs_word_size == 0) {
1411     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
1412     return false;
1413   }
1414 
1415   // Reserve the space
1416   size_t vs_byte_size = vs_word_size * BytesPerWord;
1417   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
1418 
1419   // Allocate the meta virtual space and initialize it.
1420   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
1421   if (!new_entry->initialize()) {
1422     delete new_entry;
1423     return false;
1424   } else {
1425     assert(new_entry->reserved_words() == vs_word_size,
1426         "Reserved memory size differs from requested memory size");
1427     // ensure lock-free iteration sees fully initialized node
1428     OrderAccess::storestore();
1429     link_vs(new_entry);
1430     return true;
1431   }
1432 }
1433 
1434 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1435   if (virtual_space_list() == NULL) {
 1436     set_virtual_space_list(new_entry);
1437   } else {
1438     current_virtual_space()->set_next(new_entry);
1439   }
1440   set_current_virtual_space(new_entry);
1441   inc_reserved_words(new_entry->reserved_words());
1442   inc_committed_words(new_entry->committed_words());
1443   inc_virtual_space_count();
1444 #ifdef ASSERT
1445   new_entry->mangle();
1446 #endif
1447   LogTarget(Trace, gc, metaspace) lt;
1448   if (lt.is_enabled()) {
1449     LogStream ls(lt);
1450     VirtualSpaceNode* vsl = current_virtual_space();
1451     ResourceMark rm;
1452     vsl->print_on(&ls);
1453   }
1454 }
1455 
1456 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1457                                       size_t min_words,
1458                                       size_t preferred_words) {
1459   size_t before = node->committed_words();
1460 
1461   bool result = node->expand_by(min_words, preferred_words);
1462 
1463   size_t after = node->committed_words();
1464 
1465   // after and before can be the same if the memory was pre-committed.
1466   assert(after >= before, "Inconsistency");
1467   inc_committed_words(after - before);
1468 
1469   return result;
1470 }
1471 
1472 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
1473   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
1474   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
1475   assert(min_words <= preferred_words, "Invalid arguments");
1476 
1477   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
1478     return false;
1479   }
1480 
1481   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
1482   if (allowed_expansion_words < min_words) {
1483     return false;
1484   }
1485 
1486   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
1487 
1488   // Commit more memory from the current virtual space.
1489   bool vs_expanded = expand_node_by(current_virtual_space(),
1490                                     min_words,
1491                                     max_expansion_words);
1492   if (vs_expanded) {
1493     return true;
1494   }
1495   retire_current_virtual_space();
1496 
1497   // Get another virtual space.
1498   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
1499   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
1500 
1501   if (create_new_virtual_space(grow_vs_words)) {
1502     if (current_virtual_space()->is_pre_committed()) {
1503       // The memory was pre-committed, so we are done here.
1504       assert(min_words <= current_virtual_space()->committed_words(),
1505         "The new VirtualSpace was pre-committed, so it "
1506           "should be large enough to fit the alloc request.");
1507       return true;
1508     }
1509 
1510     return expand_node_by(current_virtual_space(),
1511                           min_words,
1512                           max_expansion_words);
1513   }
1514 
1515   return false;
1516 }
1517 
1518 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
1519 
1520   // Allocate a chunk out of the current virtual space.
1521   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
1522 
1523   if (next != NULL) {
1524     return next;
1525   }
1526 
1527   // The expand amount is currently only determined by the requested sizes
1528   // and not how much committed memory is left in the current virtual space.
1529 
1530   size_t min_word_size       = align_up(chunk_word_size,              Metaspace::commit_alignment_words());
1531   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
1532   if (min_word_size >= preferred_word_size) {
1533     // Can happen when humongous chunks are allocated.
1534     preferred_word_size = min_word_size;
1535   }
1536 
1537   bool expanded = expand_by(min_word_size, preferred_word_size);
1538   if (expanded) {
1539     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
1540     assert(next != NULL, "The allocation was expected to succeed after the expansion");
1541   }
1542 
1543   return next;
1544 }
1545 
1546 void VirtualSpaceList::print_on(outputStream* st) const {
1547   VirtualSpaceListIterator iter(virtual_space_list());
1548   while (iter.repeat()) {
1549     VirtualSpaceNode* node = iter.get_next();
1550     node->print_on(st);
1551   }
1552 }
1553 
1554 void VirtualSpaceList::print_map(outputStream* st) const {
1555   VirtualSpaceNode* list = virtual_space_list();
1556   VirtualSpaceListIterator iter(list);
1557   unsigned i = 0;
1558   while (iter.repeat()) {
1559     st->print_cr("Node %u:", i);
1560     VirtualSpaceNode* node = iter.get_next();
1561     node->print_map(st, this->is_class());
1562     i ++;
1563   }
1564 }
1565 
1566 // MetaspaceGC methods
1567 
1568 // VM_CollectForMetadataAllocation is the vm operation used to GC.
1569 // Within the VM operation after the GC the attempt to allocate the metadata
1570 // should succeed.  If the GC did not free enough space for the metaspace
1571 // allocation, the HWM is increased so that another virtual space will be
1572 // allocated for the metadata.  With the permanent generation, the increase in
1573 // the perm gen size was bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion.
1574 // The metaspace policy uses those as the small and large steps for the HWM.
1575 //
1576 // After the GC the compute_new_size() for MetaspaceGC is called to
1577 // resize the capacity of the metaspaces.  The current implementation
1578 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
1579 // to resize the Java heap by some GCs.  New flags can be implemented
1580 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
1581 // free space is desirable in the metaspace capacity to decide how much
1582 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
1583 // free space is desirable in the metaspace capacity before decreasing
1584 // the HWM.
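     //
     // For illustration only (the ratios and committed size below are assumed
     // example values, not read from this code): with MinMetaspaceFreeRatio=40,
     // MaxMetaspaceFreeRatio=70 and 60M committed after a GC,
     //   minimum_desired_capacity = 60M / (1 - 0.40) = 100M
     //   maximum_desired_capacity = 60M / (1 - 0.70) = 200M
     // The HWM is raised if it is below the first value and considered for
     // (damped) shrinking if it is above the second; both values are clamped
     // to at least MetaspaceSize in compute_new_size().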
1585 
1586 // Calculate the amount to increase the high water mark (HWM).
1587 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1588 // another expansion is not requested too soon.  If that is not
1589 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
1590 // If that is still not enough, expand by the size of the allocation
1591 // plus some.
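     //
     // A worked example (the flag values are assumptions for illustration, not
     // taken from this code): with MinMetaspaceExpansion=256K,
     // MaxMetaspaceExpansion=4M and a 64K commit alignment,
     //   bytes = 10K  ->  delta = 256K        (aligned request fits in the minimum)
     //   bytes = 1M   ->  delta = 4M          (larger than the minimum step)
     //   bytes = 8M   ->  delta = 8M + 256K   (larger than both steps: the request
     //                                         itself plus the minimum)
     // Assuming the two flags are themselves commit-aligned, the result stays
     // aligned to Metaspace::commit_alignment(), which the assert below checks.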
1592 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
1593   size_t min_delta = MinMetaspaceExpansion;
1594   size_t max_delta = MaxMetaspaceExpansion;
1595   size_t delta = align_up(bytes, Metaspace::commit_alignment());
1596 
1597   if (delta <= min_delta) {
1598     delta = min_delta;
1599   } else if (delta <= max_delta) {
1600     // Don't want to hit the high water mark on the next
1601     // allocation so make the delta greater than just enough
1602     // for this allocation.
1603     delta = max_delta;
1604   } else {
1605     // This allocation is large but the next ones are probably not
1606     // so increase by the minimum.
1607     delta = delta + min_delta;
1608   }
1609 
1610   assert_is_aligned(delta, Metaspace::commit_alignment());
1611 
1612   return delta;
1613 }
1614 
1615 size_t MetaspaceGC::capacity_until_GC() {
1616   size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
1617   assert(value >= MetaspaceSize, "Not initialized properly?");
1618   return value;
1619 }
1620 
1621 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
1622   assert_is_aligned(v, Metaspace::commit_alignment());
1623 
1624   intptr_t capacity_until_GC = _capacity_until_GC;
1625   intptr_t new_value = capacity_until_GC + v;
1626 
1627   if (new_value < capacity_until_GC) {
1628     // The addition wrapped around, set new_value to aligned max value.
1629     new_value = align_down(max_uintx, Metaspace::commit_alignment());
1630   }
1631 
1632   intptr_t expected = capacity_until_GC;
1633   intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);
1634 
1635   if (expected != actual) {
1636     return false;
1637   }
1638 
1639   if (new_cap_until_GC != NULL) {
1640     *new_cap_until_GC = new_value;
1641   }
1642   if (old_cap_until_GC != NULL) {
1643     *old_cap_until_GC = capacity_until_GC;
1644   }
1645   return true;
1646 }
1647 
1648 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
1649   assert_is_aligned(v, Metaspace::commit_alignment());
1650 
1651   return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
1652 }
1653 
1654 void MetaspaceGC::initialize() {
1655   // Set the high-water mark to MaxMetaspaceSize during VM initialization since
1656   // we can't do a GC during initialization.
1657   _capacity_until_GC = MaxMetaspaceSize;
1658 }
1659 
1660 void MetaspaceGC::post_initialize() {
1661   // Reset the high-water mark once the VM initialization is done.
1662   _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
1663 }
1664 
1665 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
1666   // Check if the compressed class space is full.
1667   if (is_class && Metaspace::using_class_space()) {
1668     size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
1669     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
1670       return false;
1671     }
1672   }
1673 
1674   // Check if the user has imposed a limit on the metaspace memory.
1675   size_t committed_bytes = MetaspaceAux::committed_bytes();
1676   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
1677     return false;
1678   }
1679 
1680   return true;
1681 }
1682 
1683 size_t MetaspaceGC::allowed_expansion() {
1684   size_t committed_bytes = MetaspaceAux::committed_bytes();
1685   size_t capacity_until_gc = capacity_until_GC();
1686 
1687   assert(capacity_until_gc >= committed_bytes,
1688          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
1689          capacity_until_gc, committed_bytes);
1690 
1691   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
1692   size_t left_until_GC = capacity_until_gc - committed_bytes;
1693   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
1694 
1695   return left_to_commit / BytesPerWord;
1696 }
1697 
1698 void MetaspaceGC::compute_new_size() {
1699   assert(_shrink_factor <= 100, "invalid shrink factor");
1700   uint current_shrink_factor = _shrink_factor;
1701   _shrink_factor = 0;
1702 
1703   // Using committed_bytes() for used_after_gc is an overestimation, since the
1704   // chunk free lists are included in committed_bytes() and the memory in an
1705   // un-fragmented chunk free list is available for future allocations.
1706   // However, if the chunk free lists become fragmented, then the memory may
1707   // not be available for future allocations and the memory is therefore "in use".
1708   // Including the chunk free lists in the definition of "in use" is therefore
1709   // necessary. Not including the chunk free lists can cause capacity_until_GC to
1710   // shrink below committed_bytes() and this has caused serious bugs in the past.
1711   const size_t used_after_gc = MetaspaceAux::committed_bytes();
1712   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1713 
1714   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1715   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1716 
1717   const double min_tmp = used_after_gc / maximum_used_percentage;
1718   size_t minimum_desired_capacity =
1719     (size_t)MIN2(min_tmp, double(max_uintx));
1720   // Don't shrink less than the initial generation size
1721   minimum_desired_capacity = MAX2(minimum_desired_capacity,
1722                                   MetaspaceSize);
1723 
1724   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
1725   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
1726                            minimum_free_percentage, maximum_used_percentage);
1727   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
1728 
1729 
1730   size_t shrink_bytes = 0;
1731   if (capacity_until_GC < minimum_desired_capacity) {
1732     // If we have less capacity below the metaspace HWM, then
1733     // increment the HWM.
1734     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1735     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
1736     // Don't expand unless it's significant
1737     if (expand_bytes >= MinMetaspaceExpansion) {
1738       size_t new_capacity_until_GC = 0;
1739       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1740       assert(succeeded, "Should always successfully increment HWM when at safepoint");
1741 
1742       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1743                                                new_capacity_until_GC,
1744                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
1745       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
1746                                minimum_desired_capacity / (double) K,
1747                                expand_bytes / (double) K,
1748                                MinMetaspaceExpansion / (double) K,
1749                                new_capacity_until_GC / (double) K);
1750     }
1751     return;
1752   }
1753 
1754   // No expansion, now see if we want to shrink
1755   // We would never want to shrink more than this
1756   assert(capacity_until_GC >= minimum_desired_capacity,
1757          SIZE_FORMAT " >= " SIZE_FORMAT,
1758          capacity_until_GC, minimum_desired_capacity);
1759   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1760 
1761   // Should shrinking be considered?
1762   if (MaxMetaspaceFreeRatio < 100) {
1763     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1764     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1765     const double max_tmp = used_after_gc / minimum_used_percentage;
1766     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1767     maximum_desired_capacity = MAX2(maximum_desired_capacity,
1768                                     MetaspaceSize);
1769     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
1770                              maximum_free_percentage, minimum_used_percentage);
1771     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
1772                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
1773 
1774     assert(minimum_desired_capacity <= maximum_desired_capacity,
1775            "sanity check");
1776 
1777     if (capacity_until_GC > maximum_desired_capacity) {
1778       // Capacity too large, compute shrinking size
1779       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1780       // We don't want to shrink all the way back to the initial size if people
1781       // call System.gc(), because some programs do that between "phases" and then
1782       // we'd just have to grow the metaspace up again for the next phase.  So we
1783       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1784       // on the third call, and 100% by the fourth call.  But if we recompute
1785       // the size without shrinking, it goes back to 0%.
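           // Illustrative example (numbers chosen only to show the damping): with
           // a 40M excess over maximum_desired_capacity, successive calls that keep
           // wanting to shrink would release roughly 0M, 4M, 16M and then the
           // remaining excess (0%, 10%, 40%, 100% of the excess computed at each
           // call), each amount aligned down to the commit alignment.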
1786       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1787 
1788       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
1789 
1790       assert(shrink_bytes <= max_shrink_bytes,
1791              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1792              shrink_bytes, max_shrink_bytes);
1793       if (current_shrink_factor == 0) {
1794         _shrink_factor = 10;
1795       } else {
1796         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1797       }
1798       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
1799                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
1800       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
1801                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
1802     }
1803   }
1804 
1805   // Don't shrink unless it's significant
1806   if (shrink_bytes >= MinMetaspaceExpansion &&
1807       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1808     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1809     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1810                                              new_capacity_until_GC,
1811                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
1812   }
1813 }
1814 
1815 // Metadebug methods
1816 
1817 void Metadebug::init_allocation_fail_alot_count() {
1818   if (MetadataAllocationFailALot) {
1819     _allocation_fail_alot_count =
1820       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1821   }
1822 }
1823 
1824 #ifdef ASSERT
1825 bool Metadebug::test_metadata_failure() {
1826   if (MetadataAllocationFailALot &&
1827       Threads::is_vm_complete()) {
1828     if (_allocation_fail_alot_count > 0) {
1829       _allocation_fail_alot_count--;
1830     } else {
1831       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
1832       init_allocation_fail_alot_count();
1833       return true;
1834     }
1835   }
1836   return false;
1837 }
1838 #endif
1839 
1840 // ChunkManager methods
1841 size_t ChunkManager::free_chunks_total_words() {
1842   return _free_chunks_total;
1843 }
1844 
1845 size_t ChunkManager::free_chunks_total_bytes() {
1846   return free_chunks_total_words() * BytesPerWord;
1847 }
1848 
1849 // Update internal accounting after a chunk was added
1850 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
1851   assert_lock_strong(SpaceManager::expand_lock());
1852   _free_chunks_count ++;
1853   _free_chunks_total += c->word_size();
1854 }
1855 
1856 // Update internal accounting after a chunk was removed
1857 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
1858   assert_lock_strong(SpaceManager::expand_lock());
1859   assert(_free_chunks_count >= 1,
1860     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
1861   assert(_free_chunks_total >= c->word_size(),
1862     "ChunkManager::_free_chunks_total: about to go negative "
1863      "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
1864   _free_chunks_count --;
1865   _free_chunks_total -= c->word_size();
1866 }
1867 
1868 size_t ChunkManager::free_chunks_count() {
1869 #ifdef ASSERT
1870   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1871     MutexLockerEx cl(SpaceManager::expand_lock(),
1872                      Mutex::_no_safepoint_check_flag);
1873     // This lock is only needed in debug because the verification
1874     // of the _free_chunks_totals walks the list of free chunks
1875     slow_locked_verify_free_chunks_count();
1876   }
1877 #endif
1878   return _free_chunks_count;
1879 }
1880 
1881 ChunkIndex ChunkManager::list_index(size_t size) {
1882   if (size_by_index(SpecializedIndex) == size) {
1883     return SpecializedIndex;
1884   }
1885   if (size_by_index(SmallIndex) == size) {
1886     return SmallIndex;
1887   }
1888   const size_t med_size = size_by_index(MediumIndex);
1889   if (med_size == size) {
1890     return MediumIndex;
1891   }
1892 
1893   assert(size > med_size, "Not a humongous chunk");
1894   return HumongousIndex;
1895 }
1896 
1897 size_t ChunkManager::size_by_index(ChunkIndex index) const {
1898   index_bounds_check(index);
1899   assert(index != HumongousIndex, "Do not call for humongous chunks.");
1900   return _free_chunks[index].size();
1901 }
1902 
1903 void ChunkManager::locked_verify_free_chunks_total() {
1904   assert_lock_strong(SpaceManager::expand_lock());
1905   assert(sum_free_chunks() == _free_chunks_total,
1906          "_free_chunks_total " SIZE_FORMAT " is not the"
1907          " same as sum " SIZE_FORMAT, _free_chunks_total,
1908          sum_free_chunks());
1909 }
1910 
1911 void ChunkManager::verify_free_chunks_total() {
1912   MutexLockerEx cl(SpaceManager::expand_lock(),
1913                      Mutex::_no_safepoint_check_flag);
1914   locked_verify_free_chunks_total();
1915 }
1916 
1917 void ChunkManager::locked_verify_free_chunks_count() {
1918   assert_lock_strong(SpaceManager::expand_lock());
1919   assert(sum_free_chunks_count() == _free_chunks_count,
1920          "_free_chunks_count " SIZE_FORMAT " is not the"
1921          " same as sum " SIZE_FORMAT, _free_chunks_count,
1922          sum_free_chunks_count());
1923 }
1924 
1925 void ChunkManager::verify_free_chunks_count() {
1926 #ifdef ASSERT
1927   MutexLockerEx cl(SpaceManager::expand_lock(),
1928                      Mutex::_no_safepoint_check_flag);
1929   locked_verify_free_chunks_count();
1930 #endif
1931 }
1932 
1933 void ChunkManager::verify() {
1934   MutexLockerEx cl(SpaceManager::expand_lock(),
1935                      Mutex::_no_safepoint_check_flag);
1936   locked_verify();
1937 }
1938 
1939 void ChunkManager::locked_verify() {
1940   locked_verify_free_chunks_count();
1941   locked_verify_free_chunks_total();
1942 }
1943 
1944 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1945   assert_lock_strong(SpaceManager::expand_lock());
1946   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1947                 _free_chunks_total, _free_chunks_count);
1948 }
1949 
1950 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1951   assert_lock_strong(SpaceManager::expand_lock());
1952   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1953                 sum_free_chunks(), sum_free_chunks_count());
1954 }
1955 
1956 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1957   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
1958          "Bad index: %d", (int)index);
1959 
1960   return &_free_chunks[index];
1961 }
1962 
1963 // These methods that sum the free chunk lists are used in printing
1964 // methods that are used in product builds.
1965 size_t ChunkManager::sum_free_chunks() {
1966   assert_lock_strong(SpaceManager::expand_lock());
1967   size_t result = 0;
1968   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1969     ChunkList* list = free_chunks(i);
1970 
1971     if (list == NULL) {
1972       continue;
1973     }
1974 
1975     result = result + list->count() * list->size();
1976   }
1977   result = result + humongous_dictionary()->total_size();
1978   return result;
1979 }
1980 
1981 size_t ChunkManager::sum_free_chunks_count() {
1982   assert_lock_strong(SpaceManager::expand_lock());
1983   size_t count = 0;
1984   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1985     ChunkList* list = free_chunks(i);
1986     if (list == NULL) {
1987       continue;
1988     }
1989     count = count + list->count();
1990   }
1991   count = count + humongous_dictionary()->total_free_blocks();
1992   return count;
1993 }
1994 
1995 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1996   ChunkIndex index = list_index(word_size);
1997   assert(index < HumongousIndex, "No humongous list");
1998   return free_chunks(index);
1999 }
2000 
2001 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
2002   assert_lock_strong(SpaceManager::expand_lock());
2003 
2004   slow_locked_verify();
2005 
2006   Metachunk* chunk = NULL;
2007   if (list_index(word_size) != HumongousIndex) {
2008     ChunkList* free_list = find_free_chunks_list(word_size);
2009     assert(free_list != NULL, "Sanity check");
2010 
2011     chunk = free_list->head();
2012 
2013     if (chunk == NULL) {
2014       return NULL;
2015     }
2016 
2017     // Remove the chunk as the head of the list.
2018     free_list->remove_chunk(chunk);
2019 
2020     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
2021                                        p2i(free_list), p2i(chunk), chunk->word_size());
2022   } else {
2023     chunk = humongous_dictionary()->get_chunk(word_size);
2024 
2025     if (chunk == NULL) {
2026       return NULL;
2027     }
2028 
2029     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
2030                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
2031   }
2032 
2033   // Chunk has been removed from the chunk manager; update counters.
2034   account_for_removed_chunk(chunk);
2035 
2036   // Remove it from the links to this freelist
2037   chunk->set_next(NULL);
2038   chunk->set_prev(NULL);
2039 
2040   // Chunk is no longer on any freelist. Setting this to false makes
2041   // container_count_slow() work.
2042   chunk->set_is_tagged_free(false);
2043   chunk->container()->inc_container_count();
2044 
2045   slow_locked_verify();
2046   return chunk;
2047 }
2048 
2049 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
2050   assert_lock_strong(SpaceManager::expand_lock());
2051   slow_locked_verify();
2052 
2053   // Take from the beginning of the list
2054   Metachunk* chunk = free_chunks_get(word_size);
2055   if (chunk == NULL) {
2056     return NULL;
2057   }
2058 
2059   assert((word_size <= chunk->word_size()) ||
2060          (list_index(chunk->word_size()) == HumongousIndex),
2061          "Non-humongous variable sized chunk");
2062   LogTarget(Debug, gc, metaspace, freelist) lt;
2063   if (lt.is_enabled()) {
2064     size_t list_count;
2065     if (list_index(word_size) < HumongousIndex) {
2066       ChunkList* list = find_free_chunks_list(word_size);
2067       list_count = list->count();
2068     } else {
2069       list_count = humongous_dictionary()->total_count();
2070     }
2071     LogStream ls(lt);
2072     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
2073              p2i(this), p2i(chunk), chunk->word_size(), list_count);
2074     ResourceMark rm;
2075     locked_print_free_chunks(&ls);
2076   }
2077 
2078   return chunk;
2079 }
2080 
2081 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
2082   assert_lock_strong(SpaceManager::expand_lock());
2083   assert(chunk != NULL, "Expected chunk.");
2084   assert(chunk->container() != NULL, "Container should have been set.");
2085   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
2086   index_bounds_check(index);
2087 
2088   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
2089   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
2090   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
2091   NOT_PRODUCT(chunk->mangle(badMetaWordVal);)
2092 
2093   if (index != HumongousIndex) {
2094     // Return non-humongous chunk to freelist.
2095     ChunkList* list = free_chunks(index);
2096     assert(list->size() == chunk->word_size(), "Wrong chunk type.");
2097     list->return_chunk_at_head(chunk);
2098     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
2099         chunk_size_name(index), p2i(chunk));
2100   } else {
2101     // Return humongous chunk to dictionary.
2102     assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
2103     assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
2104            "Humongous chunk has wrong alignment.");
2105     _humongous_dictionary.return_chunk(chunk);
2106     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
2107         chunk_size_name(index), p2i(chunk), chunk->word_size());
2108   }
2109   chunk->container()->dec_container_count();
2110   chunk->set_is_tagged_free(true);
2111 
2112   // Chunk has been added; update counters.
2113   account_for_added_chunk(chunk);
2114 
2115 }
2116 
2117 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
2118   index_bounds_check(index);
2119   if (chunks == NULL) {
2120     return;
2121   }
2122   LogTarget(Trace, gc, metaspace, freelist) log;
2123   if (log.is_enabled()) { // tracing
2124     log.print("returning list of %s chunks...", chunk_size_name(index));
2125   }
2126   unsigned num_chunks_returned = 0;
2127   size_t size_chunks_returned = 0;
2128   Metachunk* cur = chunks;
2129   while (cur != NULL) {
2130     // Capture the next link before it is changed
2131     // by the call to return_chunk_at_head();
2132     Metachunk* next = cur->next();
2133     if (log.is_enabled()) { // tracing
2134       num_chunks_returned ++;
2135       size_chunks_returned += cur->word_size();
2136     }
2137     return_single_chunk(index, cur);
2138     cur = next;
2139   }
2140   if (log.is_enabled()) { // tracing
2141     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
2142         num_chunks_returned, chunk_size_name(index), size_chunks_returned);
2143     if (index != HumongousIndex) {
2144       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
2145     } else {
2146       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
2147     }
2148   }
2149 }
2150 
2151 void ChunkManager::print_on(outputStream* out) const {
2152   _humongous_dictionary.report_statistics(out);
2153 }
2154 
2155 void ChunkManager::locked_get_statistics(ChunkManagerStatistics* stat) const {
2156   assert_lock_strong(SpaceManager::expand_lock());
2157   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2158     stat->num_by_type[i] = num_free_chunks(i);
2159     stat->single_size_by_type[i] = size_by_index(i);
2160     stat->total_size_by_type[i] = size_free_chunks_in_bytes(i);
2161   }
2162   stat->num_humongous_chunks = num_free_chunks(HumongousIndex);
2163   stat->total_size_humongous_chunks = size_free_chunks_in_bytes(HumongousIndex);
2164 }
2165 
2166 void ChunkManager::get_statistics(ChunkManagerStatistics* stat) const {
2167   MutexLockerEx cl(SpaceManager::expand_lock(),
2168                    Mutex::_no_safepoint_check_flag);
2169   locked_get_statistics(stat);
2170 }
2171 
2172 void ChunkManager::print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale) {
2173   size_t total = 0;
2174   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
2175 
2176   const char* unit = scale_unit(scale);
2177   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2178     out->print("  " SIZE_FORMAT " %s (" SIZE_FORMAT " bytes) chunks, total ",
2179                    stat->num_by_type[i], chunk_size_name(i),
2180                    stat->single_size_by_type[i]);
2181     if (scale == 1) {
2182       out->print_cr(SIZE_FORMAT " bytes", stat->total_size_by_type[i]);
2183     } else {
2184       out->print_cr("%.2f%s", (float)stat->total_size_by_type[i] / scale, unit);
2185     }
2186 
2187     total += stat->total_size_by_type[i];
2188   }
2189 
2190 
2191   total += stat->total_size_humongous_chunks;
2192 
2193   if (scale == 1) {
2194     out->print_cr("  " SIZE_FORMAT " humongous chunks, total " SIZE_FORMAT " bytes",
2195                   stat->num_humongous_chunks, stat->total_size_humongous_chunks);
2196 
2197     out->print_cr("  total size: " SIZE_FORMAT " bytes.", total);
2198   } else {
2199     out->print_cr("  " SIZE_FORMAT " humongous chunks, total %.2f%s",
2200                   stat->num_humongous_chunks,
2201                   (float)stat->total_size_humongous_chunks / scale, unit);
2202 
2203     out->print_cr("  total size: %.2f%s.", (float)total / scale, unit);
2204   }
2205 
2206 }
2207 
2208 void ChunkManager::print_all_chunkmanagers(outputStream* out, size_t scale) {
2209   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
2210 
2211   // Note: keep lock protection only to retrieving statistics; keep printing
2212   // out of lock protection
2213   ChunkManagerStatistics stat;
2214   out->print_cr("Chunkmanager (non-class):");
2215   const ChunkManager* const non_class_cm = Metaspace::chunk_manager_metadata();
2216   if (non_class_cm != NULL) {
2217     non_class_cm->get_statistics(&stat);
2218     ChunkManager::print_statistics(&stat, out, scale);
2219   } else {
2220     out->print_cr("unavailable.");
2221   }
2222   out->print_cr("Chunkmanager (class):");
2223   const ChunkManager* const class_cm = Metaspace::chunk_manager_class();
2224   if (class_cm != NULL) {
2225     class_cm->get_statistics(&stat);
2226     ChunkManager::print_statistics(&stat, out, scale);
2227   } else {
2228     out->print_cr("unavailable.");
2229   }
2230 }
2231 
2232 // SpaceManager methods
2233 
2234 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
2235   size_t chunk_sizes[] = {
2236       specialized_chunk_size(is_class_space),
2237       small_chunk_size(is_class_space),
2238       medium_chunk_size(is_class_space)
2239   };
2240 
2241   // Adjust up to one of the fixed chunk sizes ...
2242   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
2243     if (requested <= chunk_sizes[i]) {
2244       return chunk_sizes[i];
2245     }
2246   }
2247 
2248   // ... or return the size as a humongous chunk.
2249   return requested;
2250 }
2251 
2252 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
2253   return adjust_initial_chunk_size(requested, is_class());
2254 }
2255 
2256 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
2257   size_t requested;
2258 
2259   if (is_class()) {
2260     switch (type) {
2261     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
2262     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
2263     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
2264     default:                                 requested = ClassSmallChunk; break;
2265     }
2266   } else {
2267     switch (type) {
2268     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
2269     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
2270     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
2271     default:                                 requested = SmallChunk; break;
2272     }
2273   }
2274 
2275   // Adjust to one of the fixed chunk sizes (unless humongous)
2276   const size_t adjusted = adjust_initial_chunk_size(requested);
2277 
2278   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
2279          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
2280 
2281   return adjusted;
2282 }
2283 
2284 size_t SpaceManager::sum_free_in_chunks_in_use() const {
2285   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2286   size_t free = 0;
2287   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2288     Metachunk* chunk = chunks_in_use(i);
2289     while (chunk != NULL) {
2290       free += chunk->free_word_size();
2291       chunk = chunk->next();
2292     }
2293   }
2294   return free;
2295 }
2296 
2297 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
2298   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2299   size_t result = 0;
2300   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2301    result += sum_waste_in_chunks_in_use(i);
2302   }
2303 
2304   return result;
2305 }
2306 
2307 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
2308   size_t result = 0;
2309   Metachunk* chunk = chunks_in_use(index);
2310   // Count the free space in all the chunks except the
2311   // current chunk, from which allocations are still being done.
2312   while (chunk != NULL) {
2313     if (chunk != current_chunk()) {
2314       result += chunk->free_word_size();
2315     }
2316     chunk = chunk->next();
2317   }
2318   return result;
2319 }
2320 
2321 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
2322   // For CMS use "allocated_chunks_words()", which does not need the
2323   // Metaspace lock.  For the other collectors sum over the in-use chunk
2324   // lists.  Keeping both methods provides a check that
2325   // "allocated_chunks_words()" is correct: sum_capacity_in_chunks_in_use()
2326   // is too expensive to use in the product, so allocated_chunks_words()
2327   // should be used, but we can still verify that it returns the same
2328   // value as sum_capacity_in_chunks_in_use(), which is the definitive
2329   // answer.
2330   if (UseConcMarkSweepGC) {
2331     return allocated_chunks_words();
2332   } else {
2333     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2334     size_t sum = 0;
2335     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2336       Metachunk* chunk = chunks_in_use(i);
2337       while (chunk != NULL) {
2338         sum += chunk->word_size();
2339         chunk = chunk->next();
2340       }
2341     }
2342     return sum;
2343   }
2344 }
2345 
2346 size_t SpaceManager::sum_count_in_chunks_in_use() {
2347   size_t count = 0;
2348   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2349     count = count + sum_count_in_chunks_in_use(i);
2350   }
2351 
2352   return count;
2353 }
2354 
2355 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
2356   size_t count = 0;
2357   Metachunk* chunk = chunks_in_use(i);
2358   while (chunk != NULL) {
2359     count++;
2360     chunk = chunk->next();
2361   }
2362   return count;
2363 }
2364 
2365 
2366 size_t SpaceManager::sum_used_in_chunks_in_use() const {
2367   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2368   size_t used = 0;
2369   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2370     Metachunk* chunk = chunks_in_use(i);
2371     while (chunk != NULL) {
2372       used += chunk->used_word_size();
2373       chunk = chunk->next();
2374     }
2375   }
2376   return used;
2377 }
2378 
2379 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
2380 
2381   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2382     Metachunk* chunk = chunks_in_use(i);
2383     st->print("SpaceManager: %s " PTR_FORMAT,
2384                  chunk_size_name(i), p2i(chunk));
2385     if (chunk != NULL) {
2386       st->print_cr(" free " SIZE_FORMAT,
2387                    chunk->free_word_size());
2388     } else {
2389       st->cr();
2390     }
2391   }
2392 
2393   chunk_manager()->locked_print_free_chunks(st);
2394   chunk_manager()->locked_print_sum_free_chunks(st);
2395 }
2396 
2397 size_t SpaceManager::calc_chunk_size(size_t word_size) {
2398 
2399   // Decide between a small chunk and a medium chunk.  Up to
2400   // _small_chunk_limit small chunks can be allocated.
2401   // After that a medium chunk is preferred.
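       // For illustration (the limit value is an assumption, not read from this
       // code): with _small_chunk_limit == 4, the first requests are satisfied
       // from small chunks; once four small chunks are in use, once a medium
       // chunk already exists, or once word_size + Metachunk::overhead() no
       // longer fits in a small chunk, a medium chunk is handed out instead.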
2402   size_t chunk_word_size;
2403   if (chunks_in_use(MediumIndex) == NULL &&
2404       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2405     chunk_word_size = (size_t) small_chunk_size();
2406     if (word_size + Metachunk::overhead() > small_chunk_size()) {
2407       chunk_word_size = medium_chunk_size();
2408     }
2409   } else {
2410     chunk_word_size = medium_chunk_size();
2411   }
2412 
2413   // Might still need a humongous chunk.  Enforce
2414   // humongous allocations sizes to be aligned up to
2415   // the smallest chunk size.
2416   size_t if_humongous_sized_chunk =
2417     align_up(word_size + Metachunk::overhead(),
2418                   smallest_chunk_size());
2419   chunk_word_size =
2420     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2421 
2422   assert(!SpaceManager::is_humongous(word_size) ||
2423          chunk_word_size == if_humongous_sized_chunk,
2424          "Size calculation is wrong, word_size " SIZE_FORMAT
2425          " chunk_word_size " SIZE_FORMAT,
2426          word_size, chunk_word_size);
2427   Log(gc, metaspace, alloc) log;
2428   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
2429     log.debug("Metadata humongous allocation:");
2430     log.debug("  word_size " PTR_FORMAT, word_size);
2431     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
2432     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
2433   }
2434   return chunk_word_size;
2435 }
2436 
2437 void SpaceManager::track_metaspace_memory_usage() {
2438   if (is_init_completed()) {
2439     if (is_class()) {
2440       MemoryService::track_compressed_class_memory_usage();
2441     }
2442     MemoryService::track_metaspace_memory_usage();
2443   }
2444 }
2445 
2446 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2447   assert(vs_list()->current_virtual_space() != NULL,
2448          "Should have been set");
2449   assert(current_chunk() == NULL ||
2450          current_chunk()->allocate(word_size) == NULL,
2451          "Don't need to expand");
2452   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2453 
2454   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2455     size_t words_left = 0;
2456     size_t words_used = 0;
2457     if (current_chunk() != NULL) {
2458       words_left = current_chunk()->free_word_size();
2459       words_used = current_chunk()->used_word_size();
2460     }
2461     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
2462                                        word_size, words_used, words_left);
2463   }
2464 
2465   // Get another chunk
2466   size_t chunk_word_size = calc_chunk_size(word_size);
2467   Metachunk* next = get_new_chunk(chunk_word_size);
2468 
2469   MetaWord* mem = NULL;
2470 
2471   // If a chunk was available, add it to the in-use chunk list
2472   // and do an allocation from it.
2473   if (next != NULL) {
2474     // Add to this manager's list of chunks in use.
2475     add_chunk(next, false);
2476     mem = next->allocate(word_size);
2477   }
2478 
2479   // Track metaspace memory usage statistic.
2480   track_metaspace_memory_usage();
2481 
2482   return mem;
2483 }
2484 
2485 void SpaceManager::print_on(outputStream* st) const {
2486 
2487   for (ChunkIndex i = ZeroIndex;
2488        i < NumberOfInUseLists ;
2489        i = next_chunk_index(i) ) {
2490     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
2491                  p2i(chunks_in_use(i)),
2492                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2493   }
2494   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2495                " Humongous " SIZE_FORMAT,
2496                sum_waste_in_chunks_in_use(SmallIndex),
2497                sum_waste_in_chunks_in_use(MediumIndex),
2498                sum_waste_in_chunks_in_use(HumongousIndex));
2499   // block free lists
2500   if (block_freelists() != NULL) {
2501     st->print_cr("total in block free lists " SIZE_FORMAT,
2502       block_freelists()->total_size());
2503   }
2504 }
2505 
2506 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2507                            Mutex* lock) :
2508   _mdtype(mdtype),
2509   _allocated_blocks_words(0),
2510   _allocated_chunks_words(0),
2511   _allocated_chunks_count(0),
2512   _block_freelists(NULL),
2513   _lock(lock)
2514 {
2515   initialize();
2516 }
2517 
2518 void SpaceManager::inc_size_metrics(size_t words) {
2519   assert_lock_strong(SpaceManager::expand_lock());
2520   // Total of allocated Metachunks and allocated Metachunks count
2521   // for each SpaceManager
2522   _allocated_chunks_words = _allocated_chunks_words + words;
2523   _allocated_chunks_count++;
2524   // Global total of capacity in allocated Metachunks
2525   MetaspaceAux::inc_capacity(mdtype(), words);
2526   // Global total of allocated Metablocks.
2527   // used_words_slow() includes the overhead in each
2528   // Metachunk so include it in the used when the
2529   // Metachunk is first added (so only added once per
2530   // Metachunk).
2531   MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2532 }
2533 
2534 void SpaceManager::inc_used_metrics(size_t words) {
2535   // Add to the per SpaceManager total
2536   Atomic::add(words, &_allocated_blocks_words);
2537   // Add to the global total
2538   MetaspaceAux::inc_used(mdtype(), words);
2539 }
2540 
2541 void SpaceManager::dec_total_from_size_metrics() {
2542   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2543   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2544   // Also deduct the overhead per Metachunk
2545   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2546 }
2547 
2548 void SpaceManager::initialize() {
2549   Metadebug::init_allocation_fail_alot_count();
2550   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2551     _chunks_in_use[i] = NULL;
2552   }
2553   _current_chunk = NULL;
2554   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
2555 }
2556 
2557 SpaceManager::~SpaceManager() {
2558   // This call takes this->_lock, which can't be done while holding expand_lock().
2559   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2560          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2561          " allocated_chunks_words() " SIZE_FORMAT,
2562          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2563 
2564   MutexLockerEx fcl(SpaceManager::expand_lock(),
2565                     Mutex::_no_safepoint_check_flag);
2566 
2567   chunk_manager()->slow_locked_verify();
2568 
2569   dec_total_from_size_metrics();
2570 
2571   Log(gc, metaspace, freelist) log;
2572   if (log.is_trace()) {
2573     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
2574     ResourceMark rm;
2575     LogStream ls(log.trace());
2576     locked_print_chunks_in_use_on(&ls);
2577     if (block_freelists() != NULL) {
2578       block_freelists()->print_on(&ls);
2579     }
2580   }
2581 
2582   // Add all the chunks in use by this space manager
2583   // to the global list of free chunks.
2584 
2585   // Follow each list of chunks-in-use and add them to the
2586   // free lists.  Each list is NULL terminated.
2587 
2588   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
2589     Metachunk* chunks = chunks_in_use(i);
2590     chunk_manager()->return_chunk_list(i, chunks);
2591     set_chunks_in_use(i, NULL);
2592   }
2593 
2594   chunk_manager()->slow_locked_verify();
2595 
2596   if (_block_freelists != NULL) {
2597     delete _block_freelists;
2598   }
2599 }
2600 
2601 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2602   assert_lock_strong(_lock);
2603   // Allocations and deallocations are in raw_word_size
2604   size_t raw_word_size = get_allocation_word_size(word_size);
2605   // Lazily create a block_freelist
2606   if (block_freelists() == NULL) {
2607     _block_freelists = new BlockFreelist();
2608   }
2609   block_freelists()->return_block(p, raw_word_size);
2610 }
2611 
2612 // Adds a chunk to the list of chunks in use.
2613 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2614 
2615   assert(new_chunk != NULL, "Should not be NULL");
2616   assert(new_chunk->next() == NULL, "Should not be on a list");
2617 
2618   new_chunk->reset_empty();
2619 
2620   // Find the correct list and set the current
2621   // chunk for that list.
2622   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
2623 
2624   if (index != HumongousIndex) {
2625     retire_current_chunk();
2626     set_current_chunk(new_chunk);
2627     new_chunk->set_next(chunks_in_use(index));
2628     set_chunks_in_use(index, new_chunk);
2629   } else {
2630     // For null class loader data and DumpSharedSpaces, the first chunk isn't
2631     // small, so small will be null.  Link this first chunk as the current
2632     // chunk.
2633     if (make_current) {
2634       // Set as the current chunk but otherwise treat as a humongous chunk.
2635       set_current_chunk(new_chunk);
2636     }
2637     // Link at head.  The _current_chunk only points to a humongous chunk for
2638     // the null class loader metaspace (class and data virtual space managers);
2639     // it does not track the other humongous chunks, so it will not point to
2640     // the tail of the humongous chunks list.
2641     new_chunk->set_next(chunks_in_use(HumongousIndex));
2642     set_chunks_in_use(HumongousIndex, new_chunk);
2643 
2644     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2645   }
2646 
2647   // Add to the running sum of capacity
2648   inc_size_metrics(new_chunk->word_size());
2649 
2650   assert(new_chunk->is_empty(), "Not ready for reuse");
2651   Log(gc, metaspace, freelist) log;
2652   if (log.is_trace()) {
2653     log.trace("SpaceManager::add_chunk: (" SIZE_FORMAT ") ", sum_count_in_chunks_in_use());
2654     ResourceMark rm;
2655     LogStream ls(log.trace());
2656     new_chunk->print_on(&ls);
2657     chunk_manager()->locked_print_free_chunks(&ls);
2658   }
2659 }
2660 
2661 void SpaceManager::retire_current_chunk() {
2662   if (current_chunk() != NULL) {
2663     size_t remaining_words = current_chunk()->free_word_size();
2664     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
2665       MetaWord* ptr = current_chunk()->allocate(remaining_words);
2666       deallocate(ptr, remaining_words);
2667       inc_used_metrics(remaining_words);
2668     }
2669   }
2670 }
2671 
2672 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
2673   // Get a chunk from the chunk freelist
2674   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
2675 
2676   if (next == NULL) {
2677     next = vs_list()->get_new_chunk(chunk_word_size,
2678                                     medium_chunk_bunch());
2679   }
2680 
2681   Log(gc, metaspace, alloc) log;
2682   if (log.is_debug() && next != NULL &&
2683       SpaceManager::is_humongous(next->word_size())) {
2684     log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());
2685   }
2686 
2687   return next;
2688 }
2689 
2690 /*
2691  * The policy is to allocate up to _small_chunk_limit small chunks
2692  * after which only medium chunks are allocated.  This is done to
2693  * reduce fragmentation.  In some cases, this can result in a lot
2694  * of small chunks being allocated to the point where it's not
2695  * possible to expand.  If this happens, there may be no medium chunks
2696  * available and OOME would be thrown.  Instead of doing that,
2697  * if the allocation request size fits in a small chunk, an attempt
2698  * will be made to allocate a small chunk.
2699  */
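     //
     // Illustrative example (the chunk size is an assumption for this sketch):
     // with a 512-word small chunk size, a 100-word request that could not be
     // satisfied by grow_and_allocate() because no medium chunk could be
     // committed is still retried here, since 100 words plus
     // Metachunk::overhead() fits in a small chunk taken from the free list.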
2700 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
2701   size_t raw_word_size = get_allocation_word_size(word_size);
2702 
2703   if (raw_word_size + Metachunk::overhead() > small_chunk_size()) {
2704     return NULL;
2705   }
2706 
2707   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2708   MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag);
2709 
2710   Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size());
2711 
2712   MetaWord* mem = NULL;
2713 
2714   if (chunk != NULL) {
2715     // Add chunk to the in-use chunk list and do an allocation from it.
2716     // Add to this manager's list of chunks in use.
2717     add_chunk(chunk, false);
2718     mem = chunk->allocate(raw_word_size);
2719 
2720     inc_used_metrics(raw_word_size);
2721 
2722     // Track metaspace memory usage statistic.
2723     track_metaspace_memory_usage();
2724   }
2725 
2726   return mem;
2727 }
2728 
2729 MetaWord* SpaceManager::allocate(size_t word_size) {
2730   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2731   size_t raw_word_size = get_allocation_word_size(word_size);
2732   BlockFreelist* fl =  block_freelists();
2733   MetaWord* p = NULL;
2734   // Allocation from the dictionary is expensive in the sense that
2735   // the dictionary has to be searched for a size.  Don't allocate
2736   // from the dictionary until it starts to get fat.  Is this
2737   // a reasonable policy?  Maybe a skinny dictionary is fast enough
2738   // for allocations.  Do some profiling.  JJJ
2739   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
2740     p = fl->get_block(raw_word_size);
2741   }
2742   if (p == NULL) {
2743     p = allocate_work(raw_word_size);
2744   }
2745 
2746   return p;
2747 }
2748 
2749 // Returns the address of space allocated for "word_size".
2750 // This method does not know about blocks (Metablocks).
2751 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2752   assert_lock_strong(_lock);
2753 #ifdef ASSERT
2754   if (Metadebug::test_metadata_failure()) {
2755     return NULL;
2756   }
2757 #endif
2758   // Is there space in the current chunk?
2759   MetaWord* result = NULL;
2760 
2761   if (current_chunk() != NULL) {
2762     result = current_chunk()->allocate(word_size);
2763   }
2764 
2765   if (result == NULL) {
2766     result = grow_and_allocate(word_size);
2767   }
2768 
2769   if (result != NULL) {
2770     inc_used_metrics(word_size);
2771     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2772            "Head of the list is being allocated");
2773   }
2774 
2775   return result;
2776 }
2777 
2778 void SpaceManager::verify() {
2779   // If there are blocks in the dictionary, then
2780   // verification of chunks does not work since
2781   // being in the dictionary alters a chunk.
2782   if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
2783     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2784       Metachunk* curr = chunks_in_use(i);
2785       while (curr != NULL) {
2786         curr->verify();
2787         verify_chunk_size(curr);
2788         curr = curr->next();
2789       }
2790     }
2791   }
2792 }
2793 
2794 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2795   assert(is_humongous(chunk->word_size()) ||
2796          chunk->word_size() == medium_chunk_size() ||
2797          chunk->word_size() == small_chunk_size() ||
2798          chunk->word_size() == specialized_chunk_size(),
2799          "Chunk size is wrong");
2800   return;
2801 }
2802 
2803 #ifdef ASSERT
2804 void SpaceManager::verify_allocated_blocks_words() {
2805   // Verification is only guaranteed at a safepoint.
2806   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2807     "Verification can fail if the application is running");
2808   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2809          "allocation total is not consistent " SIZE_FORMAT
2810          " vs " SIZE_FORMAT,
2811          allocated_blocks_words(), sum_used_in_chunks_in_use());
2812 }
2813 
2814 #endif
2815 
2816 void SpaceManager::dump(outputStream* const out) const {
2817   size_t curr_total = 0;
2818   size_t waste = 0;
2819   uint i = 0;
2820   size_t used = 0;
2821   size_t capacity = 0;
2822 
2823   // Add up statistics for all chunks in this SpaceManager.
2824   for (ChunkIndex index = ZeroIndex;
2825        index < NumberOfInUseLists;
2826        index = next_chunk_index(index)) {
2827     for (Metachunk* curr = chunks_in_use(index);
2828          curr != NULL;
2829          curr = curr->next()) {
2830       out->print("%d) ", i++);
2831       curr->print_on(out);
2832       curr_total += curr->word_size();
2833       used += curr->used_word_size();
2834       capacity += curr->word_size();
2835       waste += curr->free_word_size() + curr->overhead();
2836     }
2837   }
2838 
2839   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2840     if (block_freelists() != NULL) block_freelists()->print_on(out);
2841   }
2842 
2843   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2844   // Free space isn't wasted.
2845   waste -= free;
2846 
2847   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2848                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2849                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2850 }
2851 
2852 // MetaspaceAux
2853 
2854 
2855 size_t MetaspaceAux::_capacity_words[] = {0, 0};
2856 volatile size_t MetaspaceAux::_used_words[] = {0, 0};
2857 
2858 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2859   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2860   return list == NULL ? 0 : list->free_bytes();
2861 }
2862 
2863 size_t MetaspaceAux::free_bytes() {
2864   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2865 }
2866 
2867 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2868   assert_lock_strong(SpaceManager::expand_lock());
2869   assert(words <= capacity_words(mdtype),
2870          "About to decrement below 0: words " SIZE_FORMAT
2871          " is greater than _capacity_words[%u] " SIZE_FORMAT,
2872          words, mdtype, capacity_words(mdtype));
2873   _capacity_words[mdtype] -= words;
2874 }
2875 
2876 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2877   assert_lock_strong(SpaceManager::expand_lock());
2878   // Needs to be atomic
2879   _capacity_words[mdtype] += words;
2880 }
2881 
2882 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2883   assert(words <= used_words(mdtype),
2884          "About to decrement below 0: words " SIZE_FORMAT
2885          " is greater than _used_words[%u] " SIZE_FORMAT,
2886          words, mdtype, used_words(mdtype));
2887   // For CMS deallocation of the Metaspaces occurs during the
2888   // sweep which is a concurrent phase.  Protection by the expand_lock()
2889   // is not enough since allocation is on a per Metaspace basis
2890   // and protected by the Metaspace lock.
2891   Atomic::sub(words, &_used_words[mdtype]);
2892 }
2893 
2894 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2895   // _used_words tracks allocations for
2896   // each piece of metadata.  Those allocations are
2897   // generally done concurrently by different application
2898   // threads so must be done atomically.
2899   Atomic::add(words, &_used_words[mdtype]);
2900 }
2901 
2902 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
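  // "Slow" because this walks every Metaspace in the ClassLoaderDataGraph rather
  // than relying on the running _used_words counters.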
2903   size_t used = 0;
2904   ClassLoaderDataGraphMetaspaceIterator iter;
2905   while (iter.repeat()) {
2906     Metaspace* msp = iter.get_next();
2907     // Sum allocated_blocks_words for each metaspace
2908     if (msp != NULL) {
2909       used += msp->used_words_slow(mdtype);
2910     }
2911   }
2912   return used * BytesPerWord;
2913 }
2914 
2915 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2916   size_t free = 0;
2917   ClassLoaderDataGraphMetaspaceIterator iter;
2918   while (iter.repeat()) {
2919     Metaspace* msp = iter.get_next();
2920     if (msp != NULL) {
2921       free += msp->free_words_slow(mdtype);
2922     }
2923   }
2924   return free * BytesPerWord;
2925 }
2926 
2927 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2928   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2929     return 0;
2930   }
2931   // Don't count the space in the freelists.  That space will be
2932   // added to the capacity calculation as needed.
2933   size_t capacity = 0;
2934   ClassLoaderDataGraphMetaspaceIterator iter;
2935   while (iter.repeat()) {
2936     Metaspace* msp = iter.get_next();
2937     if (msp != NULL) {
2938       capacity += msp->capacity_words_slow(mdtype);
2939     }
2940   }
2941   return capacity * BytesPerWord;
2942 }
2943 
2944 size_t MetaspaceAux::capacity_bytes_slow() {
2945 #ifdef PRODUCT
2946   // Use capacity_bytes() in PRODUCT instead of this function.
2947   guarantee(false, "Should not call capacity_bytes_slow() in a PRODUCT build");
2948 #endif
2949   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2950   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2951   assert(capacity_bytes() == class_capacity + non_class_capacity,
2952          "bad accounting: capacity_bytes() " SIZE_FORMAT
2953          " class_capacity + non_class_capacity " SIZE_FORMAT
2954          " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2955          capacity_bytes(), class_capacity + non_class_capacity,
2956          class_capacity, non_class_capacity);
2957 
2958   return class_capacity + non_class_capacity;
2959 }
2960 
2961 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2962   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2963   return list == NULL ? 0 : list->reserved_bytes();
2964 }
2965 
2966 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2967   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2968   return list == NULL ? 0 : list->committed_bytes();
2969 }
2970 
2971 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2972 
2973 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2974   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2975   if (chunk_manager == NULL) {
2976     return 0;
2977   }
2978   chunk_manager->slow_verify();
2979   return chunk_manager->free_chunks_total_words();
2980 }
2981 
2982 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2983   return free_chunks_total_words(mdtype) * BytesPerWord;
2984 }
2985 
2986 size_t MetaspaceAux::free_chunks_total_words() {
2987   return free_chunks_total_words(Metaspace::ClassType) +
2988          free_chunks_total_words(Metaspace::NonClassType);
2989 }
2990 
2991 size_t MetaspaceAux::free_chunks_total_bytes() {
2992   return free_chunks_total_words() * BytesPerWord;
2993 }
2994 
2995 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2996   return Metaspace::get_chunk_manager(mdtype) != NULL;
2997 }
2998 
2999 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
3000   if (!has_chunk_free_list(mdtype)) {
3001     return MetaspaceChunkFreeListSummary();
3002   }
3003 
3004   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
3005   return cm->chunk_free_list_summary();
3006 }
3007 
3008 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
3009   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
3010                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
3011 }
3012 
3013 void MetaspaceAux::print_on(outputStream* out) {
3014   Metaspace::MetadataType nct = Metaspace::NonClassType;
3015 
3016   out->print_cr(" Metaspace       "
3017                 "used "      SIZE_FORMAT "K, "
3018                 "capacity "  SIZE_FORMAT "K, "
3019                 "committed " SIZE_FORMAT "K, "
3020                 "reserved "  SIZE_FORMAT "K",
3021                 used_bytes()/K,
3022                 capacity_bytes()/K,
3023                 committed_bytes()/K,
3024                 reserved_bytes()/K);
3025 
3026   if (Metaspace::using_class_space()) {
3027     Metaspace::MetadataType ct = Metaspace::ClassType;
3028     out->print_cr("  class space    "
3029                   "used "      SIZE_FORMAT "K, "
3030                   "capacity "  SIZE_FORMAT "K, "
3031                   "committed " SIZE_FORMAT "K, "
3032                   "reserved "  SIZE_FORMAT "K",
3033                   used_bytes(ct)/K,
3034                   capacity_bytes(ct)/K,
3035                   committed_bytes(ct)/K,
3036                   reserved_bytes(ct)/K);
3037   }
3038 }
3039 
3040 // Print information for class space and data space separately.
3041 // This is almost the same as above.
3042 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
3043   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
3044   size_t capacity_bytes = capacity_bytes_slow(mdtype);
3045   size_t used_bytes = used_bytes_slow(mdtype);
3046   size_t free_bytes = free_bytes_slow(mdtype);
3047   size_t used_and_free = used_bytes + free_bytes +
3048                            free_chunks_capacity_bytes;
3049   out->print_cr("  Chunk accounting: (used in chunks " SIZE_FORMAT
3050              "K + unused in chunks " SIZE_FORMAT "K  + "
3051              " capacity in free chunks " SIZE_FORMAT "K) = " SIZE_FORMAT
3052              "K  capacity in allocated chunks " SIZE_FORMAT "K",
3053              used_bytes / K,
3054              free_bytes / K,
3055              free_chunks_capacity_bytes / K,
3056              used_and_free / K,
3057              capacity_bytes / K);
3058   // Accounting can only be correct if we got the values during a safepoint
3059   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
3060 }
3061 
3062 // Print total fragmentation for class metaspaces
3063 void MetaspaceAux::print_class_waste(outputStream* out) {
3064   assert(Metaspace::using_class_space(), "class metaspace not used");
3065   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
3066   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
3067   ClassLoaderDataGraphMetaspaceIterator iter;
3068   while (iter.repeat()) {
3069     Metaspace* msp = iter.get_next();
3070     if (msp != NULL) {
3071       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
3072       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
3073       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
3074       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
3075       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
3076       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
3077       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
3078     }
3079   }
3080   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
3081                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
3082                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
3083                 "large count " SIZE_FORMAT,
3084                 cls_specialized_count, cls_specialized_waste,
3085                 cls_small_count, cls_small_waste,
3086                 cls_medium_count, cls_medium_waste, cls_humongous_count);
3087 }
3088 
3089 // Print total fragmentation for data and class metaspaces separately
3090 void MetaspaceAux::print_waste(outputStream* out) {
3091   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
3092   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
3093 
3094   ClassLoaderDataGraphMetaspaceIterator iter;
3095   while (iter.repeat()) {
3096     Metaspace* msp = iter.get_next();
3097     if (msp != NULL) {
3098       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
3099       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
3100       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
3101       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
3102       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
3103       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
3104       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
3105     }
3106   }
3107   out->print_cr("Total fragmentation waste (words) doesn't count free space");
3108   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
3109                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
3110                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
3111                         "large count " SIZE_FORMAT,
3112              specialized_count, specialized_waste, small_count,
3113              small_waste, medium_count, medium_waste, humongous_count);
3114   if (Metaspace::using_class_space()) {
3115     print_class_waste(out);
3116   }
3117 }
3118 
3119 class MetadataStats VALUE_OBJ_CLASS_SPEC {
3120 private:
3121   size_t _capacity;
3122   size_t _used;
3123   size_t _free;
3124   size_t _waste;
3125 
3126 public:
3127   MetadataStats() : _capacity(0), _used(0), _free(0), _waste(0) { }
3128   MetadataStats(size_t capacity, size_t used, size_t free, size_t waste)
3129   : _capacity(capacity), _used(used), _free(free), _waste(waste) { }
3130 
3131   void add(const MetadataStats& stats) {
3132     _capacity += stats.capacity();
3133     _used += stats.used();
3134     _free += stats.free();
3135     _waste += stats.waste();
3136   }
3137 
3138   size_t capacity() const { return _capacity; }
3139   size_t used() const     { return _used; }
3140   size_t free() const     { return _free; }
3141   size_t waste() const    { return _waste; }
3142 
3143   void print_on(outputStream* out, size_t scale) const;
3144 };
3145 
3146 
3147 void MetadataStats::print_on(outputStream* out, size_t scale) const {
3148   const char* unit = scale_unit(scale);
3149   out->print_cr("capacity=%10.2f%s used=%10.2f%s free=%10.2f%s waste=%10.2f%s",
3150     (float)capacity() / scale, unit,
3151     (float)used() / scale, unit,
3152     (float)free() / scale, unit,
3153     (float)waste() / scale, unit);
3154 }
3155 
3156 class PrintCLDMetaspaceInfoClosure : public CLDClosure {
3157 private:
3158   outputStream*  _out;
3159   size_t         _scale;
3160 
3161   size_t         _total_count;
3162   MetadataStats  _total_metadata;
3163   MetadataStats  _total_class;
3164 
3165   size_t         _total_anon_count;
3166   MetadataStats  _total_anon_metadata;
3167   MetadataStats  _total_anon_class;
3168 
3169 public:
3170   PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale = K)
3171   : _out(out), _scale(scale), _total_count(0), _total_anon_count(0) { }
3172 
3173   ~PrintCLDMetaspaceInfoClosure() {
3174     print_summary();
3175   }
3176 
3177   void do_cld(ClassLoaderData* cld) {
3178     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
3179 
3180     if (cld->is_unloading()) return;
3181     Metaspace* msp = cld->metaspace_or_null();
3182     if (msp == NULL) {
3183       return;
3184     }
3185 
3186     bool anonymous = false;
3187     if (cld->is_anonymous()) {
3188       _out->print_cr("ClassLoader: for anonymous class");
3189       anonymous = true;
3190     } else {
3191       ResourceMark rm;
3192       _out->print_cr("ClassLoader: %s", cld->loader_name());
3193     }
3194 
3195     print_metaspace(msp, anonymous);
3196     _out->cr();
3197   }
3198 
3199 private:
3200   void print_metaspace(Metaspace* msp, bool anonymous);
3201   void print_summary() const;
3202 };
3203 
3204 void PrintCLDMetaspaceInfoClosure::print_metaspace(Metaspace* msp, bool anonymous){
3205   assert(msp != NULL, "Sanity");
3206   SpaceManager* vsm = msp->vsm();
3207   const char* unit = scale_unit(_scale);
3208 
3209   size_t capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
3210   size_t used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
3211   size_t free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
3212   size_t waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
3213 
3214   _total_count ++;
3215   MetadataStats metadata_stats(capacity, used, free, waste);
3216   _total_metadata.add(metadata_stats);
3217 
3218   if (anonymous) {
3219     _total_anon_count ++;
3220     _total_anon_metadata.add(metadata_stats);
3221   }
3222 
3223   _out->print("  Metadata   ");
3224   metadata_stats.print_on(_out, _scale);
3225 
3226   if (Metaspace::using_class_space()) {
3227     vsm = msp->class_vsm();
3228 
3229     capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
3230     used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
3231     free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
3232     waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
3233 
3234     MetadataStats class_stats(capacity, used, free, waste);
3235     _total_class.add(class_stats);
3236 
3237     if (anonymous) {
3238       _total_anon_class.add(class_stats);
3239     }
3240 
3241     _out->print("  Class data ");
3242     class_stats.print_on(_out, _scale);
3243   }
3244 }
3245 
3246 void PrintCLDMetaspaceInfoClosure::print_summary() const {
3247   const char* unit = scale_unit(_scale);
3248   _out->cr();
3249   _out->print_cr("Summary:");
3250 
3251   MetadataStats total;
3252   total.add(_total_metadata);
3253   total.add(_total_class);
3254 
3255   _out->print("  Total class loaders=" SIZE_FORMAT_W(6) " ", _total_count);
3256   total.print_on(_out, _scale);
3257 
3258   _out->print("                    Metadata ");
3259   _total_metadata.print_on(_out, _scale);
3260 
3261   if (Metaspace::using_class_space()) {
3262     _out->print("                  Class data ");
3263     _total_class.print_on(_out, _scale);
3264   }
3265   _out->cr();
3266 
3267   MetadataStats total_anon;
3268   total_anon.add(_total_anon_metadata);
3269   total_anon.add(_total_anon_class);
3270 
3271   _out->print("For anonymous classes=" SIZE_FORMAT_W(6) " ", _total_anon_count);
3272   total_anon.print_on(_out, _scale);
3273 
3274   _out->print("                    Metadata ");
3275   _total_anon_metadata.print_on(_out, _scale);
3276 
3277   if (Metaspace::using_class_space()) {
3278     _out->print("                  Class data ");
3279     _total_anon_class.print_on(_out, _scale);
3280   }
3281 }
3282 
3283 void MetaspaceAux::print_metadata_for_nmt(outputStream* out, size_t scale) {
3284   const char* unit = scale_unit(scale);
3285   out->print_cr("Metaspaces:");
3286   out->print_cr("  Metadata space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
3287     reserved_bytes(Metaspace::NonClassType) / scale, unit,
3288     committed_bytes(Metaspace::NonClassType) / scale, unit);
3289   if (Metaspace::using_class_space()) {
3290     out->print_cr("  Class    space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
3291     reserved_bytes(Metaspace::ClassType) / scale, unit,
3292     committed_bytes(Metaspace::ClassType) / scale, unit);
3293   }
3294 
3295   out->cr();
3296   ChunkManager::print_all_chunkmanagers(out, scale);
3297 
3298   out->cr();
3299   out->print_cr("Per-classloader metadata:");
3300   out->cr();
3301 
3302   PrintCLDMetaspaceInfoClosure cl(out, scale);
3303   ClassLoaderDataGraph::cld_do(&cl);
3304 }
3305 
3306 
3307 // Dump global metaspace things from the end of ClassLoaderDataGraph
3308 void MetaspaceAux::dump(outputStream* out) {
3309   out->print_cr("All Metaspace:");
3310   out->print("data space: "); print_on(out, Metaspace::NonClassType);
3311   out->print("class space: "); print_on(out, Metaspace::ClassType);
3312   print_waste(out);
3313 }
3314 
3315 // Prints an ASCII representation of the given space.
3316 void MetaspaceAux::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
3317   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3318   const bool for_class = (mdtype == Metaspace::ClassType);
3319   VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
3320   if (vsl != NULL) {
3321     if (for_class) {
3322       if (!Metaspace::using_class_space()) {
3323         out->print_cr("No Class Space.");
3324         return;
3325       }
3326       out->print_raw("---- Metaspace Map (Class Space) ----");
3327     } else {
3328       out->print_raw("---- Metaspace Map (Non-Class Space) ----");
3329     }
3330     // Print legend:
3331     out->cr();
3332     out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
3333     out->cr();
3335     vsl->print_map(out);
3336     out->cr();
3337   }
3338 }
3339 
3340 void MetaspaceAux::verify_free_chunks() {
3341   Metaspace::chunk_manager_metadata()->verify();
3342   if (Metaspace::using_class_space()) {
3343     Metaspace::chunk_manager_class()->verify();
3344   }
3345 }
3346 
3347 void MetaspaceAux::verify_capacity() {
3348 #ifdef ASSERT
3349   size_t running_sum_capacity_bytes = capacity_bytes();
3350   // For purposes of the running sum of capacity, verify against capacity
3351   size_t capacity_in_use_bytes = capacity_bytes_slow();
3352   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
3353          "capacity_words() * BytesPerWord " SIZE_FORMAT
3354          " capacity_bytes_slow() " SIZE_FORMAT,
3355          running_sum_capacity_bytes, capacity_in_use_bytes);
3356   for (Metaspace::MetadataType i = Metaspace::ClassType;
3357        i < Metaspace::MetadataTypeCount;
3358        i = (Metaspace::MetadataType)(i + 1)) {
3359     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
3360     assert(capacity_bytes(i) == capacity_in_use_bytes,
3361            "capacity_bytes(%u) " SIZE_FORMAT
3362            " capacity_bytes_slow(%u) " SIZE_FORMAT,
3363            i, capacity_bytes(i), i, capacity_in_use_bytes);
3364   }
3365 #endif
3366 }
3367 
3368 void MetaspaceAux::verify_used() {
3369 #ifdef ASSERT
3370   size_t running_sum_used_bytes = used_bytes();
3371   // For purposes of the running sum of used, verify against used
3372   size_t used_in_use_bytes = used_bytes_slow();
3373   assert(used_bytes() == used_in_use_bytes,
3374          "used_bytes() " SIZE_FORMAT
3375          " used_bytes_slow() " SIZE_FORMAT,
3376          used_bytes(), used_in_use_bytes);
3377   for (Metaspace::MetadataType i = Metaspace::ClassType;
3378        i < Metaspace::MetadataTypeCount;
3379        i = (Metaspace::MetadataType)(i + 1)) {
3380     size_t used_in_use_bytes = used_bytes_slow(i);
3381     assert(used_bytes(i) == used_in_use_bytes,
3382            "used_bytes(%u) " SIZE_FORMAT
3383            " used_bytes_slow(%u) " SIZE_FORMAT,
3384            i, used_bytes(i), i, used_in_use_bytes);
3385   }
3386 #endif
3387 }
3388 
3389 void MetaspaceAux::verify_metrics() {
3390   verify_capacity();
3391   verify_used();
3392 }
3393 
3394 
3395 // Metaspace methods
3396 
3397 size_t Metaspace::_first_chunk_word_size = 0;
3398 size_t Metaspace::_first_class_chunk_word_size = 0;
3399 
3400 size_t Metaspace::_commit_alignment = 0;
3401 size_t Metaspace::_reserve_alignment = 0;
3402 
3403 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
3404   initialize(lock, type);
3405 }
3406 
3407 Metaspace::~Metaspace() {
3408   delete _vsm;
3409   if (using_class_space()) {
3410     delete _class_vsm;
3411   }
3412 }
3413 
3414 VirtualSpaceList* Metaspace::_space_list = NULL;
3415 VirtualSpaceList* Metaspace::_class_space_list = NULL;
3416 
3417 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
3418 ChunkManager* Metaspace::_chunk_manager_class = NULL;
3419 
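// The initial metaspace virtual space is reserved as a multiple of the boot class
// loader's first chunk size (see global_initialize()).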
3420 #define VIRTUALSPACEMULTIPLIER 2
3421 
3422 #ifdef _LP64
3423 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
3424 
3425 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
3426   assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
3427   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
3428   // narrow_klass_base is the lower of the metaspace base and the cds base
3429   // (if cds is enabled).  The narrow_klass_shift depends on the distance
3430   // between the lower base and higher address.
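  // For reference, a narrow (compressed) klass value nk is decoded roughly as
  //   Klass* k = (Klass*)(narrow_klass_base + ((uintptr_t)nk << narrow_klass_shift));
  // so a zero base (and ideally a zero shift) gives the cheapest decode sequence.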
3431   address lower_base;
3432   address higher_address;
3433 #if INCLUDE_CDS
3434   if (UseSharedSpaces) {
3435     higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
3436                           (address)(metaspace_base + compressed_class_space_size()));
3437     lower_base = MIN2(metaspace_base, cds_base);
3438   } else
3439 #endif
3440   {
3441     higher_address = metaspace_base + compressed_class_space_size();
3442     lower_base = metaspace_base;
3443 
3444     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
3445     // If compressed class space fits in lower 32G, we don't need a base.
3446     if (higher_address <= (address)klass_encoding_max) {
3447       lower_base = 0; // Effectively lower base is zero.
3448     }
3449   }
3450 
3451   Universe::set_narrow_klass_base(lower_base);
3452 
3453   // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
3454   // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
3455   // how dump time narrow_klass_shift is set. Although, CDS can work
3456   // with zero-shift mode also, to be consistent with AOT it uses
3457   // LogKlassAlignmentInBytes for klass shift so archived java heap objects
3458   // can be used at same time as AOT code.
3459   if (!UseSharedSpaces
3460       && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
3461     Universe::set_narrow_klass_shift(0);
3462   } else {
3463     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
3464   }
3465   AOTLoader::set_narrow_klass_shift();
3466 }
3467 
3468 #if INCLUDE_CDS
3469 // Return TRUE if the specified metaspace_base and cds_base are close enough
3470 // to work with compressed klass pointers.
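// "Close enough" means the whole range [lower_base, higher_address) fits within
// UnscaledClassSpaceMax, i.e. within what a 32-bit narrow klass value can address
// without a shift.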
3471 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
3472   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
3473   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3474   address lower_base = MIN2((address)metaspace_base, cds_base);
3475   address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
3476                                 (address)(metaspace_base + compressed_class_space_size()));
3477   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
3478 }
3479 #endif
3480 
3481 // Try to allocate the metaspace at the requested addr.
3482 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
3483   assert(!DumpSharedSpaces, "compressed class space is allocated by the MetaspaceShared class.");
3484   assert(using_class_space(), "called improperly");
3485   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3486   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
3487          "Metaspace size is too big");
3488   assert_is_aligned(requested_addr, _reserve_alignment);
3489   assert_is_aligned(cds_base, _reserve_alignment);
3490   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
3491 
3492   // Don't use large pages for the class space.
3493   bool large_pages = false;
3494 
3495 #if !(defined(AARCH64) || defined(AIX))
3496   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
3497                                              _reserve_alignment,
3498                                              large_pages,
3499                                              requested_addr);
3500 #else // AARCH64 || AIX
3501   ReservedSpace metaspace_rs;
3502 
3503   // Our compressed klass pointers may fit nicely into the lower 32
3504   // bits.
3505   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
3506     metaspace_rs = ReservedSpace(compressed_class_space_size(),
3507                                  _reserve_alignment,
3508                                  large_pages,
3509                                  requested_addr);
3510   }
3511 
3512   if (!metaspace_rs.is_reserved()) {
3513     // Aarch64: Try to align metaspace so that we can decode a compressed
3514     // klass with a single MOVK instruction.  We can do this iff the
3515     // compressed class base is a multiple of 4G.
3516     // Aix: Search for a place where we can find memory. If we need to load
3517     // the base, 4G alignment is helpful, too.
3518     size_t increment = AARCH64_ONLY(4*)G;
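    // On AArch64 AARCH64_ONLY(4*)G expands to 4*G, so every candidate base stays a
    // multiple of 4G (decodable with a single MOVK); on AIX the step starts at just 1*G.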
3519     for (char *a = align_up(requested_addr, increment);
3520          a < (char*)(1024*G);
3521          a += increment) {
3522       if (a == (char *)(32*G)) {
3523         // Go faster from here on. Zero-based is no longer possible.
3524         increment = 4*G;
3525       }
3526 
3527 #if INCLUDE_CDS
3528       if (UseSharedSpaces
3529           && !can_use_cds_with_metaspace_addr(a, cds_base)) {
3530         // We failed to find an aligned base that will reach.  Fall
3531         // back to using our requested addr.
3532         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3533                                      _reserve_alignment,
3534                                      large_pages,
3535                                      requested_addr);
3536         break;
3537       }
3538 #endif
3539 
3540       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3541                                    _reserve_alignment,
3542                                    large_pages,
3543                                    a);
3544       if (metaspace_rs.is_reserved())
3545         break;
3546     }
3547   }
3548 
3549 #endif // AARCH64 || AIX
3550 
3551   if (!metaspace_rs.is_reserved()) {
3552 #if INCLUDE_CDS
3553     if (UseSharedSpaces) {
3554       size_t increment = align_up(1*G, _reserve_alignment);
3555 
3556       // Keep trying to allocate the metaspace, increasing the requested_addr
3557       // by 1GB each time, until we reach an address that will no longer allow
3558       // use of CDS with compressed klass pointers.
3559       char *addr = requested_addr;
3560       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3561              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3562         addr = addr + increment;
3563         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3564                                      _reserve_alignment, large_pages, addr);
3565       }
3566     }
3567 #endif
3568     // If no successful allocation then try to allocate the space anywhere.  If
3569     // that fails then OOM doom.  At this point we cannot try allocating the
3570     // metaspace as if UseCompressedClassPointers is off because too much
3571     // initialization has happened that depends on UseCompressedClassPointers.
3572     // So, UseCompressedClassPointers cannot be turned off at this point.
3573     if (!metaspace_rs.is_reserved()) {
3574       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3575                                    _reserve_alignment, large_pages);
3576       if (!metaspace_rs.is_reserved()) {
3577         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
3578                                               compressed_class_space_size()));
3579       }
3580     }
3581   }
3582 
3583   // If we got here then the metaspace got allocated.
3584   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3585 
3586 #if INCLUDE_CDS
3587   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3588   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3589     FileMapInfo::stop_sharing_and_unmap(
3590         "Could not allocate metaspace at a compatible address");
3591   }
3592 #endif
3593   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3594                                   UseSharedSpaces ? (address)cds_base : 0);
3595 
3596   initialize_class_space(metaspace_rs);
3597 
3598   LogTarget(Trace, gc, metaspace) lt;
3599   if (lt.is_enabled()) {
3600     ResourceMark rm;
3601     LogStream ls(lt);
3602     print_compressed_class_space(&ls, requested_addr);
3603   }
3604 }
3605 
3606 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3607   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3608                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3609   if (_class_space_list != NULL) {
3610     address base = (address)_class_space_list->current_virtual_space()->bottom();
3611     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3612                  compressed_class_space_size(), p2i(base));
3613     if (requested_addr != 0) {
3614       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3615     }
3616     st->cr();
3617   }
3618 }
3619 
3620 // For UseCompressedClassPointers the class space is reserved above the top of
3621 // the Java heap.  The argument passed in is at the base of the compressed space.
3622 void Metaspace::initialize_class_space(ReservedSpace rs) {
3623   // The reserved space size may be bigger because of alignment, esp with UseLargePages
3624   assert(rs.size() >= CompressedClassSpaceSize,
3625          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
3626   assert(using_class_space(), "Must be using class space");
3627   _class_space_list = new VirtualSpaceList(rs);
3628   _chunk_manager_class = new ChunkManager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3629 
3630   if (!_class_space_list->initialization_succeeded()) {
3631     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3632   }
3633 }
3634 
3635 #endif
3636 
3637 void Metaspace::ergo_initialize() {
3638   if (DumpSharedSpaces) {
3639     // Using large pages when dumping the shared archive is currently not implemented.
3640     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3641   }
3642 
3643   size_t page_size = os::vm_page_size();
3644   if (UseLargePages && UseLargePagesInMetaspace) {
3645     page_size = os::large_page_size();
3646   }
3647 
3648   _commit_alignment  = page_size;
3649   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
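  // Commits happen at (possibly large) page granularity; reservations must also
  // honor the OS allocation granularity (e.g. 64K on Windows), hence the MAX2 above.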
3650 
3651   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
3652   // override if MaxMetaspaceSize was set on the command line or not.
3653   // This information is needed later to conform to the specification of the
3654   // java.lang.management.MemoryUsage API.
3655   //
3656   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3657   // globals.hpp to the aligned value, but this is not possible, since the
3658   // alignment depends on other flags being parsed.
3659   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3660 
3661   if (MetaspaceSize > MaxMetaspaceSize) {
3662     MetaspaceSize = MaxMetaspaceSize;
3663   }
3664 
3665   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
3666 
3667   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3668 
3669   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3670   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3671 
3672   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3673   set_compressed_class_space_size(CompressedClassSpaceSize);
3674 
3675   // Initial virtual space size will be calculated at global_initialize()
3676   size_t min_metaspace_sz =
3677       VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
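  // min_metaspace_sz is a rough lower bound on the non-class metaspace needed just
  // to boot the VM: the boot class loader's initial metaspace size, scaled by the
  // same multiplier used for the initial virtual space reservation.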
3678   if (UseCompressedClassPointers) {
3679     if ((min_metaspace_sz + CompressedClassSpaceSize) >  MaxMetaspaceSize) {
3680       if (min_metaspace_sz >= MaxMetaspaceSize) {
3681         vm_exit_during_initialization("MaxMetaspaceSize is too small.");
3682       } else {
3683         FLAG_SET_ERGO(size_t, CompressedClassSpaceSize,
3684                       MaxMetaspaceSize - min_metaspace_sz);
3685       }
3686     }
3687   } else if (min_metaspace_sz >= MaxMetaspaceSize) {
3688     FLAG_SET_ERGO(size_t, InitialBootClassLoaderMetaspaceSize,
3689                   min_metaspace_sz);
3690   }
3691 
3692 }
3693 
3694 void Metaspace::global_initialize() {
3695   MetaspaceGC::initialize();
3696 
3697 #if INCLUDE_CDS
3698   if (DumpSharedSpaces) {
3699     MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
3700   } else if (UseSharedSpaces) {
3701     // If any of the archived space fails to map, UseSharedSpaces
3702     // is reset to false. Fall through to the
3703     // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
3704     // metaspace.
3705     MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
3706   }
3707 
3708   if (!DumpSharedSpaces && !UseSharedSpaces)
3709 #endif // INCLUDE_CDS
3710   {
3711 #ifdef _LP64
3712     if (using_class_space()) {
3713       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3714       allocate_metaspace_compressed_klass_ptrs(base, 0);
3715     }
3716 #endif // _LP64
3717   }
3718 
3719   // Initialize these before initializing the VirtualSpaceList
3720   _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3721   _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3722   // Make the first class chunk bigger than a medium chunk so it's not put
3723   // on the medium chunk list.  The next chunk will be small and progress
3724   // from there.  This size was determined empirically by running -version.
3725   _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3726                                      (CompressedClassSpaceSize/BytesPerWord)*2);
3727   _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3728   // Arbitrarily set the initial virtual space to a multiple
3729   // of the boot class loader size.
3730   size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3731   word_size = align_up(word_size, Metaspace::reserve_alignment_words());
3732 
3733   // Initialize the list of virtual spaces.
3734   _space_list = new VirtualSpaceList(word_size);
3735   _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3736 
3737   if (!_space_list->initialization_succeeded()) {
3738     vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3739   }
3740 
3741   _tracer = new MetaspaceTracer();
3742 }
3743 
3744 void Metaspace::post_initialize() {
3745   MetaspaceGC::post_initialize();
3746 }
3747 
3748 void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) {
3749   Metachunk* chunk = get_initialization_chunk(type, mdtype);
3750   if (chunk != NULL) {
3751     // Add to this manager's list of chunks in use and current_chunk().
3752     get_space_manager(mdtype)->add_chunk(chunk, true);
3753   }
3754 }
3755 
3756 Metachunk* Metaspace::get_initialization_chunk(MetaspaceType type, MetadataType mdtype) {
3757   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
3758 
3759   // Get a chunk from the chunk freelist
3760   Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3761 
3762   if (chunk == NULL) {
3763     chunk = get_space_list(mdtype)->get_new_chunk(chunk_word_size,
3764                                                   get_space_manager(mdtype)->medium_chunk_bunch());
3765   }
3766 
3767   return chunk;
3768 }
3769 
3770 void Metaspace::verify_global_initialization() {
3771   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
3772   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
3773 
3774   if (using_class_space()) {
3775     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
3776     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
3777   }
3778 }
3779 
3780 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3781   verify_global_initialization();
3782 
3783   // Allocate SpaceManager for metadata objects.
3784   _vsm = new SpaceManager(NonClassType, lock);
3785 
3786   if (using_class_space()) {
3787     // Allocate SpaceManager for classes.
3788     _class_vsm = new SpaceManager(ClassType, lock);
3789   }
3790 
3791   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3792 
3793   // Allocate chunk for metadata objects
3794   initialize_first_chunk(type, NonClassType);
3795 
3796   // Allocate chunk for class metadata objects
3797   if (using_class_space()) {
3798     initialize_first_chunk(type, ClassType);
3799   }
3800 }
3801 
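// Round a word count up so that the corresponding byte size satisfies the
// ReservedSpace allocation alignment.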
3802 size_t Metaspace::align_word_size_up(size_t word_size) {
3803   size_t byte_size = word_size * wordSize;
3804   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3805 }
3806 
3807 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3808   assert(!_frozen, "sanity");
3809   // Don't use class_vsm() unless UseCompressedClassPointers is true.
3810   if (is_class_space_allocation(mdtype)) {
3811     return  class_vsm()->allocate(word_size);
3812   } else {
3813     return  vsm()->allocate(word_size);
3814   }
3815 }
3816 
3817 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3818   assert(!_frozen, "sanity");
3819   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3820   assert(delta_bytes > 0, "Must be");
3821 
3822   size_t before = 0;
3823   size_t after = 0;
3824   MetaWord* res;
3825   bool incremented;
3826 
3827   // Each thread increments the HWM at most once. Even if the thread fails to increment
3828   // the HWM, an allocation is still attempted. This is because another thread must then
3829   // have incremented the HWM and therefore the allocation might still succeed.
3830   do {
3831     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
3832     res = allocate(word_size, mdtype);
3833   } while (!incremented && res == NULL);
3834 
3835   if (incremented) {
3836     tracer()->report_gc_threshold(before, after,
3837                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3838     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
3839   }
3840 
3841   return res;
3842 }
3843 
3844 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3845   if (mdtype == ClassType) {
3846     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3847   } else {
3848     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3849   }
3850 }
3851 
3852 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3853   assert(!_frozen, "sanity");
3854   if (mdtype == ClassType) {
3855     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3856   } else {
3857     return vsm()->sum_free_in_chunks_in_use();
3858   }
3859 }
3860 
3861 // Space capacity in the Metaspace.  It includes
3862 // space in the list of chunks from which allocations
3863 // have been made.  It does not include space in the global
3864 // chunk freelist, nor the space available in the block dictionary,
3865 // which is already counted in some chunk.
3866 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3867   if (mdtype == ClassType) {
3868     return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3869   } else {
3870     return vsm()->sum_capacity_in_chunks_in_use();
3871   }
3872 }
3873 
3874 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3875   return used_words_slow(mdtype) * BytesPerWord;
3876 }
3877 
3878 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3879   return capacity_words_slow(mdtype) * BytesPerWord;
3880 }
3881 
3882 size_t Metaspace::allocated_blocks_bytes() const {
3883   return vsm()->allocated_blocks_bytes() +
3884       (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
3885 }
3886 
3887 size_t Metaspace::allocated_chunks_bytes() const {
3888   return vsm()->allocated_chunks_bytes() +
3889       (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
3890 }
3891 
3892 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3893   assert(!_frozen, "sanity");
3894   assert(!SafepointSynchronize::is_at_safepoint()
3895          || Thread::current()->is_VM_thread(), "should be the VM thread");
3896 
3897   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3898 
3899   if (is_class && using_class_space()) {
3900     class_vsm()->deallocate(ptr, word_size);
3901   } else {
3902     vsm()->deallocate(ptr, word_size);
3903   }
3904 }
3905 
3906 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3907                               MetaspaceObj::Type type, TRAPS) {
3908   assert(!_frozen, "sanity");
3909   if (HAS_PENDING_EXCEPTION) {
3910     assert(false, "Should not allocate with exception pending");
3911     return NULL;  // caller does a CHECK_NULL too
3912   }
3913 
3914   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3915         "ClassLoaderData::the_null_class_loader_data() should have been used.");
3916 
3917   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3918 
3919   // Try to allocate metadata.
3920   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3921 
3922   if (result == NULL) {
3923     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
3924 
3925     // Allocation failed.
3926     if (is_init_completed()) {
3927       // Only start a GC if the bootstrapping has completed.
3928 
3929       // Try to clean out some memory and retry.
3930       result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3931           loader_data, word_size, mdtype);
3932     }
3933   }
3934 
3935   if (result == NULL) {
3936     SpaceManager* sm;
3937     if (is_class_space_allocation(mdtype)) {
3938       sm = loader_data->metaspace_non_null()->class_vsm();
3939     } else {
3940       sm = loader_data->metaspace_non_null()->vsm();
3941     }
3942 
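    // Last attempt before reporting OOM: try to satisfy the request out of a single
    // small chunk (see SpaceManager::get_small_chunk_and_allocate()).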
3943     result = sm->get_small_chunk_and_allocate(word_size);
3944 
3945     if (result == NULL) {
3946       report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3947     }
3948   }
3949 
3950   // Zero initialize.
3951   Copy::fill_to_words((HeapWord*)result, word_size, 0);
3952 
3953   return result;
3954 }
3955 
3956 size_t Metaspace::class_chunk_size(size_t word_size) {
3957   assert(using_class_space(), "Has to use class space");
3958   return class_vsm()->calc_chunk_size(word_size);
3959 }
3960 
3961 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3962   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3963 
3964   // If result is still null, we are out of memory.
3965   Log(gc, metaspace, freelist) log;
3966   if (log.is_info()) {
3967     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
3968              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
3969     ResourceMark rm;
3970     if (log.is_debug()) {
3971       if (loader_data->metaspace_or_null() != NULL) {
3972         LogStream ls(log.debug());
3973         loader_data->dump(&ls);
3974       }
3975     }
3976     LogStream ls(log.info());
3977     MetaspaceAux::dump(&ls);
3978     MetaspaceAux::print_metaspace_map(&ls, mdtype);
3979     ChunkManager::print_all_chunkmanagers(&ls);
3980   }
3981 
3982   bool out_of_compressed_class_space = false;
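  // Decide which resource to blame: a class-space allocation that would push the
  // committed class space past CompressedClassSpaceSize is reported as exhaustion
  // of the compressed class space rather than of Metaspace as a whole.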
3983   if (is_class_space_allocation(mdtype)) {
3984     Metaspace* metaspace = loader_data->metaspace_non_null();
3985     out_of_compressed_class_space =
3986       MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3987       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3988       CompressedClassSpaceSize;
3989   }
3990 
3991   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3992   const char* space_string = out_of_compressed_class_space ?
3993     "Compressed class space" : "Metaspace";
3994 
3995   report_java_out_of_memory(space_string);
3996 
3997   if (JvmtiExport::should_post_resource_exhausted()) {
3998     JvmtiExport::post_resource_exhausted(
3999         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
4000         space_string);
4001   }
4002 
4003   if (!is_init_completed()) {
4004     vm_exit_during_initialization("OutOfMemoryError", space_string);
4005   }
4006 
4007   if (out_of_compressed_class_space) {
4008     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
4009   } else {
4010     THROW_OOP(Universe::out_of_memory_error_metaspace());
4011   }
4012 }
4013 
4014 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
4015   switch (mdtype) {
4016     case Metaspace::ClassType: return "Class";
4017     case Metaspace::NonClassType: return "Metadata";
4018     default:
4019       assert(false, "Got bad mdtype: %d", (int) mdtype);
4020       return NULL;
4021   }
4022 }
4023 
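// Purge removes virtual space nodes that no longer contain any in-use chunks
// (see VirtualSpaceList::purge()), allowing their reservations to be released.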
4024 void Metaspace::purge(MetadataType mdtype) {
4025   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
4026 }
4027 
4028 void Metaspace::purge() {
4029   MutexLockerEx cl(SpaceManager::expand_lock(),
4030                    Mutex::_no_safepoint_check_flag);
4031   purge(NonClassType);
4032   if (using_class_space()) {
4033     purge(ClassType);
4034   }
4035 }
4036 
4037 void Metaspace::print_on(outputStream* out) const {
4038   // Print both class virtual space counts and metaspace.
4039   if (Verbose) {
4040     vsm()->print_on(out);
4041     if (using_class_space()) {
4042       class_vsm()->print_on(out);
4043     }
4044   }
4045 }
4046 
4047 bool Metaspace::contains(const void* ptr) {
4048   if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
4049     return true;
4050   }
4051   return contains_non_shared(ptr);
4052 }
4053 
4054 bool Metaspace::contains_non_shared(const void* ptr) {
4055   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
4056      return true;
4057   }
4058 
4059   return get_space_list(NonClassType)->contains(ptr);
4060 }
4061 
4062 void Metaspace::verify() {
4063   vsm()->verify();
4064   if (using_class_space()) {
4065     class_vsm()->verify();
4066   }
4067 }
4068 
4069 void Metaspace::dump(outputStream* const out) const {
4070   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
4071   vsm()->dump(out);
4072   if (using_class_space()) {
4073     out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
4074     class_vsm()->dump(out);
4075   }
4076 }
4077 
4078 /////////////// Unit tests ///////////////
4079 
4080 #ifndef PRODUCT
4081 
4082 class TestMetaspaceAuxTest : AllStatic {
4083  public:
4084   static void test_reserved() {
4085     size_t reserved = MetaspaceAux::reserved_bytes();
4086 
4087     assert(reserved > 0, "assert");
4088 
4089     size_t committed  = MetaspaceAux::committed_bytes();
4090     assert(committed <= reserved, "assert");
4091 
4092     size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
4093     assert(reserved_metadata > 0, "assert");
4094     assert(reserved_metadata <= reserved, "assert");
4095 
4096     if (UseCompressedClassPointers) {
4097       size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
4098       assert(reserved_class > 0, "assert");
4099       assert(reserved_class < reserved, "assert");
4100     }
4101   }
4102 
4103   static void test_committed() {
4104     size_t committed = MetaspaceAux::committed_bytes();
4105 
4106     assert(committed > 0, "assert");
4107 
4108     size_t reserved  = MetaspaceAux::reserved_bytes();
4109     assert(committed <= reserved, "assert");
4110 
4111     size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
4112     assert(committed_metadata > 0, "assert");
4113     assert(committed_metadata <= committed, "assert");
4114 
4115     if (UseCompressedClassPointers) {
4116       size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
4117       assert(committed_class > 0, "assert");
4118       assert(committed_class < committed, "assert");
4119     }
4120   }
4121 
4122   static void test_virtual_space_list_large_chunk() {
4123     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
4124     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4125     // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be
4126     // vm_allocation_granularity aligned on Windows.
4127     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
4128     large_size += (os::vm_page_size()/BytesPerWord);
4129     vs_list->get_new_chunk(large_size, 0);
4130   }
4131 
4132   static void test() {
4133     test_reserved();
4134     test_committed();
4135     test_virtual_space_list_large_chunk();
4136   }
4137 };
4138 
4139 void TestMetaspaceAux_test() {
4140   TestMetaspaceAuxTest::test();
4141 }
4142 
4143 class TestVirtualSpaceNodeTest {
4144   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
4145                                           size_t& num_small_chunks,
4146                                           size_t& num_specialized_chunks) {
4147     num_medium_chunks = words_left / MediumChunk;
4148     words_left = words_left % MediumChunk;
4149 
4150     num_small_chunks = words_left / SmallChunk;
4151     words_left = words_left % SmallChunk;
4152     // how many specialized chunks can we get?
4153     num_specialized_chunks = words_left / SpecializedChunk;
4154     assert(words_left % SpecializedChunk == 0, "should be nothing left");
4155   }
4156 
4157  public:
4158   static void test() {
4159     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4160     const size_t vsn_test_size_words = MediumChunk  * 4;
4161     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
4162 
4163     // The chunk sizes must be multiples of each other, or this will fail
4164     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
4165     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
4166 
4167     { // No committed memory in VSN
4168       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
4169       VirtualSpaceNode vsn(vsn_test_size_bytes);
4170       vsn.initialize();
4171       vsn.retire(&cm);
4172       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
4173     }
4174 
4175     { // All of VSN is committed, half is used by chunks
4176       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
4177       VirtualSpaceNode vsn(vsn_test_size_bytes);
4178       vsn.initialize();
4179       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
4180       vsn.get_chunk_vs(MediumChunk);
4181       vsn.get_chunk_vs(MediumChunk);
4182       vsn.retire(&cm);
4183       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
4184       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
4185     }
4186 
4187     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
4188     // This doesn't work for systems with vm_page_size >= 16K.
4189     if (page_chunks < MediumChunk) {
4190       // 4 pages of VSN is committed, some is used by chunks
4191       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
4192       VirtualSpaceNode vsn(vsn_test_size_bytes);
4193 
4194       vsn.initialize();
4195       vsn.expand_by(page_chunks, page_chunks);
4196       vsn.get_chunk_vs(SmallChunk);
4197       vsn.get_chunk_vs(SpecializedChunk);
4198       vsn.retire(&cm);
4199 
4200       // committed - used = words left to retire
4201       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
4202 
4203       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
4204       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
4205 
4206       assert(num_medium_chunks == 0, "should not get any medium chunks");
4207       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
4208       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
4209     }
4210 
4211     { // Half of VSN is committed, a humongous chunk is used
4212       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
4213       VirtualSpaceNode vsn(vsn_test_size_bytes);
4214       vsn.initialize();
4215       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
4216       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
4217       vsn.retire(&cm);
4218 
4219       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
4220       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
4221       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
4222 
4223       assert(num_medium_chunks == 0, "should not get any medium chunks");
4224       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "free chunk count should match the chunked-up remainder");
4225       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
4226     }
4227 
4228   }
4229 
4230 #define assert_is_available_positive(word_size) \
4231   assert(vsn.is_available(word_size), \
4232          #word_size ": " PTR_FORMAT " bytes were not available in " \
4233          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
4234          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
4235 
4236 #define assert_is_available_negative(word_size) \
4237   assert(!vsn.is_available(word_size), \
4238          #word_size ": " PTR_FORMAT " bytes should not be available in " \
4239          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
4240          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
4241 
4242   static void test_is_available_positive() {
4243     // Reserve some memory.
4244     VirtualSpaceNode vsn(os::vm_allocation_granularity());
4245     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4246 
4247     // Commit some memory.
4248     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4249     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4250     assert(expanded, "Failed to commit");
4251 
4252     // Check that is_available accepts the committed size.
4253     assert_is_available_positive(commit_word_size);
4254 
4255     // Check that is_available accepts half the committed size.
4256     size_t expand_word_size = commit_word_size / 2;
4257     assert_is_available_positive(expand_word_size);
4258   }
4259 
4260   static void test_is_available_negative() {
4261     // Reserve some memory.
4262     VirtualSpaceNode vsn(os::vm_allocation_granularity());
4263     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4264 
4265     // Commit some memory.
4266     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4267     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4268     assert(expanded, "Failed to commit");
4269 
4270     // Check that is_available doesn't accept a too large size.
4271     size_t two_times_commit_word_size = commit_word_size * 2;
4272     assert_is_available_negative(two_times_commit_word_size);
4273   }
4274 
4275   static void test_is_available_overflow() {
4276     // Reserve some memory.
4277     VirtualSpaceNode vsn(os::vm_allocation_granularity());
4278     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4279 
4280     // Commit some memory.
4281     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4282     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4283     assert(expanded, "Failed to commit");
4284 
4285     // Calculate a size that will overflow the virtual space size.
4286     void* virtual_space_max = (void*)(uintptr_t)-1;
4287     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
4288     size_t overflow_size = bottom_to_max + BytesPerWord;
4289     size_t overflow_word_size = overflow_size / BytesPerWord;
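         // overflow_word_size, measured from bottom(), reaches one word past the end of the
         // address space, so adding it to any in-range pointer overflows.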
4290 
4291     // Check that is_available can handle the overflow.
4292     assert_is_available_negative(overflow_word_size);
4293   }
4294 
4295   static void test_is_available() {
4296     TestVirtualSpaceNodeTest::test_is_available_positive();
4297     TestVirtualSpaceNodeTest::test_is_available_negative();
4298     TestVirtualSpaceNodeTest::test_is_available_overflow();
4299   }
4300 };
4301 
4302 void TestVirtualSpaceNode_test() {
4303   TestVirtualSpaceNodeTest::test();
4304   TestVirtualSpaceNodeTest::test_is_available();
4305 }
4306 
4307 // The following test is placed here instead of a gtest / unittest file
4308 // because the ChunkManager class is only available in this file.
4309 void ChunkManager_test_list_index() {
4310   ChunkManager manager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
4311 
4312   // Test previous bug where a query for a humongous class metachunk,
4313   // incorrectly matched the non-class medium metachunk size.
4314   {
4315     assert(MediumChunk > ClassMediumChunk, "Precondition for test");
4316 
4317     ChunkIndex index = manager.list_index(MediumChunk);
4318 
4319     assert(index == HumongousIndex,
4320            "Requested size is larger than ClassMediumChunk,"
4321            " so should return HumongousIndex. Got index: %d", (int)index);
4322   }
4323 
4324   // Check the specified sizes as well.
4325   {
4326     ChunkIndex index = manager.list_index(ClassSpecializedChunk);
4327     assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
4328   }
4329   {
4330     ChunkIndex index = manager.list_index(ClassSmallChunk);
4331     assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
4332   }
4333   {
4334     ChunkIndex index = manager.list_index(ClassMediumChunk);
4335     assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
4336   }
4337   {
4338     ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
4339     assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index);
4340   }
4341 }
4342 
4343 #endif // !PRODUCT
4344 
4345 #ifdef ASSERT
4346 
4347 // ChunkManagerReturnTestImpl stresses taking chunks from and returning chunks to the
4348 // ChunkManager, keeping track of the expected ChunkManager content so the counters can
4349 // be verified after every operation.
4350 class ChunkManagerReturnTestImpl {
4351 
4352   VirtualSpaceNode _vsn;
4353   ChunkManager _cm;
4354 
4355   // The expected content of the chunk manager.
4356   unsigned _chunks_in_chunkmanager;
4357   size_t _words_in_chunkmanager;
4358 
4359   // A fixed size pool of chunks. Chunks may be in the chunk manager (free) or not (in use).
4360   static const int num_chunks = 256;
4361   Metachunk* _pool[num_chunks];
4362 
4363   // Helper, return a random position into the chunk pool.
4364   static int get_random_position() {
4365     return os::random() % num_chunks;
4366   }
4367 
4368   // Asserts that ChunkManager counters match expectations.
4369   void assert_counters() {
4370     assert(_vsn.container_count() == num_chunks - _chunks_in_chunkmanager, "vsn counter mismatch.");
4371     assert(_cm.free_chunks_count() == _chunks_in_chunkmanager, "cm counter mismatch.");
4372     assert(_cm.free_chunks_total_words() == _words_in_chunkmanager, "cm counter mismatch.");
4373   }
4374 
4375   // Get a random chunk size. Equal chance to get a spec/small/med chunk size or
4376   // a humongous chunk size. The latter is itself random in the range [med+spec..5*med].
4377   size_t get_random_chunk_size() {
4378     const size_t sizes [] = { SpecializedChunk, SmallChunk, MediumChunk };
4379     const int rand = os::random() % 4;
4380     if (rand < 3) {
4381       return sizes[rand];
4382     } else {
4383       // Note: this affects the max. size of space (see _vsn initialization in ctor).
4384       return align_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk);
4385     }
4386   }
4387 
4388   // Starting at pool index <start>+1, find the next chunk tagged as either free or in use, depending
4389   // on <is_free>. Search wraps. Returns its position, or -1 if no matching chunk was found.
4390   int next_matching_chunk(int start, bool is_free) const {
4391     assert(start >= 0 && start < num_chunks, "invalid parameter");
4392     int pos = start;
4393     do {
4394       if (++pos == num_chunks) {
4395         pos = 0;
4396       }
4397       if (_pool[pos]->is_tagged_free() == is_free) {
4398         return pos;
4399       }
4400     } while (pos != start);
4401     return -1;
4402   }
4403 
4404   // A structure to keep information about a chunk list, including which
4405   // chunks are part of it. This is needed to remember a chunk list we are about
4406   // to return to the ChunkManager, because the original list will be destroyed.
4407   struct AChunkList {
4408     Metachunk* head;
4409     Metachunk* all[num_chunks];
4410     size_t size;
4411     int num;
4412     ChunkIndex index;
4413   };
4414 
4415   // Assemble, from the in-use chunks (not in the chunk manager) in the pool,
4416   // a random chunk list of max. length <list_size> of chunks with the same
4417   // ChunkIndex (chunk size).
4418   // Returns false if list cannot be assembled. List is returned in the <out>
4419   // structure. Returned list may be smaller than <list_size>.
4420   bool assemble_random_chunklist(AChunkList* out, int list_size) {
4421     // Choose a random in-use chunk from the pool...
4422     const int headpos = next_matching_chunk(get_random_position(), false);
4423     if (headpos == -1) {
4424       return false;
4425     }
4426     Metachunk* const head = _pool[headpos];
4427     out->all[0] = head;
4428     assert(head->is_tagged_free() == false, "Chunk state mismatch");
4429     // ..then go from there, chain it up with up to list_size - 1 number of other
4430     // in-use chunks of the same index.
4431     const ChunkIndex index = _cm.list_index(head->word_size());
4432     int num_added = 1;
4433     size_t size_added = head->word_size();
4434     int pos = headpos;
4435     Metachunk* tail = head;
4436     do {
4437       pos = next_matching_chunk(pos, false);
4438       if (pos != headpos) {
4439         Metachunk* c = _pool[pos];
4440         assert(c->is_tagged_free() == false, "Chunk state mismatch");
4441         if (index == _cm.list_index(c->word_size())) {
4442           tail->set_next(c);
4443           c->set_prev(tail);
4444           tail = c;
4445           out->all[num_added] = c;
4446           num_added ++;
4447           size_added += c->word_size();
4448         }
4449       }
4450     } while (num_added < list_size && pos != headpos);
4451     out->head = head;
4452     out->index = index;
4453     out->size = size_added;
4454     out->num = num_added;
4455     return true;
4456   }
4457 
4458   // Take a single random chunk from the ChunkManager.
4459   bool take_single_random_chunk_from_chunkmanager() {
4460     assert_counters();
4461     _cm.locked_verify();
4462     int pos = next_matching_chunk(get_random_position(), true);
4463     if (pos == -1) {
4464       return false;
4465     }
4466     Metachunk* c = _pool[pos];
4467     assert(c->is_tagged_free(), "Chunk state mismatch");
4468     // Note: instead of using ChunkManager::remove_chunk on this one chunk, we call
4469     // ChunkManager::free_chunks_get() with this chunk's word size. We really want
4470     // to exercise ChunkManager::free_chunks_get() because that one gets called for
4471     // normal chunk allocation.
4472     Metachunk* c2 = _cm.free_chunks_get(c->word_size());
4473     assert(c2 != NULL, "Unexpected.");
4474     assert(!c2->is_tagged_free(), "Chunk state mismatch");
4475     assert(c2->next() == NULL && c2->prev() == NULL, "Chunk should be outside of a list.");
4476     _chunks_in_chunkmanager --;
4477     _words_in_chunkmanager -= c->word_size();
4478     assert_counters();
4479     _cm.locked_verify();
4480     return true;
4481   }
4482 
4483   // Returns a single random chunk to the chunk manager. Returns false if that
4484   // was not possible (all chunks are already in the chunk manager).
4485   bool return_single_random_chunk_to_chunkmanager() {
4486     assert_counters();
4487     _cm.locked_verify();
4488     int pos = next_matching_chunk(get_random_position(), false);
4489     if (pos == -1) {
4490       return false;
4491     }
4492     Metachunk* c = _pool[pos];
4493     assert(c->is_tagged_free() == false, "wrong chunk information");
4494     _cm.return_single_chunk(_cm.list_index(c->word_size()), c);
4495     _chunks_in_chunkmanager ++;
4496     _words_in_chunkmanager += c->word_size();
4497     assert(c->is_tagged_free() == true, "wrong chunk information");
4498     assert_counters();
4499     _cm.locked_verify();
4500     return true;
4501   }
4502 
4503   // Return a random chunk list to the chunk manager. Returns the length of the
4504   // returned list.
4505   int return_random_chunk_list_to_chunkmanager(int list_size) {
4506     assert_counters();
4507     _cm.locked_verify();
4508     AChunkList aChunkList;
4509     if (!assemble_random_chunklist(&aChunkList, list_size)) {
4510       return 0;
4511     }
4512     // Before the chunks are returned, they should all be tagged as in use.
4513     for (int i = 0; i < aChunkList.num; i ++) {
4514       assert(!aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4515     }
4516     _cm.return_chunk_list(aChunkList.index, aChunkList.head);
4517     _chunks_in_chunkmanager += aChunkList.num;
4518     _words_in_chunkmanager += aChunkList.size;
4519     // After all chunks are returned, check that they are now tagged free.
4520     for (int i = 0; i < aChunkList.num; i ++) {
4521       assert(aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4522     }
4523     assert_counters();
4524     _cm.locked_verify();
4525     return aChunkList.num;
4526   }
4527 
4528 public:
4529 
4530   ChunkManagerReturnTestImpl()
4531     : _vsn(align_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
4532     , _cm(SpecializedChunk, SmallChunk, MediumChunk)
4533     , _chunks_in_chunkmanager(0)
4534     , _words_in_chunkmanager(0)
4535   {
4536     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4537     // Allocate virtual space and carve out random chunks. Keep these chunks in the _pool. These chunks are
4538     // "in use", because they have not yet been added to any chunk manager.
4539     _vsn.initialize();
4540     _vsn.expand_by(_vsn.reserved_words(), _vsn.reserved_words());
4541     for (int i = 0; i < num_chunks; i ++) {
4542       const size_t size = get_random_chunk_size();
4543       _pool[i] = _vsn.get_chunk_vs(size);
4544       assert(_pool[i] != NULL, "allocation failed");
4545     }
4546     assert_counters();
4547     _cm.locked_verify();
4548   }
4549 
4550   // Test entry point.
4551   // Return some chunks to the chunk manager (return phase). Take some chunks out (take phase). Repeat.
4552   // Chunks are chosen randomly. The number of chunks to return or take is chosen randomly, but affected
4553   // by the <phase_length_factor> argument: a factor of 0.0 will cause the test to quickly alternate between
4554   // returning and taking, whereas a factor of 1.0 will take/return all chunks from/to the
4555   // chunk manager, thereby emptying or filling it completely.
4556   void do_test(float phase_length_factor) {
4557     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4558     assert_counters();
4559     // Execute n operations, an operation being the move of a single chunk to/from the chunk manager.
4560     const int num_max_ops = num_chunks * 100;
4561     int num_ops = num_max_ops;
4562     const int average_phase_length = (int)(phase_length_factor * num_chunks);
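         // A small random jitter (os::random() % 8 - 4) keeps phase lengths from lining up
         // exactly with the pool size.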
4563     int num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
4564     bool return_phase = true;
4565     while (num_ops > 0) {
4566       int chunks_moved = 0;
4567       if (return_phase) {
4568         // Randomly switch between returning a single chunk or a random length chunk list.
4569         if (os::random() % 2 == 0) {
4570           if (return_single_random_chunk_to_chunkmanager()) {
4571             chunks_moved = 1;
4572           }
4573         } else {
4574           const int list_length = MAX2(1, (os::random() % num_ops_until_switch));
4575           chunks_moved = return_random_chunk_list_to_chunkmanager(list_length);
4576         }
4577       } else {
4578         // Breathe out: take a single chunk back from the chunk manager.
4579         if (take_single_random_chunk_from_chunkmanager()) {
4580           chunks_moved = 1;
4581         }
4582       }
4583       num_ops -= chunks_moved;
4584       num_ops_until_switch -= chunks_moved;
4585       if (chunks_moved == 0 || num_ops_until_switch <= 0) {
4586         return_phase = !return_phase;
4587         num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
4588       }
4589     }
4590   }
4591 };
4592 
4593 void* setup_chunkmanager_returntests() {
4594   ChunkManagerReturnTestImpl* p = new ChunkManagerReturnTestImpl();
4595   return p;
4596 }
4597 
4598 void teardown_chunkmanager_returntests(void* p) {
4599   delete (ChunkManagerReturnTestImpl*) p;
4600 }
4601 
4602 void run_chunkmanager_returntests(void* p, float phase_length) {
4603   ChunkManagerReturnTestImpl* test = (ChunkManagerReturnTestImpl*) p;
4604   test->do_test(phase_length);
4605 }
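     // Typical usage, e.g. from a gtest wrapper (hypothetical sketch; the real callers live in
     // the gtest sources, not in this file):
     //   void* t = setup_chunkmanager_returntests();
     //   run_chunkmanager_returntests(t, 0.5f);  // 0.5f: arbitrary phase length factor in [0.0, 1.0]
     //   teardown_chunkmanager_returntests(t);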
4606 
4607 // The following test is placed here instead of a gtest / unittest file
4608 // because the SpaceManager class is only available in this file.
4609 class SpaceManagerTest : AllStatic {
4610   friend void SpaceManager_test_adjust_initial_chunk_size();
4611 
4612   static void test_adjust_initial_chunk_size(bool is_class) {
4613     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
4614     const size_t normal   = SpaceManager::small_chunk_size(is_class);
4615     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
4616 
4617 #define test_adjust_initial_chunk_size(value, expected, is_class_value)             \
4618     do {                                                                            \
4619       size_t v = value;                                                             \
4620       size_t e = expected;                                                          \
4621       size_t adjusted = SpaceManager::adjust_initial_chunk_size(v, (is_class_value)); \
4622       assert(adjusted == e, "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, adjusted); \
4623     } while (0)
4624 
4625     // Smallest (specialized)
4626     test_adjust_initial_chunk_size(1,            smallest, is_class);
4627     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
4628     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
4629 
4630     // Small
4631     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
4632     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
4633     test_adjust_initial_chunk_size(normal,       normal, is_class);
4634 
4635     // Medium
4636     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
4637     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
4638     test_adjust_initial_chunk_size(medium,     medium, is_class);
4639 
4640     // Humongous
4641     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
4642 
4643 #undef test_adjust_initial_chunk_size
4644   }
4645 
4646   static void test_adjust_initial_chunk_size() {
4647     test_adjust_initial_chunk_size(false);
4648     test_adjust_initial_chunk_size(true);
4649   }
4650 };
4651 
4652 void SpaceManager_test_adjust_initial_chunk_size() {
4653   SpaceManagerTest::test_adjust_initial_chunk_size();
4654 }
4655 
4656 #endif // ASSERT