1 /*
   2  * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "gc/shared/gcLocker.hpp"
  29 #include "logging/log.hpp"
  30 #include "logging/logStream.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/binaryTreeDictionary.hpp"
  33 #include "memory/filemap.hpp"
  34 #include "memory/freeList.hpp"
  35 #include "memory/metachunk.hpp"
  36 #include "memory/metaspace.hpp"
  37 #include "memory/metaspaceGCThresholdUpdater.hpp"
  38 #include "memory/metaspaceShared.hpp"
  39 #include "memory/metaspaceTracer.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "memory/universe.hpp"
  42 #include "runtime/atomic.hpp"
  43 #include "runtime/globals.hpp"
  44 #include "runtime/init.hpp"
  45 #include "runtime/java.hpp"
  46 #include "runtime/mutex.hpp"
  47 #include "runtime/orderAccess.inline.hpp"
  48 #include "services/memTracker.hpp"
  49 #include "services/memoryService.hpp"
  50 #include "utilities/copy.hpp"
  51 #include "utilities/debug.hpp"
  52 #include "utilities/macros.hpp"
  53 
  54 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  55 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
  56 
  57 // Set this constant to true to enable slow integrity checking of the free chunk lists
  58 const bool metaspace_slow_verify = false;
  59 
  60 size_t const allocation_from_dictionary_limit = 4 * K;
  61 
  62 MetaWord* last_allocated = 0;
  63 
  64 size_t Metaspace::_compressed_class_space_size;
  65 const MetaspaceTracer* Metaspace::_tracer = NULL;
  66 
  67 // Used in declarations in SpaceManager and ChunkManager
  68 enum ChunkIndex {
  69   ZeroIndex = 0,
  70   SpecializedIndex = ZeroIndex,
  71   SmallIndex = SpecializedIndex + 1,
  72   MediumIndex = SmallIndex + 1,
  73   HumongousIndex = MediumIndex + 1,
  74   NumberOfFreeLists = 3,
  75   NumberOfInUseLists = 4
  76 };
  77 
  78 // Helper, returns a descriptive name for the given index.
  79 static const char* chunk_size_name(ChunkIndex index) {
  80   switch (index) {
  81     case SpecializedIndex:
  82       return "specialized";
  83     case SmallIndex:
  84       return "small";
  85     case MediumIndex:
  86       return "medium";
  87     case HumongousIndex:
  88       return "humongous";
  89     default:
  90       return "Invalid index";
  91   }
  92 }
  93 
  94 enum ChunkSizes {    // in words.
  95   ClassSpecializedChunk = 128,
  96   SpecializedChunk = 128,
  97   ClassSmallChunk = 256,
  98   SmallChunk = 512,
  99   ClassMediumChunk = 4 * K,
 100   MediumChunk = 8 * K
 101 };
 102 
 103 static ChunkIndex next_chunk_index(ChunkIndex i) {
 104   assert(i < NumberOfInUseLists, "Out of bounds");
 105   return (ChunkIndex) (i+1);
 106 }
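     // Illustrative sketch (comment only, not compiled): how a requested chunk
     // word size maps onto the fixed categories above for the non-class
     // metaspace, using the ChunkSizes values defined in this file.
     //
     //   600 words:  larger than SpecializedChunk (128) and SmallChunk (512),
     //               but <= MediumChunk (8 * K)  -> served as a medium chunk.
     //   9000 words: larger than MediumChunk     -> treated as humongous and
     //               kept in the humongous dictionary instead of a free list.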
 107 
 108 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 109 uint MetaspaceGC::_shrink_factor = 0;
 110 bool MetaspaceGC::_should_concurrent_collect = false;
 111 
 112 typedef class FreeList<Metachunk> ChunkList;
 113 
 114 // Manages the global free lists of chunks.
 115 class ChunkManager : public CHeapObj<mtInternal> {
 116   friend class TestVirtualSpaceNodeTest;
 117 
 118   // Free list of chunks of different sizes.
 119   //   SpecializedChunk
 120   //   SmallChunk
 121   //   MediumChunk
 122   ChunkList _free_chunks[NumberOfFreeLists];
 123 
 124   // Return non-humongous chunk list by its index.
 125   ChunkList* free_chunks(ChunkIndex index);
 126 
 127   // Returns non-humongous chunk list for the given chunk word size.
 128   ChunkList* find_free_chunks_list(size_t word_size);
 129 
 130   //   HumongousChunk
 131   ChunkTreeDictionary _humongous_dictionary;
 132 
 133   // Returns the humongous chunk dictionary.
 134   ChunkTreeDictionary* humongous_dictionary() {
 135     return &_humongous_dictionary;
 136   }
 137 
 138   // Size, in metaspace words, of all chunks managed by this ChunkManager
 139   size_t _free_chunks_total;
 140   // Number of chunks in this ChunkManager
 141   size_t _free_chunks_count;
 142 
 143   // Update counters after a chunk has been added or removed.
 144   void account_for_added_chunk(const Metachunk* c);
 145   void account_for_removed_chunk(const Metachunk* c);
 146 
 147   // Debug support
 148 
 149   size_t sum_free_chunks();
 150   size_t sum_free_chunks_count();
 151 
 152   void locked_verify_free_chunks_total();
 153   void slow_locked_verify_free_chunks_total() {
 154     if (metaspace_slow_verify) {
 155       locked_verify_free_chunks_total();
 156     }
 157   }
 158   void locked_verify_free_chunks_count();
 159   void slow_locked_verify_free_chunks_count() {
 160     if (metaspace_slow_verify) {
 161       locked_verify_free_chunks_count();
 162     }
 163   }
 164   void verify_free_chunks_count();
 165 
 166  public:
 167 
 168   ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
 169       : _free_chunks_total(0), _free_chunks_count(0) {
 170     _free_chunks[SpecializedIndex].set_size(specialized_size);
 171     _free_chunks[SmallIndex].set_size(small_size);
 172     _free_chunks[MediumIndex].set_size(medium_size);
 173   }
 174 
 175   // Add a chunk to, or remove (allocate) a chunk from, the global freelist.
 176   Metachunk* chunk_freelist_allocate(size_t word_size);
 177 
 178   // Map a size to a list index assuming that there are lists
 179   // for specialized, small, medium, and humongous chunks.
 180   ChunkIndex list_index(size_t size);
 181 
 182   // Map a given index to the chunk size.
 183   size_t size_by_index(ChunkIndex index);
 184 
 185   // Take a chunk from the ChunkManager. The chunk is expected to be in
 186   // the chunk manager (the freelist if non-humongous, the dictionary if
 187   // humongous).
 188   void remove_chunk(Metachunk* chunk);
 189 
 190   // Return a single chunk of type index to the ChunkManager.
 191   void return_single_chunk(ChunkIndex index, Metachunk* chunk);
 192 
 193   // Add the simple linked list of chunks to the freelist of chunks
 194   // of type index.
 195   void return_chunk_list(ChunkIndex index, Metachunk* chunk);
 196 
 197   // Total of the space in the free chunks list
 198   size_t free_chunks_total_words();
 199   size_t free_chunks_total_bytes();
 200 
 201   // Number of chunks in the free chunks list
 202   size_t free_chunks_count();
 203 
 204   // Remove from a list by size.  Selects list based on size of chunk.
 205   Metachunk* free_chunks_get(size_t chunk_word_size);
 206 
 207 #define index_bounds_check(index)                                         \
 208   assert(index == SpecializedIndex ||                                     \
 209          index == SmallIndex ||                                           \
 210          index == MediumIndex ||                                          \
 211          index == HumongousIndex, "Bad index: %d", (int) index)
 212 
 213   size_t num_free_chunks(ChunkIndex index) const {
 214     index_bounds_check(index);
 215 
 216     if (index == HumongousIndex) {
 217       return _humongous_dictionary.total_free_blocks();
 218     }
 219 
 220     ssize_t count = _free_chunks[index].count();
 221     return count == -1 ? 0 : (size_t) count;
 222   }
 223 
 224   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 225     index_bounds_check(index);
 226 
 227     size_t word_size = 0;
 228     if (index == HumongousIndex) {
 229       word_size = _humongous_dictionary.total_size();
 230     } else {
 231       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 232       word_size = size_per_chunk_in_words * num_free_chunks(index);
 233     }
 234 
 235     return word_size * BytesPerWord;
 236   }
 237 
 238   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 239     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 240                                          num_free_chunks(SmallIndex),
 241                                          num_free_chunks(MediumIndex),
 242                                          num_free_chunks(HumongousIndex),
 243                                          size_free_chunks_in_bytes(SpecializedIndex),
 244                                          size_free_chunks_in_bytes(SmallIndex),
 245                                          size_free_chunks_in_bytes(MediumIndex),
 246                                          size_free_chunks_in_bytes(HumongousIndex));
 247   }
 248 
 249   // Debug support
 250   void verify();
 251   void slow_verify() {
 252     if (metaspace_slow_verify) {
 253       verify();
 254     }
 255   }
 256   void locked_verify();
 257   void slow_locked_verify() {
 258     if (metaspace_slow_verify) {
 259       locked_verify();
 260     }
 261   }
 262   void verify_free_chunks_total();
 263 
 264   void locked_print_free_chunks(outputStream* st);
 265   void locked_print_sum_free_chunks(outputStream* st);
 266 
 267   void print_on(outputStream* st) const;
 268 };
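     // Illustrative usage sketch for ChunkManager (comment only, not compiled;
     // locking and NULL handling abbreviated). The calls match the declarations above.
     //
     //   ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
     //   Metachunk* c = cm.chunk_freelist_allocate(SmallChunk);   // take a small chunk
     //   if (c != NULL) {
     //     // ... use the chunk, then hand it back to the global free list:
     //     cm.return_single_chunk(SmallIndex, c);
     //   }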
 269 
 270 class SmallBlocks : public CHeapObj<mtClass> {
 271   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 272   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 273 
 274  private:
 275   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 276 
 277   FreeList<Metablock>& list_at(size_t word_size) {
 278     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 279     return _small_lists[word_size - _small_block_min_size];
 280   }
 281 
 282  public:
 283   SmallBlocks() {
 284     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 285       uint k = i - _small_block_min_size;
 286       _small_lists[k].set_size(i);
 287     }
 288   }
 289 
 290   size_t total_size() const {
 291     size_t result = 0;
 292     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 293       uint k = i - _small_block_min_size;
 294       result = result + _small_lists[k].count() * _small_lists[k].size();
 295     }
 296     return result;
 297   }
 298 
 299   static uint small_block_max_size() { return _small_block_max_size; }
 300   static uint small_block_min_size() { return _small_block_min_size; }
 301 
 302   MetaWord* get_block(size_t word_size) {
 303     if (list_at(word_size).count() > 0) {
 304       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 305       return new_block;
 306     } else {
 307       return NULL;
 308     }
 309   }
 310   void return_block(Metablock* free_chunk, size_t word_size) {
 311     list_at(word_size).return_chunk_at_head(free_chunk, false);
 312     assert(list_at(word_size).count() > 0, "Should have a chunk");
 313   }
 314 
 315   void print_on(outputStream* st) const {
 316     st->print_cr("SmallBlocks:");
 317     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 318       uint k = i - _small_block_min_size;
 319       st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
 320     }
 321   }
 322 };
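     // Illustrative sketch of the SmallBlocks indexing scheme (comment only, not
     // compiled). A returned block of N words lives in
     // _small_lists[N - _small_block_min_size], so, assuming the 3-word minimum
     // block size noted elsewhere in this file:
     //
     //   small_blocks->return_block(blk, 5);        // lands in _small_lists[2]
     //   MetaWord* p = small_blocks->get_block(5);   // pops that same list, or NULL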
 323 
 324 // Used to manage the free list of Metablocks (a block corresponds
 325 // to the allocation of a quantum of metadata).
 326 class BlockFreelist : public CHeapObj<mtClass> {
 327   BlockTreeDictionary* const _dictionary;
 328   SmallBlocks* _small_blocks;
 329 
 330   // Only allocate and split from freelist if the size of the allocation
 331   // is at least 1/4th the size of the available block.
 332   const static int WasteMultiplier = 4;
 333 
 334   // Accessors
 335   BlockTreeDictionary* dictionary() const { return _dictionary; }
 336   SmallBlocks* small_blocks() {
 337     if (_small_blocks == NULL) {
 338       _small_blocks = new SmallBlocks();
 339     }
 340     return _small_blocks;
 341   }
 342 
 343  public:
 344   BlockFreelist();
 345   ~BlockFreelist();
 346 
 347   // Get and return a block to the free list
 348   MetaWord* get_block(size_t word_size);
 349   void return_block(MetaWord* p, size_t word_size);
 350 
 351   size_t total_size() const  {
 352     size_t result = dictionary()->total_size();
 353     if (_small_blocks != NULL) {
 354       result = result + _small_blocks->total_size();
 355     }
 356     return result;
 357   }
 358 
 359   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 360   void print_on(outputStream* st) const;
 361 };
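     // Illustrative sketch of how BlockFreelist routes blocks by size (comment
     // only, not compiled; word sizes are symbolic). This mirrors the definitions
     // of return_block() and get_block() further down in this file:
     //
     //   bf->return_block(p, n);   // n <  small_block_max_size() -> SmallBlocks lists
     //   bf->return_block(q, m);   // m >= small_block_max_size() -> BlockTreeDictionary
     //   bf->get_block(n);         // tries SmallBlocks first, then the dictionary if the
     //                             //   size is large enough; may return NULL otherwise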
 362 
 363 // A VirtualSpaceList node.
 364 class VirtualSpaceNode : public CHeapObj<mtClass> {
 365   friend class VirtualSpaceList;
 366 
 367   // Link to next VirtualSpaceNode
 368   VirtualSpaceNode* _next;
 369 
 370   // total in the VirtualSpace
 371   MemRegion _reserved;
 372   ReservedSpace _rs;
 373   VirtualSpace _virtual_space;
 374   MetaWord* _top;
 375   // count of chunks contained in this VirtualSpace
 376   uintx _container_count;
 377 
 378   // Convenience functions to access the _virtual_space
 379   char* low()  const { return virtual_space()->low(); }
 380   char* high() const { return virtual_space()->high(); }
 381 
 382   // The first Metachunk will be allocated at the bottom of the
 383   // VirtualSpace
 384   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 385 
 386   // Committed but unused space in the virtual space
 387   size_t free_words_in_vs() const;
 388  public:
 389 
 390   VirtualSpaceNode(size_t byte_size);
 391   VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
 392   ~VirtualSpaceNode();
 393 
 394   // Convenience functions for logical bottom and end
 395   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 396   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 397 
 398   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 399 
 400   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 401   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 402 
 403   bool is_pre_committed() const { return _virtual_space.special(); }
 404 
 405   // address of next available space in _virtual_space;
 406   // Accessors
 407   VirtualSpaceNode* next() { return _next; }
 408   void set_next(VirtualSpaceNode* v) { _next = v; }
 409 
 410   void set_reserved(MemRegion const v) { _reserved = v; }
 411   void set_top(MetaWord* v) { _top = v; }
 412 
 413   // Accessors
 414   MemRegion* reserved() { return &_reserved; }
 415   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 416 
 417   // Returns true if "word_size" is available in the VirtualSpace
 418   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 419 
 420   MetaWord* top() const { return _top; }
 421   void inc_top(size_t word_size) { _top += word_size; }
 422 
 423   uintx container_count() { return _container_count; }
 424   void inc_container_count();
 425   void dec_container_count();
 426 #ifdef ASSERT
 427   uintx container_count_slow();
 428   void verify_container_count();
 429 #endif
 430 
 431   // used and capacity in this single entry in the list
 432   size_t used_words_in_vs() const;
 433   size_t capacity_words_in_vs() const;
 434 
 435   bool initialize();
 436 
 437   // get space from the virtual space
 438   Metachunk* take_from_committed(size_t chunk_word_size);
 439 
 440   // Allocate a chunk from the virtual space and return it.
 441   Metachunk* get_chunk_vs(size_t chunk_word_size);
 442 
 443   // Expands/shrinks the committed space in a virtual space.  Delegates
 444   // to VirtualSpace.
 445   bool expand_by(size_t min_words, size_t preferred_words);
 446 
 447   // In preparation for deleting this node, remove all the chunks
 448   // in the node from any freelist.
 449   void purge(ChunkManager* chunk_manager);
 450 
 451   // If an allocation doesn't fit in the current node, a new node is created.
 452   // Before that happens, allocate chunks out of the remaining committed space
 453   // in this node so that memory isn't wasted.
 454   // This always works out evenly because all the chunk sizes are multiples of
 455   // the smallest chunk size.
 456   void retire(ChunkManager* chunk_manager);
 457 
 458 #ifdef ASSERT
 459   // Debug support
 460   void mangle();
 461 #endif
 462 
 463   void print_on(outputStream* st) const;
 464 };
 465 
 466 #define assert_is_ptr_aligned(ptr, alignment) \
 467   assert(is_ptr_aligned(ptr, alignment),      \
 468          PTR_FORMAT " is not aligned to "     \
 469          SIZE_FORMAT, p2i(ptr), alignment)
 470 
 471 #define assert_is_size_aligned(size, alignment) \
 472   assert(is_size_aligned(size, alignment),      \
 473          SIZE_FORMAT " is not aligned to "      \
 474          SIZE_FORMAT, size, alignment)
 475 
 476 
 477 // Decide if large pages should be committed when the memory is reserved.
 478 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 479   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 480     size_t words = bytes / BytesPerWord;
 481     bool is_class = false; // We never reserve large pages for the class space.
 482     if (MetaspaceGC::can_expand(words, is_class) &&
 483         MetaspaceGC::allowed_expansion() >= words) {
 484       return true;
 485     }
 486   }
 487 
 488   return false;
 489 }
 490 
 491 // bytes is the size of the associated virtual space.
 492 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
 493   assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
 494 
 495 #if INCLUDE_CDS
 496   // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
 497   // a configurable address, generally at the top of the Java heap, so other
 498   // memory addresses don't conflict.
 499   if (DumpSharedSpaces) {
 500     bool large_pages = false; // No large pages when dumping the CDS archive.
 501     char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
 502 
 503     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
 504     if (_rs.is_reserved()) {
 505       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
 506     } else {
 507       // Get a mmap region anywhere if the SharedBaseAddress fails.
 508       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 509     }
 510     if (!_rs.is_reserved()) {
 511       vm_exit_during_initialization("Unable to allocate memory for shared space",
 512         err_msg(SIZE_FORMAT " bytes.", bytes));
 513     }
 514     MetaspaceShared::initialize_shared_rs(&_rs);
 515   } else
 516 #endif
 517   {
 518     bool large_pages = should_commit_large_pages_when_reserving(bytes);
 519 
 520     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 521   }
 522 
 523   if (_rs.is_reserved()) {
 524     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 525     assert(_rs.size() != 0, "Catch if we get a 0 size");
 526     assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
 527     assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
 528 
 529     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 530   }
 531 }
 532 
 533 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 534   Metachunk* chunk = first_chunk();
 535   Metachunk* invalid_chunk = (Metachunk*) top();
 536   while (chunk < invalid_chunk ) {
 537     assert(chunk->is_tagged_free(), "Should be tagged free");
 538     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 539     chunk_manager->remove_chunk(chunk);
 540     assert(chunk->next() == NULL &&
 541            chunk->prev() == NULL,
 542            "Was not removed from its list");
 543     chunk = (Metachunk*) next;
 544   }
 545 }
 546 
 547 #ifdef ASSERT
 548 uintx VirtualSpaceNode::container_count_slow() {
 549   uintx count = 0;
 550   Metachunk* chunk = first_chunk();
 551   Metachunk* invalid_chunk = (Metachunk*) top();
 552   while (chunk < invalid_chunk ) {
 553     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 554     // Don't count the chunks on the free lists.  Those are
 555     // still part of the VirtualSpaceNode but not currently
 556     // counted.
 557     if (!chunk->is_tagged_free()) {
 558       count++;
 559     }
 560     chunk = (Metachunk*) next;
 561   }
 562   return count;
 563 }
 564 #endif
 565 
 566 // List of VirtualSpaces for metadata allocation.
 567 class VirtualSpaceList : public CHeapObj<mtClass> {
 568   friend class VirtualSpaceNode;
 569 
 570   enum VirtualSpaceSizes {
 571     VirtualSpaceSize = 256 * K
 572   };
 573 
 574   // Head of the list
 575   VirtualSpaceNode* _virtual_space_list;
 576   // virtual space currently being used for allocations
 577   VirtualSpaceNode* _current_virtual_space;
 578 
 579   // Is this VirtualSpaceList used for the compressed class space
 580   bool _is_class;
 581 
 582   // Sum of reserved and committed memory in the virtual spaces
 583   size_t _reserved_words;
 584   size_t _committed_words;
 585 
 586   // Number of virtual spaces
 587   size_t _virtual_space_count;
 588 
 589   ~VirtualSpaceList();
 590 
 591   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
 592 
 593   void set_virtual_space_list(VirtualSpaceNode* v) {
 594     _virtual_space_list = v;
 595   }
 596   void set_current_virtual_space(VirtualSpaceNode* v) {
 597     _current_virtual_space = v;
 598   }
 599 
 600   void link_vs(VirtualSpaceNode* new_entry);
 601 
 602   // Get another virtual space and add it to the list.  This
 603   // is typically prompted by a failed attempt to allocate a chunk
 604   // and is typically followed by the allocation of a chunk.
 605   bool create_new_virtual_space(size_t vs_word_size);
 606 
 607   // Chunk up the unused committed space in the current
 608   // virtual space and add the chunks to the free list.
 609   void retire_current_virtual_space();
 610 
 611  public:
 612   VirtualSpaceList(size_t word_size);
 613   VirtualSpaceList(ReservedSpace rs);
 614 
 615   size_t free_bytes();
 616 
 617   Metachunk* get_new_chunk(size_t chunk_word_size,
 618                            size_t suggested_commit_granularity);
 619 
 620   bool expand_node_by(VirtualSpaceNode* node,
 621                       size_t min_words,
 622                       size_t preferred_words);
 623 
 624   bool expand_by(size_t min_words,
 625                  size_t preferred_words);
 626 
 627   VirtualSpaceNode* current_virtual_space() {
 628     return _current_virtual_space;
 629   }
 630 
 631   bool is_class() const { return _is_class; }
 632 
 633   bool initialization_succeeded() { return _virtual_space_list != NULL; }
 634 
 635   size_t reserved_words()  { return _reserved_words; }
 636   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
 637   size_t committed_words() { return _committed_words; }
 638   size_t committed_bytes() { return committed_words() * BytesPerWord; }
 639 
 640   void inc_reserved_words(size_t v);
 641   void dec_reserved_words(size_t v);
 642   void inc_committed_words(size_t v);
 643   void dec_committed_words(size_t v);
 644   void inc_virtual_space_count();
 645   void dec_virtual_space_count();
 646 
 647   bool contains(const void* ptr);
 648 
 649   // Unlink empty VirtualSpaceNodes and free them.
 650   void purge(ChunkManager* chunk_manager);
 651 
 652   void print_on(outputStream* st) const;
 653 
 654   class VirtualSpaceListIterator : public StackObj {
 655     VirtualSpaceNode* _virtual_spaces;
 656    public:
 657     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
 658       _virtual_spaces(virtual_spaces) {}
 659 
 660     bool repeat() {
 661       return _virtual_spaces != NULL;
 662     }
 663 
 664     VirtualSpaceNode* get_next() {
 665       VirtualSpaceNode* result = _virtual_spaces;
 666       if (_virtual_spaces != NULL) {
 667         _virtual_spaces = _virtual_spaces->next();
 668       }
 669       return result;
 670     }
 671   };
 672 };
 673 
 674 class Metadebug : AllStatic {
 675   // Debugging support for Metaspaces
 676   static int _allocation_fail_alot_count;
 677 
 678  public:
 679 
 680   static void init_allocation_fail_alot_count();
 681 #ifdef ASSERT
 682   static bool test_metadata_failure();
 683 #endif
 684 };
 685 
 686 int Metadebug::_allocation_fail_alot_count = 0;
 687 
 688 //  SpaceManager - used by Metaspace to handle allocations
 689 class SpaceManager : public CHeapObj<mtClass> {
 690   friend class Metaspace;
 691   friend class Metadebug;
 692 
 693  private:
 694 
 695   // protects allocations
 696   Mutex* const _lock;
 697 
 698   // Type of metadata allocated.
 699   Metaspace::MetadataType _mdtype;
 700 
 701   // List of chunks in use by this SpaceManager.  Allocations
 702   // are done from the current chunk.  The list is used for deallocating
 703   // chunks when the SpaceManager is freed.
 704   Metachunk* _chunks_in_use[NumberOfInUseLists];
 705   Metachunk* _current_chunk;
 706 
 707   // Maximum number of small chunks to allocate to a SpaceManager
 708   static uint const _small_chunk_limit;
 709 
 710   // Sum of all space in allocated chunks
 711   size_t _allocated_blocks_words;
 712 
 713   // Sum of all allocated chunks
 714   size_t _allocated_chunks_words;
 715   size_t _allocated_chunks_count;
 716 
 717   // Free lists of blocks are per SpaceManager since they
 718   // are assumed to be in chunks in use by the SpaceManager
 719   // and all chunks in use by a SpaceManager are freed when
 720   // the class loader using the SpaceManager is collected.
 721   BlockFreelist* _block_freelists;
 722 
 723   // protects virtualspace and chunk expansions
 724   static const char*  _expand_lock_name;
 725   static const int    _expand_lock_rank;
 726   static Mutex* const _expand_lock;
 727 
 728  private:
 729   // Accessors
 730   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
 731   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
 732     _chunks_in_use[index] = v;
 733   }
 734 
 735   BlockFreelist* block_freelists() const { return _block_freelists; }
 736 
 737   Metaspace::MetadataType mdtype() { return _mdtype; }
 738 
 739   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
 740   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
 741 
 742   Metachunk* current_chunk() const { return _current_chunk; }
 743   void set_current_chunk(Metachunk* v) {
 744     _current_chunk = v;
 745   }
 746 
 747   Metachunk* find_current_chunk(size_t word_size);
 748 
 749   // Add chunk to the list of chunks in use
 750   void add_chunk(Metachunk* v, bool make_current);
 751   void retire_current_chunk();
 752 
 753   Mutex* lock() const { return _lock; }
 754 
 755  protected:
 756   void initialize();
 757 
 758  public:
 759   SpaceManager(Metaspace::MetadataType mdtype,
 760                Mutex* lock);
 761   ~SpaceManager();
 762 
 763   enum ChunkMultiples {
 764     MediumChunkMultiple = 4
 765   };
 766 
 767   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
 768   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
 769   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
 770 
 771   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
 772 
 773   // Accessors
 774   bool is_class() const { return _mdtype == Metaspace::ClassType; }
 775 
 776   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
 777   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
 778   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
 779 
 780   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
 781 
 782   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
 783 
 784   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
 785   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
 786   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
 787   size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
 788   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
 789 
 790   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
 791 
 792   static Mutex* expand_lock() { return _expand_lock; }
 793 
 794   // Increment the per Metaspace and global running sums for Metachunks
 795   // by the given size.  This is used when a Metachunk is added to
 796   // the in-use list.
 797   void inc_size_metrics(size_t words);
 798   // Increment the per Metaspace and global running sums for Metablocks by the given
 799   // size.  This is used when a Metablock is allocated.
 800   void inc_used_metrics(size_t words);
 801   // Delete the portion of the running sums for this SpaceManager. That is,
 802   // the global running sums for the Metachunks and Metablocks are
 803   // decremented for all the Metachunks in-use by this SpaceManager.
 804   void dec_total_from_size_metrics();
 805 
 806   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
 807   // or return the unadjusted size if the requested size is humongous.
 808   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
 809   size_t adjust_initial_chunk_size(size_t requested) const;
 810 
 811   // Get the initial chunk size for this metaspace type.
 812   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
 813 
 814   size_t sum_capacity_in_chunks_in_use() const;
 815   size_t sum_used_in_chunks_in_use() const;
 816   size_t sum_free_in_chunks_in_use() const;
 817   size_t sum_waste_in_chunks_in_use() const;
 818   size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
 819 
 820   size_t sum_count_in_chunks_in_use();
 821   size_t sum_count_in_chunks_in_use(ChunkIndex i);
 822 
 823   Metachunk* get_new_chunk(size_t chunk_word_size);
 824 
 825   // Block allocation and deallocation.
 826   // Allocates a block from the current chunk
 827   MetaWord* allocate(size_t word_size);
 828   // Allocates a block from a small chunk
 829   MetaWord* get_small_chunk_and_allocate(size_t word_size);
 830 
 831   // Helper for allocations
 832   MetaWord* allocate_work(size_t word_size);
 833 
 834   // Returns a block to the per manager freelist
 835   void deallocate(MetaWord* p, size_t word_size);
 836 
 837   // Based on the allocation size and a minimum chunk size,
 838   // returns the chunk size to use (for expanding space for chunk allocation).
 839   size_t calc_chunk_size(size_t allocation_word_size);
 840 
 841   // Called when an allocation from the current chunk fails.
 842   // Gets a new chunk (may require getting a new virtual space),
 843   // and allocates from that chunk.
 844   MetaWord* grow_and_allocate(size_t word_size);
 845 
 846   // Notify memory usage to MemoryService.
 847   void track_metaspace_memory_usage();
 848 
 849   // debugging support.
 850 
 851   void dump(outputStream* const out) const;
 852   void print_on(outputStream* st) const;
 853   void locked_print_chunks_in_use_on(outputStream* st) const;
 854 
 855   void verify();
 856   void verify_chunk_size(Metachunk* chunk);
 857 #ifdef ASSERT
 858   void verify_allocated_blocks_words();
 859 #endif
 860 
 861   // This adjusts the given size to be at least the minimum allocation size in
 862   // words for data in metaspace.  Essentially the minimum size is currently 3 words.
 863   size_t get_allocation_word_size(size_t word_size) {
 864     size_t byte_size = word_size * BytesPerWord;
 865 
 866     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
 867     raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());
 868 
 869     size_t raw_word_size = raw_bytes_size / BytesPerWord;
 870     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
 871 
 872     return raw_word_size;
 873   }
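       // Worked example (comment only): on a 64-bit VM a request for 1 word
       // (8 bytes) becomes MAX2(8, sizeof(Metablock)) bytes which, given the
       // 3-word minimum noted above, aligns up to 24 bytes, i.e. 3 words.
       // Larger requests are only rounded up to Metachunk::object_alignment().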
 874 };
 875 
 876 uint const SpaceManager::_small_chunk_limit = 4;
 877 
 878 const char* SpaceManager::_expand_lock_name =
 879   "SpaceManager chunk allocation lock";
 880 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
 881 Mutex* const SpaceManager::_expand_lock =
 882   new Mutex(SpaceManager::_expand_lock_rank,
 883             SpaceManager::_expand_lock_name,
 884             Mutex::_allow_vm_block_flag,
 885             Monitor::_safepoint_check_never);
 886 
 887 void VirtualSpaceNode::inc_container_count() {
 888   assert_lock_strong(SpaceManager::expand_lock());
 889   _container_count++;
 890 }
 891 
 892 void VirtualSpaceNode::dec_container_count() {
 893   assert_lock_strong(SpaceManager::expand_lock());
 894   _container_count--;
 895 }
 896 
 897 #ifdef ASSERT
 898 void VirtualSpaceNode::verify_container_count() {
 899   assert(_container_count == container_count_slow(),
 900          "Inconsistency in container_count _container_count " UINTX_FORMAT
 901          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
 902 }
 903 #endif
 904 
 905 // BlockFreelist methods
 906 
 907 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
 908 
 909 BlockFreelist::~BlockFreelist() {
 910   delete _dictionary;
 911   if (_small_blocks != NULL) {
 912     delete _small_blocks;
 913   }
 914 }
 915 
 916 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
 917   assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
 918 
 919   Metablock* free_chunk = ::new (p) Metablock(word_size);
 920   if (word_size < SmallBlocks::small_block_max_size()) {
 921     small_blocks()->return_block(free_chunk, word_size);
 922   } else {
 923     dictionary()->return_chunk(free_chunk);
 924   }
 925   log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
 926             SIZE_FORMAT, p2i(free_chunk), word_size);
 927 }
 928 
 929 MetaWord* BlockFreelist::get_block(size_t word_size) {
 930   assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
 931 
 932   // Try small_blocks first.
 933   if (word_size < SmallBlocks::small_block_max_size()) {
 934     // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
 935     // this space manager.
 936     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
 937     if (new_block != NULL) {
 938       log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
 939               p2i(new_block), word_size);
 940       return new_block;
 941     }
 942   }
 943 
 944   if (word_size < BlockFreelist::min_dictionary_size()) {
 945     // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
 946     return NULL;
 947   }
 948 
 949   Metablock* free_block =
 950     dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
 951   if (free_block == NULL) {
 952     return NULL;
 953   }
 954 
 955   const size_t block_size = free_block->size();
 956   if (block_size > WasteMultiplier * word_size) {
 957     return_block((MetaWord*)free_block, block_size);
 958     return NULL;
 959   }
 960 
 961   MetaWord* new_block = (MetaWord*)free_block;
 962   assert(block_size >= word_size, "Incorrect size of block from freelist");
 963   const size_t unused = block_size - word_size;
 964   if (unused >= SmallBlocks::small_block_min_size()) {
 965     return_block(new_block + word_size, unused);
 966   }
 967 
 968   log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
 969             p2i(new_block), word_size);
 970   return new_block;
 971 }
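     // Worked example for the waste check above (comment only, sizes in words,
     // assuming the 3-word minimum block size used by SmallBlocks):
     //
     //   request 10, dictionary hands back a 48-word block:
     //     48 > WasteMultiplier * 10 (= 40) -> too wasteful; the block goes back
     //     to the freelist and NULL is returned to the caller.
     //   request 10, dictionary hands back a 16-word block:
     //     16 <= 40 and the unused tail of 6 words is >= 3, so the tail is
     //     returned to the freelist and the first 10 words satisfy the request.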
 972 
 973 void BlockFreelist::print_on(outputStream* st) const {
 974   dictionary()->print_free_lists(st);
 975   if (_small_blocks != NULL) {
 976     _small_blocks->print_on(st);
 977   }
 978 }
 979 
 980 // VirtualSpaceNode methods
 981 
 982 VirtualSpaceNode::~VirtualSpaceNode() {
 983   _rs.release();
 984 #ifdef ASSERT
 985   size_t word_size = sizeof(*this) / BytesPerWord;
 986   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
 987 #endif
 988 }
 989 
 990 size_t VirtualSpaceNode::used_words_in_vs() const {
 991   return pointer_delta(top(), bottom(), sizeof(MetaWord));
 992 }
 993 
 994 // Space committed in the VirtualSpace
 995 size_t VirtualSpaceNode::capacity_words_in_vs() const {
 996   return pointer_delta(end(), bottom(), sizeof(MetaWord));
 997 }
 998 
 999 size_t VirtualSpaceNode::free_words_in_vs() const {
1000   return pointer_delta(end(), top(), sizeof(MetaWord));
1001 }
1002 
1003 // Allocates the chunk from the virtual space only.
1004 // This interface is also used internally for debugging.  Not all
1005 // chunks removed here are necessarily used for allocation.
1006 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1007   // Bottom of the new chunk
1008   MetaWord* chunk_limit = top();
1009   assert(chunk_limit != NULL, "Not safe to call this method");
1010 
1011   // The virtual spaces are always expanded by the
1012   // commit granularity to enforce the following condition.
1013   // Without this the is_available check will not work correctly.
1014   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1015       "The committed memory doesn't match the expanded memory.");
1016 
1017   if (!is_available(chunk_word_size)) {
1018     LogTarget(Debug, gc, metaspace, freelist) lt;
1019     if (lt.is_enabled()) {
1020       LogStream ls(lt);
1021       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1022       // Dump some information about the virtual space that is nearly full
1023       print_on(&ls);
1024     }
1025     return NULL;
1026   }
1027 
1028   // Take the space  (bump top on the current virtual space).
1029   inc_top(chunk_word_size);
1030 
1031   // Initialize the chunk
1032   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
1033   return result;
1034 }
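     // Illustrative sketch of the bump-pointer allocation above (comment only):
     // a chunk is carved from the committed part of this node simply by placing
     // a Metachunk header at the current top and advancing top.
     //
     //   bottom()                        top()                       end()
     //     |------ chunks handed out ------|----- free_words_in_vs() -----|
     //                                     ^ new chunk of chunk_word_size starts
     //                                       here; top() then moves forward by
     //                                       chunk_word_size words.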
1035 
1036 
1037 // Expand the virtual space (commit more of the reserved space)
1038 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1039   size_t min_bytes = min_words * BytesPerWord;
1040   size_t preferred_bytes = preferred_words * BytesPerWord;
1041 
1042   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1043 
1044   if (uncommitted < min_bytes) {
1045     return false;
1046   }
1047 
1048   size_t commit = MIN2(preferred_bytes, uncommitted);
1049   bool result = virtual_space()->expand_by(commit, false);
1050 
1051   assert(result, "Failed to commit memory");
1052 
1053   return result;
1054 }
1055 
1056 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1057   assert_lock_strong(SpaceManager::expand_lock());
1058   Metachunk* result = take_from_committed(chunk_word_size);
1059   if (result != NULL) {
1060     inc_container_count();
1061   }
1062   return result;
1063 }
1064 
1065 bool VirtualSpaceNode::initialize() {
1066 
1067   if (!_rs.is_reserved()) {
1068     return false;
1069   }
1070 
1071   // These are necessary restrictions to make sure that the virtual space always
1072   // grows in steps of Metaspace::commit_alignment(). If both base and size are
1073   // aligned, only the middle alignment of the VirtualSpace is used.
1074   assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
1075   assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
1076 
1077   // ReservedSpaces marked as special will have the entire memory
1078   // pre-committed. Setting a committed size will make sure that
1079   // committed_size and actual_committed_size agree.
1080   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1081 
1082   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1083                                             Metaspace::commit_alignment());
1084   if (result) {
1085     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1086         "Checking that the pre-committed memory was registered by the VirtualSpace");
1087 
1088     set_top((MetaWord*)virtual_space()->low());
1089     set_reserved(MemRegion((HeapWord*)_rs.base(),
1090                  (HeapWord*)(_rs.base() + _rs.size())));
1091 
1092     assert(reserved()->start() == (HeapWord*) _rs.base(),
1093            "Reserved start was not set properly " PTR_FORMAT
1094            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1095     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1096            "Reserved size was not set properly " SIZE_FORMAT
1097            " != " SIZE_FORMAT, reserved()->word_size(),
1098            _rs.size() / BytesPerWord);
1099   }
1100 
1101   return result;
1102 }
1103 
1104 void VirtualSpaceNode::print_on(outputStream* st) const {
1105   size_t used = used_words_in_vs();
1106   size_t capacity = capacity_words_in_vs();
1107   VirtualSpace* vs = virtual_space();
1108   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
1109            "[" PTR_FORMAT ", " PTR_FORMAT ", "
1110            PTR_FORMAT ", " PTR_FORMAT ")",
1111            p2i(vs), capacity / K,
1112            capacity == 0 ? 0 : used * 100 / capacity,
1113            p2i(bottom()), p2i(top()), p2i(end()),
1114            p2i(vs->high_boundary()));
1115 }
1116 
1117 #ifdef ASSERT
1118 void VirtualSpaceNode::mangle() {
1119   size_t word_size = capacity_words_in_vs();
1120   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1121 }
1122 #endif // ASSERT
1123 
1124 // VirtualSpaceList methods
1125 // Space allocated from the VirtualSpace
1126 
1127 VirtualSpaceList::~VirtualSpaceList() {
1128   VirtualSpaceListIterator iter(virtual_space_list());
1129   while (iter.repeat()) {
1130     VirtualSpaceNode* vsl = iter.get_next();
1131     delete vsl;
1132   }
1133 }
1134 
1135 void VirtualSpaceList::inc_reserved_words(size_t v) {
1136   assert_lock_strong(SpaceManager::expand_lock());
1137   _reserved_words = _reserved_words + v;
1138 }
1139 void VirtualSpaceList::dec_reserved_words(size_t v) {
1140   assert_lock_strong(SpaceManager::expand_lock());
1141   _reserved_words = _reserved_words - v;
1142 }
1143 
1144 #define assert_committed_below_limit()                        \
1145   assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
1146          "Too much committed memory. Committed: " SIZE_FORMAT \
1147          " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1148          MetaspaceAux::committed_bytes(), MaxMetaspaceSize);
1149 
1150 void VirtualSpaceList::inc_committed_words(size_t v) {
1151   assert_lock_strong(SpaceManager::expand_lock());
1152   _committed_words = _committed_words + v;
1153 
1154   assert_committed_below_limit();
1155 }
1156 void VirtualSpaceList::dec_committed_words(size_t v) {
1157   assert_lock_strong(SpaceManager::expand_lock());
1158   _committed_words = _committed_words - v;
1159 
1160   assert_committed_below_limit();
1161 }
1162 
1163 void VirtualSpaceList::inc_virtual_space_count() {
1164   assert_lock_strong(SpaceManager::expand_lock());
1165   _virtual_space_count++;
1166 }
1167 void VirtualSpaceList::dec_virtual_space_count() {
1168   assert_lock_strong(SpaceManager::expand_lock());
1169   _virtual_space_count--;
1170 }
1171 
1172 void ChunkManager::remove_chunk(Metachunk* chunk) {
1173   size_t word_size = chunk->word_size();
1174   ChunkIndex index = list_index(word_size);
1175   if (index != HumongousIndex) {
1176     free_chunks(index)->remove_chunk(chunk);
1177   } else {
1178     humongous_dictionary()->remove_chunk(chunk);
1179   }
1180 
1181   // Chunk has been removed from the chunks free list, update counters.
1182   account_for_removed_chunk(chunk);
1183 }
1184 
1185 // Walk the list of VirtualSpaceNodes and delete
1186 // nodes with a 0 container_count.  Remove Metachunks in
1187 // the node from their respective freelists.
1188 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1189   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
1190   assert_lock_strong(SpaceManager::expand_lock());
1191   // Don't use a VirtualSpaceListIterator because this
1192   // list is being changed and a straightforward use of an iterator is not safe.
1193   VirtualSpaceNode* purged_vsl = NULL;
1194   VirtualSpaceNode* prev_vsl = virtual_space_list();
1195   VirtualSpaceNode* next_vsl = prev_vsl;
1196   while (next_vsl != NULL) {
1197     VirtualSpaceNode* vsl = next_vsl;
1198     DEBUG_ONLY(vsl->verify_container_count();)
1199     next_vsl = vsl->next();
1200     // Don't free the current virtual space since it will likely
1201     // be needed soon.
1202     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1203       // Unlink it from the list
1204       if (prev_vsl == vsl) {
1205         // This is the case of the current node being the first node.
1206         assert(vsl == virtual_space_list(), "Expected to be the first node");
1207         set_virtual_space_list(vsl->next());
1208       } else {
1209         prev_vsl->set_next(vsl->next());
1210       }
1211 
1212       vsl->purge(chunk_manager);
1213       dec_reserved_words(vsl->reserved_words());
1214       dec_committed_words(vsl->committed_words());
1215       dec_virtual_space_count();
1216       purged_vsl = vsl;
1217       delete vsl;
1218     } else {
1219       prev_vsl = vsl;
1220     }
1221   }
1222 #ifdef ASSERT
1223   if (purged_vsl != NULL) {
1224     // List should be stable enough to use an iterator here.
1225     VirtualSpaceListIterator iter(virtual_space_list());
1226     while (iter.repeat()) {
1227       VirtualSpaceNode* vsl = iter.get_next();
1228       assert(vsl != purged_vsl, "Purge of vsl failed");
1229     }
1230   }
1231 #endif
1232 }
1233 
1234 
1235 // This function looks at the mmap regions in the metaspace without locking.
1236 // The chunks are added with store ordering and are not deleted except at
1237 // unloading time during a safepoint.
1238 bool VirtualSpaceList::contains(const void* ptr) {
1239   // List should be stable enough to use an iterator here because removing virtual
1240   // space nodes is only allowed at a safepoint.
1241   VirtualSpaceListIterator iter(virtual_space_list());
1242   while (iter.repeat()) {
1243     VirtualSpaceNode* vsn = iter.get_next();
1244     if (vsn->contains(ptr)) {
1245       return true;
1246     }
1247   }
1248   return false;
1249 }
1250 
1251 void VirtualSpaceList::retire_current_virtual_space() {
1252   assert_lock_strong(SpaceManager::expand_lock());
1253 
1254   VirtualSpaceNode* vsn = current_virtual_space();
1255 
1256   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
1257                                   Metaspace::chunk_manager_metadata();
1258 
1259   vsn->retire(cm);
1260 }
1261 
1262 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
1263   DEBUG_ONLY(verify_container_count();)
1264   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
1265     ChunkIndex index = (ChunkIndex)i;
1266     size_t chunk_size = chunk_manager->size_by_index(index);
1267 
1268     while (free_words_in_vs() >= chunk_size) {
1269       Metachunk* chunk = get_chunk_vs(chunk_size);
1270       assert(chunk != NULL, "allocation should have been successful");
1271 
1272       chunk_manager->return_single_chunk(index, chunk);
1273     }
1274     DEBUG_ONLY(verify_container_count();)
1275   }
1276   assert(free_words_in_vs() == 0, "should be empty now");
1277 }
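     // Worked example for retire() (comment only, non-class chunk sizes in words):
     // a node with 9 * K committed words left is chunked up largest-first:
     //
     //   MediumChunk (8 * K):    one chunk,  1 * K words remain
     //   SmallChunk (512):       two chunks, 0 words remain
     //   SpecializedChunk (128): none needed
     //
     // which leaves free_words_in_vs() == 0, as asserted above.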
1278 
1279 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
1280                                    _is_class(false),
1281                                    _virtual_space_list(NULL),
1282                                    _current_virtual_space(NULL),
1283                                    _reserved_words(0),
1284                                    _committed_words(0),
1285                                    _virtual_space_count(0) {
1286   MutexLockerEx cl(SpaceManager::expand_lock(),
1287                    Mutex::_no_safepoint_check_flag);
1288   create_new_virtual_space(word_size);
1289 }
1290 
1291 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
1292                                    _is_class(true),
1293                                    _virtual_space_list(NULL),
1294                                    _current_virtual_space(NULL),
1295                                    _reserved_words(0),
1296                                    _committed_words(0),
1297                                    _virtual_space_count(0) {
1298   MutexLockerEx cl(SpaceManager::expand_lock(),
1299                    Mutex::_no_safepoint_check_flag);
1300   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
1301   bool succeeded = class_entry->initialize();
1302   if (succeeded) {
1303     link_vs(class_entry);
1304   }
1305 }
1306 
1307 size_t VirtualSpaceList::free_bytes() {
1308   return virtual_space_list()->free_words_in_vs() * BytesPerWord;
1309 }
1310 
1311 // Allocate another meta virtual space and add it to the list.
1312 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
1313   assert_lock_strong(SpaceManager::expand_lock());
1314 
1315   if (is_class()) {
1316     assert(false, "We currently don't support more than one VirtualSpace for"
1317                   " the compressed class space. The initialization of the"
1318                   " CCS uses another code path and should not hit this path.");
1319     return false;
1320   }
1321 
1322   if (vs_word_size == 0) {
1323     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
1324     return false;
1325   }
1326 
1327   // Reserve the space
1328   size_t vs_byte_size = vs_word_size * BytesPerWord;
1329   assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());
1330 
1331   // Allocate the meta virtual space and initialize it.
1332   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
1333   if (!new_entry->initialize()) {
1334     delete new_entry;
1335     return false;
1336   } else {
1337     assert(new_entry->reserved_words() == vs_word_size,
1338         "Reserved memory size differs from requested memory size");
1339     // ensure lock-free iteration sees fully initialized node
1340     OrderAccess::storestore();
1341     link_vs(new_entry);
1342     return true;
1343   }
1344 }
1345 
1346 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1347   if (virtual_space_list() == NULL) {
1348     set_virtual_space_list(new_entry);
1349   } else {
1350     current_virtual_space()->set_next(new_entry);
1351   }
1352   set_current_virtual_space(new_entry);
1353   inc_reserved_words(new_entry->reserved_words());
1354   inc_committed_words(new_entry->committed_words());
1355   inc_virtual_space_count();
1356 #ifdef ASSERT
1357   new_entry->mangle();
1358 #endif
1359   LogTarget(Trace, gc, metaspace) lt;
1360   if (lt.is_enabled()) {
1361     LogStream ls(lt);
1362     VirtualSpaceNode* vsl = current_virtual_space();
1363     ResourceMark rm;
1364     vsl->print_on(&ls);
1365   }
1366 }
1367 
1368 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1369                                       size_t min_words,
1370                                       size_t preferred_words) {
1371   size_t before = node->committed_words();
1372 
1373   bool result = node->expand_by(min_words, preferred_words);
1374 
1375   size_t after = node->committed_words();
1376 
1377   // after and before can be the same if the memory was pre-committed.
1378   assert(after >= before, "Inconsistency");
1379   inc_committed_words(after - before);
1380 
1381   return result;
1382 }
1383 
1384 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
1385   assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
1386   assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
1387   assert(min_words <= preferred_words, "Invalid arguments");
1388 
1389   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
1390     return  false;
1391   }
1392 
1393   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
1394   if (allowed_expansion_words < min_words) {
1395     return false;
1396   }
1397 
1398   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
1399 
1400   // Commit more memory from the current virtual space.
1401   bool vs_expanded = expand_node_by(current_virtual_space(),
1402                                     min_words,
1403                                     max_expansion_words);
1404   if (vs_expanded) {
1405     return true;
1406   }
1407   retire_current_virtual_space();
1408 
1409   // Get another virtual space.
1410   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
1411   grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
1412 
1413   if (create_new_virtual_space(grow_vs_words)) {
1414     if (current_virtual_space()->is_pre_committed()) {
1415       // The memory was pre-committed, so we are done here.
1416       assert(min_words <= current_virtual_space()->committed_words(),
1417           "The new VirtualSpace was pre-committed, so it"
1418           " should be large enough to fit the alloc request.");
1419       return true;
1420     }
1421 
1422     return expand_node_by(current_virtual_space(),
1423                           min_words,
1424                           max_expansion_words);
1425   }
1426 
1427   return false;
1428 }
1429 
1430 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
1431 
1432   // Allocate a chunk out of the current virtual space.
1433   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
1434 
1435   if (next != NULL) {
1436     return next;
1437   }
1438 
1439   // The expand amount is currently only determined by the requested sizes
1440   // and not how much committed memory is left in the current virtual space.
1441 
1442   size_t min_word_size       = align_size_up(chunk_word_size,              Metaspace::commit_alignment_words());
1443   size_t preferred_word_size = align_size_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
1444   if (min_word_size >= preferred_word_size) {
1445     // Can happen when humongous chunks are allocated.
1446     preferred_word_size = min_word_size;
1447   }
1448 
1449   bool expanded = expand_by(min_word_size, preferred_word_size);
1450   if (expanded) {
1451     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
1452     assert(next != NULL, "The allocation was expected to succeed after the expansion");
1453   }
1454 
1455   return next;
1456 }
1457 
1458 void VirtualSpaceList::print_on(outputStream* st) const {
1459   VirtualSpaceListIterator iter(virtual_space_list());
1460   while (iter.repeat()) {
1461     VirtualSpaceNode* node = iter.get_next();
1462     node->print_on(st);
1463   }
1464 }
1465 
1466 // MetaspaceGC methods
1467 
1468 // VM_CollectForMetadataAllocation is the vm operation used to GC.
1469 // Within the VM operation after the GC the attempt to allocate the metadata
1470 // should succeed.  If the GC did not free enough space for the metaspace
1471 // allocation, the HWM is increased so that another virtualspace will be
1472 // allocated for the metadata.  With perm gen the increase in the perm
1473 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
1474 // metaspace policy uses those as the small and large steps for the HWM.
1475 //
1476 // After the GC the compute_new_size() for MetaspaceGC is called to
1477 // resize the capacity of the metaspaces.  The current implementation
1478 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
1479 // to resize the Java heap by some GC's.  New flags can be implemented
1480 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
1481 // free space is desirable in the metaspace capacity to decide how much
1482 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
1483 // free space is desirable in the metaspace capacity before decreasing
1484 // the HWM.
1485 
1486 // Calculate the amount to increase the high water mark (HWM).
1487 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1488 // another expansion is not requested too soon.  If that is not
1489 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
1490 // If that is still not enough, expand by the size of the allocation
1491 // plus some.
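// Illustrative example (the flag values below are assumptions, not necessarily
// the build defaults): with MinMetaspaceExpansion = 256K and
// MaxMetaspaceExpansion = 4M, an aligned request of 64K yields a delta of
// 256K, a request of 1M yields 4M, and a request of 16M yields 16M + 256K.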
1492 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
1493   size_t min_delta = MinMetaspaceExpansion;
1494   size_t max_delta = MaxMetaspaceExpansion;
1495   size_t delta = align_size_up(bytes, Metaspace::commit_alignment());
1496 
1497   if (delta <= min_delta) {
1498     delta = min_delta;
1499   } else if (delta <= max_delta) {
1500     // Don't want to hit the high water mark on the next
1501     // allocation so make the delta greater than just enough
1502     // for this allocation.
1503     delta = max_delta;
1504   } else {
1505     // This allocation is large but the next ones are probably not
1506     // so increase by the minimum.
1507     delta = delta + min_delta;
1508   }
1509 
1510   assert_is_size_aligned(delta, Metaspace::commit_alignment());
1511 
1512   return delta;
1513 }
1514 
1515 size_t MetaspaceGC::capacity_until_GC() {
1516   size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
1517   assert(value >= MetaspaceSize, "Not initialized properly?");
1518   return value;
1519 }
1520 
1521 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
1522   assert_is_size_aligned(v, Metaspace::commit_alignment());
1523 
1524   size_t capacity_until_GC = (size_t) _capacity_until_GC;
1525   size_t new_value = capacity_until_GC + v;
1526 
1527   if (new_value < capacity_until_GC) {
1528     // The addition wrapped around, set new_value to aligned max value.
1529     new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
1530   }
1531 
1532   intptr_t expected = (intptr_t) capacity_until_GC;
1533   intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);
1534 
1535   if (expected != actual) {
1536     return false;
1537   }
1538 
1539   if (new_cap_until_GC != NULL) {
1540     *new_cap_until_GC = new_value;
1541   }
1542   if (old_cap_until_GC != NULL) {
1543     *old_cap_until_GC = capacity_until_GC;
1544   }
1545   return true;
1546 }
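// Illustrative caller pattern (hypothetical, for exposition only): because the
// cmpxchg above can lose a race with another thread, a caller that must raise
// the HWM typically retries, re-reading capacity_until_GC() between attempts:
//
//   size_t after = 0, before = 0;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, &after, &before)) {
//     // another thread changed _capacity_until_GC; recompute delta if needed
//   }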
1547 
1548 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
1549   assert_is_size_aligned(v, Metaspace::commit_alignment());
1550 
1551   return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
1552 }
1553 
1554 void MetaspaceGC::initialize() {
1555   // Set the high-water mark to MaxMetaspaceSize during VM initialization since
1556   // we can't do a GC during initialization.
1557   _capacity_until_GC = MaxMetaspaceSize;
1558 }
1559 
1560 void MetaspaceGC::post_initialize() {
1561   // Reset the high-water mark once the VM initialization is done.
1562   _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
1563 }
1564 
1565 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
1566   // Check if the compressed class space is full.
1567   if (is_class && Metaspace::using_class_space()) {
1568     size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
1569     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
1570       return false;
1571     }
1572   }
1573 
1574   // Check if the user has imposed a limit on the metaspace memory.
1575   size_t committed_bytes = MetaspaceAux::committed_bytes();
1576   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
1577     return false;
1578   }
1579 
1580   return true;
1581 }
1582 
1583 size_t MetaspaceGC::allowed_expansion() {
1584   size_t committed_bytes = MetaspaceAux::committed_bytes();
1585   size_t capacity_until_gc = capacity_until_GC();
1586 
1587   assert(capacity_until_gc >= committed_bytes,
1588          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
1589          capacity_until_gc, committed_bytes);
1590 
1591   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
1592   size_t left_until_GC = capacity_until_gc - committed_bytes;
1593   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
1594 
1595   return left_to_commit / BytesPerWord;
1596 }
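// Illustrative example (numbers are assumptions): with committed_bytes = 60M,
// capacity_until_GC = 64M and MaxMetaspaceSize effectively unlimited,
// left_until_GC = 4M is the binding limit, so allowed_expansion() reports
// 4M / BytesPerWord words that may still be committed without triggering a GC.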
1597 
1598 void MetaspaceGC::compute_new_size() {
1599   assert(_shrink_factor <= 100, "invalid shrink factor");
1600   uint current_shrink_factor = _shrink_factor;
1601   _shrink_factor = 0;
1602 
1603   // Using committed_bytes() for used_after_gc is an overestimation, since the
1604   // chunk free lists are included in committed_bytes() and the memory in an
1605   // un-fragmented chunk free list is available for future allocations.
1606   // However, if the chunk free lists becomes fragmented, then the memory may
1607   // not be available for future allocations and the memory is therefore "in use".
1608   // Including the chunk free lists in the definition of "in use" is therefore
1609   // necessary. Not including the chunk free lists can cause capacity_until_GC to
1610   // shrink below committed_bytes() and this has caused serious bugs in the past.
1611   const size_t used_after_gc = MetaspaceAux::committed_bytes();
1612   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1613 
1614   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1615   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1616 
1617   const double min_tmp = used_after_gc / maximum_used_percentage;
1618   size_t minimum_desired_capacity =
1619     (size_t)MIN2(min_tmp, double(max_uintx));
1620   // Don't shrink below the initial metaspace size (MetaspaceSize)
1621   minimum_desired_capacity = MAX2(minimum_desired_capacity,
1622                                   MetaspaceSize);
1623 
1624   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
1625   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
1626                            minimum_free_percentage, maximum_used_percentage);
1627   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
1628 
1629 
1630   size_t shrink_bytes = 0;
1631   if (capacity_until_GC < minimum_desired_capacity) {
1632     // The capacity below the metaspace HWM is less than the minimum
1633     // desired capacity, so raise the HWM.
1634     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1635     expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
1636     // Don't expand unless it's significant
1637     if (expand_bytes >= MinMetaspaceExpansion) {
1638       size_t new_capacity_until_GC = 0;
1639       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1640       assert(succeeded, "Should always successfully increment HWM when at safepoint");
1641 
1642       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1643                                                new_capacity_until_GC,
1644                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
1645       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
1646                                minimum_desired_capacity / (double) K,
1647                                expand_bytes / (double) K,
1648                                MinMetaspaceExpansion / (double) K,
1649                                new_capacity_until_GC / (double) K);
1650     }
1651     return;
1652   }
1653 
1654   // No expansion, now see if we want to shrink
1655   // We would never want to shrink more than this
1656   assert(capacity_until_GC >= minimum_desired_capacity,
1657          SIZE_FORMAT " >= " SIZE_FORMAT,
1658          capacity_until_GC, minimum_desired_capacity);
1659   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1660 
1661   // Should shrinking be considered?
1662   if (MaxMetaspaceFreeRatio < 100) {
1663     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1664     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1665     const double max_tmp = used_after_gc / minimum_used_percentage;
1666     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1667     maximum_desired_capacity = MAX2(maximum_desired_capacity,
1668                                     MetaspaceSize);
1669     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
1670                              maximum_free_percentage, minimum_used_percentage);
1671     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
1672                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
1673 
1674     assert(minimum_desired_capacity <= maximum_desired_capacity,
1675            "sanity check");
1676 
1677     if (capacity_until_GC > maximum_desired_capacity) {
1678       // Capacity too large, compute shrinking size
1679       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1680       // We don't want to shrink all the way back to initSize if people call
1681       // System.gc(), because some programs do that between "phases" and then
1682       // we'd just have to grow the metaspace again for the next phase.  So we
1683       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1684       // on the third call, and 100% by the fourth call.  But if we recompute
1685       // size without shrinking, it goes back to 0%.
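      // Note: dividing by 100 before multiplying by the shrink factor avoids
      // overflow for very large shrink_bytes values; the effect is that
      // shrink_bytes is first truncated down to a multiple of 100 bytes.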
1686       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1687 
1688       shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1689 
1690       assert(shrink_bytes <= max_shrink_bytes,
1691              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1692              shrink_bytes, max_shrink_bytes);
1693       if (current_shrink_factor == 0) {
1694         _shrink_factor = 10;
1695       } else {
1696         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1697       }
1698       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
1699                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
1700       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
1701                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
1702     }
1703   }
1704 
1705   // Don't shrink unless it's significant
1706   if (shrink_bytes >= MinMetaspaceExpansion &&
1707       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1708     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1709     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1710                                              new_capacity_until_GC,
1711                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
1712   }
1713 }
1714 
1715 // Metadebug methods
1716 
1717 void Metadebug::init_allocation_fail_alot_count() {
1718   if (MetadataAllocationFailALot) {
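    // Pick a pseudo-random count, roughly uniform in
    // [1, MetadataAllocationFailALotInterval], so that the induced failures
    // are spread out rather than strictly periodic.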
1719     _allocation_fail_alot_count =
1720       1 + (long)((double)MetadataAllocationFailALotInterval * os::random() / (max_jint + 1.0));
1721   }
1722 }
1723 
1724 #ifdef ASSERT
1725 bool Metadebug::test_metadata_failure() {
1726   if (MetadataAllocationFailALot &&
1727       Threads::is_vm_complete()) {
1728     if (_allocation_fail_alot_count > 0) {
1729       _allocation_fail_alot_count--;
1730     } else {
1731       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
1732       init_allocation_fail_alot_count();
1733       return true;
1734     }
1735   }
1736   return false;
1737 }
1738 #endif
1739 
1740 // ChunkManager methods
1741 
1742 size_t ChunkManager::free_chunks_total_words() {
1743   return _free_chunks_total;
1744 }
1745 
1746 size_t ChunkManager::free_chunks_total_bytes() {
1747   return free_chunks_total_words() * BytesPerWord;
1748 }
1749 
1750 // Update internal accounting after a chunk was added
1751 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
1752   assert_lock_strong(SpaceManager::expand_lock());
1753   _free_chunks_count ++;
1754   _free_chunks_total += c->word_size();
1755 }
1756 
1757 // Update internal accounting after a chunk was removed
1758 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
1759   assert_lock_strong(SpaceManager::expand_lock());
1760   assert(_free_chunks_count >= 1,
1761     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
1762   assert(_free_chunks_total >= c->word_size(),
1763     "ChunkManager::_free_chunks_total: about to go negative "
1764      "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
1765   _free_chunks_count --;
1766   _free_chunks_total -= c->word_size();
1767 }
1768 
1769 size_t ChunkManager::free_chunks_count() {
1770 #ifdef ASSERT
1771   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1772     MutexLockerEx cl(SpaceManager::expand_lock(),
1773                      Mutex::_no_safepoint_check_flag);
1774     // This lock is only needed in debug because the verification
1775     // of _free_chunks_count walks the list of free chunks
1776     slow_locked_verify_free_chunks_count();
1777   }
1778 #endif
1779   return _free_chunks_count;
1780 }
1781 
1782 ChunkIndex ChunkManager::list_index(size_t size) {
1783   if (size_by_index(SpecializedIndex) == size) {
1784     return SpecializedIndex;
1785   }
1786   if (size_by_index(SmallIndex) == size) {
1787     return SmallIndex;
1788   }
1789   const size_t med_size = size_by_index(MediumIndex);
1790   if (med_size == size) {
1791     return MediumIndex;
1792   }
1793 
1794   assert(size > med_size, "Not a humongous chunk");
1795   return HumongousIndex;
1796 }
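// Illustrative mapping (chunk sizes are assumptions; the real values come from
// size_by_index()): with specialized/small/medium sizes of 128, 512 and 8K
// words, list_index(512) returns SmallIndex, while any size larger than the
// medium size, e.g. 10000 words, is classified as HumongousIndex.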
1797 
1798 size_t ChunkManager::size_by_index(ChunkIndex index) {
1799   index_bounds_check(index);
1800   assert(index != HumongousIndex, "Do not call for humongous chunks.");
1801   return free_chunks(index)->size();
1802 }
1803 
1804 void ChunkManager::locked_verify_free_chunks_total() {
1805   assert_lock_strong(SpaceManager::expand_lock());
1806   assert(sum_free_chunks() == _free_chunks_total,
1807          "_free_chunks_total " SIZE_FORMAT " is not the"
1808          " same as sum " SIZE_FORMAT, _free_chunks_total,
1809          sum_free_chunks());
1810 }
1811 
1812 void ChunkManager::verify_free_chunks_total() {
1813   MutexLockerEx cl(SpaceManager::expand_lock(),
1814                      Mutex::_no_safepoint_check_flag);
1815   locked_verify_free_chunks_total();
1816 }
1817 
1818 void ChunkManager::locked_verify_free_chunks_count() {
1819   assert_lock_strong(SpaceManager::expand_lock());
1820   assert(sum_free_chunks_count() == _free_chunks_count,
1821          "_free_chunks_count " SIZE_FORMAT " is not the"
1822          " same as sum " SIZE_FORMAT, _free_chunks_count,
1823          sum_free_chunks_count());
1824 }
1825 
1826 void ChunkManager::verify_free_chunks_count() {
1827 #ifdef ASSERT
1828   MutexLockerEx cl(SpaceManager::expand_lock(),
1829                      Mutex::_no_safepoint_check_flag);
1830   locked_verify_free_chunks_count();
1831 #endif
1832 }
1833 
1834 void ChunkManager::verify() {
1835   MutexLockerEx cl(SpaceManager::expand_lock(),
1836                      Mutex::_no_safepoint_check_flag);
1837   locked_verify();
1838 }
1839 
1840 void ChunkManager::locked_verify() {
1841   locked_verify_free_chunks_count();
1842   locked_verify_free_chunks_total();
1843 }
1844 
1845 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1846   assert_lock_strong(SpaceManager::expand_lock());
1847   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1848                 _free_chunks_total, _free_chunks_count);
1849 }
1850 
1851 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1852   assert_lock_strong(SpaceManager::expand_lock());
1853   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1854                 sum_free_chunks(), sum_free_chunks_count());
1855 }
1856 
1857 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1858   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
1859          "Bad index: %d", (int)index);
1860 
1861   return &_free_chunks[index];
1862 }
1863 
1864 // These methods, which sum the free chunk lists, are used by printing
1865 // code that runs in product builds.
1866 size_t ChunkManager::sum_free_chunks() {
1867   assert_lock_strong(SpaceManager::expand_lock());
1868   size_t result = 0;
1869   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1870     ChunkList* list = free_chunks(i);
1871 
1872     if (list == NULL) {
1873       continue;
1874     }
1875 
1876     result = result + list->count() * list->size();
1877   }
1878   result = result + humongous_dictionary()->total_size();
1879   return result;
1880 }
1881 
1882 size_t ChunkManager::sum_free_chunks_count() {
1883   assert_lock_strong(SpaceManager::expand_lock());
1884   size_t count = 0;
1885   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1886     ChunkList* list = free_chunks(i);
1887     if (list == NULL) {
1888       continue;
1889     }
1890     count = count + list->count();
1891   }
1892   count = count + humongous_dictionary()->total_free_blocks();
1893   return count;
1894 }
1895 
1896 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1897   ChunkIndex index = list_index(word_size);
1898   assert(index < HumongousIndex, "No humongous list");
1899   return free_chunks(index);
1900 }
1901 
1902 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1903   assert_lock_strong(SpaceManager::expand_lock());
1904 
1905   slow_locked_verify();
1906 
1907   Metachunk* chunk = NULL;
1908   if (list_index(word_size) != HumongousIndex) {
1909     ChunkList* free_list = find_free_chunks_list(word_size);
1910     assert(free_list != NULL, "Sanity check");
1911 
1912     chunk = free_list->head();
1913 
1914     if (chunk == NULL) {
1915       return NULL;
1916     }
1917 
1918     // Remove the chunk as the head of the list.
1919     free_list->remove_chunk(chunk);
1920 
1921     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1922                                        p2i(free_list), p2i(chunk), chunk->word_size());
1923   } else {
1924     chunk = humongous_dictionary()->get_chunk(
1925       word_size,
1926       FreeBlockDictionary<Metachunk>::atLeast);
1927 
1928     if (chunk == NULL) {
1929       return NULL;
1930     }
1931 
1932     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
1933                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
1934   }
1935 
1936   // Chunk has been removed from the chunk manager; update counters.
1937   account_for_removed_chunk(chunk);
1938 
1939   // Remove it from the links to this freelist
1940   chunk->set_next(NULL);
1941   chunk->set_prev(NULL);
1942 #ifdef ASSERT
1943   // Chunk is no longer on any freelist. Setting this to false makes
1944   // container_count_slow() work.
1945   chunk->set_is_tagged_free(false);
1946 #endif
1947   chunk->container()->inc_container_count();
1948 
1949   slow_locked_verify();
1950   return chunk;
1951 }
1952 
1953 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1954   assert_lock_strong(SpaceManager::expand_lock());
1955   slow_locked_verify();
1956 
1957   // Take from the beginning of the list
1958   Metachunk* chunk = free_chunks_get(word_size);
1959   if (chunk == NULL) {
1960     return NULL;
1961   }
1962 
1963   assert((word_size <= chunk->word_size()) ||
1964          (list_index(chunk->word_size()) == HumongousIndex),
1965          "Non-humongous variable sized chunk");
1966   LogTarget(Debug, gc, metaspace, freelist) lt;
1967   if (lt.is_enabled()) {
1968     size_t list_count;
1969     if (list_index(word_size) < HumongousIndex) {
1970       ChunkList* list = find_free_chunks_list(word_size);
1971       list_count = list->count();
1972     } else {
1973       list_count = humongous_dictionary()->total_count();
1974     }
1975     LogStream ls(lt);
1976     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1977              p2i(this), p2i(chunk), chunk->word_size(), list_count);
1978     ResourceMark rm;
1979     locked_print_free_chunks(&ls);
1980   }
1981 
1982   return chunk;
1983 }
1984 
1985 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
1986   assert_lock_strong(SpaceManager::expand_lock());
1987   assert(chunk != NULL, "Expected chunk.");
1988   assert(chunk->container() != NULL, "Container should have been set.");
1989   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
1990   index_bounds_check(index);
1991 
1992   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
1993   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
1994   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
1995   NOT_PRODUCT(chunk->mangle(badMetaWordVal);)
1996 
1997   if (index != HumongousIndex) {
1998     // Return non-humongous chunk to freelist.
1999     ChunkList* list = free_chunks(index);
2000     assert(list->size() == chunk->word_size(), "Wrong chunk type.");
2001     list->return_chunk_at_head(chunk);
2002     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
2003         chunk_size_name(index), p2i(chunk));
2004   } else {
2005     // Return humongous chunk to dictionary.
2006     assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
2007     assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
2008            "Humongous chunk has wrong alignment.");
2009     _humongous_dictionary.return_chunk(chunk);
2010     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
2011         chunk_size_name(index), p2i(chunk), chunk->word_size());
2012   }
2013   chunk->container()->dec_container_count();
2014   DEBUG_ONLY(chunk->set_is_tagged_free(true);)
2015 
2016   // Chunk has been added; update counters.
2017   account_for_added_chunk(chunk);
2018 
2019 }
2020 
2021 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
2022   index_bounds_check(index);
2023   if (chunks == NULL) {
2024     return;
2025   }
2026   LogTarget(Trace, gc, metaspace, freelist) log;
2027   if (log.is_enabled()) { // tracing
2028     log.print("returning list of %s chunks...", chunk_size_name(index));
2029   }
2030   unsigned num_chunks_returned = 0;
2031   size_t size_chunks_returned = 0;
2032   Metachunk* cur = chunks;
2033   while (cur != NULL) {
2034     // Capture the next link before it is changed
2035     // by the call to return_chunk_at_head();
2036     Metachunk* next = cur->next();
2037     if (log.is_enabled()) { // tracing
2038       num_chunks_returned ++;
2039       size_chunks_returned += cur->word_size();
2040     }
2041     return_single_chunk(index, cur);
2042     cur = next;
2043   }
2044   if (log.is_enabled()) { // tracing
2045     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
2046         num_chunks_returned, chunk_size_name(index), size_chunks_returned);
2047     if (index != HumongousIndex) {
2048       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
2049     } else {
2050       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
2051     }
2052   }
2053 }
2054 
2055 void ChunkManager::print_on(outputStream* out) const {
2056   const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics(out);
2057 }
2058 
2059 // SpaceManager methods
2060 
2061 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
2062   size_t chunk_sizes[] = {
2063       specialized_chunk_size(is_class_space),
2064       small_chunk_size(is_class_space),
2065       medium_chunk_size(is_class_space)
2066   };
2067 
2068   // Adjust up to one of the fixed chunk sizes ...
2069   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
2070     if (requested <= chunk_sizes[i]) {
2071       return chunk_sizes[i];
2072     }
2073   }
2074 
2075   // ... or return the size as a humongous chunk.
2076   return requested;
2077 }
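// Illustrative example (chunk sizes are assumptions): with specialized, small
// and medium sizes of 128, 512 and 8K words, a requested size of 300 words is
// adjusted up to a 512-word small chunk, while a requested size of 20000 words
// exceeds the medium size and is returned unchanged as a humongous size.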
2078 
2079 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
2080   return adjust_initial_chunk_size(requested, is_class());
2081 }
2082 
2083 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
2084   size_t requested;
2085 
2086   if (is_class()) {
2087     switch (type) {
2088     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
2089     case Metaspace::ROMetaspaceType:         requested = ClassSpecializedChunk; break;
2090     case Metaspace::ReadWriteMetaspaceType:  requested = ClassSpecializedChunk; break;
2091     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
2092     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
2093     default:                                 requested = ClassSmallChunk; break;
2094     }
2095   } else {
2096     switch (type) {
2097     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
2098     case Metaspace::ROMetaspaceType:         requested = SharedReadOnlySize / wordSize; break;
2099     case Metaspace::ReadWriteMetaspaceType:  requested = SharedReadWriteSize / wordSize; break;
2100     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
2101     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
2102     default:                                 requested = SmallChunk; break;
2103     }
2104   }
2105 
2106   // Adjust to one of the fixed chunk sizes (unless humongous)
2107   const size_t adjusted = adjust_initial_chunk_size(requested);
2108 
2109   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
2110          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
2111 
2112   return adjusted;
2113 }
2114 
2115 size_t SpaceManager::sum_free_in_chunks_in_use() const {
2116   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2117   size_t free = 0;
2118   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2119     Metachunk* chunk = chunks_in_use(i);
2120     while (chunk != NULL) {
2121       free += chunk->free_word_size();
2122       chunk = chunk->next();
2123     }
2124   }
2125   return free;
2126 }
2127 
2128 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
2129   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2130   size_t result = 0;
2131   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2132    result += sum_waste_in_chunks_in_use(i);
2133   }
2134 
2135   return result;
2136 }
2137 
2138 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
2139   size_t result = 0;
2140   Metachunk* chunk = chunks_in_use(index);
2141   // Count the free space in all the chunks except the
2142   // current chunk, from which allocations are still being made.
2143   while (chunk != NULL) {
2144     if (chunk != current_chunk()) {
2145       result += chunk->free_word_size();
2146     }
2147     chunk = chunk->next();
2148   }
2149   return result;
2150 }
2151 
2152 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
2153   // For CMS use "allocated_chunks_words()", which does not need the
2154   // Metaspace lock.  For the other collectors sum over the chunk
2155   // lists.  Use both methods as a check that "allocated_chunks_words()"
2156   // is correct.  That is, summing the chunk lists is too expensive
2157   // to use in the product, so allocated_chunks_words() should be used,
2158   // but allow for checking that allocated_chunks_words() returns the same
2159   // value as sum_capacity_in_chunks_in_use(), which is the definitive
2160   // answer.
2161   if (UseConcMarkSweepGC) {
2162     return allocated_chunks_words();
2163   } else {
2164     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2165     size_t sum = 0;
2166     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2167       Metachunk* chunk = chunks_in_use(i);
2168       while (chunk != NULL) {
2169         sum += chunk->word_size();
2170         chunk = chunk->next();
2171       }
2172     }
2173     return sum;
2174   }
2175 }
2176 
2177 size_t SpaceManager::sum_count_in_chunks_in_use() {
2178   size_t count = 0;
2179   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2180     count = count + sum_count_in_chunks_in_use(i);
2181   }
2182 
2183   return count;
2184 }
2185 
2186 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
2187   size_t count = 0;
2188   Metachunk* chunk = chunks_in_use(i);
2189   while (chunk != NULL) {
2190     count++;
2191     chunk = chunk->next();
2192   }
2193   return count;
2194 }
2195 
2196 
2197 size_t SpaceManager::sum_used_in_chunks_in_use() const {
2198   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2199   size_t used = 0;
2200   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2201     Metachunk* chunk = chunks_in_use(i);
2202     while (chunk != NULL) {
2203       used += chunk->used_word_size();
2204       chunk = chunk->next();
2205     }
2206   }
2207   return used;
2208 }
2209 
2210 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
2211 
2212   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2213     Metachunk* chunk = chunks_in_use(i);
2214     st->print("SpaceManager: %s " PTR_FORMAT,
2215                  chunk_size_name(i), p2i(chunk));
2216     if (chunk != NULL) {
2217       st->print_cr(" free " SIZE_FORMAT,
2218                    chunk->free_word_size());
2219     } else {
2220       st->cr();
2221     }
2222   }
2223 
2224   chunk_manager()->locked_print_free_chunks(st);
2225   chunk_manager()->locked_print_sum_free_chunks(st);
2226 }
2227 
2228 size_t SpaceManager::calc_chunk_size(size_t word_size) {
2229 
2230   // Decide between a small chunk and a medium chunk.  Up to
2231   // _small_chunk_limit small chunks can be allocated.
2232   // After that a medium chunk is preferred.
2233   size_t chunk_word_size;
2234   if (chunks_in_use(MediumIndex) == NULL &&
2235       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2236     chunk_word_size = (size_t) small_chunk_size();
2237     if (word_size + Metachunk::overhead() > small_chunk_size()) {
2238       chunk_word_size = medium_chunk_size();
2239     }
2240   } else {
2241     chunk_word_size = medium_chunk_size();
2242   }
2243 
2244   // Might still need a humongous chunk.  Enforce
2245   // humongous allocation sizes to be aligned up to
2246   // the smallest chunk size.
2247   size_t if_humongous_sized_chunk =
2248     align_size_up(word_size + Metachunk::overhead(),
2249                   smallest_chunk_size());
2250   chunk_word_size =
2251     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2252 
2253   assert(!SpaceManager::is_humongous(word_size) ||
2254          chunk_word_size == if_humongous_sized_chunk,
2255          "Size calculation is wrong, word_size " SIZE_FORMAT
2256          " chunk_word_size " SIZE_FORMAT,
2257          word_size, chunk_word_size);
2258   Log(gc, metaspace, alloc) log;
2259   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
2260     log.debug("Metadata humongous allocation:");
2261     log.debug("  word_size " SIZE_FORMAT, word_size);
2262     log.debug("  chunk_word_size " SIZE_FORMAT, chunk_word_size);
2263     log.debug("    chunk overhead " SIZE_FORMAT, Metachunk::overhead());
2264   }
2265   return chunk_word_size;
2266 }
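// Illustrative example (chunk sizes are assumptions): with small and medium
// chunk sizes of 512 and 8K words, a 300-word request on a manager still under
// _small_chunk_limit gets a small chunk, a 600-word request gets a medium
// chunk, and a 20000-word request is humongous, so the returned size is
// word_size + overhead aligned up to a multiple of the smallest chunk size.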
2267 
2268 void SpaceManager::track_metaspace_memory_usage() {
2269   if (is_init_completed()) {
2270     if (is_class()) {
2271       MemoryService::track_compressed_class_memory_usage();
2272     }
2273     MemoryService::track_metaspace_memory_usage();
2274   }
2275 }
2276 
2277 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2278   assert(vs_list()->current_virtual_space() != NULL,
2279          "Should have been set");
2280   assert(current_chunk() == NULL ||
2281          current_chunk()->allocate(word_size) == NULL,
2282          "Don't need to expand");
2283   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2284 
2285   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2286     size_t words_left = 0;
2287     size_t words_used = 0;
2288     if (current_chunk() != NULL) {
2289       words_left = current_chunk()->free_word_size();
2290       words_used = current_chunk()->used_word_size();
2291     }
2292     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
2293                                        word_size, words_used, words_left);
2294   }
2295 
2296   // Get another chunk
2297   size_t chunk_word_size = calc_chunk_size(word_size);
2298   Metachunk* next = get_new_chunk(chunk_word_size);
2299 
2300   MetaWord* mem = NULL;
2301 
2302   // If a chunk was available, add it to the in-use chunk list
2303   // and do an allocation from it.
2304   if (next != NULL) {
2305     // Add to this manager's list of chunks in use.
2306     add_chunk(next, false);
2307     mem = next->allocate(word_size);
2308   }
2309 
2310   // Track metaspace memory usage statistic.
2311   track_metaspace_memory_usage();
2312 
2313   return mem;
2314 }
2315 
2316 void SpaceManager::print_on(outputStream* st) const {
2317 
2318   for (ChunkIndex i = ZeroIndex;
2319        i < NumberOfInUseLists ;
2320        i = next_chunk_index(i) ) {
2321     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
2322                  p2i(chunks_in_use(i)),
2323                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2324   }
2325   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2326                " Humongous " SIZE_FORMAT,
2327                sum_waste_in_chunks_in_use(SmallIndex),
2328                sum_waste_in_chunks_in_use(MediumIndex),
2329                sum_waste_in_chunks_in_use(HumongousIndex));
2330   // block free lists
2331   if (block_freelists() != NULL) {
2332     st->print_cr("total in block free lists " SIZE_FORMAT,
2333       block_freelists()->total_size());
2334   }
2335 }
2336 
2337 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2338                            Mutex* lock) :
2339   _mdtype(mdtype),
2340   _allocated_blocks_words(0),
2341   _allocated_chunks_words(0),
2342   _allocated_chunks_count(0),
2343   _block_freelists(NULL),
2344   _lock(lock)
2345 {
2346   initialize();
2347 }
2348 
2349 void SpaceManager::inc_size_metrics(size_t words) {
2350   assert_lock_strong(SpaceManager::expand_lock());
2351   // Per-SpaceManager totals: words in allocated Metachunks and the
2352   // number of allocated Metachunks.
2353   _allocated_chunks_words = _allocated_chunks_words + words;
2354   _allocated_chunks_count++;
2355   // Global total of capacity in allocated Metachunks
2356   MetaspaceAux::inc_capacity(mdtype(), words);
2357   // Global total of allocated Metablocks.
2358   // used_words_slow() includes the overhead in each
2359   // Metachunk so include it in the used when the
2360   // Metachunk is first added (so only added once per
2361   // Metachunk).
2362   MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2363 }
2364 
2365 void SpaceManager::inc_used_metrics(size_t words) {
2366   // Add to the per SpaceManager total
2367   Atomic::add_ptr(words, &_allocated_blocks_words);
2368   // Add to the global total
2369   MetaspaceAux::inc_used(mdtype(), words);
2370 }
2371 
2372 void SpaceManager::dec_total_from_size_metrics() {
2373   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2374   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2375   // Also deduct the overhead per Metachunk
2376   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2377 }
2378 
2379 void SpaceManager::initialize() {
2380   Metadebug::init_allocation_fail_alot_count();
2381   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2382     _chunks_in_use[i] = NULL;
2383   }
2384   _current_chunk = NULL;
2385   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
2386 }
2387 
2388 SpaceManager::~SpaceManager() {
2389   // The assert below takes this->_lock, which can't be done while holding expand_lock().
2390   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2391          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2392          " allocated_chunks_words() " SIZE_FORMAT,
2393          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2394 
2395   MutexLockerEx fcl(SpaceManager::expand_lock(),
2396                     Mutex::_no_safepoint_check_flag);
2397 
2398   chunk_manager()->slow_locked_verify();
2399 
2400   dec_total_from_size_metrics();
2401 
2402   Log(gc, metaspace, freelist) log;
2403   if (log.is_trace()) {
2404     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
2405     ResourceMark rm;
2406     LogStream ls(log.trace());
2407     locked_print_chunks_in_use_on(&ls);
2408     if (block_freelists() != NULL) {
2409       block_freelists()->print_on(&ls);
2410     }
2411   }
2412 
2413   // Add all the chunks in use by this space manager
2414   // to the global list of free chunks.
2415 
2416   // Follow each list of chunks-in-use and add them to the
2417   // free lists.  Each list is NULL terminated.
2418 
2419   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
2420     Metachunk* chunks = chunks_in_use(i);
2421     chunk_manager()->return_chunk_list(i, chunks);
2422     set_chunks_in_use(i, NULL);
2423   }
2424 
2425   chunk_manager()->slow_locked_verify();
2426 
2427   if (_block_freelists != NULL) {
2428     delete _block_freelists;
2429   }
2430 }
2431 
2432 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2433   assert_lock_strong(_lock);
2434   // Allocations and deallocations are in raw_word_size
2435   size_t raw_word_size = get_allocation_word_size(word_size);
2436   // Lazily create a block_freelist
2437   if (block_freelists() == NULL) {
2438     _block_freelists = new BlockFreelist();
2439   }
2440   block_freelists()->return_block(p, raw_word_size);
2441 }
2442 
2443 // Adds a chunk to the list of chunks in use.
2444 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2445 
2446   assert(new_chunk != NULL, "Should not be NULL");
2447   assert(new_chunk->next() == NULL, "Should not be on a list");
2448 
2449   new_chunk->reset_empty();
2450 
2451   // Find the correct list and set the current
2452   // chunk for that list.
2453   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
2454 
2455   if (index != HumongousIndex) {
2456     retire_current_chunk();
2457     set_current_chunk(new_chunk);
2458     new_chunk->set_next(chunks_in_use(index));
2459     set_chunks_in_use(index, new_chunk);
2460   } else {
2461     // For null class loader data and DumpSharedSpaces, the first chunk isn't
2462     // small, so the small chunk list will be null.  Link this first chunk
2463     // as the current chunk.
2464     if (make_current) {
2465       // Set as the current chunk but otherwise treat as a humongous chunk.
2466       set_current_chunk(new_chunk);
2467     }
2468     // Link at head.  The _current_chunk only points to a humongous chunk for
2469     // the null class loader metaspace (class and data virtual space managers),
2470     // so it will not necessarily point to the tail of the humongous
2471     // chunks list.
2472     new_chunk->set_next(chunks_in_use(HumongousIndex));
2473     set_chunks_in_use(HumongousIndex, new_chunk);
2474 
2475     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2476   }
2477 
2478   // Add to the running sum of capacity
2479   inc_size_metrics(new_chunk->word_size());
2480 
2481   assert(new_chunk->is_empty(), "Not ready for reuse");
2482   Log(gc, metaspace, freelist) log;
2483   if (log.is_trace()) {
2484     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT, sum_count_in_chunks_in_use());
2485     ResourceMark rm;
2486     LogStream ls(log.trace());
2487     new_chunk->print_on(&ls);
2488     chunk_manager()->locked_print_free_chunks(&ls);
2489   }
2490 }
2491 
2492 void SpaceManager::retire_current_chunk() {
2493   if (current_chunk() != NULL) {
2494     size_t remaining_words = current_chunk()->free_word_size();
2495     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
2496       MetaWord* ptr = current_chunk()->allocate(remaining_words);
2497       deallocate(ptr, remaining_words);
2498       inc_used_metrics(remaining_words);
2499     }
2500   }
2501 }
2502 
2503 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
2504   // Get a chunk from the chunk freelist
2505   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
2506 
2507   if (next == NULL) {
2508     next = vs_list()->get_new_chunk(chunk_word_size,
2509                                     medium_chunk_bunch());
2510   }
2511 
2512   Log(gc, metaspace, alloc) log;
2513   if (log.is_debug() && next != NULL &&
2514       SpaceManager::is_humongous(next->word_size())) {
2515     log.debug("  new humongous chunk word size " SIZE_FORMAT, next->word_size());
2516   }
2517 
2518   return next;
2519 }
2520 
2521 /*
2522  * The policy is to allocate up to _small_chunk_limit small chunks
2523  * after which only medium chunks are allocated.  This is done to
2524  * reduce fragmentation.  In some cases, this can result in a lot
2525  * of small chunks being allocated to the point where it's not
2526  * possible to expand.  If this happens, there may be no medium chunks
2527  * available and OOME would be thrown.  Instead of doing that,
2528  * if the allocation request size fits in a small chunk, an attempt
2529  * will be made to allocate a small chunk.
2530  */
2531 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
2532   size_t raw_word_size = get_allocation_word_size(word_size);
2533 
2534   if (raw_word_size + Metachunk::overhead() > small_chunk_size()) {
2535     return NULL;
2536   }
2537 
2538   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2539   MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag);
2540 
2541   Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size());
2542 
2543   MetaWord* mem = NULL;
2544 
2545   if (chunk != NULL) {
2546     // Add chunk to the in-use chunk list and do an allocation from it.
2547     // Add to this manager's list of chunks in use.
2548     add_chunk(chunk, false);
2549     mem = chunk->allocate(raw_word_size);
2550 
2551     inc_used_metrics(raw_word_size);
2552 
2553     // Track metaspace memory usage statistic.
2554     track_metaspace_memory_usage();
2555   }
2556 
2557   return mem;
2558 }
2559 
2560 MetaWord* SpaceManager::allocate(size_t word_size) {
2561   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2562   size_t raw_word_size = get_allocation_word_size(word_size);
2563   BlockFreelist* fl =  block_freelists();
2564   MetaWord* p = NULL;
2565   // Allocation from the dictionary is expensive in the sense that
2566   // the dictionary has to be searched for a size.  Don't allocate
2567   // from the dictionary until it starts to get fat.  Is this
2568   // a reasonable policy?  Maybe a skinny dictionary is fast enough
2569   // for allocations.  Do some profiling.  JJJ
2570   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
2571     p = fl->get_block(raw_word_size);
2572   }
2573   if (p == NULL) {
2574     p = allocate_work(raw_word_size);
2575   }
2576 
2577   return p;
2578 }
2579 
2580 // Returns the address of the space allocated for "word_size".
2581 // This method does not know about blocks (Metablocks).
2582 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2583   assert_lock_strong(_lock);
2584 #ifdef ASSERT
2585   if (Metadebug::test_metadata_failure()) {
2586     return NULL;
2587   }
2588 #endif
2589   // Is there space in the current chunk?
2590   MetaWord* result = NULL;
2591 
2592   // For DumpSharedSpaces, only allocate out of the current chunk which is
2593   // never null because we gave it the size we wanted.   Caller reports out
2594   // of memory if this returns null.
2595   if (DumpSharedSpaces) {
2596     assert(current_chunk() != NULL, "should never happen");
2597     inc_used_metrics(word_size);
2598     return current_chunk()->allocate(word_size); // caller handles null result
2599   }
2600 
2601   if (current_chunk() != NULL) {
2602     result = current_chunk()->allocate(word_size);
2603   }
2604 
2605   if (result == NULL) {
2606     result = grow_and_allocate(word_size);
2607   }
2608 
2609   if (result != NULL) {
2610     inc_used_metrics(word_size);
2611     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2612            "Head of the list is being allocated");
2613   }
2614 
2615   return result;
2616 }
2617 
2618 void SpaceManager::verify() {
2619   // If there are blocks in the dictionary, then
2620   // verification of chunks does not work since
2621   // being in the dictionary alters a chunk.
2622   if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
2623     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2624       Metachunk* curr = chunks_in_use(i);
2625       while (curr != NULL) {
2626         curr->verify();
2627         verify_chunk_size(curr);
2628         curr = curr->next();
2629       }
2630     }
2631   }
2632 }
2633 
2634 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2635   assert(is_humongous(chunk->word_size()) ||
2636          chunk->word_size() == medium_chunk_size() ||
2637          chunk->word_size() == small_chunk_size() ||
2638          chunk->word_size() == specialized_chunk_size(),
2639          "Chunk size is wrong");
2640   return;
2641 }
2642 
2643 #ifdef ASSERT
2644 void SpaceManager::verify_allocated_blocks_words() {
2645   // Verification is only guaranteed at a safepoint.
2646   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2647     "Verification can fail if the application is running");
2648   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2649          "allocation total is not consistent " SIZE_FORMAT
2650          " vs " SIZE_FORMAT,
2651          allocated_blocks_words(), sum_used_in_chunks_in_use());
2652 }
2653 
2654 #endif
2655 
2656 void SpaceManager::dump(outputStream* const out) const {
2657   size_t curr_total = 0;
2658   size_t waste = 0;
2659   uint i = 0;
2660   size_t used = 0;
2661   size_t capacity = 0;
2662 
2663   // Add up statistics for all chunks in this SpaceManager.
2664   for (ChunkIndex index = ZeroIndex;
2665        index < NumberOfInUseLists;
2666        index = next_chunk_index(index)) {
2667     for (Metachunk* curr = chunks_in_use(index);
2668          curr != NULL;
2669          curr = curr->next()) {
2670       out->print("%d) ", i++);
2671       curr->print_on(out);
2672       curr_total += curr->word_size();
2673       used += curr->used_word_size();
2674       capacity += curr->word_size();
2675       waste += curr->free_word_size() + curr->overhead();
2676     }
2677   }
2678 
2679   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2680     if (block_freelists() != NULL) block_freelists()->print_on(out);
2681   }
2682 
2683   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2684   // Free space isn't wasted.
2685   waste -= free;
2686 
2687   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2688                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2689                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2690 }
2691 
2692 // MetaspaceAux
2693 
2694 
2695 size_t MetaspaceAux::_capacity_words[] = {0, 0};
2696 size_t MetaspaceAux::_used_words[] = {0, 0};
2697 
2698 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2699   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2700   return list == NULL ? 0 : list->free_bytes();
2701 }
2702 
2703 size_t MetaspaceAux::free_bytes() {
2704   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2705 }
2706 
2707 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2708   assert_lock_strong(SpaceManager::expand_lock());
2709   assert(words <= capacity_words(mdtype),
2710          "About to decrement below 0: words " SIZE_FORMAT
2711          " is greater than _capacity_words[%u] " SIZE_FORMAT,
2712          words, mdtype, capacity_words(mdtype));
2713   _capacity_words[mdtype] -= words;
2714 }
2715 
2716 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2717   assert_lock_strong(SpaceManager::expand_lock());
2718   // Needs to be atomic
2719   _capacity_words[mdtype] += words;
2720 }
2721 
2722 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2723   assert(words <= used_words(mdtype),
2724          "About to decrement below 0: words " SIZE_FORMAT
2725          " is greater than _used_words[%u] " SIZE_FORMAT,
2726          words, mdtype, used_words(mdtype));
2727   // For CMS deallocation of the Metaspaces occurs during the
2728   // sweep which is a concurrent phase.  Protection by the expand_lock()
2729   // is not enough since allocation is on a per Metaspace basis
2730   // and protected by the Metaspace lock.
2731   jlong minus_words = (jlong) - (jlong) words;
2732   Atomic::add_ptr(minus_words, &_used_words[mdtype]);
2733 }
2734 
2735 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2736   // _used_words tracks allocations for
2737   // each piece of metadata.  Those allocations are
2738   // generally done concurrently by different application
2739   // threads so must be done atomically.
2740   Atomic::add_ptr(words, &_used_words[mdtype]);
2741 }
2742 
2743 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2744   size_t used = 0;
2745   ClassLoaderDataGraphMetaspaceIterator iter;
2746   while (iter.repeat()) {
2747     Metaspace* msp = iter.get_next();
2748     // Sum allocated_blocks_words for each metaspace
2749     if (msp != NULL) {
2750       used += msp->used_words_slow(mdtype);
2751     }
2752   }
2753   return used * BytesPerWord;
2754 }
2755 
2756 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2757   size_t free = 0;
2758   ClassLoaderDataGraphMetaspaceIterator iter;
2759   while (iter.repeat()) {
2760     Metaspace* msp = iter.get_next();
2761     if (msp != NULL) {
2762       free += msp->free_words_slow(mdtype);
2763     }
2764   }
2765   return free * BytesPerWord;
2766 }
2767 
2768 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2769   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2770     return 0;
2771   }
2772   // Don't count the space in the freelists.  That space will be
2773   // added to the capacity calculation as needed.
2774   size_t capacity = 0;
2775   ClassLoaderDataGraphMetaspaceIterator iter;
2776   while (iter.repeat()) {
2777     Metaspace* msp = iter.get_next();
2778     if (msp != NULL) {
2779       capacity += msp->capacity_words_slow(mdtype);
2780     }
2781   }
2782   return capacity * BytesPerWord;
2783 }
2784 
2785 size_t MetaspaceAux::capacity_bytes_slow() {
2786 #ifdef PRODUCT
2787   // Use capacity_bytes() in PRODUCT instead of this function.
2788   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2789 #endif
2790   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2791   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2792   assert(capacity_bytes() == class_capacity + non_class_capacity,
2793          "bad accounting: capacity_bytes() " SIZE_FORMAT
2794          " class_capacity + non_class_capacity " SIZE_FORMAT
2795          " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2796          capacity_bytes(), class_capacity + non_class_capacity,
2797          class_capacity, non_class_capacity);
2798 
2799   return class_capacity + non_class_capacity;
2800 }
2801 
2802 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2803   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2804   return list == NULL ? 0 : list->reserved_bytes();
2805 }
2806 
2807 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2808   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2809   return list == NULL ? 0 : list->committed_bytes();
2810 }
2811 
2812 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2813 
2814 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2815   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2816   if (chunk_manager == NULL) {
2817     return 0;
2818   }
2819   chunk_manager->slow_verify();
2820   return chunk_manager->free_chunks_total_words();
2821 }
2822 
2823 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2824   return free_chunks_total_words(mdtype) * BytesPerWord;
2825 }
2826 
2827 size_t MetaspaceAux::free_chunks_total_words() {
2828   return free_chunks_total_words(Metaspace::ClassType) +
2829          free_chunks_total_words(Metaspace::NonClassType);
2830 }
2831 
2832 size_t MetaspaceAux::free_chunks_total_bytes() {
2833   return free_chunks_total_words() * BytesPerWord;
2834 }
2835 
2836 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2837   return Metaspace::get_chunk_manager(mdtype) != NULL;
2838 }
2839 
2840 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
2841   if (!has_chunk_free_list(mdtype)) {
2842     return MetaspaceChunkFreeListSummary();
2843   }
2844 
2845   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
2846   return cm->chunk_free_list_summary();
2847 }
2848 
2849 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
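  // The resulting log line reads, e.g., "Metaspace: 1024K->896K(4096K)":
  // metadata used before the GC -> used after the GC (reserved).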
2850   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
2851                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
2852 }
2853 
2854 void MetaspaceAux::print_on(outputStream* out) {
2855   Metaspace::MetadataType nct = Metaspace::NonClassType;
2856 
2857   out->print_cr(" Metaspace       "
2858                 "used "      SIZE_FORMAT "K, "
2859                 "capacity "  SIZE_FORMAT "K, "
2860                 "committed " SIZE_FORMAT "K, "
2861                 "reserved "  SIZE_FORMAT "K",
2862                 used_bytes()/K,
2863                 capacity_bytes()/K,
2864                 committed_bytes()/K,
2865                 reserved_bytes()/K);
2866 
2867   if (Metaspace::using_class_space()) {
2868     Metaspace::MetadataType ct = Metaspace::ClassType;
2869     out->print_cr("  class space    "
2870                   "used "      SIZE_FORMAT "K, "
2871                   "capacity "  SIZE_FORMAT "K, "
2872                   "committed " SIZE_FORMAT "K, "
2873                   "reserved "  SIZE_FORMAT "K",
2874                   used_bytes(ct)/K,
2875                   capacity_bytes(ct)/K,
2876                   committed_bytes(ct)/K,
2877                   reserved_bytes(ct)/K);
2878   }
2879 }
2880 
2881 // Print information for class space and data space separately.
2882 // This is almost the same as above.
2883 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2884   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2885   size_t capacity_bytes = capacity_bytes_slow(mdtype);
2886   size_t used_bytes = used_bytes_slow(mdtype);
2887   size_t free_bytes = free_bytes_slow(mdtype);
2888   size_t used_and_free = used_bytes + free_bytes +
2889                            free_chunks_capacity_bytes;
2890   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
2891              "K + unused in chunks " SIZE_FORMAT "K  + "
2892              " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2893              "K  capacity in allocated chunks " SIZE_FORMAT "K",
2894              used_bytes / K,
2895              free_bytes / K,
2896              free_chunks_capacity_bytes / K,
2897              used_and_free / K,
2898              capacity_bytes / K);
2899   // Accounting can only be correct if we got the values during a safepoint
2900   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2901 }
2902 
2903 // Print total fragmentation for class metaspaces
2904 void MetaspaceAux::print_class_waste(outputStream* out) {
2905   assert(Metaspace::using_class_space(), "class metaspace not used");
2906   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2907   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2908   ClassLoaderDataGraphMetaspaceIterator iter;
2909   while (iter.repeat()) {
2910     Metaspace* msp = iter.get_next();
2911     if (msp != NULL) {
2912       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2913       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2914       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2915       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2916       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2917       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2918       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2919     }
2920   }
2921   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2922                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2923                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2924                 "large count " SIZE_FORMAT,
2925                 cls_specialized_count, cls_specialized_waste,
2926                 cls_small_count, cls_small_waste,
2927                 cls_medium_count, cls_medium_waste, cls_humongous_count);
2928 }
2929 
2930 // Print total fragmentation for data and class metaspaces separately
2931 void MetaspaceAux::print_waste(outputStream* out) {
2932   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2933   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2934 
2935   ClassLoaderDataGraphMetaspaceIterator iter;
2936   while (iter.repeat()) {
2937     Metaspace* msp = iter.get_next();
2938     if (msp != NULL) {
2939       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2940       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2941       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2942       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2943       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2944       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2945       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2946     }
2947   }
2948   out->print_cr("Total fragmentation waste (words) doesn't count free space");
2949   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2950                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2951                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2952                         "large count " SIZE_FORMAT,
2953              specialized_count, specialized_waste, small_count,
2954              small_waste, medium_count, medium_waste, humongous_count);
2955   if (Metaspace::using_class_space()) {
2956     print_class_waste(out);
2957   }
2958 }
2959 
2960 // Dump global metaspace things from the end of ClassLoaderDataGraph
2961 void MetaspaceAux::dump(outputStream* out) {
2962   out->print_cr("All Metaspace:");
2963   out->print("data space: "); print_on(out, Metaspace::NonClassType);
2964   out->print("class space: "); print_on(out, Metaspace::ClassType);
2965   print_waste(out);
2966 }
2967 
2968 void MetaspaceAux::verify_free_chunks() {
2969   Metaspace::chunk_manager_metadata()->verify();
2970   if (Metaspace::using_class_space()) {
2971     Metaspace::chunk_manager_class()->verify();
2972   }
2973 }
2974 
2975 void MetaspaceAux::verify_capacity() {
2976 #ifdef ASSERT
2977   size_t running_sum_capacity_bytes = capacity_bytes();
  // Verify the running capacity sum against the capacity recomputed by iterating over all metaspaces.
2979   size_t capacity_in_use_bytes = capacity_bytes_slow();
2980   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2981          "capacity_words() * BytesPerWord " SIZE_FORMAT
2982          " capacity_bytes_slow()" SIZE_FORMAT,
2983          running_sum_capacity_bytes, capacity_in_use_bytes);
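  // Also check each metadata type (class and non-class) individually.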
2984   for (Metaspace::MetadataType i = Metaspace::ClassType;
2985        i < Metaspace:: MetadataTypeCount;
2986        i = (Metaspace::MetadataType)(i + 1)) {
2987     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2988     assert(capacity_bytes(i) == capacity_in_use_bytes,
2989            "capacity_bytes(%u) " SIZE_FORMAT
2990            " capacity_bytes_slow(%u)" SIZE_FORMAT,
2991            i, capacity_bytes(i), i, capacity_in_use_bytes);
2992   }
2993 #endif
2994 }
2995 
2996 void MetaspaceAux::verify_used() {
2997 #ifdef ASSERT
2998   size_t running_sum_used_bytes = used_bytes();
  // Verify the running sum of used bytes against the value recomputed by iterating over all metaspaces.
3000   size_t used_in_use_bytes = used_bytes_slow();
3001   assert(used_bytes() == used_in_use_bytes,
3002          "used_bytes() " SIZE_FORMAT
3003          " used_bytes_slow()" SIZE_FORMAT,
3004          used_bytes(), used_in_use_bytes);
3005   for (Metaspace::MetadataType i = Metaspace::ClassType;
3006        i < Metaspace:: MetadataTypeCount;
3007        i = (Metaspace::MetadataType)(i + 1)) {
3008     size_t used_in_use_bytes = used_bytes_slow(i);
3009     assert(used_bytes(i) == used_in_use_bytes,
3010            "used_bytes(%u) " SIZE_FORMAT
3011            " used_bytes_slow(%u)" SIZE_FORMAT,
3012            i, used_bytes(i), i, used_in_use_bytes);
3013   }
3014 #endif
3015 }
3016 
3017 void MetaspaceAux::verify_metrics() {
3018   verify_capacity();
3019   verify_used();
3020 }
3021 
3022 
3023 // Metaspace methods
3024 
3025 size_t Metaspace::_first_chunk_word_size = 0;
3026 size_t Metaspace::_first_class_chunk_word_size = 0;
3027 
3028 size_t Metaspace::_commit_alignment = 0;
3029 size_t Metaspace::_reserve_alignment = 0;
3030 
3031 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
3032   initialize(lock, type);
3033 }
3034 
3035 Metaspace::~Metaspace() {
3036   delete _vsm;
3037   if (using_class_space()) {
3038     delete _class_vsm;
3039   }
3040 }
3041 
3042 VirtualSpaceList* Metaspace::_space_list = NULL;
3043 VirtualSpaceList* Metaspace::_class_space_list = NULL;
3044 
3045 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
3046 ChunkManager* Metaspace::_chunk_manager_class = NULL;
3047 
3048 #define VIRTUALSPACEMULTIPLIER 2
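// The initial metadata virtual space is sized as VIRTUALSPACEMULTIPLIER times the
// boot class loader's first chunk size (see Metaspace::global_initialize()).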
3049 
3050 #ifdef _LP64
3051 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
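// UnscaledClassSpaceMax is 4G: the largest range a 32-bit narrow Klass pointer can
// cover without applying a shift.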
3052 
3053 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
3054   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
3055   // narrow_klass_base is the lower of the metaspace base and the cds base
3056   // (if cds is enabled).  The narrow_klass_shift depends on the distance
3057   // between the lower base and higher address.
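  // Roughly, a compressed Klass pointer is later decoded as
  //   Klass* k = (Klass*)(narrow_klass_base + ((uintptr_t)narrow_klass << narrow_klass_shift));
  // so base and shift must be chosen such that every Klass in the class space (and in
  // the CDS archive, if mapped) is reachable.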
3058   address lower_base;
3059   address higher_address;
3060 #if INCLUDE_CDS
3061   if (UseSharedSpaces) {
3062     higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
3063                           (address)(metaspace_base + compressed_class_space_size()));
3064     lower_base = MIN2(metaspace_base, cds_base);
3065   } else
3066 #endif
3067   {
3068     higher_address = metaspace_base + compressed_class_space_size();
3069     lower_base = metaspace_base;
3070 
3071     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
3072     // If compressed class space fits in lower 32G, we don't need a base.
3073     if (higher_address <= (address)klass_encoding_max) {
3074       lower_base = 0; // Effectively lower base is zero.
3075     }
3076   }
3077 
3078   Universe::set_narrow_klass_base(lower_base);
3079 
3080   if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
3081     Universe::set_narrow_klass_shift(0);
3082   } else {
3083     assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
3084     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
3085   }
3086   AOTLoader::set_narrow_klass_shift();
3087 }
3088 
3089 #if INCLUDE_CDS
3090 // Return TRUE if the specified metaspace_base and cds_base are close enough
3091 // to work with compressed klass pointers.
3092 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
3093   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
3094   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3095   address lower_base = MIN2((address)metaspace_base, cds_base);
3096   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
3097                                 (address)(metaspace_base + compressed_class_space_size()));
3098   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
3099 }
3100 #endif
3101 
3102 // Try to allocate the metaspace at the requested addr.
3103 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
3104   assert(using_class_space(), "called improperly");
3105   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3106   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
3107          "Metaspace size is too big");
3108   assert_is_ptr_aligned(requested_addr, _reserve_alignment);
3109   assert_is_ptr_aligned(cds_base, _reserve_alignment);
3110   assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
3111 
3112   // Don't use large pages for the class space.
3113   bool large_pages = false;
3114 
3115 #if !(defined(AARCH64) || defined(AIX))
3116   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
3117                                              _reserve_alignment,
3118                                              large_pages,
3119                                              requested_addr);
#else // AARCH64 || AIX
3121   ReservedSpace metaspace_rs;
3122 
3123   // Our compressed klass pointers may fit nicely into the lower 32
3124   // bits.
3125   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
3126     metaspace_rs = ReservedSpace(compressed_class_space_size(),
3127                                  _reserve_alignment,
3128                                  large_pages,
3129                                  requested_addr);
3130   }
3131 
3132   if (! metaspace_rs.is_reserved()) {
3133     // Aarch64: Try to align metaspace so that we can decode a compressed
3134     // klass with a single MOVK instruction.  We can do this iff the
3135     // compressed class base is a multiple of 4G.
3136     // Aix: Search for a place where we can find memory. If we need to load
3137     // the base, 4G alignment is helpful, too.
3138     size_t increment = AARCH64_ONLY(4*)G;
3139     for (char *a = (char*)align_ptr_up(requested_addr, increment);
3140          a < (char*)(1024*G);
3141          a += increment) {
3142       if (a == (char *)(32*G)) {
3143         // Go faster from here on. Zero-based is no longer possible.
3144         increment = 4*G;
3145       }
3146 
3147 #if INCLUDE_CDS
3148       if (UseSharedSpaces
3149           && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
3150         // We failed to find an aligned base that will reach.  Fall
3151         // back to using our requested addr.
3152         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3153                                      _reserve_alignment,
3154                                      large_pages,
3155                                      requested_addr);
3156         break;
3157       }
3158 #endif
3159 
3160       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3161                                    _reserve_alignment,
3162                                    large_pages,
3163                                    a);
3164       if (metaspace_rs.is_reserved())
3165         break;
3166     }
3167   }
3168 
#endif // AARCH64 || AIX
3170 
3171   if (!metaspace_rs.is_reserved()) {
3172 #if INCLUDE_CDS
3173     if (UseSharedSpaces) {
3174       size_t increment = align_size_up(1*G, _reserve_alignment);
3175 
3176       // Keep trying to allocate the metaspace, increasing the requested_addr
3177       // by 1GB each time, until we reach an address that will no longer allow
3178       // use of CDS with compressed klass pointers.
3179       char *addr = requested_addr;
3180       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3181              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3182         addr = addr + increment;
3183         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3184                                      _reserve_alignment, large_pages, addr);
3185       }
3186     }
3187 #endif
3188     // If no successful allocation then try to allocate the space anywhere.  If
3189     // that fails then OOM doom.  At this point we cannot try allocating the
3190     // metaspace as if UseCompressedClassPointers is off because too much
3191     // initialization has happened that depends on UseCompressedClassPointers.
3192     // So, UseCompressedClassPointers cannot be turned off at this point.
3193     if (!metaspace_rs.is_reserved()) {
3194       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3195                                    _reserve_alignment, large_pages);
3196       if (!metaspace_rs.is_reserved()) {
3197         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
3198                                               compressed_class_space_size()));
3199       }
3200     }
3201   }
3202 
3203   // If we got here then the metaspace got allocated.
3204   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3205 
3206 #if INCLUDE_CDS
3207   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3208   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3209     FileMapInfo::stop_sharing_and_unmap(
3210         "Could not allocate metaspace at a compatible address");
3211   }
3212 #endif
3213   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3214                                   UseSharedSpaces ? (address)cds_base : 0);
3215 
3216   initialize_class_space(metaspace_rs);
3217 
3218   LogTarget(Trace, gc, metaspace) lt;
3219   if (lt.is_enabled()) {
3220     ResourceMark rm;
3221     LogStream ls(lt);
3222     print_compressed_class_space(&ls, requested_addr);
3223   }
3224 }
3225 
3226 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3227   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3228                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3229   if (_class_space_list != NULL) {
3230     address base = (address)_class_space_list->current_virtual_space()->bottom();
3231     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3232                  compressed_class_space_size(), p2i(base));
3233     if (requested_addr != 0) {
3234       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3235     }
3236     st->cr();
3237   }
3238 }
3239 
3240 // For UseCompressedClassPointers the class space is reserved above the top of
3241 // the Java heap.  The argument passed in is at the base of the compressed space.
3242 void Metaspace::initialize_class_space(ReservedSpace rs) {
3243   // The reserved space size may be bigger because of alignment, esp with UseLargePages
3244   assert(rs.size() >= CompressedClassSpaceSize,
3245          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
3246   assert(using_class_space(), "Must be using class space");
3247   _class_space_list = new VirtualSpaceList(rs);
3248   _chunk_manager_class = new ChunkManager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3249 
3250   if (!_class_space_list->initialization_succeeded()) {
3251     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3252   }
3253 }
3254 
3255 #endif
3256 
3257 void Metaspace::ergo_initialize() {
3258   if (DumpSharedSpaces) {
3259     // Using large pages when dumping the shared archive is currently not implemented.
3260     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3261   }
3262 
3263   size_t page_size = os::vm_page_size();
3264   if (UseLargePages && UseLargePagesInMetaspace) {
3265     page_size = os::large_page_size();
3266   }
3267 
3268   _commit_alignment  = page_size;
3269   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
3270 
  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since that would
  // overwrite the record of whether MaxMetaspaceSize was set on the command line.
3273   // This information is needed later to conform to the specification of the
3274   // java.lang.management.MemoryUsage API.
3275   //
3276   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3277   // globals.hpp to the aligned value, but this is not possible, since the
3278   // alignment depends on other flags being parsed.
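  // Note: align_size_down_bounded() never goes below the alignment itself, so the
  // resulting MaxMetaspaceSize stays non-zero.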
3279   MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3280 
3281   if (MetaspaceSize > MaxMetaspaceSize) {
3282     MetaspaceSize = MaxMetaspaceSize;
3283   }
3284 
3285   MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
3286 
3287   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3288 
3289   MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3290   MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3291 
3292   CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3293   set_compressed_class_space_size(CompressedClassSpaceSize);
3294 }
3295 
3296 void Metaspace::global_initialize() {
3297   MetaspaceGC::initialize();
3298 
3299   // Initialize the alignment for shared spaces.
3300   int max_alignment = os::vm_allocation_granularity();
3301   size_t cds_total = 0;
3302 
3303   MetaspaceShared::set_max_alignment(max_alignment);
3304 
3305   if (DumpSharedSpaces) {
3306 #if INCLUDE_CDS
3307     MetaspaceShared::estimate_regions_size();
3308 
3309     SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
3310     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
3311     SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
3312     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
3313 
    // Initialize with the sum of the shared space sizes.  The read-only
    // and read-write metaspace chunks will be allocated out of this and the
    // remainder is the misc code and data chunks.
3317     cds_total = FileMapInfo::shared_spaces_size();
3318     cds_total = align_size_up(cds_total, _reserve_alignment);
3319     _space_list = new VirtualSpaceList(cds_total/wordSize);
3320     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3321 
3322     if (!_space_list->initialization_succeeded()) {
3323       vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3324     }
3325 
3326 #ifdef _LP64
3327     if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3328       vm_exit_during_initialization("Unable to dump shared archive.",
3329           err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3330                   SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3331                   "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(),
3332                   cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3333     }
3334 
3335     // Set the compressed klass pointer base so that decoding of these pointers works
3336     // properly when creating the shared archive.
3337     assert(UseCompressedOops && UseCompressedClassPointers,
3338       "UseCompressedOops and UseCompressedClassPointers must be set");
3339     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3340     log_develop_trace(gc, metaspace)("Setting_narrow_klass_base to Address: " PTR_FORMAT,
3341                                      p2i(_space_list->current_virtual_space()->bottom()));
3342 
3343     Universe::set_narrow_klass_shift(0);
3344 #endif // _LP64
3345 #endif // INCLUDE_CDS
3346   } else {
3347 #if INCLUDE_CDS
3348     if (UseSharedSpaces) {
3349       // If using shared space, open the file that contains the shared space
3350       // and map in the memory before initializing the rest of metaspace (so
3351       // the addresses don't conflict)
3352       address cds_address = NULL;
3353       FileMapInfo* mapinfo = new FileMapInfo();
3354 
3355       // Open the shared archive file, read and validate the header. If
3356       // initialization fails, shared spaces [UseSharedSpaces] are
3357       // disabled and the file is closed.
      // The spaces are mapped in now as well.
3359       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3360         cds_total = FileMapInfo::shared_spaces_size();
3361         cds_address = (address)mapinfo->header()->region_addr(0);
3362 #ifdef _LP64
3363         if (using_class_space()) {
3364           char* cds_end = (char*)(cds_address + cds_total);
3365           cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3366           // If UseCompressedClassPointers is set then allocate the metaspace area
3367           // above the heap and above the CDS area (if it exists).
3368           allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3369           // Map the shared string space after compressed pointers
3370           // because it relies on compressed class pointers setting to work
3371           mapinfo->map_string_regions();
3372         }
3373 #endif // _LP64
3374       } else {
3375         assert(!mapinfo->is_open() && !UseSharedSpaces,
3376                "archive file not closed or shared spaces not disabled.");
3377       }
3378     }
3379 #endif // INCLUDE_CDS
3380 
3381 #ifdef _LP64
3382     if (!UseSharedSpaces && using_class_space()) {
3383       char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3384       allocate_metaspace_compressed_klass_ptrs(base, 0);
3385     }
3386 #endif // _LP64
3387 
3388     // Initialize these before initializing the VirtualSpaceList
3389     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3390     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3391     // Make the first class chunk bigger than a medium chunk so it's not put
3392     // on the medium chunk list.   The next chunk will be small and progress
    // from there.  This size was determined by what a -version run needs.
3394     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3395                                        (CompressedClassSpaceSize/BytesPerWord)*2);
3396     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3397     // Arbitrarily set the initial virtual space to a multiple
3398     // of the boot class loader size.
3399     size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3400     word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3401 
3402     // Initialize the list of virtual spaces.
3403     _space_list = new VirtualSpaceList(word_size);
3404     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3405 
3406     if (!_space_list->initialization_succeeded()) {
3407       vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3408     }
3409   }
3410 
3411   _tracer = new MetaspaceTracer();
3412 }
3413 
3414 void Metaspace::post_initialize() {
3415   MetaspaceGC::post_initialize();
3416 }
3417 
3418 void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) {
3419   Metachunk* chunk = get_initialization_chunk(type, mdtype);
3420   if (chunk != NULL) {
3421     // Add to this manager's list of chunks in use and current_chunk().
3422     get_space_manager(mdtype)->add_chunk(chunk, true);
3423   }
3424 }
3425 
3426 Metachunk* Metaspace::get_initialization_chunk(MetaspaceType type, MetadataType mdtype) {
3427   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
3428 
3429   // Get a chunk from the chunk freelist
3430   Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3431 
3432   if (chunk == NULL) {
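    // No suitably sized chunk in the freelist; carve a new chunk out of the
    // virtual space list instead.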
3433     chunk = get_space_list(mdtype)->get_new_chunk(chunk_word_size,
3434                                                   get_space_manager(mdtype)->medium_chunk_bunch());
3435   }
3436 
3437   // For dumping shared archive, report error if allocation has failed.
3438   if (DumpSharedSpaces && chunk == NULL) {
3439     report_insufficient_metaspace(MetaspaceAux::committed_bytes() + chunk_word_size * BytesPerWord);
3440   }
3441 
3442   return chunk;
3443 }
3444 
3445 void Metaspace::verify_global_initialization() {
3446   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
3447   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
3448 
3449   if (using_class_space()) {
3450     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
3451     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
3452   }
3453 }
3454 
3455 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3456   verify_global_initialization();
3457 
3458   // Allocate SpaceManager for metadata objects.
3459   _vsm = new SpaceManager(NonClassType, lock);
3460 
3461   if (using_class_space()) {
3462     // Allocate SpaceManager for classes.
3463     _class_vsm = new SpaceManager(ClassType, lock);
3464   }
3465 
3466   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3467 
3468   // Allocate chunk for metadata objects
3469   initialize_first_chunk(type, NonClassType);
3470 
3471   // Allocate chunk for class metadata objects
3472   if (using_class_space()) {
3473     initialize_first_chunk(type, ClassType);
3474   }
3475 
3476   _alloc_record_head = NULL;
3477   _alloc_record_tail = NULL;
3478 }
3479 
3480 size_t Metaspace::align_word_size_up(size_t word_size) {
3481   size_t byte_size = word_size * wordSize;
3482   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3483 }
3484 
3485 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
  // DumpSharedSpaces doesn't use the class metadata area (yet)
3487   // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3488   if (is_class_space_allocation(mdtype)) {
3489     return  class_vsm()->allocate(word_size);
3490   } else {
3491     return  vsm()->allocate(word_size);
3492   }
3493 }
3494 
3495 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3496   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3497   assert(delta_bytes > 0, "Must be");
3498 
3499   size_t before = 0;
3500   size_t after = 0;
3501   MetaWord* res;
3502   bool incremented;
3503 
3504   // Each thread increments the HWM at most once. Even if the thread fails to increment
3505   // the HWM, an allocation is still attempted. This is because another thread must then
3506   // have incremented the HWM and therefore the allocation might still succeed.
3507   do {
3508     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
3509     res = allocate(word_size, mdtype);
3510   } while (!incremented && res == NULL);
3511 
3512   if (incremented) {
3513     tracer()->report_gc_threshold(before, after,
3514                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3515     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
3516   }
3517 
3518   return res;
3519 }
3520 
3521 // Space allocated in the Metaspace.  This may
3522 // be across several metadata virtual spaces.
3523 char* Metaspace::bottom() const {
3524   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3525   return (char*)vsm()->current_chunk()->bottom();
3526 }
3527 
3528 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3529   if (mdtype == ClassType) {
3530     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3531   } else {
3532     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3533   }
3534 }
3535 
3536 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3537   if (mdtype == ClassType) {
3538     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3539   } else {
3540     return vsm()->sum_free_in_chunks_in_use();
3541   }
3542 }
3543 
// Space capacity in the Metaspace.  It includes
// space in the list of chunks from which allocations
// have been made.  It does not include space in the global chunk freelist,
// nor does it separately count the space available in the block dictionary,
// since that space is already counted in some chunk.
3549 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3550   if (mdtype == ClassType) {
3551     return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3552   } else {
3553     return vsm()->sum_capacity_in_chunks_in_use();
3554   }
3555 }
3556 
3557 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3558   return used_words_slow(mdtype) * BytesPerWord;
3559 }
3560 
3561 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3562   return capacity_words_slow(mdtype) * BytesPerWord;
3563 }
3564 
3565 size_t Metaspace::allocated_blocks_bytes() const {
3566   return vsm()->allocated_blocks_bytes() +
3567       (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
3568 }
3569 
3570 size_t Metaspace::allocated_chunks_bytes() const {
3571   return vsm()->allocated_chunks_bytes() +
3572       (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
3573 }
3574 
3575 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3576   assert(!SafepointSynchronize::is_at_safepoint()
3577          || Thread::current()->is_VM_thread(), "should be the VM thread");
3578 
3579   if (DumpSharedSpaces && log_is_enabled(Info, cds)) {
3580     record_deallocation(ptr, vsm()->get_allocation_word_size(word_size));
3581   }
3582 
3583   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3584 
3585   if (is_class && using_class_space()) {
3586     class_vsm()->deallocate(ptr, word_size);
3587   } else {
3588     vsm()->deallocate(ptr, word_size);
3589   }
3590 }
3591 
3592 
3593 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3594                               bool read_only, MetaspaceObj::Type type, TRAPS) {
3595   if (HAS_PENDING_EXCEPTION) {
3596     assert(false, "Should not allocate with exception pending");
3597     return NULL;  // caller does a CHECK_NULL too
3598   }
3599 
3600   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3601         "ClassLoaderData::the_null_class_loader_data() should have been used.");
3602 
3603   // Allocate in metaspaces without taking out a lock, because it deadlocks
  // with the SymbolTable_lock.  Dumping is single-threaded for now.  We'll have
3605   // to revisit this for application class data sharing.
3606   if (DumpSharedSpaces) {
3607     assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3608     Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3609     MetaWord* result = space->allocate(word_size, NonClassType);
3610     if (result == NULL) {
3611       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3612     }
3613     if (log_is_enabled(Info, cds)) {
3614       space->record_allocation(result, type, space->vsm()->get_allocation_word_size(word_size));
3615     }
3616 
3617     // Zero initialize.
3618     Copy::fill_to_words((HeapWord*)result, word_size, 0);
3619 
3620     return result;
3621   }
3622 
3623   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3624 
3625   // Try to allocate metadata.
3626   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3627 
3628   if (result == NULL) {
3629     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
3630 
3631     // Allocation failed.
3632     if (is_init_completed()) {
3633       // Only start a GC if the bootstrapping has completed.
3634 
3635       // Try to clean out some memory and retry.
3636       result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3637           loader_data, word_size, mdtype);
3638     }
3639   }
3640 
3641   if (result == NULL) {
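    // Last resort: hand the space manager a fresh small chunk and try to satisfy
    // the request from it.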
3642     SpaceManager* sm;
3643     if (is_class_space_allocation(mdtype)) {
3644       sm = loader_data->metaspace_non_null()->class_vsm();
3645     } else {
3646       sm = loader_data->metaspace_non_null()->vsm();
3647     }
3648 
3649     result = sm->get_small_chunk_and_allocate(word_size);
3650 
3651     if (result == NULL) {
3652       report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3653     }
3654   }
3655 
3656   // Zero initialize.
3657   Copy::fill_to_words((HeapWord*)result, word_size, 0);
3658 
3659   return result;
3660 }
3661 
3662 size_t Metaspace::class_chunk_size(size_t word_size) {
3663   assert(using_class_space(), "Has to use class space");
3664   return class_vsm()->calc_chunk_size(word_size);
3665 }
3666 
3667 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3668   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3669 
3670   // If result is still null, we are out of memory.
3671   Log(gc, metaspace, freelist) log;
3672   if (log.is_info()) {
3673     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
3674              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
3675     ResourceMark rm;
3676     LogStream ls(log.info());
3677     if (loader_data->metaspace_or_null() != NULL) {
3678       loader_data->dump(&ls);
3679     }
3680     MetaspaceAux::dump(&ls);
3681   }
3682 
3683   bool out_of_compressed_class_space = false;
3684   if (is_class_space_allocation(mdtype)) {
3685     Metaspace* metaspace = loader_data->metaspace_non_null();
3686     out_of_compressed_class_space =
3687       MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3688       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3689       CompressedClassSpaceSize;
3690   }
3691 
3692   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3693   const char* space_string = out_of_compressed_class_space ?
3694     "Compressed class space" : "Metaspace";
3695 
3696   report_java_out_of_memory(space_string);
3697 
3698   if (JvmtiExport::should_post_resource_exhausted()) {
3699     JvmtiExport::post_resource_exhausted(
3700         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3701         space_string);
3702   }
3703 
3704   if (!is_init_completed()) {
3705     vm_exit_during_initialization("OutOfMemoryError", space_string);
3706   }
3707 
3708   if (out_of_compressed_class_space) {
3709     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3710   } else {
3711     THROW_OOP(Universe::out_of_memory_error_metaspace());
3712   }
3713 }
3714 
3715 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
3716   switch (mdtype) {
3717     case Metaspace::ClassType: return "Class";
3718     case Metaspace::NonClassType: return "Metadata";
3719     default:
3720       assert(false, "Got bad mdtype: %d", (int) mdtype);
3721       return NULL;
3722   }
3723 }
3724 
3725 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3726   assert(DumpSharedSpaces, "sanity");
3727 
3728   int byte_size = (int)word_size * wordSize;
3729   AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
3730 
3731   if (_alloc_record_head == NULL) {
3732     _alloc_record_head = _alloc_record_tail = rec;
3733   } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
3734     _alloc_record_tail->_next = rec;
3735     _alloc_record_tail = rec;
3736   } else {
3737     // slow linear search, but this doesn't happen that often, and only when dumping
3738     for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
3739       if (old->_ptr == ptr) {
3740         assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
3741         int remain_bytes = old->_byte_size - byte_size;
3742         assert(remain_bytes >= 0, "sanity");
3743         old->_type = type;
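        // Reuse the old record for this allocation. If the freed block was larger,
        // a trailing record keeps the remainder marked as deallocated.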
3744 
3745         if (remain_bytes == 0) {
3746           delete(rec);
3747         } else {
3748           address remain_ptr = address(ptr) + byte_size;
3749           rec->_ptr = remain_ptr;
3750           rec->_byte_size = remain_bytes;
3751           rec->_type = MetaspaceObj::DeallocatedType;
3752           rec->_next = old->_next;
3753           old->_byte_size = byte_size;
3754           old->_next = rec;
3755         }
3756         return;
3757       }
3758     }
3759     assert(0, "reallocating a freed pointer that was not recorded");
3760   }
3761 }
3762 
3763 void Metaspace::record_deallocation(void* ptr, size_t word_size) {
3764   assert(DumpSharedSpaces, "sanity");
3765 
3766   for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3767     if (rec->_ptr == ptr) {
3768       assert(rec->_byte_size == (int)word_size * wordSize, "sanity");
3769       rec->_type = MetaspaceObj::DeallocatedType;
3770       return;
3771     }
3772   }
3773 
3774   assert(0, "deallocating a pointer that was not recorded");
3775 }
3776 
3777 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3778   assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3779 
3780   address last_addr = (address)bottom();
3781 
3782   for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3783     address ptr = rec->_ptr;
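    // Any gap between the previous record and this one is reported as UnknownType.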
3784     if (last_addr < ptr) {
3785       closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3786     }
3787     closure->doit(ptr, rec->_type, rec->_byte_size);
3788     last_addr = ptr + rec->_byte_size;
3789   }
3790 
3791   address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3792   if (last_addr < top) {
3793     closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3794   }
3795 }
3796 
3797 void Metaspace::purge(MetadataType mdtype) {
3798   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3799 }
3800 
3801 void Metaspace::purge() {
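  // Release virtual space nodes that no longer contain any in-use chunks, for both
  // the non-class and (if used) the class metaspace.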
3802   MutexLockerEx cl(SpaceManager::expand_lock(),
3803                    Mutex::_no_safepoint_check_flag);
3804   purge(NonClassType);
3805   if (using_class_space()) {
3806     purge(ClassType);
3807   }
3808 }
3809 
3810 void Metaspace::print_on(outputStream* out) const {
3811   // Print both class virtual space counts and metaspace.
3812   if (Verbose) {
3813     vsm()->print_on(out);
3814     if (using_class_space()) {
3815       class_vsm()->print_on(out);
3816     }
3817   }
3818 }
3819 
3820 bool Metaspace::contains(const void* ptr) {
3821   if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
3822     return true;
3823   }
3824   return contains_non_shared(ptr);
3825 }
3826 
3827 bool Metaspace::contains_non_shared(const void* ptr) {
3828   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
3829      return true;
3830   }
3831 
3832   return get_space_list(NonClassType)->contains(ptr);
3833 }
3834 
3835 void Metaspace::verify() {
3836   vsm()->verify();
3837   if (using_class_space()) {
3838     class_vsm()->verify();
3839   }
3840 }
3841 
3842 void Metaspace::dump(outputStream* const out) const {
3843   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
3844   vsm()->dump(out);
3845   if (using_class_space()) {
3846     out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
3847     class_vsm()->dump(out);
3848   }
3849 }
3850 
3851 /////////////// Unit tests ///////////////
3852 
3853 #ifndef PRODUCT
3854 
3855 class TestMetaspaceAuxTest : AllStatic {
3856  public:
3857   static void test_reserved() {
3858     size_t reserved = MetaspaceAux::reserved_bytes();
3859 
3860     assert(reserved > 0, "assert");
3861 
3862     size_t committed  = MetaspaceAux::committed_bytes();
3863     assert(committed <= reserved, "assert");
3864 
3865     size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3866     assert(reserved_metadata > 0, "assert");
3867     assert(reserved_metadata <= reserved, "assert");
3868 
3869     if (UseCompressedClassPointers) {
3870       size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3871       assert(reserved_class > 0, "assert");
3872       assert(reserved_class < reserved, "assert");
3873     }
3874   }
3875 
3876   static void test_committed() {
3877     size_t committed = MetaspaceAux::committed_bytes();
3878 
3879     assert(committed > 0, "assert");
3880 
3881     size_t reserved  = MetaspaceAux::reserved_bytes();
3882     assert(committed <= reserved, "assert");
3883 
3884     size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3885     assert(committed_metadata > 0, "assert");
3886     assert(committed_metadata <= committed, "assert");
3887 
3888     if (UseCompressedClassPointers) {
3889       size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3890       assert(committed_class > 0, "assert");
3891       assert(committed_class < committed, "assert");
3892     }
3893   }
3894 
3895   static void test_virtual_space_list_large_chunk() {
3896     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3897     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    // Use a size larger than VirtualSpaceSize (256k) and add one page so that it is
    // _not_ vm_allocation_granularity aligned on Windows.
3900     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3901     large_size += (os::vm_page_size()/BytesPerWord);
3902     vs_list->get_new_chunk(large_size, 0);
3903   }
3904 
3905   static void test() {
3906     test_reserved();
3907     test_committed();
3908     test_virtual_space_list_large_chunk();
3909   }
3910 };
3911 
3912 void TestMetaspaceAux_test() {
3913   TestMetaspaceAuxTest::test();
3914 }
3915 
3916 class TestVirtualSpaceNodeTest {
3917   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3918                                           size_t& num_small_chunks,
3919                                           size_t& num_specialized_chunks) {
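    // Greedily decompose words_left into medium, then small, then specialized chunks.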
3920     num_medium_chunks = words_left / MediumChunk;
3921     words_left = words_left % MediumChunk;
3922 
3923     num_small_chunks = words_left / SmallChunk;
3924     words_left = words_left % SmallChunk;
3925     // how many specialized chunks can we get?
3926     num_specialized_chunks = words_left / SpecializedChunk;
3927     assert(words_left % SpecializedChunk == 0, "should be nothing left");
3928   }
3929 
3930  public:
3931   static void test() {
3932     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3933     const size_t vsn_test_size_words = MediumChunk  * 4;
3934     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3935 
    // The chunk sizes must be multiples of each other, or this will fail
3937     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3938     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3939 
3940     { // No committed memory in VSN
3941       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3942       VirtualSpaceNode vsn(vsn_test_size_bytes);
3943       vsn.initialize();
3944       vsn.retire(&cm);
3945       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3946     }
3947 
3948     { // All of VSN is committed, half is used by chunks
3949       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3950       VirtualSpaceNode vsn(vsn_test_size_bytes);
3951       vsn.initialize();
3952       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3953       vsn.get_chunk_vs(MediumChunk);
3954       vsn.get_chunk_vs(MediumChunk);
3955       vsn.retire(&cm);
3956       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3957       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3958     }
3959 
3960     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3961     // This doesn't work for systems with vm_page_size >= 16K.
3962     if (page_chunks < MediumChunk) {
      // 4 pages of the VSN are committed, some are used by chunks
3964       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3965       VirtualSpaceNode vsn(vsn_test_size_bytes);
3966 
3967       vsn.initialize();
3968       vsn.expand_by(page_chunks, page_chunks);
3969       vsn.get_chunk_vs(SmallChunk);
3970       vsn.get_chunk_vs(SpecializedChunk);
3971       vsn.retire(&cm);
3972 
3973       // committed - used = words left to retire
3974       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3975 
3976       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3977       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3978 
3979       assert(num_medium_chunks == 0, "should not get any medium chunks");
3980       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3981       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3982     }
3983 
3984     { // Half of VSN is committed, a humongous chunk is used
3985       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3986       VirtualSpaceNode vsn(vsn_test_size_bytes);
3987       vsn.initialize();
3988       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3989       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3990       vsn.retire(&cm);
3991 
3992       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3993       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3994       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3995 
3996       assert(num_medium_chunks == 0, "should not get any medium chunks");
3997       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3998       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3999     }
4000 
4001   }
4002 
4003 #define assert_is_available_positive(word_size) \
4004   assert(vsn.is_available(word_size), \
4005          #word_size ": " PTR_FORMAT " bytes were not available in " \
4006          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
4007          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
4008 
4009 #define assert_is_available_negative(word_size) \
4010   assert(!vsn.is_available(word_size), \
4011          #word_size ": " PTR_FORMAT " bytes should not be available in " \
4012          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
4013          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
4014 
4015   static void test_is_available_positive() {
4016     // Reserve some memory.
4017     VirtualSpaceNode vsn(os::vm_allocation_granularity());
4018     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4019 
4020     // Commit some memory.
4021     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4022     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4023     assert(expanded, "Failed to commit");
4024 
4025     // Check that is_available accepts the committed size.
4026     assert_is_available_positive(commit_word_size);
4027 
4028     // Check that is_available accepts half the committed size.
4029     size_t expand_word_size = commit_word_size / 2;
4030     assert_is_available_positive(expand_word_size);
4031   }
4032 
4033   static void test_is_available_negative() {
4034     // Reserve some memory.
4035     VirtualSpaceNode vsn(os::vm_allocation_granularity());
4036     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4037 
4038     // Commit some memory.
4039     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4040     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4041     assert(expanded, "Failed to commit");
4042 
4043     // Check that is_available doesn't accept a too large size.
4044     size_t two_times_commit_word_size = commit_word_size * 2;
4045     assert_is_available_negative(two_times_commit_word_size);
4046   }
4047 
4048   static void test_is_available_overflow() {
4049     // Reserve some memory.
4050     VirtualSpaceNode vsn(os::vm_allocation_granularity());
4051     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4052 
4053     // Commit some memory.
4054     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4055     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4056     assert(expanded, "Failed to commit");
4057 
4058     // Calculate a size that will overflow the virtual space size.
4059     void* virtual_space_max = (void*)(uintptr_t)-1;
4060     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
4061     size_t overflow_size = bottom_to_max + BytesPerWord;
4062     size_t overflow_word_size = overflow_size / BytesPerWord;
4063 
4064     // Check that is_available can handle the overflow.
4065     assert_is_available_negative(overflow_word_size);
4066   }
4067 
4068   static void test_is_available() {
4069     TestVirtualSpaceNodeTest::test_is_available_positive();
4070     TestVirtualSpaceNodeTest::test_is_available_negative();
4071     TestVirtualSpaceNodeTest::test_is_available_overflow();
4072   }
4073 };
4074 
4075 void TestVirtualSpaceNode_test() {
4076   TestVirtualSpaceNodeTest::test();
4077   TestVirtualSpaceNodeTest::test_is_available();
4078 }
4079 
4080 // The following test is placed here instead of a gtest / unittest file
4081 // because the ChunkManager class is only available in this file.
4082 void ChunkManager_test_list_index() {
4083   ChunkManager manager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
4084 
  // Test a previous bug where a query for a humongous class metachunk
  // incorrectly matched the non-class medium metachunk size.
4087   {
4088     assert(MediumChunk > ClassMediumChunk, "Precondition for test");
4089 
4090     ChunkIndex index = manager.list_index(MediumChunk);
4091 
4092     assert(index == HumongousIndex,
4093            "Requested size is larger than ClassMediumChunk,"
4094            " so should return HumongousIndex. Got index: %d", (int)index);
4095   }
4096 
4097   // Check the specified sizes as well.
4098   {
4099     ChunkIndex index = manager.list_index(ClassSpecializedChunk);
4100     assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
4101   }
4102   {
4103     ChunkIndex index = manager.list_index(ClassSmallChunk);
4104     assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
4105   }
4106   {
4107     ChunkIndex index = manager.list_index(ClassMediumChunk);
4108     assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
4109   }
4110   {
4111     ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
4112     assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index);
4113   }
4114 }
4115 
4116 // ChunkManagerReturnTest stresses taking/returning chunks from the ChunkManager. It takes and
4117 // returns chunks from/to the ChunkManager while keeping track of the expected ChunkManager
4118 // content.
4119 class ChunkManagerReturnTestImpl {
4120 
4121   VirtualSpaceNode _vsn;
4122   ChunkManager _cm;
4123 
4124   // The expected content of the chunk manager.
4125   unsigned _chunks_in_chunkmanager;
4126   size_t _words_in_chunkmanager;
4127 
4128   // A fixed size pool of chunks. Chunks may be in the chunk manager (free) or not (in use).
4129   static const int num_chunks = 256;
4130   Metachunk* _pool[num_chunks];
4131 
4132   // Helper, return a random position into the chunk pool.
4133   static int get_random_position() {
4134     return os::random() % num_chunks;
4135   }
4136 
4137   // Asserts that ChunkManager counters match expectations.
4138   void assert_counters() {
4139     assert(_vsn.container_count() == num_chunks - _chunks_in_chunkmanager, "vsn counter mismatch.");
4140     assert(_cm.free_chunks_count() == _chunks_in_chunkmanager, "cm counter mismatch.");
4141     assert(_cm.free_chunks_total_words() == _words_in_chunkmanager, "cm counter mismatch.");
4142   }
4143 
4144   // Get a random chunk size. Equal chance to get spec/med/small chunk size or
4145   // a humongous chunk size. The latter is random in the range [med+spec..5*med].
4146   size_t get_random_chunk_size() {
4147     const size_t sizes [] = { SpecializedChunk, SmallChunk, MediumChunk };
4148     const int rand = os::random() % 4;
4149     if (rand < 3) {
4150       return sizes[rand];
4151     } else {
4152       // Note: this affects the max. size of space (see _vsn initialization in ctor).
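      // The largest value produced here is MediumChunk * 5 words (after align-up),
      // which is why the virtual space in the constructor is sized to
      // num_chunks * 5 * MediumChunk words.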
4153       return align_size_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk);
4154     }
4155   }
4156 
4157   // Starting at pool index <start>+1, find the next chunk tagged as either free or in use, depending
4158   // on <is_free>. Search wraps. Returns its position, or -1 if no matching chunk was found.
4159   int next_matching_chunk(int start, bool is_free) const {
4160     assert(start >= 0 && start < num_chunks, "invalid parameter");
4161     int pos = start;
4162     do {
4163       if (++pos == num_chunks) {
4164         pos = 0;
4165       }
4166       if (_pool[pos]->is_tagged_free() == is_free) {
4167         return pos;
4168       }
4169     } while (pos != start);
4170     return -1;
4171   }
4172 
4173   // A structure to keep information about a chunk list, including which
4174   // chunks are part of it. This is needed to remember the content of a chunk list
4175   // we are about to return to the ChunkManager, because the original list will be destroyed.
4176   struct AChunkList {
4177     Metachunk* head;
4178     Metachunk* all[num_chunks];
4179     size_t size;
4180     int num;
4181     ChunkIndex index;
4182   };
4183 
4184   // Assemble, from the in-use chunks (not in the chunk manager) in the pool,
4185   // a random chunk list of max. length <list_size> of chunks with the same
4186   // ChunkIndex (chunk size).
4187   // Returns false if list cannot be assembled. List is returned in the <out>
4188   // structure. Returned list may be smaller than <list_size>.
4189   bool assemble_random_chunklist(AChunkList* out, int list_size) {
4190     // Choose a random in-use chunk from the pool...
4191     const int headpos = next_matching_chunk(get_random_position(), false);
4192     if (headpos == -1) {
4193       return false;
4194     }
4195     Metachunk* const head = _pool[headpos];
4196     out->all[0] = head;
4197     assert(head->is_tagged_free() == false, "Chunk state mismatch");
4198     // ..then go from there and chain up to list_size - 1 other
4199     // in-use chunks of the same index.
4200     const ChunkIndex index = _cm.list_index(head->word_size());
4201     int num_added = 1;
4202     size_t size_added = head->word_size();
4203     int pos = headpos;
4204     Metachunk* tail = head;
4205     do {
4206       pos = next_matching_chunk(pos, false);
4207       if (pos != headpos) {
4208         Metachunk* c = _pool[pos];
4209         assert(c->is_tagged_free() == false, "Chunk state mismatch");
4210         if (index == _cm.list_index(c->word_size())) {
4211           tail->set_next(c);
4212           c->set_prev(tail);
4213           tail = c;
4214           out->all[num_added] = c;
4215           num_added ++;
4216           size_added += c->word_size();
4217         }
4218       }
4219     } while (num_added < list_size && pos != headpos);
4220     out->head = head;
4221     out->index = index;
4222     out->size = size_added;
4223     out->num = num_added;
4224     return true;
4225   }
4226 
4227   // Take a single random chunk from the ChunkManager.
4228   bool take_single_random_chunk_from_chunkmanager() {
4229     assert_counters();
4230     _cm.locked_verify();
4231     int pos = next_matching_chunk(get_random_position(), true);
4232     if (pos == -1) {
4233       return false;
4234     }
4235     Metachunk* c = _pool[pos];
4236     assert(c->is_tagged_free(), "Chunk state mismatch");
4237     // Note: instead of using ChunkManager::remove_chunk on this one chunk, we call
4238     // ChunkManager::free_chunks_get() with this chunk's word size. We really want
4239     // to exercise ChunkManager::free_chunks_get() because that one gets called for
4240     // normal chunk allocation.
4241     Metachunk* c2 = _cm.free_chunks_get(c->word_size());
4242     assert(c2 != NULL, "Unexpected.");
4243     assert(!c2->is_tagged_free(), "Chunk state mismatch");
4244     assert(c2->next() == NULL && c2->prev() == NULL, "Chunk should be outside of a list.");
4245     _chunks_in_chunkmanager --;
4246     _words_in_chunkmanager -= c->word_size();
4247     assert_counters();
4248     _cm.locked_verify();
4249     return true;
4250   }
4251 
4252   // Returns a single random chunk to the chunk manager. Returns false if that
4253   // was not possible (all chunks are already in the chunk manager).
4254   bool return_single_random_chunk_to_chunkmanager() {
4255     assert_counters();
4256     _cm.locked_verify();
4257     int pos = next_matching_chunk(get_random_position(), false);
4258     if (pos == -1) {
4259       return false;
4260     }
4261     Metachunk* c = _pool[pos];
4262     assert(c->is_tagged_free() == false, "wrong chunk information");
4263     _cm.return_single_chunk(_cm.list_index(c->word_size()), c);
4264     _chunks_in_chunkmanager ++;
4265     _words_in_chunkmanager += c->word_size();
4266     assert(c->is_tagged_free() == true, "wrong chunk information");
4267     assert_counters();
4268     _cm.locked_verify();
4269     return true;
4270   }
4271 
4272   // Return a random chunk list to the chunk manager. Returns the length of the
4273   // returned list.
4274   int return_random_chunk_list_to_chunkmanager(int list_size) {
4275     assert_counters();
4276     _cm.locked_verify();
4277     AChunkList aChunkList;
4278     if (!assemble_random_chunklist(&aChunkList, list_size)) {
4279       return 0;
4280     }
4281     // Before the chunks are returned, they should still be tagged in use.
4282     for (int i = 0; i < aChunkList.num; i ++) {
4283       assert(!aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4284     }
4285     _cm.return_chunk_list(aChunkList.index, aChunkList.head);
4286     _chunks_in_chunkmanager += aChunkList.num;
4287     _words_in_chunkmanager += aChunkList.size;
4288     // After all chunks are returned, check that they are now tagged free.
4289     for (int i = 0; i < aChunkList.num; i ++) {
4290       assert(aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4291     }
4292     assert_counters();
4293     _cm.locked_verify();
4294     return aChunkList.num;
4295   }
4296 
4297 public:
4298 
4299   ChunkManagerReturnTestImpl()
4300     : _vsn(align_size_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
4301     , _cm(SpecializedChunk, SmallChunk, MediumChunk)
4302     , _chunks_in_chunkmanager(0)
4303     , _words_in_chunkmanager(0)
4304   {
4305     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4306     // Allocate virtual space and allocate random chunks. Keep these chunks in the _pool. These chunks
4307     // count as "in use" because they have not yet been added to any chunk manager.
4308     _vsn.initialize();
4309     _vsn.expand_by(_vsn.reserved_words(), _vsn.reserved_words());
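    // The whole reserved range is committed up front, so the chunk allocations
    // below cannot fail for lack of committed space.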
4310     for (int i = 0; i < num_chunks; i ++) {
4311       const size_t size = get_random_chunk_size();
4312       _pool[i] = _vsn.get_chunk_vs(size);
4313       assert(_pool[i] != NULL, "allocation failed");
4314     }
4315     assert_counters();
4316     _cm.locked_verify();
4317   }
4318 
4319   // Test entry point.
4320   // Return some chunks to the chunk manager (return phase). Take some chunks out (take phase). Repeat.
4321   // Chunks are chosen randomly. The number of chunks to return or take is chosen randomly, but affected
4322   // by the <phase_length_factor> argument: a factor of 0.0 will cause the test to quickly alternate between
4323   // returning and taking, whereas a factor of 1.0 will take/return all chunks from/to the
4324   // chunk manager, thereby emptying or filling it completely.
4325   void do_test(float phase_length_factor) {
4326     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4327     assert_counters();
4328     // Execute n operations, each operation being the move of a single chunk to/from the chunk manager.
4329     const int num_max_ops = num_chunks * 100;
4330     int num_ops = num_max_ops;
4331     const int average_phase_length = (int)(phase_length_factor * num_chunks);
4332     int num_ops_until_switch = MAX2(1, (int)(average_phase_length + os::random() % 8 - 4));
4333     bool return_phase = true;
4334     while (num_ops > 0) {
4335       int chunks_moved = 0;
4336       if (return_phase) {
4337         // Randomly choose between returning a single chunk and returning a chunk list of random length.
4338         if (os::random() % 2 == 0) {
4339           if (return_single_random_chunk_to_chunkmanager()) {
4340             chunks_moved = 1;
4341           }
4342         } else {
4343           const int list_length = MAX2(1, (int)(os::random() % num_ops_until_switch));
4344           chunks_moved = return_random_chunk_list_to_chunkmanager(list_length);
4345         }
4346       } else {
4347         // Breathe out.
4348         if (take_single_random_chunk_from_chunkmanager()) {
4349           chunks_moved = 1;
4350         }
4351       }
4352       num_ops -= chunks_moved;
4353       num_ops_until_switch -= chunks_moved;
4354       if (chunks_moved == 0 || num_ops_until_switch <= 0) {
4355         return_phase = !return_phase;
4356         num_ops_until_switch = MAX2(1, (int)(average_phase_length + os::random() % 8 - 4));
4357       }
4358     }
4359   }
4360 };
4361 
4362 void* setup_chunkmanager_returntests() {
4363   ChunkManagerReturnTestImpl* p = new ChunkManagerReturnTestImpl();
4364   return p;
4365 }
4366 
4367 void teardown_chunkmanager_returntests(void* p) {
4368   delete (ChunkManagerReturnTestImpl*) p;
4369 }
4370 
4371 void run_chunkmanager_returntests(void* p, float phase_length) {
4372   ChunkManagerReturnTestImpl* test = (ChunkManagerReturnTestImpl*) p;
4373   test->do_test(phase_length);
4374 }
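
// A minimal usage sketch of the three wrappers above (the real driver lives in a
// gtest outside this file; the phase length factors below are illustrative only):
//
//   void* t = setup_chunkmanager_returntests();
//   run_chunkmanager_returntests(t, 0.0f);   // alternate quickly between phases
//   run_chunkmanager_returntests(t, 1.0f);   // drain and refill the chunk manager completely
//   teardown_chunkmanager_returntests(t);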
4375 
4376 // The following test is placed here instead of a gtest / unittest file
4377 // because the SpaceManager class is only available in this file.
4378 class SpaceManagerTest : AllStatic {
4379   friend void SpaceManager_test_adjust_initial_chunk_size();
4380 
4381   static void test_adjust_initial_chunk_size(bool is_class) {
4382     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
4383     const size_t normal   = SpaceManager::small_chunk_size(is_class);
4384     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
4385 
4386 #define test_adjust_initial_chunk_size(value, expected, is_class_value)          \
4387     do {                                                                         \
4388       size_t v = value;                                                          \
4389       size_t e = expected;                                                       \
4390       size_t r = SpaceManager::adjust_initial_chunk_size(v, (is_class_value));   \
4391       assert(r == e, "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, r);        \
4392     } while (0)
4393 
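    // Expected mapping, as encoded by the checks below: requests up to 'smallest' round
    // up to the specialized chunk size, requests up to 'normal' round up to the small
    // chunk size, requests up to 'medium' round up to the medium chunk size, and
    // anything larger than 'medium' is passed through unchanged (humongous).
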
4394     // Smallest (specialized)
4395     test_adjust_initial_chunk_size(1,            smallest, is_class);
4396     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
4397     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
4398 
4399     // Small
4400     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
4401     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
4402     test_adjust_initial_chunk_size(normal,       normal, is_class);
4403 
4404     // Medium
4405     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
4406     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
4407     test_adjust_initial_chunk_size(medium,     medium, is_class);
4408 
4409     // Humongous
4410     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
4411 
4412 #undef test_adjust_initial_chunk_size
4413   }
4414 
4415   static void test_adjust_initial_chunk_size() {
4416     test_adjust_initial_chunk_size(false);
4417     test_adjust_initial_chunk_size(true);
4418   }
4419 };
4420 
4421 void SpaceManager_test_adjust_initial_chunk_size() {
4422   SpaceManagerTest::test_adjust_initial_chunk_size();
4423 }
4424 
4425 #endif