1 /*
   2  * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "gc/shared/gcLocker.hpp"
  29 #include "logging/log.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/binaryTreeDictionary.hpp"
  32 #include "memory/filemap.hpp"
  33 #include "memory/freeList.hpp"
  34 #include "memory/metachunk.hpp"
  35 #include "memory/metaspace.hpp"
  36 #include "memory/metaspaceGCThresholdUpdater.hpp"
  37 #include "memory/metaspaceShared.hpp"
  38 #include "memory/metaspaceTracer.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "memory/universe.hpp"
  41 #include "runtime/atomic.hpp"
  42 #include "runtime/globals.hpp"
  43 #include "runtime/init.hpp"
  44 #include "runtime/java.hpp"
  45 #include "runtime/mutex.hpp"
  46 #include "runtime/orderAccess.inline.hpp"
  47 #include "services/memTracker.hpp"
  48 #include "services/memoryService.hpp"
  49 #include "utilities/align.hpp"
  50 #include "utilities/copy.hpp"
  51 #include "utilities/debug.hpp"
  52 #include "utilities/macros.hpp"
  53 
  54 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  55 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
  56 
  57 // Set this constant to enable slow integrity checking of the free chunk lists
  58 const bool metaspace_slow_verify = false;
  59 
  60 size_t const allocation_from_dictionary_limit = 4 * K;
  61 
  62 MetaWord* last_allocated = 0;
  63 
  64 size_t Metaspace::_compressed_class_space_size;
  65 const MetaspaceTracer* Metaspace::_tracer = NULL;
  66 
  67 // Used in declarations in SpaceManager and ChunkManager
  68 enum ChunkIndex {
  69   ZeroIndex = 0,
  70   SpecializedIndex = ZeroIndex,
  71   SmallIndex = SpecializedIndex + 1,
  72   MediumIndex = SmallIndex + 1,
  73   HumongousIndex = MediumIndex + 1,
  74   NumberOfFreeLists = 3,
  75   NumberOfInUseLists = 4
  76 };
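     // Only the three fixed-size indices (SpecializedIndex, SmallIndex, MediumIndex)
     // have free lists; free humongous chunks are kept in a dictionary instead, so
     // HumongousIndex appears only among the in-use lists.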
  77 
  78 // Helper, returns a descriptive name for the given index.
  79 static const char* chunk_size_name(ChunkIndex index) {
  80   switch (index) {
  81     case SpecializedIndex:
  82       return "specialized";
  83     case SmallIndex:
  84       return "small";
  85     case MediumIndex:
  86       return "medium";
  87     case HumongousIndex:
  88       return "humongous";
  89     default:
  90       return "Invalid index";
  91   }
  92 }
  93 
  94 enum ChunkSizes {    // in words.
  95   ClassSpecializedChunk = 128,
  96   SpecializedChunk = 128,
  97   ClassSmallChunk = 256,
  98   SmallChunk = 512,
  99   ClassMediumChunk = 4 * K,
 100   MediumChunk = 8 * K
 101 };
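     // For example, with an 8-byte MetaWord (64-bit VM): SpecializedChunk = 128 words = 1 KB,
     // SmallChunk = 512 words = 4 KB and MediumChunk = 8 * K words = 64 KB; for the class
     // space, ClassSmallChunk = 256 words = 2 KB and ClassMediumChunk = 4 * K words = 32 KB.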
 102 
 103 static ChunkIndex next_chunk_index(ChunkIndex i) {
 104   assert(i < NumberOfInUseLists, "Out of bound");
 105   return (ChunkIndex) (i+1);
 106 }
 107 
 108 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 109 uint MetaspaceGC::_shrink_factor = 0;
 110 bool MetaspaceGC::_should_concurrent_collect = false;
 111 
 112 typedef class FreeList<Metachunk> ChunkList;
 113 
 114 // Manages the global free lists of chunks.
 115 class ChunkManager : public CHeapObj<mtInternal> {
 116   friend class TestVirtualSpaceNodeTest;
 117 
 118   // Free list of chunks of different sizes.
 119   //   SpecializedChunk
 120   //   SmallChunk
 121   //   MediumChunk
 122   ChunkList _free_chunks[NumberOfFreeLists];
 123 
 124   // Return non-humongous chunk list by its index.
 125   ChunkList* free_chunks(ChunkIndex index);
 126 
 127   // Returns non-humongous chunk list for the given chunk word size.
 128   ChunkList* find_free_chunks_list(size_t word_size);
 129 
 130   //   HumongousChunk
 131   ChunkTreeDictionary _humongous_dictionary;
 132 
 133   // Returns the humongous chunk dictionary.
 134   ChunkTreeDictionary* humongous_dictionary() {
 135     return &_humongous_dictionary;
 136   }
 137 
 138   // Size, in metaspace words, of all chunks managed by this ChunkManager
 139   size_t _free_chunks_total;
 140   // Number of chunks in this ChunkManager
 141   size_t _free_chunks_count;
 142 
 143   // Update counters after a chunk was added or removed.
 144   void account_for_added_chunk(const Metachunk* c);
 145   void account_for_removed_chunk(const Metachunk* c);
 146 
 147   // Debug support
 148 
 149   size_t sum_free_chunks();
 150   size_t sum_free_chunks_count();
 151 
 152   void locked_verify_free_chunks_total();
 153   void slow_locked_verify_free_chunks_total() {
 154     if (metaspace_slow_verify) {
 155       locked_verify_free_chunks_total();
 156     }
 157   }
 158   void locked_verify_free_chunks_count();
 159   void slow_locked_verify_free_chunks_count() {
 160     if (metaspace_slow_verify) {
 161       locked_verify_free_chunks_count();
 162     }
 163   }
 164   void verify_free_chunks_count();
 165 
 166  public:
 167 
 168   ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
 169       : _free_chunks_total(0), _free_chunks_count(0) {
 170     _free_chunks[SpecializedIndex].set_size(specialized_size);
 171     _free_chunks[SmallIndex].set_size(small_size);
 172     _free_chunks[MediumIndex].set_size(medium_size);
 173   }
 174 
 175   // Allocate (remove) a chunk from, or return (add) a chunk to, the global freelist.
 176   Metachunk* chunk_freelist_allocate(size_t word_size);
 177 
 178   // Map a size to a list index assuming that there are lists
 179   // for special, small, medium, and humongous chunks.
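       // For example (illustrative): for a ChunkManager set up with the non-class sizes
       // (SpecializedChunk, SmallChunk, MediumChunk), a 512-word chunk maps to SmallIndex,
       // an 8 * K-word chunk to MediumIndex, and any larger chunk to HumongousIndex.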
 180   ChunkIndex list_index(size_t size);
 181 
 182   // Map a given index to the chunk size.
 183   size_t size_by_index(ChunkIndex index);
 184 
 185   // Take a chunk from the ChunkManager. The chunk is expected to be in
 186   // the chunk manager (the freelist if non-humongous, the dictionary if
 187   // humongous).
 188   void remove_chunk(Metachunk* chunk);
 189 
 190   // Return a single chunk of type index to the ChunkManager.
 191   void return_single_chunk(ChunkIndex index, Metachunk* chunk);
 192 
 193   // Add the simple linked list of chunks to the freelist of chunks
 194   // of type index.
 195   void return_chunk_list(ChunkIndex index, Metachunk* chunk);
 196 
 197   // Total of the space in the free chunks list
 198   size_t free_chunks_total_words();
 199   size_t free_chunks_total_bytes();
 200 
 201   // Number of chunks in the free chunks list
 202   size_t free_chunks_count();
 203 
 204   // Remove from a list by size.  Selects list based on size of chunk.
 205   Metachunk* free_chunks_get(size_t chunk_word_size);
 206 
 207 #define index_bounds_check(index)                                         \
 208   assert(index == SpecializedIndex ||                                     \
 209          index == SmallIndex ||                                           \
 210          index == MediumIndex ||                                          \
 211          index == HumongousIndex, "Bad index: %d", (int) index)
 212 
 213   size_t num_free_chunks(ChunkIndex index) const {
 214     index_bounds_check(index);
 215 
 216     if (index == HumongousIndex) {
 217       return _humongous_dictionary.total_free_blocks();
 218     }
 219 
 220     ssize_t count = _free_chunks[index].count();
 221     return count == -1 ? 0 : (size_t) count;
 222   }
 223 
 224   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 225     index_bounds_check(index);
 226 
 227     size_t word_size = 0;
 228     if (index == HumongousIndex) {
 229       word_size = _humongous_dictionary.total_size();
 230     } else {
 231       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 232       word_size = size_per_chunk_in_words * num_free_chunks(index);
 233     }
 234 
 235     return word_size * BytesPerWord;
 236   }
 237 
 238   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 239     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 240                                          num_free_chunks(SmallIndex),
 241                                          num_free_chunks(MediumIndex),
 242                                          num_free_chunks(HumongousIndex),
 243                                          size_free_chunks_in_bytes(SpecializedIndex),
 244                                          size_free_chunks_in_bytes(SmallIndex),
 245                                          size_free_chunks_in_bytes(MediumIndex),
 246                                          size_free_chunks_in_bytes(HumongousIndex));
 247   }
 248 
 249   // Debug support
 250   void verify();
 251   void slow_verify() {
 252     if (metaspace_slow_verify) {
 253       verify();
 254     }
 255   }
 256   void locked_verify();
 257   void slow_locked_verify() {
 258     if (metaspace_slow_verify) {
 259       locked_verify();
 260     }
 261   }
 262   void verify_free_chunks_total();
 263 
 264   void locked_print_free_chunks(outputStream* st);
 265   void locked_print_sum_free_chunks(outputStream* st);
 266 
 267   void print_on(outputStream* st) const;
 268 };
 269 
 270 class SmallBlocks : public CHeapObj<mtClass> {
 271   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 272   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 273 
 274  private:
 275   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 276 
 277   FreeList<Metablock>& list_at(size_t word_size) {
 278     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 279     return _small_lists[word_size - _small_block_min_size];
 280   }
 281 
 282  public:
 283   SmallBlocks() {
 284     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 285       uint k = i - _small_block_min_size;
 286       _small_lists[k].set_size(i);
 287     }
 288   }
 289 
 290   size_t total_size() const {
 291     size_t result = 0;
 292     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 293       uint k = i - _small_block_min_size;
 294       result = result + _small_lists[k].count() * _small_lists[k].size();
 295     }
 296     return result;
 297   }
 298 
 299   static uint small_block_max_size() { return _small_block_max_size; }
 300   static uint small_block_min_size() { return _small_block_min_size; }
 301 
 302   MetaWord* get_block(size_t word_size) {
 303     if (list_at(word_size).count() > 0) {
 304       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 305       return new_block;
 306     } else {
 307       return NULL;
 308     }
 309   }
 310   void return_block(Metablock* free_chunk, size_t word_size) {
 311     list_at(word_size).return_chunk_at_head(free_chunk, false);
 312     assert(list_at(word_size).count() > 0, "Should have a chunk");
 313   }
 314 
 315   void print_on(outputStream* st) const {
 316     st->print_cr("SmallBlocks:");
 317     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 318       uint k = i - _small_block_min_size;
 319       st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
 320     }
 321   }
 322 };
 323 
 324 // Used to manage the free list of Metablocks (a block corresponds
 325 // to the allocation of a quantum of metadata).
 326 class BlockFreelist : public CHeapObj<mtClass> {
 327   BlockTreeDictionary* const _dictionary;
 328   SmallBlocks* _small_blocks;
 329 
 330   // Only allocate and split from freelist if the size of the allocation
 331   // is at least 1/4th the size of the available block.
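       // For example: with WasteMultiplier == 4, a 10-word request is not satisfied from a
       // 50-word free block (50 > 4 * 10); a 40-word block would be used, with the unused
       // tail returned to the free list.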
 332   const static int WasteMultiplier = 4;
 333 
 334   // Accessors
 335   BlockTreeDictionary* dictionary() const { return _dictionary; }
 336   SmallBlocks* small_blocks() {
 337     if (_small_blocks == NULL) {
 338       _small_blocks = new SmallBlocks();
 339     }
 340     return _small_blocks;
 341   }
 342 
 343  public:
 344   BlockFreelist();
 345   ~BlockFreelist();
 346 
 347   // Get a block from, and return a block to, the free list.
 348   MetaWord* get_block(size_t word_size);
 349   void return_block(MetaWord* p, size_t word_size);
 350 
 351   size_t total_size() const  {
 352     size_t result = dictionary()->total_size();
 353     if (_small_blocks != NULL) {
 354       result = result + _small_blocks->total_size();
 355     }
 356     return result;
 357   }
 358 
 359   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 360   void print_on(outputStream* st) const;
 361 };
 362 
 363 // A VirtualSpaceList node.
 364 class VirtualSpaceNode : public CHeapObj<mtClass> {
 365   friend class VirtualSpaceList;
 366 
 367   // Link to next VirtualSpaceNode
 368   VirtualSpaceNode* _next;
 369 
 370   // Total reserved region of the VirtualSpace
 371   MemRegion _reserved;
 372   ReservedSpace _rs;
 373   VirtualSpace _virtual_space;
 374   MetaWord* _top;
 375   // count of chunks contained in this VirtualSpace
 376   uintx _container_count;
 377 
 378   // Convenience functions to access the _virtual_space
 379   char* low()  const { return virtual_space()->low(); }
 380   char* high() const { return virtual_space()->high(); }
 381 
 382   // The first Metachunk will be allocated at the bottom of the
 383   // VirtualSpace
 384   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 385 
 386   // Committed but unused space in the virtual space
 387   size_t free_words_in_vs() const;
 388  public:
 389 
 390   VirtualSpaceNode(size_t byte_size);
 391   VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
 392   ~VirtualSpaceNode();
 393 
 394   // Convenience functions for logical bottom and end
 395   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 396   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 397 
 398   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 399 
 400   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 401   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 402 
 403   bool is_pre_committed() const { return _virtual_space.special(); }
 404 
 405   // address of next available space in _virtual_space;
 406   // Accessors
 407   VirtualSpaceNode* next() { return _next; }
 408   void set_next(VirtualSpaceNode* v) { _next = v; }
 409 
 410   void set_reserved(MemRegion const v) { _reserved = v; }
 411   void set_top(MetaWord* v) { _top = v; }
 412 
 413   // Accessors
 414   MemRegion* reserved() { return &_reserved; }
 415   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 416 
 417   // Returns true if "word_size" is available in the VirtualSpace
 418   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 419 
 420   MetaWord* top() const { return _top; }
 421   void inc_top(size_t word_size) { _top += word_size; }
 422 
 423   uintx container_count() { return _container_count; }
 424   void inc_container_count();
 425   void dec_container_count();
 426 #ifdef ASSERT
 427   uintx container_count_slow();
 428   void verify_container_count();
 429 #endif
 430 
 431   // used and capacity in this single entry in the list
 432   size_t used_words_in_vs() const;
 433   size_t capacity_words_in_vs() const;
 434 
 435   bool initialize();
 436 
 437   // get space from the virtual space
 438   Metachunk* take_from_committed(size_t chunk_word_size);
 439 
 440   // Allocate a chunk from the virtual space and return it.
 441   Metachunk* get_chunk_vs(size_t chunk_word_size);
 442 
 443   // Expands/shrinks the committed space in a virtual space.  Delegates
 444   // to VirtualSpace.
 445   bool expand_by(size_t min_words, size_t preferred_words);
 446 
 447   // In preparation for deleting this node, remove all the chunks
 448   // in the node from any freelist.
 449   void purge(ChunkManager* chunk_manager);
 450 
 451   // If an allocation doesn't fit in the current node a new node is created.
 452   // Allocate chunks out of the remaining committed space in this node
 453   // to avoid wasting that memory.
 454   // This always adds up because all the chunk sizes are multiples of
 455   // the smallest chunk size.
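       // For example (illustrative, non-class sizes): with 640 committed words left,
       // retire() returns one 512-word SmallChunk and one 128-word SpecializedChunk to
       // the ChunkManager, leaving no committed space unused.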
 456   void retire(ChunkManager* chunk_manager);
 457 
 458 #ifdef ASSERT
 459   // Debug support
 460   void mangle();
 461 #endif
 462 
 463   void print_on(outputStream* st) const;
 464 };
 465 
 466 #define assert_is_aligned(value, alignment)                  \
 467   assert(is_aligned((value), (alignment)),                   \
 468          SIZE_FORMAT_HEX " is not aligned to "               \
 469          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 470 
 471 // Decide if large pages should be committed when the memory is reserved.
 472 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 473   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 474     size_t words = bytes / BytesPerWord;
 475     bool is_class = false; // We never reserve large pages for the class space.
 476     if (MetaspaceGC::can_expand(words, is_class) &&
 477         MetaspaceGC::allowed_expansion() >= words) {
 478       return true;
 479     }
 480   }
 481 
 482   return false;
 483 }
 484 
 485 // 'bytes' is the size of the associated virtual space.
 486 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
 487   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 488 
 489 #if INCLUDE_CDS
 490   // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
 491   // at a configurable address, generally at the top of the Java heap so other
 492   // memory addresses don't conflict.
 493   if (DumpSharedSpaces) {
 494     bool large_pages = false; // No large pages when dumping the CDS archive.
 495     char* shared_base = align_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
 496 
 497     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
 498     if (_rs.is_reserved()) {
 499       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
 500     } else {
 501       // Get a mmap region anywhere if the SharedBaseAddress fails.
 502       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 503     }
 504     if (!_rs.is_reserved()) {
 505       vm_exit_during_initialization("Unable to allocate memory for shared space",
 506         err_msg(SIZE_FORMAT " bytes.", bytes));
 507     }
 508     MetaspaceShared::initialize_shared_rs(&_rs);
 509   } else
 510 #endif
 511   {
 512     bool large_pages = should_commit_large_pages_when_reserving(bytes);
 513 
 514     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 515   }
 516 
 517   if (_rs.is_reserved()) {
 518     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 519     assert(_rs.size() != 0, "Catch if we get a 0 size");
 520     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
 521     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
 522 
 523     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 524   }
 525 }
 526 
 527 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 528   Metachunk* chunk = first_chunk();
 529   Metachunk* invalid_chunk = (Metachunk*) top();
 530   while (chunk < invalid_chunk ) {
 531     assert(chunk->is_tagged_free(), "Should be tagged free");
 532     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 533     chunk_manager->remove_chunk(chunk);
 534     assert(chunk->next() == NULL &&
 535            chunk->prev() == NULL,
 536            "Was not removed from its list");
 537     chunk = (Metachunk*) next;
 538   }
 539 }
 540 
 541 #ifdef ASSERT
 542 uintx VirtualSpaceNode::container_count_slow() {
 543   uintx count = 0;
 544   Metachunk* chunk = first_chunk();
 545   Metachunk* invalid_chunk = (Metachunk*) top();
 546   while (chunk < invalid_chunk ) {
 547     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 548     // Don't count the chunks on the free lists.  Those are
 549     // still part of the VirtualSpaceNode but not currently
 550     // counted.
 551     if (!chunk->is_tagged_free()) {
 552       count++;
 553     }
 554     chunk = (Metachunk*) next;
 555   }
 556   return count;
 557 }
 558 #endif
 559 
 560 // List of VirtualSpaces for metadata allocation.
 561 class VirtualSpaceList : public CHeapObj<mtClass> {
 562   friend class VirtualSpaceNode;
 563 
 564   enum VirtualSpaceSizes {
 565     VirtualSpaceSize = 256 * K
 566   };
 567 
 568   // Head of the list
 569   VirtualSpaceNode* _virtual_space_list;
 570   // virtual space currently being used for allocations
 571   VirtualSpaceNode* _current_virtual_space;
 572 
 573   // Is this VirtualSpaceList used for the compressed class space
 574   bool _is_class;
 575 
 576   // Sum of reserved and committed memory in the virtual spaces
 577   size_t _reserved_words;
 578   size_t _committed_words;
 579 
 580   // Number of virtual spaces
 581   size_t _virtual_space_count;
 582 
 583   ~VirtualSpaceList();
 584 
 585   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
 586 
 587   void set_virtual_space_list(VirtualSpaceNode* v) {
 588     _virtual_space_list = v;
 589   }
 590   void set_current_virtual_space(VirtualSpaceNode* v) {
 591     _current_virtual_space = v;
 592   }
 593 
 594   void link_vs(VirtualSpaceNode* new_entry);
 595 
 596   // Get another virtual space and add it to the list.  This
 597   // is typically prompted by a failed attempt to allocate a chunk
 598   // and is typically followed by the allocation of a chunk.
 599   bool create_new_virtual_space(size_t vs_word_size);
 600 
 601   // Chunk up the unused committed space in the current
 602   // virtual space and add the chunks to the free list.
 603   void retire_current_virtual_space();
 604 
 605  public:
 606   VirtualSpaceList(size_t word_size);
 607   VirtualSpaceList(ReservedSpace rs);
 608 
 609   size_t free_bytes();
 610 
 611   Metachunk* get_new_chunk(size_t chunk_word_size,
 612                            size_t suggested_commit_granularity);
 613 
 614   bool expand_node_by(VirtualSpaceNode* node,
 615                       size_t min_words,
 616                       size_t preferred_words);
 617 
 618   bool expand_by(size_t min_words,
 619                  size_t preferred_words);
 620 
 621   VirtualSpaceNode* current_virtual_space() {
 622     return _current_virtual_space;
 623   }
 624 
 625   bool is_class() const { return _is_class; }
 626 
 627   bool initialization_succeeded() { return _virtual_space_list != NULL; }
 628 
 629   size_t reserved_words()  { return _reserved_words; }
 630   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
 631   size_t committed_words() { return _committed_words; }
 632   size_t committed_bytes() { return committed_words() * BytesPerWord; }
 633 
 634   void inc_reserved_words(size_t v);
 635   void dec_reserved_words(size_t v);
 636   void inc_committed_words(size_t v);
 637   void dec_committed_words(size_t v);
 638   void inc_virtual_space_count();
 639   void dec_virtual_space_count();
 640 
 641   bool contains(const void* ptr);
 642 
 643   // Unlink empty VirtualSpaceNodes and free them.
 644   void purge(ChunkManager* chunk_manager);
 645 
 646   void print_on(outputStream* st) const;
 647 
 648   class VirtualSpaceListIterator : public StackObj {
 649     VirtualSpaceNode* _virtual_spaces;
 650    public:
 651     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
 652       _virtual_spaces(virtual_spaces) {}
 653 
 654     bool repeat() {
 655       return _virtual_spaces != NULL;
 656     }
 657 
 658     VirtualSpaceNode* get_next() {
 659       VirtualSpaceNode* result = _virtual_spaces;
 660       if (_virtual_spaces != NULL) {
 661         _virtual_spaces = _virtual_spaces->next();
 662       }
 663       return result;
 664     }
 665   };
 666 };
 667 
 668 class Metadebug : AllStatic {
 669   // Debugging support for Metaspaces
 670   static int _allocation_fail_alot_count;
 671 
 672  public:
 673 
 674   static void init_allocation_fail_alot_count();
 675 #ifdef ASSERT
 676   static bool test_metadata_failure();
 677 #endif
 678 };
 679 
 680 int Metadebug::_allocation_fail_alot_count = 0;
 681 
 682 //  SpaceManager - used by Metaspace to handle allocations
 683 class SpaceManager : public CHeapObj<mtClass> {
 684   friend class Metaspace;
 685   friend class Metadebug;
 686 
 687  private:
 688 
 689   // protects allocations
 690   Mutex* const _lock;
 691 
 692   // Type of metadata allocated.
 693   Metaspace::MetadataType _mdtype;
 694 
 695   // List of chunks in use by this SpaceManager.  Allocations
 696   // are done from the current chunk.  The list is used for deallocating
 697   // chunks when the SpaceManager is freed.
 698   Metachunk* _chunks_in_use[NumberOfInUseLists];
 699   Metachunk* _current_chunk;
 700 
 701   // Maximum number of small chunks to allocate to a SpaceManager
 702   static uint const _small_chunk_limit;
 703 
 704   // Sum of all space in allocated chunks
 705   size_t _allocated_blocks_words;
 706 
 707   // Sum of all allocated chunks
 708   size_t _allocated_chunks_words;
 709   size_t _allocated_chunks_count;
 710 
 711   // Free lists of blocks are per SpaceManager since they
 712   // are assumed to be in chunks in use by the SpaceManager
 713   // and all chunks in use by a SpaceManager are freed when
 714   // the class loader using the SpaceManager is collected.
 715   BlockFreelist* _block_freelists;
 716 
 717   // protects virtualspace and chunk expansions
 718   static const char*  _expand_lock_name;
 719   static const int    _expand_lock_rank;
 720   static Mutex* const _expand_lock;
 721 
 722  private:
 723   // Accessors
 724   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
 725   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
 726     _chunks_in_use[index] = v;
 727   }
 728 
 729   BlockFreelist* block_freelists() const { return _block_freelists; }
 730 
 731   Metaspace::MetadataType mdtype() { return _mdtype; }
 732 
 733   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
 734   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
 735 
 736   Metachunk* current_chunk() const { return _current_chunk; }
 737   void set_current_chunk(Metachunk* v) {
 738     _current_chunk = v;
 739   }
 740 
 741   Metachunk* find_current_chunk(size_t word_size);
 742 
 743   // Add chunk to the list of chunks in use
 744   void add_chunk(Metachunk* v, bool make_current);
 745   void retire_current_chunk();
 746 
 747   Mutex* lock() const { return _lock; }
 748 
 749  protected:
 750   void initialize();
 751 
 752  public:
 753   SpaceManager(Metaspace::MetadataType mdtype,
 754                Mutex* lock);
 755   ~SpaceManager();
 756 
 757   enum ChunkMultiples {
 758     MediumChunkMultiple = 4
 759   };
 760 
 761   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
 762   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
 763   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
 764 
 765   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
 766 
 767   // Accessors
 768   bool is_class() const { return _mdtype == Metaspace::ClassType; }
 769 
 770   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
 771   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
 772   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
 773 
 774   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
 775 
 776   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
 777 
 778   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
 779   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
 780   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
 781   size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
 782   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
 783 
 784   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
 785 
 786   static Mutex* expand_lock() { return _expand_lock; }
 787 
 788   // Increment the per Metaspace and global running sums for Metachunks
 789   // by the given size.  This is used when a Metachunk is added to
 790   // the in-use list.
 791   void inc_size_metrics(size_t words);
 792   // Increment the per Metaspace and global running sums for Metablocks by the given
 793   // size.  This is used when a Metablock is allocated.
 794   void inc_used_metrics(size_t words);
 795   // Delete the portion of the running sums for this SpaceManager. That is,
 796   // the global running sums for the Metachunks and Metablocks are
 797   // decremented for all the Metachunks in-use by this SpaceManager.
 798   void dec_total_from_size_metrics();
 799 
 800   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
 801   // or return the unadjusted size if the requested size is humongous.
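       // For example (illustrative, non-class space): a requested size of 300 words is
       // adjusted up to SmallChunk (512 words); a request larger than MediumChunk
       // (8 * K words) is returned unadjusted and treated as a humongous chunk.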
 802   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
 803   size_t adjust_initial_chunk_size(size_t requested) const;
 804 
 805   // Get the initial chunk size for this metaspace type.
 806   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
 807 
 808   size_t sum_capacity_in_chunks_in_use() const;
 809   size_t sum_used_in_chunks_in_use() const;
 810   size_t sum_free_in_chunks_in_use() const;
 811   size_t sum_waste_in_chunks_in_use() const;
 812   size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
 813 
 814   size_t sum_count_in_chunks_in_use();
 815   size_t sum_count_in_chunks_in_use(ChunkIndex i);
 816 
 817   Metachunk* get_new_chunk(size_t chunk_word_size);
 818 
 819   // Block allocation and deallocation.
 820   // Allocates a block from the current chunk
 821   MetaWord* allocate(size_t word_size);
 822   // Allocates a block from a small chunk
 823   MetaWord* get_small_chunk_and_allocate(size_t word_size);
 824 
 825   // Helper for allocations
 826   MetaWord* allocate_work(size_t word_size);
 827 
 828   // Returns a block to the per manager freelist
 829   void deallocate(MetaWord* p, size_t word_size);
 830 
 831   // Based on the allocation size and a minimum chunk size,
 832   // calculate the chunk size to return (for expanding space for chunk allocation).
 833   size_t calc_chunk_size(size_t allocation_word_size);
 834 
 835   // Called when an allocation from the current chunk fails.
 836   // Gets a new chunk (may require getting a new virtual space),
 837   // and allocates from that chunk.
 838   MetaWord* grow_and_allocate(size_t word_size);
 839 
 840   // Notify memory usage to MemoryService.
 841   void track_metaspace_memory_usage();
 842 
 843   // debugging support.
 844 
 845   void dump(outputStream* const out) const;
 846   void print_on(outputStream* st) const;
 847   void locked_print_chunks_in_use_on(outputStream* st) const;
 848 
 849   void verify();
 850   void verify_chunk_size(Metachunk* chunk);
 851 #ifdef ASSERT
 852   void verify_allocated_blocks_words();
 853 #endif
 854 
 855   // This adjusts the given size to be at least the minimum allocation size in
 856   // words for data in metaspace.  Essentially the minimum size is currently 3 words.
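       // For example (illustrative, 64-bit): a 1-word (8-byte) request is padded up to
       // sizeof(Metablock) and aligned, yielding 3 words (24 bytes); a request of 3 or
       // more words is returned unchanged.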
 857   size_t get_allocation_word_size(size_t word_size) {
 858     size_t byte_size = word_size * BytesPerWord;
 859 
 860     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
 861     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
 862 
 863     size_t raw_word_size = raw_bytes_size / BytesPerWord;
 864     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
 865 
 866     return raw_word_size;
 867   }
 868 };
 869 
 870 uint const SpaceManager::_small_chunk_limit = 4;
 871 
 872 const char* SpaceManager::_expand_lock_name =
 873   "SpaceManager chunk allocation lock";
 874 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
 875 Mutex* const SpaceManager::_expand_lock =
 876   new Mutex(SpaceManager::_expand_lock_rank,
 877             SpaceManager::_expand_lock_name,
 878             Mutex::_allow_vm_block_flag,
 879             Monitor::_safepoint_check_never);
 880 
 881 void VirtualSpaceNode::inc_container_count() {
 882   assert_lock_strong(SpaceManager::expand_lock());
 883   _container_count++;
 884 }
 885 
 886 void VirtualSpaceNode::dec_container_count() {
 887   assert_lock_strong(SpaceManager::expand_lock());
 888   _container_count--;
 889 }
 890 
 891 #ifdef ASSERT
 892 void VirtualSpaceNode::verify_container_count() {
 893   assert(_container_count == container_count_slow(),
 894          "Inconsistency in container_count _container_count " UINTX_FORMAT
 895          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
 896 }
 897 #endif
 898 
 899 // BlockFreelist methods
 900 
 901 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
 902 
 903 BlockFreelist::~BlockFreelist() {
 904   delete _dictionary;
 905   if (_small_blocks != NULL) {
 906     delete _small_blocks;
 907   }
 908 }
 909 
 910 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
 911   assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
 912 
 913   Metablock* free_chunk = ::new (p) Metablock(word_size);
 914   if (word_size < SmallBlocks::small_block_max_size()) {
 915     small_blocks()->return_block(free_chunk, word_size);
 916   } else {
 917     dictionary()->return_chunk(free_chunk);
 918   }
 919   log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
 920             SIZE_FORMAT, p2i(free_chunk), word_size);
 921 }
 922 
 923 MetaWord* BlockFreelist::get_block(size_t word_size) {
 924   assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
 925 
 926   // Try small_blocks first.
 927   if (word_size < SmallBlocks::small_block_max_size()) {
 928     // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
 929     // this space manager.
 930     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
 931     if (new_block != NULL) {
 932       log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
 933               p2i(new_block), word_size);
 934       return new_block;
 935     }
 936   }
 937 
 938   if (word_size < BlockFreelist::min_dictionary_size()) {
 939     // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
 940     return NULL;
 941   }
 942 
 943   Metablock* free_block = dictionary()->get_chunk(word_size);
 944   if (free_block == NULL) {
 945     return NULL;
 946   }
 947 
 948   const size_t block_size = free_block->size();
 949   if (block_size > WasteMultiplier * word_size) {
 950     return_block((MetaWord*)free_block, block_size);
 951     return NULL;
 952   }
 953 
 954   MetaWord* new_block = (MetaWord*)free_block;
 955   assert(block_size >= word_size, "Incorrect size of block from freelist");
 956   const size_t unused = block_size - word_size;
 957   if (unused >= SmallBlocks::small_block_min_size()) {
 958     return_block(new_block + word_size, unused);
 959   }
 960 
 961   log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
 962             p2i(new_block), word_size);
 963   return new_block;
 964 }
 965 
 966 void BlockFreelist::print_on(outputStream* st) const {
 967   dictionary()->print_free_lists(st);
 968   if (_small_blocks != NULL) {
 969     _small_blocks->print_on(st);
 970   }
 971 }
 972 
 973 // VirtualSpaceNode methods
 974 
 975 VirtualSpaceNode::~VirtualSpaceNode() {
 976   _rs.release();
 977 #ifdef ASSERT
 978   size_t word_size = sizeof(*this) / BytesPerWord;
 979   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
 980 #endif
 981 }
 982 
 983 size_t VirtualSpaceNode::used_words_in_vs() const {
 984   return pointer_delta(top(), bottom(), sizeof(MetaWord));
 985 }
 986 
 987 // Space committed in the VirtualSpace
 988 size_t VirtualSpaceNode::capacity_words_in_vs() const {
 989   return pointer_delta(end(), bottom(), sizeof(MetaWord));
 990 }
 991 
 992 size_t VirtualSpaceNode::free_words_in_vs() const {
 993   return pointer_delta(end(), top(), sizeof(MetaWord));
 994 }
 995 
 996 // Allocates the chunk from the virtual space only.
 997 // This interface is also used internally for debugging.  Not all
 998 // chunks removed here are necessarily used for allocation.
 999 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1000   // Bottom of the new chunk
1001   MetaWord* chunk_limit = top();
1002   assert(chunk_limit != NULL, "Not safe to call this method");
1003 
1004   // The virtual spaces are always expanded by the
1005   // commit granularity to enforce the following condition.
1006   // Without this the is_available check will not work correctly.
1007   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1008       "The committed memory doesn't match the expanded memory.");
1009 
1010   if (!is_available(chunk_word_size)) {
1011     Log(gc, metaspace, freelist) log;
1012     log.debug("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1013     // Dump some information about the virtual space that is nearly full
1014     ResourceMark rm;
1015     print_on(log.debug_stream());
1016     return NULL;
1017   }
1018 
1019   // Take the space (bump top on the current virtual space).
1020   inc_top(chunk_word_size);
1021 
1022   // Initialize the chunk
1023   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
1024   return result;
1025 }
1026 
1027 
1028 // Expand the virtual space (commit more of the reserved space)
1029 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1030   size_t min_bytes = min_words * BytesPerWord;
1031   size_t preferred_bytes = preferred_words * BytesPerWord;
1032 
1033   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1034 
1035   if (uncommitted < min_bytes) {
1036     return false;
1037   }
1038 
1039   size_t commit = MIN2(preferred_bytes, uncommitted);
1040   bool result = virtual_space()->expand_by(commit, false);
1041 
1042   assert(result, "Failed to commit memory");
1043 
1044   return result;
1045 }
1046 
1047 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1048   assert_lock_strong(SpaceManager::expand_lock());
1049   Metachunk* result = take_from_committed(chunk_word_size);
1050   if (result != NULL) {
1051     inc_container_count();
1052   }
1053   return result;
1054 }
1055 
1056 bool VirtualSpaceNode::initialize() {
1057 
1058   if (!_rs.is_reserved()) {
1059     return false;
1060   }
1061 
1062   // These are necessary restrictions to make sure that the virtual space always
1063   // grows in steps of Metaspace::commit_alignment(). If both base and size are
1064   // aligned only the middle alignment of the VirtualSpace is used.
1065   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
1066   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
1067 
1068   // ReservedSpaces marked as special will have the entire memory
1069   // pre-committed. Setting a committed size will make sure that
1070   // committed_size and actual_committed_size agree.
1071   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1072 
1073   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1074                                             Metaspace::commit_alignment());
1075   if (result) {
1076     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1077         "Checking that the pre-committed memory was registered by the VirtualSpace");
1078 
1079     set_top((MetaWord*)virtual_space()->low());
1080     set_reserved(MemRegion((HeapWord*)_rs.base(),
1081                  (HeapWord*)(_rs.base() + _rs.size())));
1082 
1083     assert(reserved()->start() == (HeapWord*) _rs.base(),
1084            "Reserved start was not set properly " PTR_FORMAT
1085            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1086     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1087            "Reserved size was not set properly " SIZE_FORMAT
1088            " != " SIZE_FORMAT, reserved()->word_size(),
1089            _rs.size() / BytesPerWord);
1090   }
1091 
1092   return result;
1093 }
1094 
1095 void VirtualSpaceNode::print_on(outputStream* st) const {
1096   size_t used = used_words_in_vs();
1097   size_t capacity = capacity_words_in_vs();
1098   VirtualSpace* vs = virtual_space();
1099   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
1100            "[" PTR_FORMAT ", " PTR_FORMAT ", "
1101            PTR_FORMAT ", " PTR_FORMAT ")",
1102            p2i(vs), capacity / K,
1103            capacity == 0 ? 0 : used * 100 / capacity,
1104            p2i(bottom()), p2i(top()), p2i(end()),
1105            p2i(vs->high_boundary()));
1106 }
1107 
1108 #ifdef ASSERT
1109 void VirtualSpaceNode::mangle() {
1110   size_t word_size = capacity_words_in_vs();
1111   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1112 }
1113 #endif // ASSERT
1114 
1115 // VirtualSpaceList methods
1116 // Space allocated from the VirtualSpace
1117 
1118 VirtualSpaceList::~VirtualSpaceList() {
1119   VirtualSpaceListIterator iter(virtual_space_list());
1120   while (iter.repeat()) {
1121     VirtualSpaceNode* vsl = iter.get_next();
1122     delete vsl;
1123   }
1124 }
1125 
1126 void VirtualSpaceList::inc_reserved_words(size_t v) {
1127   assert_lock_strong(SpaceManager::expand_lock());
1128   _reserved_words = _reserved_words + v;
1129 }
1130 void VirtualSpaceList::dec_reserved_words(size_t v) {
1131   assert_lock_strong(SpaceManager::expand_lock());
1132   _reserved_words = _reserved_words - v;
1133 }
1134 
1135 #define assert_committed_below_limit()                        \
1136   assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
1137          "Too much committed memory. Committed: " SIZE_FORMAT \
1138          " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1139          MetaspaceAux::committed_bytes(), MaxMetaspaceSize);
1140 
1141 void VirtualSpaceList::inc_committed_words(size_t v) {
1142   assert_lock_strong(SpaceManager::expand_lock());
1143   _committed_words = _committed_words + v;
1144 
1145   assert_committed_below_limit();
1146 }
1147 void VirtualSpaceList::dec_committed_words(size_t v) {
1148   assert_lock_strong(SpaceManager::expand_lock());
1149   _committed_words = _committed_words - v;
1150 
1151   assert_committed_below_limit();
1152 }
1153 
1154 void VirtualSpaceList::inc_virtual_space_count() {
1155   assert_lock_strong(SpaceManager::expand_lock());
1156   _virtual_space_count++;
1157 }
1158 void VirtualSpaceList::dec_virtual_space_count() {
1159   assert_lock_strong(SpaceManager::expand_lock());
1160   _virtual_space_count--;
1161 }
1162 
1163 void ChunkManager::remove_chunk(Metachunk* chunk) {
1164   size_t word_size = chunk->word_size();
1165   ChunkIndex index = list_index(word_size);
1166   if (index != HumongousIndex) {
1167     free_chunks(index)->remove_chunk(chunk);
1168   } else {
1169     humongous_dictionary()->remove_chunk(chunk);
1170   }
1171 
1172   // Chunk has been removed from the chunks free list, update counters.
1173   account_for_removed_chunk(chunk);
1174 }
1175 
1176 // Walk the list of VirtualSpaceNodes and delete
1177 // nodes with a 0 container_count.  Remove Metachunks in
1178 // the node from their respective freelists.
1179 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1180   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
1181   assert_lock_strong(SpaceManager::expand_lock());
1182   // Don't use a VirtualSpaceListIterator because this
1183   // list is being changed and a straightforward use of an iterator is not safe.
1184   VirtualSpaceNode* purged_vsl = NULL;
1185   VirtualSpaceNode* prev_vsl = virtual_space_list();
1186   VirtualSpaceNode* next_vsl = prev_vsl;
1187   while (next_vsl != NULL) {
1188     VirtualSpaceNode* vsl = next_vsl;
1189     DEBUG_ONLY(vsl->verify_container_count();)
1190     next_vsl = vsl->next();
1191     // Don't free the current virtual space since it will likely
1192     // be needed soon.
1193     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1194       // Unlink it from the list
1195       if (prev_vsl == vsl) {
1196         // This is the case of the current node being the first node.
1197         assert(vsl == virtual_space_list(), "Expected to be the first node");
1198         set_virtual_space_list(vsl->next());
1199       } else {
1200         prev_vsl->set_next(vsl->next());
1201       }
1202 
1203       vsl->purge(chunk_manager);
1204       dec_reserved_words(vsl->reserved_words());
1205       dec_committed_words(vsl->committed_words());
1206       dec_virtual_space_count();
1207       purged_vsl = vsl;
1208       delete vsl;
1209     } else {
1210       prev_vsl = vsl;
1211     }
1212   }
1213 #ifdef ASSERT
1214   if (purged_vsl != NULL) {
1215     // List should be stable enough to use an iterator here.
1216     VirtualSpaceListIterator iter(virtual_space_list());
1217     while (iter.repeat()) {
1218       VirtualSpaceNode* vsl = iter.get_next();
1219       assert(vsl != purged_vsl, "Purge of vsl failed");
1220     }
1221   }
1222 #endif
1223 }
1224 
1225 
1226 // This function looks at the mmap regions in the metaspace without locking.
1227 // The chunks are added with store ordering and not deleted except at
1228 // unloading time during a safepoint.
1229 bool VirtualSpaceList::contains(const void* ptr) {
1230   // List should be stable enough to use an iterator here because removing virtual
1231   // space nodes is only allowed at a safepoint.
1232   VirtualSpaceListIterator iter(virtual_space_list());
1233   while (iter.repeat()) {
1234     VirtualSpaceNode* vsn = iter.get_next();
1235     if (vsn->contains(ptr)) {
1236       return true;
1237     }
1238   }
1239   return false;
1240 }
1241 
1242 void VirtualSpaceList::retire_current_virtual_space() {
1243   assert_lock_strong(SpaceManager::expand_lock());
1244 
1245   VirtualSpaceNode* vsn = current_virtual_space();
1246 
1247   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
1248                                   Metaspace::chunk_manager_metadata();
1249 
1250   vsn->retire(cm);
1251 }
1252 
1253 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
1254   DEBUG_ONLY(verify_container_count();)
1255   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
1256     ChunkIndex index = (ChunkIndex)i;
1257     size_t chunk_size = chunk_manager->size_by_index(index);
1258 
1259     while (free_words_in_vs() >= chunk_size) {
1260       Metachunk* chunk = get_chunk_vs(chunk_size);
1261       assert(chunk != NULL, "allocation should have been successful");
1262 
1263       chunk_manager->return_single_chunk(index, chunk);
1264     }
1265     DEBUG_ONLY(verify_container_count();)
1266   }
1267   assert(free_words_in_vs() == 0, "should be empty now");
1268 }
1269 
1270 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
1271                                    _is_class(false),
1272                                    _virtual_space_list(NULL),
1273                                    _current_virtual_space(NULL),
1274                                    _reserved_words(0),
1275                                    _committed_words(0),
1276                                    _virtual_space_count(0) {
1277   MutexLockerEx cl(SpaceManager::expand_lock(),
1278                    Mutex::_no_safepoint_check_flag);
1279   create_new_virtual_space(word_size);
1280 }
1281 
1282 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
1283                                    _is_class(true),
1284                                    _virtual_space_list(NULL),
1285                                    _current_virtual_space(NULL),
1286                                    _reserved_words(0),
1287                                    _committed_words(0),
1288                                    _virtual_space_count(0) {
1289   MutexLockerEx cl(SpaceManager::expand_lock(),
1290                    Mutex::_no_safepoint_check_flag);
1291   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
1292   bool succeeded = class_entry->initialize();
1293   if (succeeded) {
1294     link_vs(class_entry);
1295   }
1296 }
1297 
1298 size_t VirtualSpaceList::free_bytes() {
1299   return virtual_space_list()->free_words_in_vs() * BytesPerWord;
1300 }
1301 
1302 // Allocate another meta virtual space and add it to the list.
1303 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
1304   assert_lock_strong(SpaceManager::expand_lock());
1305 
1306   if (is_class()) {
1307     assert(false, "We currently don't support more than one VirtualSpace for"
1308                   " the compressed class space. The initialization of the"
1309                   " CCS uses another code path and should not hit this path.");
1310     return false;
1311   }
1312 
1313   if (vs_word_size == 0) {
1314     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
1315     return false;
1316   }
1317 
1318   // Reserve the space
1319   size_t vs_byte_size = vs_word_size * BytesPerWord;
1320   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
1321 
1322   // Allocate the meta virtual space and initialize it.
1323   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
1324   if (!new_entry->initialize()) {
1325     delete new_entry;
1326     return false;
1327   } else {
1328     assert(new_entry->reserved_words() == vs_word_size,
1329         "Reserved memory size differs from requested memory size");
1330     // ensure lock-free iteration sees fully initialized node
1331     OrderAccess::storestore();
1332     link_vs(new_entry);
1333     return true;
1334   }
1335 }
1336 
1337 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1338   if (virtual_space_list() == NULL) {
1339     set_virtual_space_list(new_entry);
1340   } else {
1341     current_virtual_space()->set_next(new_entry);
1342   }
1343   set_current_virtual_space(new_entry);
1344   inc_reserved_words(new_entry->reserved_words());
1345   inc_committed_words(new_entry->committed_words());
1346   inc_virtual_space_count();
1347 #ifdef ASSERT
1348   new_entry->mangle();
1349 #endif
1350   if (log_is_enabled(Trace, gc, metaspace)) {
1351     Log(gc, metaspace) log;
1352     VirtualSpaceNode* vsl = current_virtual_space();
1353     ResourceMark rm;
1354     vsl->print_on(log.trace_stream());
1355   }
1356 }
1357 
1358 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1359                                       size_t min_words,
1360                                       size_t preferred_words) {
1361   size_t before = node->committed_words();
1362 
1363   bool result = node->expand_by(min_words, preferred_words);
1364 
1365   size_t after = node->committed_words();
1366 
1367   // after and before can be the same if the memory was pre-committed.
1368   assert(after >= before, "Inconsistency");
1369   inc_committed_words(after - before);
1370 
1371   return result;
1372 }
1373 
1374 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
1375   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
1376   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
1377   assert(min_words <= preferred_words, "Invalid arguments");
1378 
1379   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
1380     return false;
1381   }
1382 
1383   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
1384   if (allowed_expansion_words < min_words) {
1385     return false;
1386   }
1387 
1388   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
1389 
1390   // Commit more memory from the current virtual space.
1391   bool vs_expanded = expand_node_by(current_virtual_space(),
1392                                     min_words,
1393                                     max_expansion_words);
1394   if (vs_expanded) {
1395     return true;
1396   }
1397   retire_current_virtual_space();
1398 
1399   // Get another virtual space.
1400   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
1401   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
1402 
1403   if (create_new_virtual_space(grow_vs_words)) {
1404     if (current_virtual_space()->is_pre_committed()) {
1405       // The memory was pre-committed, so we are done here.
1406       assert(min_words <= current_virtual_space()->committed_words(),
1407           "The new VirtualSpace was pre-committed, so it "
1408           "should be large enough to fit the alloc request.");
1409       return true;
1410     }
1411 
1412     return expand_node_by(current_virtual_space(),
1413                           min_words,
1414                           max_expansion_words);
1415   }
1416 
1417   return false;
1418 }
1419 
1420 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
1421 
1422   // Allocate a chunk out of the current virtual space.
1423   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
1424 
1425   if (next != NULL) {
1426     return next;
1427   }
1428 
1429   // The expand amount is currently only determined by the requested sizes
1430   // and not how much committed memory is left in the current virtual space.
1431 
1432   size_t min_word_size       = align_up(chunk_word_size,              Metaspace::commit_alignment_words());
1433   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
1434   if (min_word_size >= preferred_word_size) {
1435     // Can happen when humongous chunks are allocated.
1436     preferred_word_size = min_word_size;
1437   }
1438 
1439   bool expanded = expand_by(min_word_size, preferred_word_size);
1440   if (expanded) {
1441     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
1442     assert(next != NULL, "The allocation was expected to succeed after the expansion");
1443   }
1444 
1445   return next;
1446 }
1447 
1448 void VirtualSpaceList::print_on(outputStream* st) const {
1449   VirtualSpaceListIterator iter(virtual_space_list());
1450   while (iter.repeat()) {
1451     VirtualSpaceNode* node = iter.get_next();
1452     node->print_on(st);
1453   }
1454 }
1455 
1456 // MetaspaceGC methods
1457 
1458 // VM_CollectForMetadataAllocation is the VM operation used to GC metadata.
1459 // Within the VM operation, after the GC, the retried attempt to allocate the
1460 // metadata should succeed.  If the GC did not free enough space for the
1461 // metaspace allocation, the HWM is increased so that another virtual space
1462 // will be allocated for the metadata.  With the perm gen, the increase in the
1463 // perm gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
1464 // metaspace policy uses those as the small and large steps for the HWM.
1465 //
1466 // After the GC the compute_new_size() for MetaspaceGC is called to
1467 // resize the capacity of the metaspaces.  The current implementation
1468 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio, which
1469 // some GCs use to resize the Java heap.  New flags can be implemented
1470 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
1471 // free space is desirable in the metaspace capacity to decide how much
1472 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
1473 // free space is desirable in the metaspace capacity before decreasing
1474 // the HWM.
1475 
1476 // Calculate the amount to increase the high water mark (HWM).
1477 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1478 // another expansion is not requested too soon.  If that is not
1479 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
1480 // If that is still not enough, expand by the size of the allocation
1481 // plus some.
1482 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
1483   size_t min_delta = MinMetaspaceExpansion;
1484   size_t max_delta = MaxMetaspaceExpansion;
1485   size_t delta = align_up(bytes, Metaspace::commit_alignment());
1486 
1487   if (delta <= min_delta) {
1488     delta = min_delta;
1489   } else if (delta <= max_delta) {
1490     // Don't want to hit the high water mark on the next
1491     // allocation so make the delta greater than just enough
1492     // for this allocation.
1493     delta = max_delta;
1494   } else {
1495     // This allocation is large but the next ones are probably not
1496     // so increase by the minimum.
1497     delta = delta + min_delta;
1498   }
1499 
1500   assert_is_aligned(delta, Metaspace::commit_alignment());
1501 
1502   return delta;
1503 }
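
// As a worked illustration (the values are illustrative only; the real bounds
// come from MinMetaspaceExpansion, MaxMetaspaceExpansion and the platform's
// commit alignment): with a 256K minimum step, a 4M maximum step and a 64K
// commit alignment, a 10K request aligns up to 64K and is bumped to the 256K
// minimum; a 1M request falls between the bounds and becomes the 4M maximum;
// a 16M request exceeds both and yields 16M plus the 256K minimum.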
1504 
1505 size_t MetaspaceGC::capacity_until_GC() {
1506   size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
1507   assert(value >= MetaspaceSize, "Not initialized properly?");
1508   return value;
1509 }
1510 
1511 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
1512   assert_is_aligned(v, Metaspace::commit_alignment());
1513 
1514   size_t capacity_until_GC = (size_t) _capacity_until_GC;
1515   size_t new_value = capacity_until_GC + v;
1516 
1517   if (new_value < capacity_until_GC) {
1518     // The addition wrapped around, set new_value to aligned max value.
1519     new_value = align_down(max_uintx, Metaspace::commit_alignment());
1520   }
1521 
1522   intptr_t expected = (intptr_t) capacity_until_GC;
1523   intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);
1524 
1525   if (expected != actual) {
1526     return false;
1527   }
1528 
1529   if (new_cap_until_GC != NULL) {
1530     *new_cap_until_GC = new_value;
1531   }
1532   if (old_cap_until_GC != NULL) {
1533     *old_cap_until_GC = capacity_until_GC;
1534   }
1535   return true;
1536 }
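
// inc_capacity_until_GC() is lock-free and fails if another thread updated
// _capacity_until_GC concurrently.  A caller that must raise the HWM would
// therefore typically retry, roughly along these lines (illustrative sketch
// only; "delta" stands for whatever expansion the caller computed, this is
// not a copy of the actual call sites):
//
//   size_t new_cap = 0;
//   size_t old_cap = 0;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, &new_cap, &old_cap)) {
//     // Lost the race: another thread changed the HWM.  Re-check whether the
//     // expansion is still needed, then retry or give up.
//   }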
1537 
1538 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
1539   assert_is_aligned(v, Metaspace::commit_alignment());
1540 
1541   return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
1542 }
1543 
1544 void MetaspaceGC::initialize() {
1545   // Set the high-water mark to MaxMetaspaceSize during VM initialization since
1546   // we can't do a GC during initialization.
1547   _capacity_until_GC = MaxMetaspaceSize;
1548 }
1549 
1550 void MetaspaceGC::post_initialize() {
1551   // Reset the high-water mark once the VM initialization is done.
1552   _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
1553 }
1554 
1555 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
1556   // Check if the compressed class space is full.
1557   if (is_class && Metaspace::using_class_space()) {
1558     size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
1559     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
1560       return false;
1561     }
1562   }
1563 
1564   // Check if the user has imposed a limit on the metaspace memory.
1565   size_t committed_bytes = MetaspaceAux::committed_bytes();
1566   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
1567     return false;
1568   }
1569 
1570   return true;
1571 }
1572 
1573 size_t MetaspaceGC::allowed_expansion() {
1574   size_t committed_bytes = MetaspaceAux::committed_bytes();
1575   size_t capacity_until_gc = capacity_until_GC();
1576 
1577   assert(capacity_until_gc >= committed_bytes,
1578          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
1579          capacity_until_gc, committed_bytes);
1580 
1581   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
1582   size_t left_until_GC = capacity_until_gc - committed_bytes;
1583   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
1584 
1585   return left_to_commit / BytesPerWord;
1586 }
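
// Worked example (illustrative numbers only): with committed_bytes = 30M,
// capacity_until_GC = 34M and MaxMetaspaceSize = 35M, left_until_GC is 4M and
// left_until_max is 5M, so allowed_expansion() returns 4M / BytesPerWord
// words; the GC threshold, not the user-imposed limit, bounds the expansion.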
1587 
1588 void MetaspaceGC::compute_new_size() {
1589   assert(_shrink_factor <= 100, "invalid shrink factor");
1590   uint current_shrink_factor = _shrink_factor;
1591   _shrink_factor = 0;
1592 
1593   // Using committed_bytes() for used_after_gc is an overestimation, since the
1594   // chunk free lists are included in committed_bytes() and the memory in an
1595   // un-fragmented chunk free list is available for future allocations.
1596   // However, if the chunk free lists becomes fragmented, then the memory may
1597   // not be available for future allocations and the memory is therefore "in use".
1598   // Including the chunk free lists in the definition of "in use" is therefore
1599   // necessary. Not including the chunk free lists can cause capacity_until_GC to
1600   // shrink below committed_bytes() and this has caused serious bugs in the past.
1601   const size_t used_after_gc = MetaspaceAux::committed_bytes();
1602   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1603 
1604   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1605   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1606 
1607   const double min_tmp = used_after_gc / maximum_used_percentage;
1608   size_t minimum_desired_capacity =
1609     (size_t)MIN2(min_tmp, double(max_uintx));
1610   // Don't shrink less than the initial generation size
1611   minimum_desired_capacity = MAX2(minimum_desired_capacity,
1612                                   MetaspaceSize);
1613 
1614   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
1615   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
1616                            minimum_free_percentage, maximum_used_percentage);
1617   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
1618 
1619 
1620   size_t shrink_bytes = 0;
1621   if (capacity_until_GC < minimum_desired_capacity) {
1622     // If we have less capacity below the metaspace HWM, then
1623     // increment the HWM.
1624     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1625     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
1626     // Don't expand unless it's significant
1627     if (expand_bytes >= MinMetaspaceExpansion) {
1628       size_t new_capacity_until_GC = 0;
1629       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1630       assert(succeeded, "Should always successfully increment HWM when at safepoint");
1631 
1632       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1633                                                new_capacity_until_GC,
1634                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
1635       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
1636                                minimum_desired_capacity / (double) K,
1637                                expand_bytes / (double) K,
1638                                MinMetaspaceExpansion / (double) K,
1639                                new_capacity_until_GC / (double) K);
1640     }
1641     return;
1642   }
1643 
1644   // No expansion, now see if we want to shrink
1645   // We would never want to shrink more than this
1646   assert(capacity_until_GC >= minimum_desired_capacity,
1647          SIZE_FORMAT " >= " SIZE_FORMAT,
1648          capacity_until_GC, minimum_desired_capacity);
1649   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1650 
1651   // Should shrinking be considered?
1652   if (MaxMetaspaceFreeRatio < 100) {
1653     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1654     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1655     const double max_tmp = used_after_gc / minimum_used_percentage;
1656     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1657     maximum_desired_capacity = MAX2(maximum_desired_capacity,
1658                                     MetaspaceSize);
1659     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
1660                              maximum_free_percentage, minimum_used_percentage);
1661     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
1662                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
1663 
1664     assert(minimum_desired_capacity <= maximum_desired_capacity,
1665            "sanity check");
1666 
1667     if (capacity_until_GC > maximum_desired_capacity) {
1668       // Capacity too large, compute shrinking size
1669       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1670       // We don't want shrink all the way back to initSize if people call
1671       // System.gc(), because some programs do that between "phases" and then
1672       // we'd just have to grow the heap up again for the next phase.  So we
1673       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1674       // on the third call, and 100% by the fourth call.  But if we recompute
1675       // size without shrinking, it goes back to 0%.
1676       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1677 
1678       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
1679 
1680       assert(shrink_bytes <= max_shrink_bytes,
1681              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1682              shrink_bytes, max_shrink_bytes);
1683       if (current_shrink_factor == 0) {
1684         _shrink_factor = 10;
1685       } else {
1686         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1687       }
1688       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
1689                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
1690       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
1691                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
1692     }
1693   }
1694 
1695   // Don't shrink unless it's significant
1696   if (shrink_bytes >= MinMetaspaceExpansion &&
1697       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1698     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1699     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1700                                              new_capacity_until_GC,
1701                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
1702   }
1703 }
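
// Illustrative example of the damped shrinking above (numbers made up): if the
// excess over maximum_desired_capacity stays at 40M across repeated
// System.gc() calls, the successive shrink amounts are 0M (factor 0), 4M
// (factor 10), 16M (factor 40) and finally 40M (factor 100), each aligned down
// to the commit alignment before the HWM is decremented.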
1704 
1705 // Metadebug methods
1706 
1707 void Metadebug::init_allocation_fail_alot_count() {
1708   if (MetadataAllocationFailALot) {
1709     _allocation_fail_alot_count =
1710       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1711   }
1712 }
1713 
1714 #ifdef ASSERT
1715 bool Metadebug::test_metadata_failure() {
1716   if (MetadataAllocationFailALot &&
1717       Threads::is_vm_complete()) {
1718     if (_allocation_fail_alot_count > 0) {
1719       _allocation_fail_alot_count--;
1720     } else {
1721       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
1722       init_allocation_fail_alot_count();
1723       return true;
1724     }
1725   }
1726   return false;
1727 }
1728 #endif
1729 
1730 // ChunkManager methods
1731 
1732 size_t ChunkManager::free_chunks_total_words() {
1733   return _free_chunks_total;
1734 }
1735 
1736 size_t ChunkManager::free_chunks_total_bytes() {
1737   return free_chunks_total_words() * BytesPerWord;
1738 }
1739 
1740 // Update internal accounting after a chunk was added
1741 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
1742   assert_lock_strong(SpaceManager::expand_lock());
1743   _free_chunks_count ++;
1744   _free_chunks_total += c->word_size();
1745 }
1746 
1747 // Update internal accounting after a chunk was removed
1748 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
1749   assert_lock_strong(SpaceManager::expand_lock());
1750   assert(_free_chunks_count >= 1,
1751     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
1752   assert(_free_chunks_total >= c->word_size(),
1753     "ChunkManager::_free_chunks_total: about to go negative "
1754      "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
1755   _free_chunks_count --;
1756   _free_chunks_total -= c->word_size();
1757 }
1758 
1759 size_t ChunkManager::free_chunks_count() {
1760 #ifdef ASSERT
1761   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1762     MutexLockerEx cl(SpaceManager::expand_lock(),
1763                      Mutex::_no_safepoint_check_flag);
1764     // This lock is only needed in debug because the verification
1765     // of the _free_chunks_totals walks the list of free chunks
1766     slow_locked_verify_free_chunks_count();
1767   }
1768 #endif
1769   return _free_chunks_count;
1770 }
1771 
1772 ChunkIndex ChunkManager::list_index(size_t size) {
1773   if (size_by_index(SpecializedIndex) == size) {
1774     return SpecializedIndex;
1775   }
1776   if (size_by_index(SmallIndex) == size) {
1777     return SmallIndex;
1778   }
1779   const size_t med_size = size_by_index(MediumIndex);
1780   if (med_size == size) {
1781     return MediumIndex;
1782   }
1783 
1784   assert(size > med_size, "Not a humongous chunk");
1785   return HumongousIndex;
1786 }
1787 
1788 size_t ChunkManager::size_by_index(ChunkIndex index) {
1789   index_bounds_check(index);
1790   assert(index != HumongousIndex, "Do not call for humongous chunks.");
1791   return free_chunks(index)->size();
1792 }
1793 
1794 void ChunkManager::locked_verify_free_chunks_total() {
1795   assert_lock_strong(SpaceManager::expand_lock());
1796   assert(sum_free_chunks() == _free_chunks_total,
1797          "_free_chunks_total " SIZE_FORMAT " is not the"
1798          " same as sum " SIZE_FORMAT, _free_chunks_total,
1799          sum_free_chunks());
1800 }
1801 
1802 void ChunkManager::verify_free_chunks_total() {
1803   MutexLockerEx cl(SpaceManager::expand_lock(),
1804                      Mutex::_no_safepoint_check_flag);
1805   locked_verify_free_chunks_total();
1806 }
1807 
1808 void ChunkManager::locked_verify_free_chunks_count() {
1809   assert_lock_strong(SpaceManager::expand_lock());
1810   assert(sum_free_chunks_count() == _free_chunks_count,
1811          "_free_chunks_count " SIZE_FORMAT " is not the"
1812          " same as sum " SIZE_FORMAT, _free_chunks_count,
1813          sum_free_chunks_count());
1814 }
1815 
1816 void ChunkManager::verify_free_chunks_count() {
1817 #ifdef ASSERT
1818   MutexLockerEx cl(SpaceManager::expand_lock(),
1819                      Mutex::_no_safepoint_check_flag);
1820   locked_verify_free_chunks_count();
1821 #endif
1822 }
1823 
1824 void ChunkManager::verify() {
1825   MutexLockerEx cl(SpaceManager::expand_lock(),
1826                      Mutex::_no_safepoint_check_flag);
1827   locked_verify();
1828 }
1829 
1830 void ChunkManager::locked_verify() {
1831   locked_verify_free_chunks_count();
1832   locked_verify_free_chunks_total();
1833 }
1834 
1835 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1836   assert_lock_strong(SpaceManager::expand_lock());
1837   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1838                 _free_chunks_total, _free_chunks_count);
1839 }
1840 
1841 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1842   assert_lock_strong(SpaceManager::expand_lock());
1843   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1844                 sum_free_chunks(), sum_free_chunks_count());
1845 }
1846 
1847 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1848   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
1849          "Bad index: %d", (int)index);
1850 
1851   return &_free_chunks[index];
1852 }
1853 
1854 // These methods that sum the free chunk lists are used in printing
1855 // methods that are used in product builds.
1856 size_t ChunkManager::sum_free_chunks() {
1857   assert_lock_strong(SpaceManager::expand_lock());
1858   size_t result = 0;
1859   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1860     ChunkList* list = free_chunks(i);
1861 
1862     if (list == NULL) {
1863       continue;
1864     }
1865 
1866     result = result + list->count() * list->size();
1867   }
1868   result = result + humongous_dictionary()->total_size();
1869   return result;
1870 }
1871 
1872 size_t ChunkManager::sum_free_chunks_count() {
1873   assert_lock_strong(SpaceManager::expand_lock());
1874   size_t count = 0;
1875   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1876     ChunkList* list = free_chunks(i);
1877     if (list == NULL) {
1878       continue;
1879     }
1880     count = count + list->count();
1881   }
1882   count = count + humongous_dictionary()->total_free_blocks();
1883   return count;
1884 }
1885 
1886 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1887   ChunkIndex index = list_index(word_size);
1888   assert(index < HumongousIndex, "No humongous list");
1889   return free_chunks(index);
1890 }
1891 
1892 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1893   assert_lock_strong(SpaceManager::expand_lock());
1894 
1895   slow_locked_verify();
1896 
1897   Metachunk* chunk = NULL;
1898   if (list_index(word_size) != HumongousIndex) {
1899     ChunkList* free_list = find_free_chunks_list(word_size);
1900     assert(free_list != NULL, "Sanity check");
1901 
1902     chunk = free_list->head();
1903 
1904     if (chunk == NULL) {
1905       return NULL;
1906     }
1907 
1908     // Remove the chunk as the head of the list.
1909     free_list->remove_chunk(chunk);
1910 
1911     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1912                                        p2i(free_list), p2i(chunk), chunk->word_size());
1913   } else {
1914     chunk = humongous_dictionary()->get_chunk(word_size);
1915 
1916     if (chunk == NULL) {
1917       return NULL;
1918     }
1919 
1920     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
1921                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
1922   }
1923 
1924   // Chunk has been removed from the chunk manager; update counters.
1925   account_for_removed_chunk(chunk);
1926 
1927   // Remove it from the links to this freelist
1928   chunk->set_next(NULL);
1929   chunk->set_prev(NULL);
1930 #ifdef ASSERT
1931   // Chunk is no longer on any freelist. Setting it to false makes container_count_slow()
1932   // work.
1933   chunk->set_is_tagged_free(false);
1934 #endif
1935   chunk->container()->inc_container_count();
1936 
1937   slow_locked_verify();
1938   return chunk;
1939 }
1940 
1941 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1942   assert_lock_strong(SpaceManager::expand_lock());
1943   slow_locked_verify();
1944 
1945   // Take from the beginning of the list
1946   Metachunk* chunk = free_chunks_get(word_size);
1947   if (chunk == NULL) {
1948     return NULL;
1949   }
1950 
1951   assert((word_size <= chunk->word_size()) ||
1952          (list_index(chunk->word_size()) == HumongousIndex),
1953          "Non-humongous variable sized chunk");
1954   Log(gc, metaspace, freelist) log;
1955   if (log.is_debug()) {
1956     size_t list_count;
1957     if (list_index(word_size) < HumongousIndex) {
1958       ChunkList* list = find_free_chunks_list(word_size);
1959       list_count = list->count();
1960     } else {
1961       list_count = humongous_dictionary()->total_count();
1962     }
1963     log.debug("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1964                p2i(this), p2i(chunk), chunk->word_size(), list_count);
1965     ResourceMark rm;
1966     locked_print_free_chunks(log.debug_stream());
1967   }
1968 
1969   return chunk;
1970 }
1971 
1972 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
1973   assert_lock_strong(SpaceManager::expand_lock());
1974   assert(chunk != NULL, "Expected chunk.");
1975   assert(chunk->container() != NULL, "Container should have been set.");
1976   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
1977   index_bounds_check(index);
1978 
1979   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
1980   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
1981   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
1982   NOT_PRODUCT(chunk->mangle(badMetaWordVal);)
1983 
1984   if (index != HumongousIndex) {
1985     // Return non-humongous chunk to freelist.
1986     ChunkList* list = free_chunks(index);
1987     assert(list->size() == chunk->word_size(), "Wrong chunk type.");
1988     list->return_chunk_at_head(chunk);
1989     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
1990         chunk_size_name(index), p2i(chunk));
1991   } else {
1992     // Return humongous chunk to dictionary.
1993     assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
1994     assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
1995            "Humongous chunk has wrong alignment.");
1996     _humongous_dictionary.return_chunk(chunk);
1997     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
1998         chunk_size_name(index), p2i(chunk), chunk->word_size());
1999   }
2000   chunk->container()->dec_container_count();
2001   DEBUG_ONLY(chunk->set_is_tagged_free(true);)
2002 
2003   // Chunk has been added; update counters.
2004   account_for_added_chunk(chunk);
2006 }
2007 
2008 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
2009   index_bounds_check(index);
2010   if (chunks == NULL) {
2011     return;
2012   }
2013   LogTarget(Trace, gc, metaspace, freelist) log;
2014   if (log.is_enabled()) { // tracing
2015     log.print("returning list of %s chunks...", chunk_size_name(index));
2016   }
2017   unsigned num_chunks_returned = 0;
2018   size_t size_chunks_returned = 0;
2019   Metachunk* cur = chunks;
2020   while (cur != NULL) {
2021     // Capture the next link before it is changed
2022     // by the call to return_chunk_at_head();
2023     Metachunk* next = cur->next();
2024     if (log.is_enabled()) { // tracing
2025       num_chunks_returned ++;
2026       size_chunks_returned += cur->word_size();
2027     }
2028     return_single_chunk(index, cur);
2029     cur = next;
2030   }
2031   if (log.is_enabled()) { // tracing
2032     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
2033         num_chunks_returned, chunk_size_name(index), size_chunks_returned);
2034     if (index != HumongousIndex) {
2035       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->count());
2036     } else {
2037       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
2038     }
2039   }
2040 }
2041 
2042 void ChunkManager::print_on(outputStream* out) const {
2043   const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics(out);
2044 }
2045 
2046 // SpaceManager methods
2047 
2048 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
2049   size_t chunk_sizes[] = {
2050       specialized_chunk_size(is_class_space),
2051       small_chunk_size(is_class_space),
2052       medium_chunk_size(is_class_space)
2053   };
2054 
2055   // Adjust up to one of the fixed chunk sizes ...
2056   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
2057     if (requested <= chunk_sizes[i]) {
2058       return chunk_sizes[i];
2059     }
2060   }
2061 
2062   // ... or return the size as a humongous chunk.
2063   return requested;
2064 }
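
// For illustration, assuming non-class chunk sizes of 128, 512 and 8K words
// (the actual values are defined elsewhere in this file): a request of 200
// words is adjusted up to a 512-word small chunk, a request of 600 words to an
// 8K-word medium chunk, and a request of 20K words is returned unchanged and
// will be handled as a humongous chunk.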
2065 
2066 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
2067   return adjust_initial_chunk_size(requested, is_class());
2068 }
2069 
2070 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
2071   size_t requested;
2072 
2073   if (is_class()) {
2074     switch (type) {
2075     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
2076     case Metaspace::ROMetaspaceType:         requested = ClassSpecializedChunk; break;
2077     case Metaspace::ReadWriteMetaspaceType:  requested = ClassSpecializedChunk; break;
2078     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
2079     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
2080     default:                                 requested = ClassSmallChunk; break;
2081     }
2082   } else {
2083     switch (type) {
2084     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
2085     case Metaspace::ROMetaspaceType:         requested = SharedReadOnlySize / wordSize; break;
2086     case Metaspace::ReadWriteMetaspaceType:  requested = SharedReadWriteSize / wordSize; break;
2087     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
2088     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
2089     default:                                 requested = SmallChunk; break;
2090     }
2091   }
2092 
2093   // Adjust to one of the fixed chunk sizes (unless humongous)
2094   const size_t adjusted = adjust_initial_chunk_size(requested);
2095 
2096   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
2097          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
2098 
2099   return adjusted;
2100 }
2101 
2102 size_t SpaceManager::sum_free_in_chunks_in_use() const {
2103   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2104   size_t free = 0;
2105   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2106     Metachunk* chunk = chunks_in_use(i);
2107     while (chunk != NULL) {
2108       free += chunk->free_word_size();
2109       chunk = chunk->next();
2110     }
2111   }
2112   return free;
2113 }
2114 
2115 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
2116   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2117   size_t result = 0;
2118   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2119     result += sum_waste_in_chunks_in_use(i);
2120   }
2121 
2122   return result;
2123 }
2124 
2125 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
2126   size_t result = 0;
2127   Metachunk* chunk = chunks_in_use(index);
2128   // Count the free space in all the chunks but not the
2129   // current chunk from which allocations are still being done.
2130   while (chunk != NULL) {
2131     if (chunk != current_chunk()) {
2132       result += chunk->free_word_size();
2133     }
2134     chunk = chunk->next();
2135   }
2136   return result;
2137 }
2138 
2139 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
2140   // For CMS use "allocated_chunks_words()" which does not need the
2141   // Metaspace lock.  For the other collectors sum over the chunk
2142   // lists.  Summing over the lists is too expensive to use in the
2143   // product, so allocated_chunks_words() should be used there.
2144   // Walking the lists here also allows checking that
2145   // allocated_chunks_words() returns the same value as
2146   // sum_capacity_in_chunks_in_use(), which is the definitive
2147   // answer.
2148   if (UseConcMarkSweepGC) {
2149     return allocated_chunks_words();
2150   } else {
2151     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2152     size_t sum = 0;
2153     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2154       Metachunk* chunk = chunks_in_use(i);
2155       while (chunk != NULL) {
2156         sum += chunk->word_size();
2157         chunk = chunk->next();
2158       }
2159     }
2160     return sum;
2161   }
2162 }
2163 
2164 size_t SpaceManager::sum_count_in_chunks_in_use() {
2165   size_t count = 0;
2166   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2167     count = count + sum_count_in_chunks_in_use(i);
2168   }
2169 
2170   return count;
2171 }
2172 
2173 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
2174   size_t count = 0;
2175   Metachunk* chunk = chunks_in_use(i);
2176   while (chunk != NULL) {
2177     count++;
2178     chunk = chunk->next();
2179   }
2180   return count;
2181 }
2182 
2183 
2184 size_t SpaceManager::sum_used_in_chunks_in_use() const {
2185   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2186   size_t used = 0;
2187   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2188     Metachunk* chunk = chunks_in_use(i);
2189     while (chunk != NULL) {
2190       used += chunk->used_word_size();
2191       chunk = chunk->next();
2192     }
2193   }
2194   return used;
2195 }
2196 
2197 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
2198 
2199   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2200     Metachunk* chunk = chunks_in_use(i);
2201     st->print("SpaceManager: %s " PTR_FORMAT,
2202                  chunk_size_name(i), p2i(chunk));
2203     if (chunk != NULL) {
2204       st->print_cr(" free " SIZE_FORMAT,
2205                    chunk->free_word_size());
2206     } else {
2207       st->cr();
2208     }
2209   }
2210 
2211   chunk_manager()->locked_print_free_chunks(st);
2212   chunk_manager()->locked_print_sum_free_chunks(st);
2213 }
2214 
2215 size_t SpaceManager::calc_chunk_size(size_t word_size) {
2216 
2217   // Decide between a small chunk and a medium chunk.  Up to
2218   // _small_chunk_limit small chunks can be allocated.
2219   // After that a medium chunk is preferred.
2220   size_t chunk_word_size;
2221   if (chunks_in_use(MediumIndex) == NULL &&
2222       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2223     chunk_word_size = (size_t) small_chunk_size();
2224     if (word_size + Metachunk::overhead() > small_chunk_size()) {
2225       chunk_word_size = medium_chunk_size();
2226     }
2227   } else {
2228     chunk_word_size = medium_chunk_size();
2229   }
2230 
2231   // Might still need a humongous chunk.  Enforce
2232   // humongous allocations sizes to be aligned up to
2233   // the smallest chunk size.
2234   size_t if_humongous_sized_chunk =
2235     align_up(word_size + Metachunk::overhead(),
2236              smallest_chunk_size());
2237   chunk_word_size =
2238     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2239 
2240   assert(!SpaceManager::is_humongous(word_size) ||
2241          chunk_word_size == if_humongous_sized_chunk,
2242          "Size calculation is wrong, word_size " SIZE_FORMAT
2243          " chunk_word_size " SIZE_FORMAT,
2244          word_size, chunk_word_size);
2245   Log(gc, metaspace, alloc) log;
2246   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
2247     log.debug("Metadata humongous allocation:");
2248     log.debug("  word_size " PTR_FORMAT, word_size);
2249     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
2250     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
2251   }
2252   return chunk_word_size;
2253 }
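
// For illustration, assuming a 512-word small chunk (the actual sizes are
// defined elsewhere in this file): a 510-word request plus the Metachunk
// overhead no longer fits in a small chunk, so a medium chunk is chosen even
// if fewer than _small_chunk_limit small chunks are in use; a request larger
// than a medium chunk yields a humongous chunk whose size is the request plus
// overhead aligned up to the smallest chunk size.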
2254 
2255 void SpaceManager::track_metaspace_memory_usage() {
2256   if (is_init_completed()) {
2257     if (is_class()) {
2258       MemoryService::track_compressed_class_memory_usage();
2259     }
2260     MemoryService::track_metaspace_memory_usage();
2261   }
2262 }
2263 
2264 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2265   assert(vs_list()->current_virtual_space() != NULL,
2266          "Should have been set");
2267   assert(current_chunk() == NULL ||
2268          current_chunk()->allocate(word_size) == NULL,
2269          "Don't need to expand");
2270   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2271 
2272   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2273     size_t words_left = 0;
2274     size_t words_used = 0;
2275     if (current_chunk() != NULL) {
2276       words_left = current_chunk()->free_word_size();
2277       words_used = current_chunk()->used_word_size();
2278     }
2279     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
2280                                        word_size, words_used, words_left);
2281   }
2282 
2283   // Get another chunk
2284   size_t chunk_word_size = calc_chunk_size(word_size);
2285   Metachunk* next = get_new_chunk(chunk_word_size);
2286 
2287   MetaWord* mem = NULL;
2288 
2289   // If a chunk was available, add it to the in-use chunk list
2290   // and do an allocation from it.
2291   if (next != NULL) {
2292     // Add to this manager's list of chunks in use.
2293     add_chunk(next, false);
2294     mem = next->allocate(word_size);
2295   }
2296 
2297   // Track metaspace memory usage statistic.
2298   track_metaspace_memory_usage();
2299 
2300   return mem;
2301 }
2302 
2303 void SpaceManager::print_on(outputStream* st) const {
2304 
2305   for (ChunkIndex i = ZeroIndex;
2306        i < NumberOfInUseLists ;
2307        i = next_chunk_index(i) ) {
2308     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
2309                  p2i(chunks_in_use(i)),
2310                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2311   }
2312   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2313                " Humongous " SIZE_FORMAT,
2314                sum_waste_in_chunks_in_use(SmallIndex),
2315                sum_waste_in_chunks_in_use(MediumIndex),
2316                sum_waste_in_chunks_in_use(HumongousIndex));
2317   // block free lists
2318   if (block_freelists() != NULL) {
2319     st->print_cr("total in block free lists " SIZE_FORMAT,
2320       block_freelists()->total_size());
2321   }
2322 }
2323 
2324 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2325                            Mutex* lock) :
2326   _mdtype(mdtype),
2327   _allocated_blocks_words(0),
2328   _allocated_chunks_words(0),
2329   _allocated_chunks_count(0),
2330   _block_freelists(NULL),
2331   _lock(lock)
2332 {
2333   initialize();
2334 }
2335 
2336 void SpaceManager::inc_size_metrics(size_t words) {
2337   assert_lock_strong(SpaceManager::expand_lock());
2338   // Total of allocated Metachunks and allocated Metachunks count
2339   // for each SpaceManager
2340   _allocated_chunks_words = _allocated_chunks_words + words;
2341   _allocated_chunks_count++;
2342   // Global total of capacity in allocated Metachunks
2343   MetaspaceAux::inc_capacity(mdtype(), words);
2344   // Global total of allocated Metablocks.
2345   // used_words_slow() includes the overhead in each
2346   // Metachunk so include it in the used when the
2347   // Metachunk is first added (so only added once per
2348   // Metachunk).
2349   MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2350 }
2351 
2352 void SpaceManager::inc_used_metrics(size_t words) {
2353   // Add to the per SpaceManager total
2354   Atomic::add_ptr(words, &_allocated_blocks_words);
2355   // Add to the global total
2356   MetaspaceAux::inc_used(mdtype(), words);
2357 }
2358 
2359 void SpaceManager::dec_total_from_size_metrics() {
2360   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2361   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2362   // Also deduct the overhead per Metachunk
2363   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2364 }
2365 
2366 void SpaceManager::initialize() {
2367   Metadebug::init_allocation_fail_alot_count();
2368   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2369     _chunks_in_use[i] = NULL;
2370   }
2371   _current_chunk = NULL;
2372   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
2373 }
2374 
2375 SpaceManager::~SpaceManager() {
2376   // The call below takes this->_lock, which can't be done while holding the expand_lock().
2377   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2378          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2379          " allocated_chunks_words() " SIZE_FORMAT,
2380          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2381 
2382   MutexLockerEx fcl(SpaceManager::expand_lock(),
2383                     Mutex::_no_safepoint_check_flag);
2384 
2385   chunk_manager()->slow_locked_verify();
2386 
2387   dec_total_from_size_metrics();
2388 
2389   Log(gc, metaspace, freelist) log;
2390   if (log.is_trace()) {
2391     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
2392     ResourceMark rm;
2393     locked_print_chunks_in_use_on(log.trace_stream());
2394     if (block_freelists() != NULL) {
2395       block_freelists()->print_on(log.trace_stream());
2396     }
2397   }
2398 
2399   // Add all the chunks in use by this space manager
2400   // to the global list of free chunks.
2401 
2402   // Follow each list of chunks-in-use and add them to the
2403   // free lists.  Each list is NULL terminated.
2404 
2405   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
2406     Metachunk* chunks = chunks_in_use(i);
2407     chunk_manager()->return_chunk_list(i, chunks);
2408     set_chunks_in_use(i, NULL);
2409   }
2410 
2411   chunk_manager()->slow_locked_verify();
2412 
2413   if (_block_freelists != NULL) {
2414     delete _block_freelists;
2415   }
2416 }
2417 
2418 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2419   assert_lock_strong(_lock);
2420   // Allocations and deallocations are in raw_word_size
2421   size_t raw_word_size = get_allocation_word_size(word_size);
2422   // Lazily create a block_freelist
2423   if (block_freelists() == NULL) {
2424     _block_freelists = new BlockFreelist();
2425   }
2426   block_freelists()->return_block(p, raw_word_size);
2427 }
2428 
2429 // Adds a chunk to the list of chunks in use.
2430 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2431 
2432   assert(new_chunk != NULL, "Should not be NULL");
2433   assert(new_chunk->next() == NULL, "Should not be on a list");
2434 
2435   new_chunk->reset_empty();
2436 
2437   // Find the correct list and set the current
2438   // chunk for that list.
2439   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
2440 
2441   if (index != HumongousIndex) {
2442     retire_current_chunk();
2443     set_current_chunk(new_chunk);
2444     new_chunk->set_next(chunks_in_use(index));
2445     set_chunks_in_use(index, new_chunk);
2446   } else {
2447     // For null class loader data and DumpSharedSpaces, the first chunk isn't
2448     // small, so small will be null.  Link this first chunk as the current
2449     // chunk.
2450     if (make_current) {
2451       // Set as the current chunk but otherwise treat as a humongous chunk.
2452       set_current_chunk(new_chunk);
2453     }
2454     // Link at head.  The _current_chunk only points to a humongous chunk for
2455     // the null class loader metaspace (class and data virtual space managers),
2456     // and even then it will not point to the tail of the humongous chunks
2457     // list.
2458     new_chunk->set_next(chunks_in_use(HumongousIndex));
2459     set_chunks_in_use(HumongousIndex, new_chunk);
2460 
2461     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2462   }
2463 
2464   // Add to the running sum of capacity
2465   inc_size_metrics(new_chunk->word_size());
2466 
2467   assert(new_chunk->is_empty(), "Not ready for reuse");
2468   Log(gc, metaspace, freelist) log;
2469   if (log.is_trace()) {
2470     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT, sum_count_in_chunks_in_use());
2471     ResourceMark rm;
2472     outputStream* out = log.trace_stream();
2473     new_chunk->print_on(out);
2474     chunk_manager()->locked_print_free_chunks(out);
2475   }
2476 }
2477 
2478 void SpaceManager::retire_current_chunk() {
2479   if (current_chunk() != NULL) {
2480     size_t remaining_words = current_chunk()->free_word_size();
2481     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
2482       MetaWord* ptr = current_chunk()->allocate(remaining_words);
2483       deallocate(ptr, remaining_words);
2484       inc_used_metrics(remaining_words);
2485     }
2486   }
2487 }
2488 
2489 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
2490   // Get a chunk from the chunk freelist
2491   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
2492 
2493   if (next == NULL) {
2494     next = vs_list()->get_new_chunk(chunk_word_size,
2495                                     medium_chunk_bunch());
2496   }
2497 
2498   Log(gc, metaspace, alloc) log;
2499   if (log.is_debug() && next != NULL &&
2500       SpaceManager::is_humongous(next->word_size())) {
2501     log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());
2502   }
2503 
2504   return next;
2505 }
2506 
2507 /*
2508  * The policy is to allocate up to _small_chunk_limit small chunks
2509  * after which only medium chunks are allocated.  This is done to
2510  * reduce fragmentation.  In some cases, this can result in a lot
2511  * of small chunks being allocated to the point where it's not
2512  * possible to expand.  If this happens, there may be no medium chunks
2513  * available and OOME would be thrown.  Instead of doing that,
2514  * if the allocation request size fits in a small chunk, an attempt
2515  * will be made to allocate a small chunk.
2516  */
2517 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
2518   size_t raw_word_size = get_allocation_word_size(word_size);
2519 
2520   if (raw_word_size + Metachunk::overhead() > small_chunk_size()) {
2521     return NULL;
2522   }
2523 
2524   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2525   MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag);
2526 
2527   Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size());
2528 
2529   MetaWord* mem = NULL;
2530 
2531   if (chunk != NULL) {
2532     // Add chunk to the in-use chunk list and do an allocation from it.
2533     // Add to this manager's list of chunks in use.
2534     add_chunk(chunk, false);
2535     mem = chunk->allocate(raw_word_size);
2536 
2537     inc_used_metrics(raw_word_size);
2538 
2539     // Track metaspace memory usage statistic.
2540     track_metaspace_memory_usage();
2541   }
2542 
2543   return mem;
2544 }
2545 
2546 MetaWord* SpaceManager::allocate(size_t word_size) {
2547   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2548   size_t raw_word_size = get_allocation_word_size(word_size);
2549   BlockFreelist* fl =  block_freelists();
2550   MetaWord* p = NULL;
2551   // Allocation from the dictionary is expensive in the sense that
2552   // the dictionary has to be searched for a size.  Don't allocate
2553   // from the dictionary until it starts to get fat.  Is this
2554   // a reasonable policy?  Maybe a skinny dictionary is fast enough
2555   // for allocations.  Do some profiling.  JJJ
2556   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
2557     p = fl->get_block(raw_word_size);
2558   }
2559   if (p == NULL) {
2560     p = allocate_work(raw_word_size);
2561   }
2562 
2563   return p;
2564 }
2565 
2566 // Returns the address of space allocated for "word_size".
2567 // This method does not know about blocks (Metablocks).
2568 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2569   assert_lock_strong(_lock);
2570 #ifdef ASSERT
2571   if (Metadebug::test_metadata_failure()) {
2572     return NULL;
2573   }
2574 #endif
2575   // Is there space in the current chunk?
2576   MetaWord* result = NULL;
2577 
2578   // For DumpSharedSpaces, only allocate out of the current chunk which is
2579   // never null because we gave it the size we wanted.  Caller reports out
2580   // of memory if this returns null.
2581   if (DumpSharedSpaces) {
2582     assert(current_chunk() != NULL, "should never happen");
2583     inc_used_metrics(word_size);
2584     return current_chunk()->allocate(word_size); // caller handles null result
2585   }
2586 
2587   if (current_chunk() != NULL) {
2588     result = current_chunk()->allocate(word_size);
2589   }
2590 
2591   if (result == NULL) {
2592     result = grow_and_allocate(word_size);
2593   }
2594 
2595   if (result != NULL) {
2596     inc_used_metrics(word_size);
2597     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2598            "Head of the list is being allocated");
2599   }
2600 
2601   return result;
2602 }
2603 
2604 void SpaceManager::verify() {
2605   // If there are blocks in the dictionary, then
2606   // verification of chunks does not work since
2607   // being in the dictionary alters a chunk.
2608   if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
2609     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2610       Metachunk* curr = chunks_in_use(i);
2611       while (curr != NULL) {
2612         curr->verify();
2613         verify_chunk_size(curr);
2614         curr = curr->next();
2615       }
2616     }
2617   }
2618 }
2619 
2620 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2621   assert(is_humongous(chunk->word_size()) ||
2622          chunk->word_size() == medium_chunk_size() ||
2623          chunk->word_size() == small_chunk_size() ||
2624          chunk->word_size() == specialized_chunk_size(),
2625          "Chunk size is wrong");
2626   return;
2627 }
2628 
2629 #ifdef ASSERT
2630 void SpaceManager::verify_allocated_blocks_words() {
2631   // Verification is only guaranteed at a safepoint.
2632   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2633     "Verification can fail if the application is running");
2634   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2635          "allocation total is not consistent " SIZE_FORMAT
2636          " vs " SIZE_FORMAT,
2637          allocated_blocks_words(), sum_used_in_chunks_in_use());
2638 }
2639 
2640 #endif
2641 
2642 void SpaceManager::dump(outputStream* const out) const {
2643   size_t curr_total = 0;
2644   size_t waste = 0;
2645   uint i = 0;
2646   size_t used = 0;
2647   size_t capacity = 0;
2648 
2649   // Add up statistics for all chunks in this SpaceManager.
2650   for (ChunkIndex index = ZeroIndex;
2651        index < NumberOfInUseLists;
2652        index = next_chunk_index(index)) {
2653     for (Metachunk* curr = chunks_in_use(index);
2654          curr != NULL;
2655          curr = curr->next()) {
2656       out->print("%d) ", i++);
2657       curr->print_on(out);
2658       curr_total += curr->word_size();
2659       used += curr->used_word_size();
2660       capacity += curr->word_size();
2661       waste += curr->free_word_size() + curr->overhead();
2662     }
2663   }
2664 
2665   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2666     if (block_freelists() != NULL) block_freelists()->print_on(out);
2667   }
2668 
2669   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2670   // Free space isn't wasted.
2671   waste -= free;
2672 
2673   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2674                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2675                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2676 }
2677 
2678 // MetaspaceAux
2679 
2680 
2681 size_t MetaspaceAux::_capacity_words[] = {0, 0};
2682 size_t MetaspaceAux::_used_words[] = {0, 0};
2683 
2684 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2685   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2686   return list == NULL ? 0 : list->free_bytes();
2687 }
2688 
2689 size_t MetaspaceAux::free_bytes() {
2690   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2691 }
2692 
2693 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2694   assert_lock_strong(SpaceManager::expand_lock());
2695   assert(words <= capacity_words(mdtype),
2696          "About to decrement below 0: words " SIZE_FORMAT
2697          " is greater than _capacity_words[%u] " SIZE_FORMAT,
2698          words, mdtype, capacity_words(mdtype));
2699   _capacity_words[mdtype] -= words;
2700 }
2701 
2702 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2703   assert_lock_strong(SpaceManager::expand_lock());
2704   // Needs to be atomic
2705   _capacity_words[mdtype] += words;
2706 }
2707 
2708 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2709   assert(words <= used_words(mdtype),
2710          "About to decrement below 0: words " SIZE_FORMAT
2711          " is greater than _used_words[%u] " SIZE_FORMAT,
2712          words, mdtype, used_words(mdtype));
2713   // For CMS deallocation of the Metaspaces occurs during the
2714   // sweep which is a concurrent phase.  Protection by the expand_lock()
2715   // is not enough since allocation is on a per Metaspace basis
2716   // and protected by the Metaspace lock.
2717   jlong minus_words = (jlong) - (jlong) words;
2718   Atomic::add_ptr(minus_words, &_used_words[mdtype]);
2719 }
2720 
2721 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2722   // _used_words tracks allocations for
2723   // each piece of metadata.  Those allocations are
2724   // generally done concurrently by different application
2725   // threads so must be done atomically.
2726   Atomic::add_ptr(words, &_used_words[mdtype]);
2727 }
2728 
2729 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2730   size_t used = 0;
2731   ClassLoaderDataGraphMetaspaceIterator iter;
2732   while (iter.repeat()) {
2733     Metaspace* msp = iter.get_next();
2734     // Sum allocated_blocks_words for each metaspace
2735     if (msp != NULL) {
2736       used += msp->used_words_slow(mdtype);
2737     }
2738   }
2739   return used * BytesPerWord;
2740 }
2741 
2742 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2743   size_t free = 0;
2744   ClassLoaderDataGraphMetaspaceIterator iter;
2745   while (iter.repeat()) {
2746     Metaspace* msp = iter.get_next();
2747     if (msp != NULL) {
2748       free += msp->free_words_slow(mdtype);
2749     }
2750   }
2751   return free * BytesPerWord;
2752 }
2753 
2754 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2755   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2756     return 0;
2757   }
2758   // Don't count the space in the freelists.  That space will be
2759   // added to the capacity calculation as needed.
2760   size_t capacity = 0;
2761   ClassLoaderDataGraphMetaspaceIterator iter;
2762   while (iter.repeat()) {
2763     Metaspace* msp = iter.get_next();
2764     if (msp != NULL) {
2765       capacity += msp->capacity_words_slow(mdtype);
2766     }
2767   }
2768   return capacity * BytesPerWord;
2769 }
2770 
2771 size_t MetaspaceAux::capacity_bytes_slow() {
2772 #ifdef PRODUCT
2773   // Use capacity_bytes() in PRODUCT instead of this function.
2774   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2775 #endif
2776   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2777   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2778   assert(capacity_bytes() == class_capacity + non_class_capacity,
2779          "bad accounting: capacity_bytes() " SIZE_FORMAT
2780          " class_capacity + non_class_capacity " SIZE_FORMAT
2781          " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2782          capacity_bytes(), class_capacity + non_class_capacity,
2783          class_capacity, non_class_capacity);
2784 
2785   return class_capacity + non_class_capacity;
2786 }
2787 
2788 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2789   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2790   return list == NULL ? 0 : list->reserved_bytes();
2791 }
2792 
2793 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2794   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2795   return list == NULL ? 0 : list->committed_bytes();
2796 }
2797 
2798 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2799 
2800 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2801   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2802   if (chunk_manager == NULL) {
2803     return 0;
2804   }
2805   chunk_manager->slow_verify();
2806   return chunk_manager->free_chunks_total_words();
2807 }
2808 
2809 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2810   return free_chunks_total_words(mdtype) * BytesPerWord;
2811 }
2812 
2813 size_t MetaspaceAux::free_chunks_total_words() {
2814   return free_chunks_total_words(Metaspace::ClassType) +
2815          free_chunks_total_words(Metaspace::NonClassType);
2816 }
2817 
2818 size_t MetaspaceAux::free_chunks_total_bytes() {
2819   return free_chunks_total_words() * BytesPerWord;
2820 }
2821 
2822 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2823   return Metaspace::get_chunk_manager(mdtype) != NULL;
2824 }
2825 
2826 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
2827   if (!has_chunk_free_list(mdtype)) {
2828     return MetaspaceChunkFreeListSummary();
2829   }
2830 
2831   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
2832   return cm->chunk_free_list_summary();
2833 }
2834 
2835 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2836   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
2837                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
2838 }
2839 
2840 void MetaspaceAux::print_on(outputStream* out) {
2841   Metaspace::MetadataType nct = Metaspace::NonClassType;
2842 
2843   out->print_cr(" Metaspace       "
2844                 "used "      SIZE_FORMAT "K, "
2845                 "capacity "  SIZE_FORMAT "K, "
2846                 "committed " SIZE_FORMAT "K, "
2847                 "reserved "  SIZE_FORMAT "K",
2848                 used_bytes()/K,
2849                 capacity_bytes()/K,
2850                 committed_bytes()/K,
2851                 reserved_bytes()/K);
2852 
2853   if (Metaspace::using_class_space()) {
2854     Metaspace::MetadataType ct = Metaspace::ClassType;
2855     out->print_cr("  class space    "
2856                   "used "      SIZE_FORMAT "K, "
2857                   "capacity "  SIZE_FORMAT "K, "
2858                   "committed " SIZE_FORMAT "K, "
2859                   "reserved "  SIZE_FORMAT "K",
2860                   used_bytes(ct)/K,
2861                   capacity_bytes(ct)/K,
2862                   committed_bytes(ct)/K,
2863                   reserved_bytes(ct)/K);
2864   }
2865 }
2866 
2867 // Print information for class space and data space separately.
2868 // This is almost the same as above.
2869 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2870   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2871   size_t capacity_bytes = capacity_bytes_slow(mdtype);
2872   size_t used_bytes = used_bytes_slow(mdtype);
2873   size_t free_bytes = free_bytes_slow(mdtype);
2874   size_t used_and_free = used_bytes + free_bytes +
2875                            free_chunks_capacity_bytes;
2876   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
2877              "K + unused in chunks " SIZE_FORMAT "K + "
2878              "capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2879              "K capacity in allocated chunks " SIZE_FORMAT "K",
2880              used_bytes / K,
2881              free_bytes / K,
2882              free_chunks_capacity_bytes / K,
2883              used_and_free / K,
2884              capacity_bytes / K);
2885   // Accounting can only be correct if we got the values during a safepoint
2886   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2887 }
2888 
2889 // Print total fragmentation for class metaspaces
2890 void MetaspaceAux::print_class_waste(outputStream* out) {
2891   assert(Metaspace::using_class_space(), "class metaspace not used");
2892   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2893   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2894   ClassLoaderDataGraphMetaspaceIterator iter;
2895   while (iter.repeat()) {
2896     Metaspace* msp = iter.get_next();
2897     if (msp != NULL) {
2898       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2899       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2900       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2901       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2902       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2903       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2904       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2905     }
2906   }
2907   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2908                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2909                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2910                 "large count " SIZE_FORMAT,
2911                 cls_specialized_count, cls_specialized_waste,
2912                 cls_small_count, cls_small_waste,
2913                 cls_medium_count, cls_medium_waste, cls_humongous_count);
2914 }
2915 
2916 // Print total fragmentation for data and class metaspaces separately
2917 void MetaspaceAux::print_waste(outputStream* out) {
2918   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2919   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2920 
2921   ClassLoaderDataGraphMetaspaceIterator iter;
2922   while (iter.repeat()) {
2923     Metaspace* msp = iter.get_next();
2924     if (msp != NULL) {
2925       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2926       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2927       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2928       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2929       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2930       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2931       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2932     }
2933   }
2934   out->print_cr("Total fragmentation waste (words) doesn't count free space");
2935   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2936                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2937                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2938                         "large count " SIZE_FORMAT,
2939              specialized_count, specialized_waste, small_count,
2940              small_waste, medium_count, medium_waste, humongous_count);
2941   if (Metaspace::using_class_space()) {
2942     print_class_waste(out);
2943   }
2944 }
2945 
2946 // Dump global metaspace things from the end of ClassLoaderDataGraph
2947 void MetaspaceAux::dump(outputStream* out) {
2948   out->print_cr("All Metaspace:");
2949   out->print("data space: "); print_on(out, Metaspace::NonClassType);
2950   out->print("class space: "); print_on(out, Metaspace::ClassType);
2951   print_waste(out);
2952 }
2953 
2954 void MetaspaceAux::verify_free_chunks() {
2955   Metaspace::chunk_manager_metadata()->verify();
2956   if (Metaspace::using_class_space()) {
2957     Metaspace::chunk_manager_class()->verify();
2958   }
2959 }
2960 
2961 void MetaspaceAux::verify_capacity() {
2962 #ifdef ASSERT
2963   size_t running_sum_capacity_bytes = capacity_bytes();
2964   // For purposes of the running sum of capacity, verify against capacity
2965   size_t capacity_in_use_bytes = capacity_bytes_slow();
2966   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2967          "capacity_words() * BytesPerWord " SIZE_FORMAT
2968          " capacity_bytes_slow() " SIZE_FORMAT,
2969          running_sum_capacity_bytes, capacity_in_use_bytes);
2970   for (Metaspace::MetadataType i = Metaspace::ClassType;
2971        i < Metaspace::MetadataTypeCount;
2972        i = (Metaspace::MetadataType)(i + 1)) {
2973     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2974     assert(capacity_bytes(i) == capacity_in_use_bytes,
2975            "capacity_bytes(%u) " SIZE_FORMAT
2976            " capacity_bytes_slow(%u) " SIZE_FORMAT,
2977            i, capacity_bytes(i), i, capacity_in_use_bytes);
2978   }
2979 #endif
2980 }
2981 
2982 void MetaspaceAux::verify_used() {
2983 #ifdef ASSERT
2984   size_t running_sum_used_bytes = used_bytes();
2985   // For purposes of the running sum of used, verify against used
2986   size_t used_in_use_bytes = used_bytes_slow();
2987   assert(used_bytes() == used_in_use_bytes,
2988          "used_bytes() " SIZE_FORMAT
2989          " used_bytes_slow() " SIZE_FORMAT,
2990          used_bytes(), used_in_use_bytes);
2991   for (Metaspace::MetadataType i = Metaspace::ClassType;
2992        i < Metaspace::MetadataTypeCount;
2993        i = (Metaspace::MetadataType)(i + 1)) {
2994     size_t used_in_use_bytes = used_bytes_slow(i);
2995     assert(used_bytes(i) == used_in_use_bytes,
2996            "used_bytes(%u) " SIZE_FORMAT
2997            " used_bytes_slow(%u) " SIZE_FORMAT,
2998            i, used_bytes(i), i, used_in_use_bytes);
2999   }
3000 #endif
3001 }
3002 
3003 void MetaspaceAux::verify_metrics() {
3004   verify_capacity();
3005   verify_used();
3006 }
3007 
3008 
3009 // Metaspace methods
3010 
3011 size_t Metaspace::_first_chunk_word_size = 0;
3012 size_t Metaspace::_first_class_chunk_word_size = 0;
3013 
3014 size_t Metaspace::_commit_alignment = 0;
3015 size_t Metaspace::_reserve_alignment = 0;
3016 
3017 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
3018   initialize(lock, type);
3019 }
3020 
3021 Metaspace::~Metaspace() {
3022   delete _vsm;
3023   if (using_class_space()) {
3024     delete _class_vsm;
3025   }
3026 }
3027 
3028 VirtualSpaceList* Metaspace::_space_list = NULL;
3029 VirtualSpaceList* Metaspace::_class_space_list = NULL;
3030 
3031 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
3032 ChunkManager* Metaspace::_chunk_manager_class = NULL;
3033 
3034 #define VIRTUALSPACEMULTIPLIER 2
3035 
3036 #ifdef _LP64
3037 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
3038 
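     // Background (summary of existing behavior): a compressed class pointer is
     // decoded as
     //   Klass* = narrow_klass_base + (narrow_klass << narrow_klass_shift)
     // With a 32-bit narrow value this spans 4G of addresses at shift 0
     // (UnscaledClassSpaceMax) and up to 32G at shift LogKlassAlignmentInBytes
     // (3 on typical builds). The routine below picks the smallest shift that
     // still covers the class space plus the CDS archive, if any.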
3039 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
3040   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
3041   // narrow_klass_base is the lower of the metaspace base and the cds base
3042   // (if cds is enabled).  The narrow_klass_shift depends on the distance
3043   // between the lower base and higher address.
3044   address lower_base;
3045   address higher_address;
3046 #if INCLUDE_CDS
3047   if (UseSharedSpaces) {
3048     higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
3049                           (address)(metaspace_base + compressed_class_space_size()));
3050     lower_base = MIN2(metaspace_base, cds_base);
3051   } else
3052 #endif
3053   {
3054     higher_address = metaspace_base + compressed_class_space_size();
3055     lower_base = metaspace_base;
3056 
3057     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
3058     // If compressed class space fits in lower 32G, we don't need a base.
3059     if (higher_address <= (address)klass_encoding_max) {
3060       lower_base = 0; // Effectively lower base is zero.
3061     }
3062   }
3063 
3064   Universe::set_narrow_klass_base(lower_base);
3065 
3066   if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
3067     Universe::set_narrow_klass_shift(0);
3068   } else {
3069     assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
3070     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
3071   }
3072   AOTLoader::set_narrow_klass_shift();
3073 }
3074 
3075 #if INCLUDE_CDS
3076 // Return TRUE if the specified metaspace_base and cds_base are close enough
3077 // to work with compressed klass pointers.
3078 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
3079   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
3080   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3081   address lower_base = MIN2((address)metaspace_base, cds_base);
3082   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
3083                                 (address)(metaspace_base + compressed_class_space_size()));
3084   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
3085 }
3086 #endif
3087 
3088 // Try to allocate the metaspace at the requested addr.
3089 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
3090   assert(using_class_space(), "called improperly");
3091   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3092   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
3093          "Metaspace size is too big");
3094   assert_is_aligned(requested_addr, _reserve_alignment);
3095   assert_is_aligned(cds_base, _reserve_alignment);
3096   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
3097 
3098   // Don't use large pages for the class space.
3099   bool large_pages = false;
3100 
3101 #if !(defined(AARCH64) || defined(AIX))
3102   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
3103                                              _reserve_alignment,
3104                                              large_pages,
3105                                              requested_addr);
3106 #else // AARCH64 || AIX
3107   ReservedSpace metaspace_rs;
3108 
3109   // Our compressed klass pointers may fit nicely into the lower 32
3110   // bits.
3111   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
3112     metaspace_rs = ReservedSpace(compressed_class_space_size(),
3113                                  _reserve_alignment,
3114                                  large_pages,
3115                                  requested_addr);
3116   }
3117 
3118   if (! metaspace_rs.is_reserved()) {
3119     // Aarch64: Try to align metaspace so that we can decode a compressed
3120     // klass with a single MOVK instruction.  We can do this iff the
3121     // compressed class base is a multiple of 4G.
3122     // Aix: Search for a place where we can find memory. If we need to load
3123     // the base, 4G alignment is helpful, too.
3124     size_t increment = AARCH64_ONLY(4*)G;
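         // That is, 4G steps on AArch64 (AARCH64_ONLY expands to its argument
         // there) and 1G steps on AIX, where the macro expands to nothing.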
3125     for (char *a = align_up(requested_addr, increment);
3126          a < (char*)(1024*G);
3127          a += increment) {
3128       if (a == (char *)(32*G)) {
3129         // Go faster from here on. Zero-based is no longer possible.
3130         increment = 4*G;
3131       }
3132 
3133 #if INCLUDE_CDS
3134       if (UseSharedSpaces
3135           && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
3136         // We failed to find an aligned base that will reach.  Fall
3137         // back to using our requested addr.
3138         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3139                                      _reserve_alignment,
3140                                      large_pages,
3141                                      requested_addr);
3142         break;
3143       }
3144 #endif
3145 
3146       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3147                                    _reserve_alignment,
3148                                    large_pages,
3149                                    a);
3150       if (metaspace_rs.is_reserved())
3151         break;
3152     }
3153   }
3154 
3155 #endif // AARCH64 || AIX
3156 
3157   if (!metaspace_rs.is_reserved()) {
3158 #if INCLUDE_CDS
3159     if (UseSharedSpaces) {
3160       size_t increment = align_up(1*G, _reserve_alignment);
3161 
3162       // Keep trying to allocate the metaspace, increasing the requested_addr
3163       // by 1GB each time, until we reach an address that will no longer allow
3164       // use of CDS with compressed klass pointers.
3165       char *addr = requested_addr;
3166       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3167              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3168         addr = addr + increment;
3169         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3170                                      _reserve_alignment, large_pages, addr);
3171       }
3172     }
3173 #endif
3174     // If no successful allocation then try to allocate the space anywhere.  If
3175     // that fails then OOM doom.  At this point we cannot try allocating the
3176     // metaspace as if UseCompressedClassPointers is off because too much
3177     // initialization has happened that depends on UseCompressedClassPointers.
3178     // So, UseCompressedClassPointers cannot be turned off at this point.
3179     if (!metaspace_rs.is_reserved()) {
3180       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3181                                    _reserve_alignment, large_pages);
3182       if (!metaspace_rs.is_reserved()) {
3183         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
3184                                               compressed_class_space_size()));
3185       }
3186     }
3187   }
3188 
3189   // If we got here then the metaspace got allocated.
3190   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3191 
3192 #if INCLUDE_CDS
3193   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3194   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3195     FileMapInfo::stop_sharing_and_unmap(
3196         "Could not allocate metaspace at a compatible address");
3197   }
3198 #endif
3199   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3200                                   UseSharedSpaces ? (address)cds_base : 0);
3201 
3202   initialize_class_space(metaspace_rs);
3203 
3204   if (log_is_enabled(Trace, gc, metaspace)) {
3205     Log(gc, metaspace) log;
3206     ResourceMark rm;
3207     print_compressed_class_space(log.trace_stream(), requested_addr);
3208   }
3209 }
3210 
3211 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3212   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3213                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3214   if (_class_space_list != NULL) {
3215     address base = (address)_class_space_list->current_virtual_space()->bottom();
3216     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3217                  compressed_class_space_size(), p2i(base));
3218     if (requested_addr != 0) {
3219       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3220     }
3221     st->cr();
3222   }
3223 }
3224 
3225 // For UseCompressedClassPointers the class space is reserved above the top of
3226 // the Java heap.  The argument passed in is at the base of the compressed space.
3227 void Metaspace::initialize_class_space(ReservedSpace rs) {
3228   // The reserved space size may be bigger because of alignment, esp with UseLargePages
3229   assert(rs.size() >= CompressedClassSpaceSize,
3230          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
3231   assert(using_class_space(), "Must be using class space");
3232   _class_space_list = new VirtualSpaceList(rs);
3233   _chunk_manager_class = new ChunkManager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3234 
3235   if (!_class_space_list->initialization_succeeded()) {
3236     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3237   }
3238 }
3239 
3240 #endif
3241 
3242 void Metaspace::ergo_initialize() {
3243   if (DumpSharedSpaces) {
3244     // Using large pages when dumping the shared archive is currently not implemented.
3245     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3246   }
3247 
3248   size_t page_size = os::vm_page_size();
3249   if (UseLargePages && UseLargePagesInMetaspace) {
3250     page_size = os::large_page_size();
3251   }
3252 
3253   _commit_alignment  = page_size;
3254   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
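     // For example (illustrative, platform dependent): with 4K pages and a 64K
     // allocation granularity, the commit alignment is 4K and the reserve
     // alignment is 64K; with 2M large pages in metaspace, both become 2M.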
3255 
3256   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
3257   // override if MaxMetaspaceSize was set on the command line or not.
3258   // This information is needed later to conform to the specification of the
3259   // java.lang.management.MemoryUsage API.
3260   //
3261   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3262   // globals.hpp to the aligned value, but this is not possible, since the
3263   // alignment depends on other flags being parsed.
3264   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3265 
3266   if (MetaspaceSize > MaxMetaspaceSize) {
3267     MetaspaceSize = MaxMetaspaceSize;
3268   }
3269 
3270   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
3271 
3272   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3273 
3274   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3275   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3276 
3277   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3278   set_compressed_class_space_size(CompressedClassSpaceSize);
3279 }
3280 
3281 void Metaspace::global_initialize() {
3282   MetaspaceGC::initialize();
3283 
3284   // Initialize the alignment for shared spaces.
3285   int max_alignment = os::vm_allocation_granularity();
3286   size_t cds_total = 0;
3287 
3288   MetaspaceShared::set_max_alignment(max_alignment);
3289 
3290   if (DumpSharedSpaces) {
3291 #if INCLUDE_CDS
3292     MetaspaceShared::estimate_regions_size();
3293 
3294     SharedReadOnlySize  = align_up(SharedReadOnlySize,  max_alignment);
3295     SharedReadWriteSize = align_up(SharedReadWriteSize, max_alignment);
3296     SharedMiscDataSize  = align_up(SharedMiscDataSize,  max_alignment);
3297     SharedMiscCodeSize  = align_up(SharedMiscCodeSize,  max_alignment);
3298 
3299     // Initialize with the sum of the shared space sizes.  The read-only
3300     // and read write metaspace chunks will be allocated out of this and the
3301     // remainder is the misc code and data chunks.
3302     cds_total = FileMapInfo::shared_spaces_size();
3303     cds_total = align_up(cds_total, _reserve_alignment);
3304     _space_list = new VirtualSpaceList(cds_total/wordSize);
3305     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3306 
3307     if (!_space_list->initialization_succeeded()) {
3308       vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3309     }
3310 
3311 #ifdef _LP64
3312     if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3313       vm_exit_during_initialization("Unable to dump shared archive.",
3314           err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3315                   SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3316                   "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(),
3317                   cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3318     }
3319 
3320     // Set the compressed klass pointer base so that decoding of these pointers works
3321     // properly when creating the shared archive.
3322     assert(UseCompressedOops && UseCompressedClassPointers,
3323       "UseCompressedOops and UseCompressedClassPointers must be set");
3324     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3325     log_develop_trace(gc, metaspace)("Setting_narrow_klass_base to Address: " PTR_FORMAT,
3326                                      p2i(_space_list->current_virtual_space()->bottom()));
3327 
3328     Universe::set_narrow_klass_shift(0);
3329 #endif // _LP64
3330 #endif // INCLUDE_CDS
3331   } else {
3332 #if INCLUDE_CDS
3333     if (UseSharedSpaces) {
3334       // If using shared space, open the file that contains the shared space
3335       // and map in the memory before initializing the rest of metaspace (so
3336       // the addresses don't conflict)
3337       address cds_address = NULL;
3338       FileMapInfo* mapinfo = new FileMapInfo();
3339 
3340       // Open the shared archive file, read and validate the header. If
3341       // initialization fails, shared spaces [UseSharedSpaces] are
3342       // disabled and the file is closed.
3343       // Map in spaces now also
3344       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3345         cds_total = FileMapInfo::shared_spaces_size();
3346         cds_address = (address)mapinfo->header()->region_addr(0);
3347 #ifdef _LP64
3348         if (using_class_space()) {
3349           char* cds_end = (char*)(cds_address + cds_total);
3350           cds_end = align_up(cds_end, _reserve_alignment);
3351           // If UseCompressedClassPointers is set then allocate the metaspace area
3352           // above the heap and above the CDS area (if it exists).
3353           allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3354           // Map the shared string space after compressed pointers
3355           // because it relies on compressed class pointers setting to work
3356           mapinfo->map_string_regions();
3357         }
3358 #endif // _LP64
3359       } else {
3360         assert(!mapinfo->is_open() && !UseSharedSpaces,
3361                "archive file not closed or shared spaces not disabled.");
3362       }
3363     }
3364 #endif // INCLUDE_CDS
3365 
3366 #ifdef _LP64
3367     if (!UseSharedSpaces && using_class_space()) {
3368       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3369       allocate_metaspace_compressed_klass_ptrs(base, 0);
3370     }
3371 #endif // _LP64
3372 
3373     // Initialize these before initializing the VirtualSpaceList
3374     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3375     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3376     // Make the first class chunk bigger than a medium chunk so it's not put
3377     // on the medium chunk list.  The next chunk will be small and progress
3378     // from there.  This size was calculated by running -version.
3379     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3380                                        (CompressedClassSpaceSize/BytesPerWord)*2);
3381     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3382     // Arbitrarily set the initial virtual space to a multiple
3383     // of the boot class loader size.
3384     size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3385     word_size = align_up(word_size, Metaspace::reserve_alignment_words());
3386 
3387     // Initialize the list of virtual spaces.
3388     _space_list = new VirtualSpaceList(word_size);
3389     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3390 
3391     if (!_space_list->initialization_succeeded()) {
3392       vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3393     }
3394   }
3395 
3396   _tracer = new MetaspaceTracer();
3397 }
3398 
3399 void Metaspace::post_initialize() {
3400   MetaspaceGC::post_initialize();
3401 }
3402 
3403 void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) {
3404   Metachunk* chunk = get_initialization_chunk(type, mdtype);
3405   if (chunk != NULL) {
3406     // Add to this manager's list of chunks in use and current_chunk().
3407     get_space_manager(mdtype)->add_chunk(chunk, true);
3408   }
3409 }
3410 
3411 Metachunk* Metaspace::get_initialization_chunk(MetaspaceType type, MetadataType mdtype) {
3412   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
3413 
3414   // Get a chunk from the chunk freelist
3415   Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3416 
3417   if (chunk == NULL) {
3418     chunk = get_space_list(mdtype)->get_new_chunk(chunk_word_size,
3419                                                   get_space_manager(mdtype)->medium_chunk_bunch());
3420   }
3421 
3422   // For dumping shared archive, report error if allocation has failed.
3423   if (DumpSharedSpaces && chunk == NULL) {
3424     report_insufficient_metaspace(MetaspaceAux::committed_bytes() + chunk_word_size * BytesPerWord);
3425   }
3426 
3427   return chunk;
3428 }
3429 
3430 void Metaspace::verify_global_initialization() {
3431   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
3432   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
3433 
3434   if (using_class_space()) {
3435     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
3436     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
3437   }
3438 }
3439 
3440 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3441   verify_global_initialization();
3442 
3443   // Allocate SpaceManager for metadata objects.
3444   _vsm = new SpaceManager(NonClassType, lock);
3445 
3446   if (using_class_space()) {
3447     // Allocate SpaceManager for classes.
3448     _class_vsm = new SpaceManager(ClassType, lock);
3449   }
3450 
3451   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3452 
3453   // Allocate chunk for metadata objects
3454   initialize_first_chunk(type, NonClassType);
3455 
3456   // Allocate chunk for class metadata objects
3457   if (using_class_space()) {
3458     initialize_first_chunk(type, ClassType);
3459   }
3460 
3461   _alloc_record_head = NULL;
3462   _alloc_record_tail = NULL;
3463 }
3464 
3465 size_t Metaspace::align_word_size_up(size_t word_size) {
3466   size_t byte_size = word_size * wordSize;
3467   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3468 }
3469 
3470 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3471   // DumpSharedSpaces doesn't use class metadata area (yet)
3472   // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3473   if (is_class_space_allocation(mdtype)) {
3474     return  class_vsm()->allocate(word_size);
3475   } else {
3476     return  vsm()->allocate(word_size);
3477   }
3478 }
3479 
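     // Called after a plain allocate() has failed: raise the capacity-until-GC
     // threshold (the metaspace GC high-water mark) and retry the allocation
     // under the new limit.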
3480 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3481   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3482   assert(delta_bytes > 0, "Must be");
3483 
3484   size_t before = 0;
3485   size_t after = 0;
3486   MetaWord* res;
3487   bool incremented;
3488 
3489   // Each thread increments the HWM at most once. Even if the thread fails to increment
3490   // the HWM, an allocation is still attempted. This is because another thread must then
3491   // have incremented the HWM and therefore the allocation might still succeed.
3492   do {
3493     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
3494     res = allocate(word_size, mdtype);
3495   } while (!incremented && res == NULL);
3496 
3497   if (incremented) {
3498     tracer()->report_gc_threshold(before, after,
3499                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3500     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
3501   }
3502 
3503   return res;
3504 }
3505 
3506 // Space allocated in the Metaspace.  This may
3507 // be across several metadata virtual spaces.
3508 char* Metaspace::bottom() const {
3509   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3510   return (char*)vsm()->current_chunk()->bottom();
3511 }
3512 
3513 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3514   if (mdtype == ClassType) {
3515     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3516   } else {
3517     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3518   }
3519 }
3520 
3521 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3522   if (mdtype == ClassType) {
3523     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3524   } else {
3525     return vsm()->sum_free_in_chunks_in_use();
3526   }
3527 }
3528 
3529 // Space capacity in the Metaspace.  It includes
3530 // space in the list of chunks from which allocations
3531 // have been made. Don't include space in the global freelist and
3532 // in the space available in the dictionary which
3533 // is already counted in some chunk.
3534 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3535   if (mdtype == ClassType) {
3536     return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3537   } else {
3538     return vsm()->sum_capacity_in_chunks_in_use();
3539   }
3540 }
3541 
3542 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3543   return used_words_slow(mdtype) * BytesPerWord;
3544 }
3545 
3546 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3547   return capacity_words_slow(mdtype) * BytesPerWord;
3548 }
3549 
3550 size_t Metaspace::allocated_blocks_bytes() const {
3551   return vsm()->allocated_blocks_bytes() +
3552       (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
3553 }
3554 
3555 size_t Metaspace::allocated_chunks_bytes() const {
3556   return vsm()->allocated_chunks_bytes() +
3557       (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
3558 }
3559 
3560 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3561   assert(!SafepointSynchronize::is_at_safepoint()
3562          || Thread::current()->is_VM_thread(), "should be the VM thread");
3563 
3564   if (DumpSharedSpaces && log_is_enabled(Info, cds)) {
3565     record_deallocation(ptr, vsm()->get_allocation_word_size(word_size));
3566   }
3567 
3568   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3569 
3570   if (is_class && using_class_space()) {
3571     class_vsm()->deallocate(ptr, word_size);
3572   } else {
3573     vsm()->deallocate(ptr, word_size);
3574   }
3575 }
3576 
3577 
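     // Allocation entry point used during class loading. If the initial attempt
     // fails, the fallback sequence is: trigger a metadata GC (only once
     // bootstrapping has completed), then try a small-chunk allocation, and
     // finally report a metaspace OutOfMemoryError.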
3578 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3579                               bool read_only, MetaspaceObj::Type type, TRAPS) {
3580   if (HAS_PENDING_EXCEPTION) {
3581     assert(false, "Should not allocate with exception pending");
3582     return NULL;  // caller does a CHECK_NULL too
3583   }
3584 
3585   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3586         "ClassLoaderData::the_null_class_loader_data() should have been used.");
3587 
3588   // Allocate in metaspaces without taking out a lock, because it deadlocks
3589   // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
3590   // to revisit this for application class data sharing.
3591   if (DumpSharedSpaces) {
3592     assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3593     Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3594     MetaWord* result = space->allocate(word_size, NonClassType);
3595     if (result == NULL) {
3596       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3597     }
3598     if (log_is_enabled(Info, cds)) {
3599       space->record_allocation(result, type, space->vsm()->get_allocation_word_size(word_size));
3600     }
3601 
3602     // Zero initialize.
3603     Copy::fill_to_words((HeapWord*)result, word_size, 0);
3604 
3605     return result;
3606   }
3607 
3608   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3609 
3610   // Try to allocate metadata.
3611   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3612 
3613   if (result == NULL) {
3614     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
3615 
3616     // Allocation failed.
3617     if (is_init_completed()) {
3618       // Only start a GC if the bootstrapping has completed.
3619 
3620       // Try to clean out some memory and retry.
3621       result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3622           loader_data, word_size, mdtype);
3623     }
3624   }
3625 
3626   if (result == NULL) {
3627     SpaceManager* sm;
3628     if (is_class_space_allocation(mdtype)) {
3629       sm = loader_data->metaspace_non_null()->class_vsm();
3630     } else {
3631       sm = loader_data->metaspace_non_null()->vsm();
3632     }
3633 
3634     result = sm->get_small_chunk_and_allocate(word_size);
3635 
3636     if (result == NULL) {
3637       report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3638     }
3639   }
3640 
3641   // Zero initialize.
3642   Copy::fill_to_words((HeapWord*)result, word_size, 0);
3643 
3644   return result;
3645 }
3646 
3647 size_t Metaspace::class_chunk_size(size_t word_size) {
3648   assert(using_class_space(), "Has to use class space");
3649   return class_vsm()->calc_chunk_size(word_size);
3650 }
3651 
3652 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3653   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3654 
3655   // If result is still null, we are out of memory.
3656   Log(gc, metaspace, freelist) log;
3657   if (log.is_info()) {
3658     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
3659              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
3660     ResourceMark rm;
3661     outputStream* out = log.info_stream();
3662     if (loader_data->metaspace_or_null() != NULL) {
3663       loader_data->dump(out);
3664     }
3665     MetaspaceAux::dump(out);
3666   }
3667 
3668   bool out_of_compressed_class_space = false;
3669   if (is_class_space_allocation(mdtype)) {
3670     Metaspace* metaspace = loader_data->metaspace_non_null();
3671     out_of_compressed_class_space =
3672       MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3673       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3674       CompressedClassSpaceSize;
3675   }
3676 
3677   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3678   const char* space_string = out_of_compressed_class_space ?
3679     "Compressed class space" : "Metaspace";
3680 
3681   report_java_out_of_memory(space_string);
3682 
3683   if (JvmtiExport::should_post_resource_exhausted()) {
3684     JvmtiExport::post_resource_exhausted(
3685         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3686         space_string);
3687   }
3688 
3689   if (!is_init_completed()) {
3690     vm_exit_during_initialization("OutOfMemoryError", space_string);
3691   }
3692 
3693   if (out_of_compressed_class_space) {
3694     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3695   } else {
3696     THROW_OOP(Universe::out_of_memory_error_metaspace());
3697   }
3698 }
3699 
3700 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
3701   switch (mdtype) {
3702     case Metaspace::ClassType: return "Class";
3703     case Metaspace::NonClassType: return "Metadata";
3704     default:
3705       assert(false, "Got bad mdtype: %d", (int) mdtype);
3706       return NULL;
3707   }
3708 }
3709 
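     // Shared-archive dumping bookkeeping: allocations are tracked in a singly
     // linked list of AllocRecords. The common case appends at the tail, since
     // dump-time allocations are contiguous; reusing a previously deallocated
     // range falls back to the linear search below.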
3710 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3711   assert(DumpSharedSpaces, "sanity");
3712 
3713   int byte_size = (int)word_size * wordSize;
3714   AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
3715 
3716   if (_alloc_record_head == NULL) {
3717     _alloc_record_head = _alloc_record_tail = rec;
3718   } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
3719     _alloc_record_tail->_next = rec;
3720     _alloc_record_tail = rec;
3721   } else {
3722     // slow linear search, but this doesn't happen that often, and only when dumping
3723     for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
3724       if (old->_ptr == ptr) {
3725         assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
3726         int remain_bytes = old->_byte_size - byte_size;
3727         assert(remain_bytes >= 0, "sanity");
3728         old->_type = type;
3729 
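             // The new allocation may be smaller than the freed record it reuses.
             // If so, split the record: the front part becomes the new allocation
             // and the remainder is re-linked as a DeallocatedType record.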
3730         if (remain_bytes == 0) {
3731           delete(rec);
3732         } else {
3733           address remain_ptr = address(ptr) + byte_size;
3734           rec->_ptr = remain_ptr;
3735           rec->_byte_size = remain_bytes;
3736           rec->_type = MetaspaceObj::DeallocatedType;
3737           rec->_next = old->_next;
3738           old->_byte_size = byte_size;
3739           old->_next = rec;
3740         }
3741         return;
3742       }
3743     }
3744     assert(0, "reallocating a freed pointer that was not recorded");
3745   }
3746 }
3747 
3748 void Metaspace::record_deallocation(void* ptr, size_t word_size) {
3749   assert(DumpSharedSpaces, "sanity");
3750 
3751   for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3752     if (rec->_ptr == ptr) {
3753       assert(rec->_byte_size == (int)word_size * wordSize, "sanity");
3754       rec->_type = MetaspaceObj::DeallocatedType;
3755       return;
3756     }
3757   }
3758 
3759   assert(0, "deallocating a pointer that was not recorded");
3760 }
3761 
3762 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3763   assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3764 
3765   address last_addr = (address)bottom();
3766 
3767   for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3768     address ptr = rec->_ptr;
3769     if (last_addr < ptr) {
3770       closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3771     }
3772     closure->doit(ptr, rec->_type, rec->_byte_size);
3773     last_addr = ptr + rec->_byte_size;
3774   }
3775 
3776   address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3777   if (last_addr < top) {
3778     closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3779   }
3780 }
3781 
3782 void Metaspace::purge(MetadataType mdtype) {
3783   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3784 }
3785 
3786 void Metaspace::purge() {
3787   MutexLockerEx cl(SpaceManager::expand_lock(),
3788                    Mutex::_no_safepoint_check_flag);
3789   purge(NonClassType);
3790   if (using_class_space()) {
3791     purge(ClassType);
3792   }
3793 }
3794 
3795 void Metaspace::print_on(outputStream* out) const {
3796   // Print both class virtual space counts and metaspace.
3797   if (Verbose) {
3798     vsm()->print_on(out);
3799     if (using_class_space()) {
3800       class_vsm()->print_on(out);
3801     }
3802   }
3803 }
3804 
3805 bool Metaspace::contains(const void* ptr) {
3806   if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
3807     return true;
3808   }
3809   return contains_non_shared(ptr);
3810 }
3811 
3812 bool Metaspace::contains_non_shared(const void* ptr) {
3813   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
3814      return true;
3815   }
3816 
3817   return get_space_list(NonClassType)->contains(ptr);
3818 }
3819 
3820 void Metaspace::verify() {
3821   vsm()->verify();
3822   if (using_class_space()) {
3823     class_vsm()->verify();
3824   }
3825 }
3826 
3827 void Metaspace::dump(outputStream* const out) const {
3828   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
3829   vsm()->dump(out);
3830   if (using_class_space()) {
3831     out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
3832     class_vsm()->dump(out);
3833   }
3834 }
3835 
3836 /////////////// Unit tests ///////////////
3837 
3838 #ifndef PRODUCT
3839 
3840 class TestMetaspaceAuxTest : AllStatic {
3841  public:
3842   static void test_reserved() {
3843     size_t reserved = MetaspaceAux::reserved_bytes();
3844 
3845     assert(reserved > 0, "assert");
3846 
3847     size_t committed  = MetaspaceAux::committed_bytes();
3848     assert(committed <= reserved, "assert");
3849 
3850     size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3851     assert(reserved_metadata > 0, "assert");
3852     assert(reserved_metadata <= reserved, "assert");
3853 
3854     if (UseCompressedClassPointers) {
3855       size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3856       assert(reserved_class > 0, "assert");
3857       assert(reserved_class < reserved, "assert");
3858     }
3859   }
3860 
3861   static void test_committed() {
3862     size_t committed = MetaspaceAux::committed_bytes();
3863 
3864     assert(committed > 0, "assert");
3865 
3866     size_t reserved  = MetaspaceAux::reserved_bytes();
3867     assert(committed <= reserved, "assert");
3868 
3869     size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3870     assert(committed_metadata > 0, "assert");
3871     assert(committed_metadata <= committed, "assert");
3872 
3873     if (UseCompressedClassPointers) {
3874       size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3875       assert(committed_class > 0, "assert");
3876       assert(committed_class < committed, "assert");
3877     }
3878   }
3879 
3880   static void test_virtual_space_list_large_chunk() {
3881     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3882     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3883     // Use a size larger than VirtualSpaceSize (256k) and add one page so the
3884     // resulting size is _not_ vm_allocation_granularity aligned on Windows.
3885     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3886     large_size += (os::vm_page_size()/BytesPerWord);
3887     vs_list->get_new_chunk(large_size, 0);
3888   }
3889 
3890   static void test() {
3891     test_reserved();
3892     test_committed();
3893     test_virtual_space_list_large_chunk();
3894   }
3895 };
3896 
3897 void TestMetaspaceAux_test() {
3898   TestMetaspaceAuxTest::test();
3899 }
3900 
3901 class TestVirtualSpaceNodeTest {
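       // Greedily decompose words_left into medium, then small, then specialized
       // chunks. Illustrative example (assuming chunk sizes of 64, 16 and 4 words
       // respectively, which need not match the real values): 84 words become
       // 1 medium + 1 small + 1 specialized chunk.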
3902   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3903                                           size_t& num_small_chunks,
3904                                           size_t& num_specialized_chunks) {
3905     num_medium_chunks = words_left / MediumChunk;
3906     words_left = words_left % MediumChunk;
3907 
3908     num_small_chunks = words_left / SmallChunk;
3909     words_left = words_left % SmallChunk;
3910     // how many specialized chunks can we get?
3911     num_specialized_chunks = words_left / SpecializedChunk;
3912     assert(words_left % SpecializedChunk == 0, "should be nothing left");
3913   }
3914 
3915  public:
3916   static void test() {
3917     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3918     const size_t vsn_test_size_words = MediumChunk  * 4;
3919     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3920 
3921     // The chunk sizes must be multiples of each other, or this will fail
3922     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3923     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3924 
3925     { // No committed memory in VSN
3926       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3927       VirtualSpaceNode vsn(vsn_test_size_bytes);
3928       vsn.initialize();
3929       vsn.retire(&cm);
3930       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3931     }
3932 
3933     { // All of VSN is committed, half is used by chunks
3934       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3935       VirtualSpaceNode vsn(vsn_test_size_bytes);
3936       vsn.initialize();
3937       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3938       vsn.get_chunk_vs(MediumChunk);
3939       vsn.get_chunk_vs(MediumChunk);
3940       vsn.retire(&cm);
3941       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3942       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3943     }
3944 
3945     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3946     // This doesn't work for systems with vm_page_size >= 16K.
3947     if (page_chunks < MediumChunk) {
3948       // 4 pages of VSN is committed, some is used by chunks
3949       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3950       VirtualSpaceNode vsn(vsn_test_size_bytes);
3951 
3952       vsn.initialize();
3953       vsn.expand_by(page_chunks, page_chunks);
3954       vsn.get_chunk_vs(SmallChunk);
3955       vsn.get_chunk_vs(SpecializedChunk);
3956       vsn.retire(&cm);
3957 
3958       // committed - used = words left to retire
3959       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3960 
3961       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3962       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3963 
3964       assert(num_medium_chunks == 0, "should not get any medium chunks");
3965       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3966       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3967     }
3968 
3969     { // Half of VSN is committed, a humongous chunk is used
3970       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3971       VirtualSpaceNode vsn(vsn_test_size_bytes);
3972       vsn.initialize();
3973       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3974       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3975       vsn.retire(&cm);
3976 
3977       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3978       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3979       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3980 
3981       assert(num_medium_chunks == 0, "should not get any medium chunks");
3982       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3983       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3984     }
3985 
3986   }
3987 
3988 #define assert_is_available_positive(word_size) \
3989   assert(vsn.is_available(word_size), \
3990          #word_size ": " PTR_FORMAT " bytes were not available in " \
3991          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3992          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
3993 
3994 #define assert_is_available_negative(word_size) \
3995   assert(!vsn.is_available(word_size), \
3996          #word_size ": " PTR_FORMAT " bytes should not be available in " \
3997          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3998          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
3999 
4000   static void test_is_available_positive() {
4001     // Reserve some memory.
4002     VirtualSpaceNode vsn(os::vm_allocation_granularity());
4003     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4004 
4005     // Commit some memory.
4006     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4007     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4008     assert(expanded, "Failed to commit");
4009 
4010     // Check that is_available accepts the committed size.
4011     assert_is_available_positive(commit_word_size);
4012 
4013     // Check that is_available accepts half the committed size.
4014     size_t expand_word_size = commit_word_size / 2;
4015     assert_is_available_positive(expand_word_size);
4016   }
4017 
4018   static void test_is_available_negative() {
4019     // Reserve some memory.
4020     VirtualSpaceNode vsn(os::vm_allocation_granularity());
4021     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4022 
4023     // Commit some memory.
4024     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4025     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4026     assert(expanded, "Failed to commit");
4027 
4028     // Check that is_available doesn't accept a too large size.
4029     size_t two_times_commit_word_size = commit_word_size * 2;
4030     assert_is_available_negative(two_times_commit_word_size);
4031   }
4032 
4033   static void test_is_available_overflow() {
4034     // Reserve some memory.
4035     VirtualSpaceNode vsn(os::vm_allocation_granularity());
4036     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4037 
4038     // Commit some memory.
4039     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4040     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4041     assert(expanded, "Failed to commit");
4042 
4043     // Calculate a size that will overflow the virtual space size.
4044     void* virtual_space_max = (void*)(uintptr_t)-1;
4045     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
4046     size_t overflow_size = bottom_to_max + BytesPerWord;
4047     size_t overflow_word_size = overflow_size / BytesPerWord;
4048 
4049     // Check that is_available can handle the overflow.
4050     assert_is_available_negative(overflow_word_size);
4051   }
4052 
4053   static void test_is_available() {
4054     TestVirtualSpaceNodeTest::test_is_available_positive();
4055     TestVirtualSpaceNodeTest::test_is_available_negative();
4056     TestVirtualSpaceNodeTest::test_is_available_overflow();
4057   }
4058 };
4059 
4060 void TestVirtualSpaceNode_test() {
4061   TestVirtualSpaceNodeTest::test();
4062   TestVirtualSpaceNodeTest::test_is_available();
4063 }
4064 
4065 // The following test is placed here instead of a gtest / unittest file
4066 // because the ChunkManager class is only available in this file.
4067 void ChunkManager_test_list_index() {
4068   ChunkManager manager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
4069 
4070   // Test a previous bug where a query for a humongous class metachunk
4071   // incorrectly matched the non-class medium metachunk size.
4072   {
4073     assert(MediumChunk > ClassMediumChunk, "Precondition for test");
4074 
4075     ChunkIndex index = manager.list_index(MediumChunk);
4076 
4077     assert(index == HumongousIndex,
4078            "Requested size is larger than ClassMediumChunk,"
4079            " so should return HumongousIndex. Got index: %d", (int)index);
4080   }
4081 
4082   // Check the specified sizes as well.
4083   {
4084     ChunkIndex index = manager.list_index(ClassSpecializedChunk);
4085     assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
4086   }
4087   {
4088     ChunkIndex index = manager.list_index(ClassSmallChunk);
4089     assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
4090   }
4091   {
4092     ChunkIndex index = manager.list_index(ClassMediumChunk);
4093     assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
4094   }
4095   {
4096     ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
4097     assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index);
4098   }
4099 }
4100 
4101 #endif // !PRODUCT
4102 
4103 #ifdef ASSERT
4104 
4105 // ChunkManagerReturnTest stresses taking/returning chunks from the ChunkManager. It takes and
4106 // returns chunks from/to the ChunkManager while keeping track of the expected ChunkManager
4107 // content.
4108 class ChunkManagerReturnTestImpl {
4109 
4110   VirtualSpaceNode _vsn;
4111   ChunkManager _cm;
4112 
4113   // The expected content of the chunk manager.
4114   unsigned _chunks_in_chunkmanager;
4115   size_t _words_in_chunkmanager;
4116 
4117   // A fixed size pool of chunks. Chunks may be in the chunk manager (free) or not (in use).
4118   static const int num_chunks = 256;
4119   Metachunk* _pool[num_chunks];
4120 
4121   // Helper, return a random position into the chunk pool.
4122   static int get_random_position() {
4123     return os::random() % num_chunks;
4124   }
4125 
4126   // Asserts that ChunkManager counters match expectations.
4127   void assert_counters() {
4128     assert(_vsn.container_count() == num_chunks - _chunks_in_chunkmanager, "vsn counter mismatch.");
4129     assert(_cm.free_chunks_count() == _chunks_in_chunkmanager, "cm counter mismatch.");
4130     assert(_cm.free_chunks_total_words() == _words_in_chunkmanager, "cm counter mismatch.");
4131   }
4132 
4133   // Get a random chunk size. Equal chance to get spec/med/small chunk size or
4134   // a humongous chunk size. The latter itself is random in the range of [med+spec..4*med).
4135   size_t get_random_chunk_size() {
4136     const size_t sizes [] = { SpecializedChunk, SmallChunk, MediumChunk };
4137     const int rand = os::random() % 4;
4138     if (rand < 3) {
4139       return sizes[rand];
4140     } else {
4141       // Note: this affects the max. size of space (see _vsn initialization in ctor).
4142       return align_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk);
4143     }
4144   }
4145 
4146   // Starting at pool index <start>+1, find the next chunk tagged as either free or in use, depending
4147   // on <is_free>. Search wraps. Returns its position, or -1 if no matching chunk was found.
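       // E.g. with num_chunks == 256 and start == 255 the positions are examined in the
       // order 0, 1, ..., 255, so <start> itself is checked last.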
4148   int next_matching_chunk(int start, bool is_free) const {
4149     assert(start >= 0 && start < num_chunks, "invalid parameter");
4150     int pos = start;
4151     do {
4152       if (++pos == num_chunks) {
4153         pos = 0;
4154       }
4155       if (_pool[pos]->is_tagged_free() == is_free) {
4156         return pos;
4157       }
4158     } while (pos != start);
4159     return -1;
4160   }
4161 
4162   // A structure to keep information about a chunk list, including which
4163   // chunks are part of it. This is needed to remember a chunk list we are about
4164   // to return to the ChunkManager, because the original list will be destroyed.
4165   struct AChunkList {
4166     Metachunk* head;            // First chunk in the list.
4167     Metachunk* all[num_chunks]; // All chunks in the list, in list order.
4168     size_t size;                // Total word size of the chunks in the list.
4169     int num;                    // Number of chunks in the list.
4170     ChunkIndex index;           // Common chunk index (size class) of the chunks.
4171   };
4172 
4173   // Assemble, from the in-use chunks (not in the chunk manager) in the pool,
4174   // a random chunk list of max. length <list_size> of chunks with the same
4175   // ChunkIndex (chunk size).
4176   // Returns false if list cannot be assembled. List is returned in the <out>
4177   // structure. Returned list may be smaller than <list_size>.
4178   bool assemble_random_chunklist(AChunkList* out, int list_size) {
4179     // Choose a random in-use chunk from the pool...
4180     const int headpos = next_matching_chunk(get_random_position(), false);
4181     if (headpos == -1) {
4182       return false;
4183     }
4184     Metachunk* const head = _pool[headpos];
4185     out->all[0] = head;
4186     assert(head->is_tagged_free() == false, "Chunk state mismatch");
4187     // ..then go from there, chaining up to list_size - 1 other in-use
4188     // chunks of the same index to it.
4189     const ChunkIndex index = _cm.list_index(head->word_size());
4190     int num_added = 1;
4191     size_t size_added = head->word_size();
4192     int pos = headpos;
4193     Metachunk* tail = head;
4194     do {
4195       pos = next_matching_chunk(pos, false);
4196       if (pos != headpos) {
4197         Metachunk* c = _pool[pos];
4198         assert(c->is_tagged_free() == false, "Chunk state mismatch");
4199         if (index == _cm.list_index(c->word_size())) {
4200           tail->set_next(c);
4201           c->set_prev(tail);
4202           tail = c;
4203           out->all[num_added] = c;
4204           num_added ++;
4205           size_added += c->word_size();
4206         }
4207       }
4208     } while (num_added < list_size && pos != headpos);
4209     out->head = head;
4210     out->index = index;
4211     out->size = size_added;
4212     out->num = num_added;
4213     return true;
4214   }
4215 
4216   // Take a single random chunk from the ChunkManager.
4217   bool take_single_random_chunk_from_chunkmanager() {
4218     assert_counters();
4219     _cm.locked_verify();
4220     int pos = next_matching_chunk(get_random_position(), true);
4221     if (pos == -1) {
4222       return false;
4223     }
4224     Metachunk* c = _pool[pos];
4225     assert(c->is_tagged_free(), "Chunk state mismatch");
4226     // Note: instead of using ChunkManager::remove_chunk on this one chunk, we call
4227     // ChunkManager::free_chunks_get() with this chunk's word size. We really want
4228     // to exercise ChunkManager::free_chunks_get() because that one gets called for
4229     // normal chunk allocation.
4230     Metachunk* c2 = _cm.free_chunks_get(c->word_size());
4231     assert(c2 != NULL, "Unexpected.");
4232     assert(!c2->is_tagged_free(), "Chunk state mismatch");
4233     assert(c2->next() == NULL && c2->prev() == NULL, "Chunk should be outside of a list.");
4234     _chunks_in_chunkmanager --;
4235     _words_in_chunkmanager -= c->word_size();
4236     assert_counters();
4237     _cm.locked_verify();
4238     return true;
4239   }
4240 
4241   // Returns a single random chunk to the chunk manager. Returns false if that
4242   // was not possible (all chunks are already in the chunk manager).
4243   bool return_single_random_chunk_to_chunkmanager() {
4244     assert_counters();
4245     _cm.locked_verify();
4246     int pos = next_matching_chunk(get_random_position(), false);
4247     if (pos == -1) {
4248       return false;
4249     }
4250     Metachunk* c = _pool[pos];
4251     assert(c->is_tagged_free() == false, "wrong chunk information");
4252     _cm.return_single_chunk(_cm.list_index(c->word_size()), c);
4253     _chunks_in_chunkmanager ++;
4254     _words_in_chunkmanager += c->word_size();
4255     assert(c->is_tagged_free() == true, "wrong chunk information");
4256     assert_counters();
4257     _cm.locked_verify();
4258     return true;
4259   }
4260 
4261   // Return a random chunk list to the chunk manager. Returns the length of the
4262   // returned list.
4263   int return_random_chunk_list_to_chunkmanager(int list_size) {
4264     assert_counters();
4265     _cm.locked_verify();
4266     AChunkList aChunkList;
4267     if (!assemble_random_chunklist(&aChunkList, list_size)) {
4268       return 0;
4269     }
4270     // Before the chunks are returned, they should be tagged as in use.
4271     for (int i = 0; i < aChunkList.num; i ++) {
4272       assert(!aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4273     }
4274     _cm.return_chunk_list(aChunkList.index, aChunkList.head);
4275     _chunks_in_chunkmanager += aChunkList.num;
4276     _words_in_chunkmanager += aChunkList.size;
4277     // After all chunks are returned, check that they are now tagged free.
4278     for (int i = 0; i < aChunkList.num; i ++) {
4279       assert(aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4280     }
4281     assert_counters();
4282     _cm.locked_verify();
4283     return aChunkList.num;
4284   }
4285 
4286 public:
4287 
4288   ChunkManagerReturnTestImpl()
4289     : _vsn(align_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
4290     , _cm(SpecializedChunk, SmallChunk, MediumChunk)
4291     , _chunks_in_chunkmanager(0)
4292     , _words_in_chunkmanager(0)
4293   {
4294     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4295     // Allocate virtual space and allocate random chunks. Keep these chunks in the _pool. These chunks are
4296     // "in use", because not yet added to any chunk manager.
4297     _vsn.initialize();
4298     _vsn.expand_by(_vsn.reserved_words(), _vsn.reserved_words());
4299     for (int i = 0; i < num_chunks; i ++) {
4300       const size_t size = get_random_chunk_size();
4301       _pool[i] = _vsn.get_chunk_vs(size);
4302       assert(_pool[i] != NULL, "allocation failed");
4303     }
4304     assert_counters();
4305     _cm.locked_verify();
4306   }
4307 
4308   // Test entry point.
4309   // Return some chunks to the chunk manager (return phase). Take some chunks out (take phase). Repeat.
4310   // Chunks are chosen randomly. The number of chunks to return or take is chosen randomly, but is affected
4311   // by the <phase_length_factor> argument: a factor of 0.0 will cause the test to quickly alternate between
4312   // returning and taking, whereas a factor of 1.0 will take/return all chunks from/to the
4313   // chunk manager, thereby emptying or filling it completely.
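       // For example, with num_chunks == 256 a factor of 0.5 gives phases of roughly
       // 128 chunk moves (plus a small random jitter) before the direction flips, while
       // a factor of 0.0 flips direction after at most a handful of moves.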
4314   void do_test(float phase_length_factor) {
4315     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4316     assert_counters();
4317     // Execute n operations, an operation being the move of a single chunk to/from the chunk manager.
4318     const int num_max_ops = num_chunks * 100;
4319     int num_ops = num_max_ops;
4320     const int average_phase_length = (int)(phase_length_factor * num_chunks);
4321     int num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
4322     bool return_phase = true;
4323     while (num_ops > 0) {
4324       int chunks_moved = 0;
4325       if (return_phase) {
4326         // Randomly switch between returning a single chunk or a random length chunk list.
4327         if (os::random() % 2 == 0) {
4328           if (return_single_random_chunk_to_chunkmanager()) {
4329             chunks_moved = 1;
4330           }
4331         } else {
4332           const int list_length = MAX2(1, (os::random() % num_ops_until_switch));
4333           chunks_moved = return_random_chunk_list_to_chunkmanager(list_length);
4334         }
4335       } else {
4336         // Take phase: take a single chunk back from the chunk manager.
4337         if (take_single_random_chunk_from_chunkmanager()) {
4338           chunks_moved = 1;
4339         }
4340       }
4341       num_ops -= chunks_moved;
4342       num_ops_until_switch -= chunks_moved;
4343       if (chunks_moved == 0 || num_ops_until_switch <= 0) {
4344         return_phase = !return_phase;
4345         num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
4346       }
4347     }
4348   }
4349 };
4350 
4351 void* setup_chunkmanager_returntests() {
4352   ChunkManagerReturnTestImpl* p = new ChunkManagerReturnTestImpl();
4353   return p;
4354 }
4355 
4356 void teardown_chunkmanager_returntests(void* p) {
4357   delete (ChunkManagerReturnTestImpl*) p;
4358 }
4359 
4360 void run_chunkmanager_returntests(void* p, float phase_length) {
4361   ChunkManagerReturnTestImpl* test = (ChunkManagerReturnTestImpl*) p;
4362   test->do_test(phase_length);
4363 }
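
     // A minimal sketch (assumed, not part of this file) of how a gtest wrapper might
     // drive the three hooks above; the test name and factor values are illustrative:
     //
     //   void* setup_chunkmanager_returntests();
     //   void run_chunkmanager_returntests(void* p, float phase_length);
     //   void teardown_chunkmanager_returntests(void* p);
     //
     //   TEST_VM(ChunkManager, return_chunk_stress) {
     //     void* test = setup_chunkmanager_returntests();
     //     run_chunkmanager_returntests(test, 0.0f);  // alternate quickly
     //     run_chunkmanager_returntests(test, 0.5f);  // medium-length phases
     //     run_chunkmanager_returntests(test, 1.0f);  // drain and fill completely
     //     teardown_chunkmanager_returntests(test);
     //   }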
4364 
4365 // The following test is placed here instead of a gtest / unittest file
4366 // because the SpaceManager class is only available in this file.
4367 class SpaceManagerTest : AllStatic {
4368   friend void SpaceManager_test_adjust_initial_chunk_size();
4369 
4370   static void test_adjust_initial_chunk_size(bool is_class) {
4371     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
4372     const size_t normal   = SpaceManager::small_chunk_size(is_class);
4373     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
4374 
4375 #define test_adjust_initial_chunk_size(value, expected, is_class_value)          \
4376     do {                                                                         \
4377       size_t v = value;                                                          \
4378       size_t e = expected;                                                       \
4379       size_t r = SpaceManager::adjust_initial_chunk_size(v, (is_class_value));   \
4380       assert(r == e, "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, r);       \
4381     } while (0)
4382 
4383     // Smallest (specialized)
4384     test_adjust_initial_chunk_size(1,            smallest, is_class);
4385     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
4386     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
4387 
4388     // Small
4389     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
4390     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
4391     test_adjust_initial_chunk_size(normal,       normal, is_class);
4392 
4393     // Medium
4394     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
4395     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
4396     test_adjust_initial_chunk_size(medium,     medium, is_class);
4397 
4398     // Humongous
4399     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
4400 
4401 #undef test_adjust_initial_chunk_size
4402   }
4403 
4404   static void test_adjust_initial_chunk_size() {
4405     test_adjust_initial_chunk_size(false);
4406     test_adjust_initial_chunk_size(true);
4407   }
4408 };
4409 
4410 void SpaceManager_test_adjust_initial_chunk_size() {
4411   SpaceManagerTest::test_adjust_initial_chunk_size();
4412 }
4413 
4414 #endif // ASSERT