1 /*
   2  * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "gc/shared/gcLocker.hpp"
  29 #include "logging/log.hpp"
  30 #include "logging/logStream.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/binaryTreeDictionary.hpp"
  33 #include "memory/filemap.hpp"
  34 #include "memory/freeList.hpp"
  35 #include "memory/metachunk.hpp"
  36 #include "memory/metaspace.hpp"
  37 #include "memory/metaspaceGCThresholdUpdater.hpp"
  38 #include "memory/metaspaceShared.hpp"
  39 #include "memory/metaspaceTracer.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "memory/universe.hpp"
  42 #include "runtime/atomic.hpp"
  43 #include "runtime/globals.hpp"
  44 #include "runtime/init.hpp"
  45 #include "runtime/java.hpp"
  46 #include "runtime/mutex.hpp"
  47 #include "runtime/orderAccess.inline.hpp"
  48 #include "services/memTracker.hpp"
  49 #include "services/memoryService.hpp"
  50 #include "utilities/align.hpp"
  51 #include "utilities/copy.hpp"
  52 #include "utilities/debug.hpp"
  53 #include "utilities/macros.hpp"
  54 
  55 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  56 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
  57 
  58 // Set this constant to enable slow integrity checking of the free chunk lists
  59 const bool metaspace_slow_verify = false;
  60 
  61 size_t const allocation_from_dictionary_limit = 4 * K;
  62 
  63 MetaWord* last_allocated = 0;
  64 
  65 size_t Metaspace::_compressed_class_space_size;
  66 const MetaspaceTracer* Metaspace::_tracer = NULL;
  67 
  68 // Used in declarations in SpaceManager and ChunkManager
  69 enum ChunkIndex {
  70   ZeroIndex = 0,
  71   SpecializedIndex = ZeroIndex,
  72   SmallIndex = SpecializedIndex + 1,
  73   MediumIndex = SmallIndex + 1,
  74   HumongousIndex = MediumIndex + 1,
  75   NumberOfFreeLists = 3,
  76   NumberOfInUseLists = 4
  77 };
  78 
  79 // Helper, returns a descriptive name for the given index.
  80 static const char* chunk_size_name(ChunkIndex index) {
  81   switch (index) {
  82     case SpecializedIndex:
  83       return "specialized";
  84     case SmallIndex:
  85       return "small";
  86     case MediumIndex:
  87       return "medium";
  88     case HumongousIndex:
  89       return "humongous";
  90     default:
  91       return "Invalid index";
  92   }
  93 }
  94 
  95 enum ChunkSizes {    // in words.
  96   ClassSpecializedChunk = 128,
  97   SpecializedChunk = 128,
  98   ClassSmallChunk = 256,
  99   SmallChunk = 512,
 100   ClassMediumChunk = 4 * K,
 101   MediumChunk = 8 * K
 102 };
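     // How the ChunkIndex values map onto these sizes (non-class space / class space):
     //   SpecializedIndex -> SpecializedChunk (128)  / ClassSpecializedChunk (128)
     //   SmallIndex       -> SmallChunk (512)        / ClassSmallChunk (256)
     //   MediumIndex      -> MediumChunk (8 * K)     / ClassMediumChunk (4 * K)
     //   HumongousIndex   -> any chunk larger than the medium chunk size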
 103 
 104 static ChunkIndex next_chunk_index(ChunkIndex i) {
 105   assert(i < NumberOfInUseLists, "Out of bound");
 106   return (ChunkIndex) (i+1);
 107 }
 108 
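     // MetaspaceGC state: the committed-capacity high-water mark at which a GC is
     // induced, the shrink factor used when computing a smaller capacity after a GC,
     // and whether a concurrent collection of metadata should be started.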
 109 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 110 uint MetaspaceGC::_shrink_factor = 0;
 111 bool MetaspaceGC::_should_concurrent_collect = false;
 112 
 113 typedef class FreeList<Metachunk> ChunkList;
 114 
 115 // Manages the global free lists of chunks.
 116 class ChunkManager : public CHeapObj<mtInternal> {
 117   friend class TestVirtualSpaceNodeTest;
 118 
 119   // Free list of chunks of different sizes.
 120   //   SpecializedChunk
 121   //   SmallChunk
 122   //   MediumChunk
 123   ChunkList _free_chunks[NumberOfFreeLists];
 124 
 125   // Return non-humongous chunk list by its index.
 126   ChunkList* free_chunks(ChunkIndex index);
 127 
 128   // Returns non-humongous chunk list for the given chunk word size.
 129   ChunkList* find_free_chunks_list(size_t word_size);
 130 
 131   //   HumongousChunk
 132   ChunkTreeDictionary _humongous_dictionary;
 133 
 134   // Returns the humongous chunk dictionary.
 135   ChunkTreeDictionary* humongous_dictionary() {
 136     return &_humongous_dictionary;
 137   }
 138 
 139   // Size, in metaspace words, of all chunks managed by this ChunkManager
 140   size_t _free_chunks_total;
 141   // Number of chunks in this ChunkManager
 142   size_t _free_chunks_count;
 143 
 144   // Update counters after a chunk was added or removed.
 145   void account_for_added_chunk(const Metachunk* c);
 146   void account_for_removed_chunk(const Metachunk* c);
 147 
 148   // Debug support
 149 
 150   size_t sum_free_chunks();
 151   size_t sum_free_chunks_count();
 152 
 153   void locked_verify_free_chunks_total();
 154   void slow_locked_verify_free_chunks_total() {
 155     if (metaspace_slow_verify) {
 156       locked_verify_free_chunks_total();
 157     }
 158   }
 159   void locked_verify_free_chunks_count();
 160   void slow_locked_verify_free_chunks_count() {
 161     if (metaspace_slow_verify) {
 162       locked_verify_free_chunks_count();
 163     }
 164   }
 165   void verify_free_chunks_count();
 166 
 167  public:
 168 
 169   ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
 170       : _free_chunks_total(0), _free_chunks_count(0) {
 171     _free_chunks[SpecializedIndex].set_size(specialized_size);
 172     _free_chunks[SmallIndex].set_size(small_size);
 173     _free_chunks[MediumIndex].set_size(medium_size);
 174   }
 175 
 176   // Allocate (i.e. remove) a chunk from the global freelist.
 177   Metachunk* chunk_freelist_allocate(size_t word_size);
 178 
 179   // Map a size to a list index assuming that there are lists
 180   // for specialized, small, medium, and humongous chunks.
 181   ChunkIndex list_index(size_t size);
 182 
 183   // Map a given index to the chunk size.
 184   size_t size_by_index(ChunkIndex index);
 185 
 186   // Take a chunk from the ChunkManager. The chunk is expected to be in
 187   // the chunk manager (the freelist if non-humongous, the dictionary if
 188   // humongous).
 189   void remove_chunk(Metachunk* chunk);
 190 
 191   // Return a single chunk of type index to the ChunkManager.
 192   void return_single_chunk(ChunkIndex index, Metachunk* chunk);
 193 
 194   // Add the simple linked list of chunks to the freelist of chunks
 195   // of type index.
 196   void return_chunk_list(ChunkIndex index, Metachunk* chunk);
 197 
 198   // Total of the space in the free chunks list
 199   size_t free_chunks_total_words();
 200   size_t free_chunks_total_bytes();
 201 
 202   // Number of chunks in the free chunks list
 203   size_t free_chunks_count();
 204 
 205   // Remove from a list by size.  Selects list based on size of chunk.
 206   Metachunk* free_chunks_get(size_t chunk_word_size);
 207 
 208 #define index_bounds_check(index)                                         \
 209   assert(index == SpecializedIndex ||                                     \
 210          index == SmallIndex ||                                           \
 211          index == MediumIndex ||                                          \
 212          index == HumongousIndex, "Bad index: %d", (int) index)
 213 
 214   size_t num_free_chunks(ChunkIndex index) const {
 215     index_bounds_check(index);
 216 
 217     if (index == HumongousIndex) {
 218       return _humongous_dictionary.total_free_blocks();
 219     }
 220 
 221     ssize_t count = _free_chunks[index].count();
 222     return count == -1 ? 0 : (size_t) count;
 223   }
 224 
 225   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 226     index_bounds_check(index);
 227 
 228     size_t word_size = 0;
 229     if (index == HumongousIndex) {
 230       word_size = _humongous_dictionary.total_size();
 231     } else {
 232       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 233       word_size = size_per_chunk_in_words * num_free_chunks(index);
 234     }
 235 
 236     return word_size * BytesPerWord;
 237   }
 238 
 239   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 240     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 241                                          num_free_chunks(SmallIndex),
 242                                          num_free_chunks(MediumIndex),
 243                                          num_free_chunks(HumongousIndex),
 244                                          size_free_chunks_in_bytes(SpecializedIndex),
 245                                          size_free_chunks_in_bytes(SmallIndex),
 246                                          size_free_chunks_in_bytes(MediumIndex),
 247                                          size_free_chunks_in_bytes(HumongousIndex));
 248   }
 249 
 250   // Debug support
 251   void verify();
 252   void slow_verify() {
 253     if (metaspace_slow_verify) {
 254       verify();
 255     }
 256   }
 257   void locked_verify();
 258   void slow_locked_verify() {
 259     if (metaspace_slow_verify) {
 260       locked_verify();
 261     }
 262   }
 263   void verify_free_chunks_total();
 264 
 265   void locked_print_free_chunks(outputStream* st);
 266   void locked_print_sum_free_chunks(outputStream* st);
 267 
 268   void print_on(outputStream* st) const;
 269 };
 270 
 271 class SmallBlocks : public CHeapObj<mtClass> {
 272   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 273   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 274 
 275  private:
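       // _small_lists[k] holds free blocks of exactly (k + _small_block_min_size) words.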
 276   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 277 
 278   FreeList<Metablock>& list_at(size_t word_size) {
 279     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 280     return _small_lists[word_size - _small_block_min_size];
 281   }
 282 
 283  public:
 284   SmallBlocks() {
 285     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 286       uint k = i - _small_block_min_size;
 287       _small_lists[k].set_size(i);
 288     }
 289   }
 290 
 291   size_t total_size() const {
 292     size_t result = 0;
 293     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 294       uint k = i - _small_block_min_size;
 295       result = result + _small_lists[k].count() * _small_lists[k].size();
 296     }
 297     return result;
 298   }
 299 
 300   static uint small_block_max_size() { return _small_block_max_size; }
 301   static uint small_block_min_size() { return _small_block_min_size; }
 302 
 303   MetaWord* get_block(size_t word_size) {
 304     if (list_at(word_size).count() > 0) {
 305       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 306       return new_block;
 307     } else {
 308       return NULL;
 309     }
 310   }
 311   void return_block(Metablock* free_chunk, size_t word_size) {
 312     list_at(word_size).return_chunk_at_head(free_chunk, false);
 313     assert(list_at(word_size).count() > 0, "Should have a chunk");
 314   }
 315 
 316   void print_on(outputStream* st) const {
 317     st->print_cr("SmallBlocks:");
 318     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 319       uint k = i - _small_block_min_size;
 320       st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
 321     }
 322   }
 323 };
 324 
 325 // Used to manage the free list of Metablocks (a block corresponds
 326 // to the allocation of a quantum of metadata).
 327 class BlockFreelist : public CHeapObj<mtClass> {
 328   BlockTreeDictionary* const _dictionary;
 329   SmallBlocks* _small_blocks;
 330 
 331   // Only allocate and split from freelist if the size of the allocation
 332   // is at least 1/4th the size of the available block.
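       // For example, a 10-word request is not carved out of a free block larger than
       // 40 words; get_block() puts such a block back and returns NULL instead.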
 333   const static int WasteMultiplier = 4;
 334 
 335   // Accessors
 336   BlockTreeDictionary* dictionary() const { return _dictionary; }
 337   SmallBlocks* small_blocks() {
 338     if (_small_blocks == NULL) {
 339       _small_blocks = new SmallBlocks();
 340     }
 341     return _small_blocks;
 342   }
 343 
 344  public:
 345   BlockFreelist();
 346   ~BlockFreelist();
 347 
 348   // Get a block from, or return a block to, the free list.
 349   MetaWord* get_block(size_t word_size);
 350   void return_block(MetaWord* p, size_t word_size);
 351 
 352   size_t total_size() const  {
 353     size_t result = dictionary()->total_size();
 354     if (_small_blocks != NULL) {
 355       result = result + _small_blocks->total_size();
 356     }
 357     return result;
 358   }
 359 
 360   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 361   void print_on(outputStream* st) const;
 362 };
 363 
 364 // A VirtualSpaceList node.
 365 class VirtualSpaceNode : public CHeapObj<mtClass> {
 366   friend class VirtualSpaceList;
 367 
 368   // Link to next VirtualSpaceNode
 369   VirtualSpaceNode* _next;
 370 
 371   // total in the VirtualSpace
 372   MemRegion _reserved;
 373   ReservedSpace _rs;
 374   VirtualSpace _virtual_space;
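       // Address of the next available space in _virtual_space (the allocation top).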
 375   MetaWord* _top;
 376   // count of chunks contained in this VirtualSpace
 377   uintx _container_count;
 378 
 379   // Convenience functions to access the _virtual_space
 380   char* low()  const { return virtual_space()->low(); }
 381   char* high() const { return virtual_space()->high(); }
 382 
 383   // The first Metachunk will be allocated at the bottom of the
 384   // VirtualSpace
 385   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 386 
 387   // Committed but unused space in the virtual space
 388   size_t free_words_in_vs() const;
 389  public:
 390 
 391   VirtualSpaceNode(size_t byte_size);
 392   VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
 393   ~VirtualSpaceNode();
 394 
 395   // Convenience functions for logical bottom and end
 396   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 397   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 398 
 399   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 400 
 401   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 402   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 403 
 404   bool is_pre_committed() const { return _virtual_space.special(); }
 405 
 407   // Accessors
 408   VirtualSpaceNode* next() { return _next; }
 409   void set_next(VirtualSpaceNode* v) { _next = v; }
 410 
 411   void set_reserved(MemRegion const v) { _reserved = v; }
 412   void set_top(MetaWord* v) { _top = v; }
 413 
 414   // Accessors
 415   MemRegion* reserved() { return &_reserved; }
 416   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 417 
 418   // Returns true if "word_size" is available in the VirtualSpace
 419   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 420 
 421   MetaWord* top() const { return _top; }
 422   void inc_top(size_t word_size) { _top += word_size; }
 423 
 424   uintx container_count() { return _container_count; }
 425   void inc_container_count();
 426   void dec_container_count();
 427 #ifdef ASSERT
 428   uintx container_count_slow();
 429   void verify_container_count();
 430 #endif
 431 
 432   // used and capacity in this single entry in the list
 433   size_t used_words_in_vs() const;
 434   size_t capacity_words_in_vs() const;
 435 
 436   bool initialize();
 437 
 438   // get space from the virtual space
 439   Metachunk* take_from_committed(size_t chunk_word_size);
 440 
 441   // Allocate a chunk from the virtual space and return it.
 442   Metachunk* get_chunk_vs(size_t chunk_word_size);
 443 
 444   // Expands/shrinks the committed space in a virtual space.  Delegates
 445   // to VirtualSpace.
 446   bool expand_by(size_t min_words, size_t preferred_words);
 447 
 448   // In preparation for deleting this node, remove all the chunks
 449   // in the node from any freelist.
 450   void purge(ChunkManager* chunk_manager);
 451 
 452   // If an allocation doesn't fit in the current node a new node is created.
 453   // Allocate chunks out of the remaining committed space in this node
 454   // to avoid wasting that memory.
 455   // This always adds up because all the chunk sizes are multiples of
 456   // the smallest chunk size.
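       // (For instance, the non-class chunk sizes 8 * K, 512 and 128 words are all
       // multiples of the 128-word specialized chunk.)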
 457   void retire(ChunkManager* chunk_manager);
 458 
 459 #ifdef ASSERT
 460   // Debug support
 461   void mangle();
 462 #endif
 463 
 464   void print_on(outputStream* st) const;
 465 };
 466 
 467 #define assert_is_aligned(value, alignment)                  \
 468   assert(is_aligned((value), (alignment)),                   \
 469          SIZE_FORMAT_HEX " is not aligned to "               \
 470          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 471 
 472 // Decide if large pages should be committed when the memory is reserved.
 473 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 474   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 475     size_t words = bytes / BytesPerWord;
 476     bool is_class = false; // We never reserve large pages for the class space.
 477     if (MetaspaceGC::can_expand(words, is_class) &&
 478         MetaspaceGC::allowed_expansion() >= words) {
 479       return true;
 480     }
 481   }
 482 
 483   return false;
 484 }
 485 
 486 // "bytes" is the size, in bytes, of the associated VirtualSpace.
 487 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
 488   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 489 
 490 #if INCLUDE_CDS
 491   // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
 492   // the space at a configurable address, generally at the top of the Java heap,
 493   // so that other memory addresses don't conflict.
 494   if (DumpSharedSpaces) {
 495     bool large_pages = false; // No large pages when dumping the CDS archive.
 496     char* shared_base = align_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
 497 
 498     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
 499     if (_rs.is_reserved()) {
 500       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
 501     } else {
 502       // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
 503       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 504     }
 505     if (!_rs.is_reserved()) {
 506       vm_exit_during_initialization("Unable to allocate memory for shared space",
 507         err_msg(SIZE_FORMAT " bytes.", bytes));
 508     }
 509     MetaspaceShared::initialize_shared_rs(&_rs);
 510   } else
 511 #endif
 512   {
 513     bool large_pages = should_commit_large_pages_when_reserving(bytes);
 514 
 515     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 516   }
 517 
 518   if (_rs.is_reserved()) {
 519     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 520     assert(_rs.size() != 0, "Catch if we get a 0 size");
 521     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
 522     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
 523 
 524     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 525   }
 526 }
 527 
 528 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 529   Metachunk* chunk = first_chunk();
 530   Metachunk* invalid_chunk = (Metachunk*) top();
 531   while (chunk < invalid_chunk ) {
 532     assert(chunk->is_tagged_free(), "Should be tagged free");
 533     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 534     chunk_manager->remove_chunk(chunk);
 535     assert(chunk->next() == NULL &&
 536            chunk->prev() == NULL,
 537            "Was not removed from its list");
 538     chunk = (Metachunk*) next;
 539   }
 540 }
 541 
 542 #ifdef ASSERT
 543 uintx VirtualSpaceNode::container_count_slow() {
 544   uintx count = 0;
 545   Metachunk* chunk = first_chunk();
 546   Metachunk* invalid_chunk = (Metachunk*) top();
 547   while (chunk < invalid_chunk ) {
 548     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 549     // Don't count the chunks on the free lists.  Those are
 550     // still part of the VirtualSpaceNode but not currently
 551     // counted.
 552     if (!chunk->is_tagged_free()) {
 553       count++;
 554     }
 555     chunk = (Metachunk*) next;
 556   }
 557   return count;
 558 }
 559 #endif
 560 
 561 // List of VirtualSpaces for metadata allocation.
 562 class VirtualSpaceList : public CHeapObj<mtClass> {
 563   friend class VirtualSpaceNode;
 564 
 565   enum VirtualSpaceSizes {
 566     VirtualSpaceSize = 256 * K
 567   };
 568 
 569   // Head of the list
 570   VirtualSpaceNode* _virtual_space_list;
 571   // virtual space currently being used for allocations
 572   VirtualSpaceNode* _current_virtual_space;
 573 
 574   // Is this VirtualSpaceList used for the compressed class space
 575   bool _is_class;
 576 
 577   // Sum of reserved and committed memory in the virtual spaces
 578   size_t _reserved_words;
 579   size_t _committed_words;
 580 
 581   // Number of virtual spaces
 582   size_t _virtual_space_count;
 583 
 584   ~VirtualSpaceList();
 585 
 586   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
 587 
 588   void set_virtual_space_list(VirtualSpaceNode* v) {
 589     _virtual_space_list = v;
 590   }
 591   void set_current_virtual_space(VirtualSpaceNode* v) {
 592     _current_virtual_space = v;
 593   }
 594 
 595   void link_vs(VirtualSpaceNode* new_entry);
 596 
 597   // Get another virtual space and add it to the list.  This
 598   // is typically prompted by a failed attempt to allocate a chunk
 599   // and is typically followed by the allocation of a chunk.
 600   bool create_new_virtual_space(size_t vs_word_size);
 601 
 602   // Chunk up the unused committed space in the current
 603   // virtual space and add the chunks to the free list.
 604   void retire_current_virtual_space();
 605 
 606  public:
 607   VirtualSpaceList(size_t word_size);
 608   VirtualSpaceList(ReservedSpace rs);
 609 
 610   size_t free_bytes();
 611 
 612   Metachunk* get_new_chunk(size_t chunk_word_size,
 613                            size_t suggested_commit_granularity);
 614 
 615   bool expand_node_by(VirtualSpaceNode* node,
 616                       size_t min_words,
 617                       size_t preferred_words);
 618 
 619   bool expand_by(size_t min_words,
 620                  size_t preferred_words);
 621 
 622   VirtualSpaceNode* current_virtual_space() {
 623     return _current_virtual_space;
 624   }
 625 
 626   bool is_class() const { return _is_class; }
 627 
 628   bool initialization_succeeded() { return _virtual_space_list != NULL; }
 629 
 630   size_t reserved_words()  { return _reserved_words; }
 631   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
 632   size_t committed_words() { return _committed_words; }
 633   size_t committed_bytes() { return committed_words() * BytesPerWord; }
 634 
 635   void inc_reserved_words(size_t v);
 636   void dec_reserved_words(size_t v);
 637   void inc_committed_words(size_t v);
 638   void dec_committed_words(size_t v);
 639   void inc_virtual_space_count();
 640   void dec_virtual_space_count();
 641 
 642   bool contains(const void* ptr);
 643 
 644   // Unlink empty VirtualSpaceNodes and free them.
 645   void purge(ChunkManager* chunk_manager);
 646 
 647   void print_on(outputStream* st) const;
 648 
 649   class VirtualSpaceListIterator : public StackObj {
 650     VirtualSpaceNode* _virtual_spaces;
 651    public:
 652     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
 653       _virtual_spaces(virtual_spaces) {}
 654 
 655     bool repeat() {
 656       return _virtual_spaces != NULL;
 657     }
 658 
 659     VirtualSpaceNode* get_next() {
 660       VirtualSpaceNode* result = _virtual_spaces;
 661       if (_virtual_spaces != NULL) {
 662         _virtual_spaces = _virtual_spaces->next();
 663       }
 664       return result;
 665     }
 666   };
 667 };
 668 
 669 class Metadebug : AllStatic {
 670   // Debugging support for Metaspaces
 671   static int _allocation_fail_alot_count;
 672 
 673  public:
 674 
 675   static void init_allocation_fail_alot_count();
 676 #ifdef ASSERT
 677   static bool test_metadata_failure();
 678 #endif
 679 };
 680 
 681 int Metadebug::_allocation_fail_alot_count = 0;
 682 
 683 //  SpaceManager - used by Metaspace to handle allocations
 684 class SpaceManager : public CHeapObj<mtClass> {
 685   friend class Metaspace;
 686   friend class Metadebug;
 687 
 688  private:
 689 
 690   // protects allocations
 691   Mutex* const _lock;
 692 
 693   // Type of metadata allocated.
 694   Metaspace::MetadataType _mdtype;
 695 
 696   // List of chunks in use by this SpaceManager.  Allocations
 697   // are done from the current chunk.  The list is used for deallocating
 698   // chunks when the SpaceManager is freed.
 699   Metachunk* _chunks_in_use[NumberOfInUseLists];
 700   Metachunk* _current_chunk;
 701 
 702   // Maximum number of small chunks to allocate to a SpaceManager
 703   static uint const _small_chunk_limit;
 704 
 705   // Sum of all space in allocated chunks
 706   size_t _allocated_blocks_words;
 707 
 708   // Sum of all allocated chunks
 709   size_t _allocated_chunks_words;
 710   size_t _allocated_chunks_count;
 711 
 712   // Free lists of blocks are per SpaceManager since they
 713   // are assumed to be in chunks in use by the SpaceManager
 714   // and all chunks in use by a SpaceManager are freed when
 715   // the class loader using the SpaceManager is collected.
 716   BlockFreelist* _block_freelists;
 717 
 718   // protects virtual space and chunk expansions
 719   static const char*  _expand_lock_name;
 720   static const int    _expand_lock_rank;
 721   static Mutex* const _expand_lock;
 722 
 723  private:
 724   // Accessors
 725   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
 726   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
 727     _chunks_in_use[index] = v;
 728   }
 729 
 730   BlockFreelist* block_freelists() const { return _block_freelists; }
 731 
 732   Metaspace::MetadataType mdtype() { return _mdtype; }
 733 
 734   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
 735   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
 736 
 737   Metachunk* current_chunk() const { return _current_chunk; }
 738   void set_current_chunk(Metachunk* v) {
 739     _current_chunk = v;
 740   }
 741 
 742   Metachunk* find_current_chunk(size_t word_size);
 743 
 744   // Add chunk to the list of chunks in use
 745   void add_chunk(Metachunk* v, bool make_current);
 746   void retire_current_chunk();
 747 
 748   Mutex* lock() const { return _lock; }
 749 
 750  protected:
 751   void initialize();
 752 
 753  public:
 754   SpaceManager(Metaspace::MetadataType mdtype,
 755                Mutex* lock);
 756   ~SpaceManager();
 757 
 758   enum ChunkMultiples {
 759     MediumChunkMultiple = 4
 760   };
 761 
 762   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
 763   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
 764   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
 765 
 766   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
 767 
 768   // Accessors
 769   bool is_class() const { return _mdtype == Metaspace::ClassType; }
 770 
 771   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
 772   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
 773   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
 774 
 775   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
 776 
 777   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
 778 
 779   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
 780   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
 781   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
 782   size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
 783   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
 784 
 785   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
 786 
 787   static Mutex* expand_lock() { return _expand_lock; }
 788 
 789   // Increment the per-Metaspace and global running sums for Metachunks
 790   // by the given size.  This is used when a Metachunk is added to
 791   // the in-use list.
 792   void inc_size_metrics(size_t words);
 793   // Increment the per-Metaspace and global running sums for Metablocks by the
 794   // given size.  This is used when a Metablock is allocated.
 795   void inc_used_metrics(size_t words);
 796   // Delete this SpaceManager's portion of the running sums.  That is, the
 797   // global running sums for the Metachunks and Metablocks are decremented
 798   // for all the Metachunks in use by this SpaceManager.
 799   void dec_total_from_size_metrics();
 800 
 801   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
 802   // or return the unadjusted size if the requested size is humongous.
 803   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
 804   size_t adjust_initial_chunk_size(size_t requested) const;
 805 
 806   // Get the initial chunk size for this metaspace type.
 807   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
 808 
 809   size_t sum_capacity_in_chunks_in_use() const;
 810   size_t sum_used_in_chunks_in_use() const;
 811   size_t sum_free_in_chunks_in_use() const;
 812   size_t sum_waste_in_chunks_in_use() const;
 813   size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
 814 
 815   size_t sum_count_in_chunks_in_use();
 816   size_t sum_count_in_chunks_in_use(ChunkIndex i);
 817 
 818   Metachunk* get_new_chunk(size_t chunk_word_size);
 819 
 820   // Block allocation and deallocation.
 821   // Allocates a block from the current chunk
 822   MetaWord* allocate(size_t word_size);
 823   // Allocates a block from a small chunk
 824   MetaWord* get_small_chunk_and_allocate(size_t word_size);
 825 
 826   // Helper for allocations
 827   MetaWord* allocate_work(size_t word_size);
 828 
 829   // Returns a block to the per manager freelist
 830   void deallocate(MetaWord* p, size_t word_size);
 831 
 832   // Based on the allocation size and a minimum chunk size,
 833   // calculate the chunk size to request (for expanding space for chunk allocation).
 834   size_t calc_chunk_size(size_t allocation_word_size);
 835 
 836   // Called when an allocation from the current chunk fails.
 837   // Gets a new chunk (may require getting a new virtual space),
 838   // and allocates from that chunk.
 839   MetaWord* grow_and_allocate(size_t word_size);
 840 
 841   // Report memory usage to MemoryService.
 842   void track_metaspace_memory_usage();
 843 
 844   // debugging support.
 845 
 846   void dump(outputStream* const out) const;
 847   void print_on(outputStream* st) const;
 848   void locked_print_chunks_in_use_on(outputStream* st) const;
 849 
 850   void verify();
 851   void verify_chunk_size(Metachunk* chunk);
 852 #ifdef ASSERT
 853   void verify_allocated_blocks_words();
 854 #endif
 855 
 856   // This adjusts the given size to be at least the minimum allocation size in
 857   // words for data in metaspace.  Essentially, the minimum size is currently 3 words.
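       // For example, on a 64-bit VM a request for one word (8 bytes) is first raised
       // to sizeof(Metablock) and then aligned to Metachunk::object_alignment(), which
       // yields the 3-word minimum mentioned above.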
 858   size_t get_allocation_word_size(size_t word_size) {
 859     size_t byte_size = word_size * BytesPerWord;
 860 
 861     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
 862     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
 863 
 864     size_t raw_word_size = raw_bytes_size / BytesPerWord;
 865     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
 866 
 867     return raw_word_size;
 868   }
 869 };
 870 
 871 uint const SpaceManager::_small_chunk_limit = 4;
 872 
 873 const char* SpaceManager::_expand_lock_name =
 874   "SpaceManager chunk allocation lock";
 875 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
 876 Mutex* const SpaceManager::_expand_lock =
 877   new Mutex(SpaceManager::_expand_lock_rank,
 878             SpaceManager::_expand_lock_name,
 879             Mutex::_allow_vm_block_flag,
 880             Monitor::_safepoint_check_never);
 881 
 882 void VirtualSpaceNode::inc_container_count() {
 883   assert_lock_strong(SpaceManager::expand_lock());
 884   _container_count++;
 885 }
 886 
 887 void VirtualSpaceNode::dec_container_count() {
 888   assert_lock_strong(SpaceManager::expand_lock());
 889   _container_count--;
 890 }
 891 
 892 #ifdef ASSERT
 893 void VirtualSpaceNode::verify_container_count() {
 894   assert(_container_count == container_count_slow(),
 895          "Inconsistency in container_count _container_count " UINTX_FORMAT
 896          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
 897 }
 898 #endif
 899 
 900 // BlockFreelist methods
 901 
 902 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
 903 
 904 BlockFreelist::~BlockFreelist() {
 905   delete _dictionary;
 906   if (_small_blocks != NULL) {
 907     delete _small_blocks;
 908   }
 909 }
 910 
 911 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
 912   assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
 913 
 914   Metablock* free_chunk = ::new (p) Metablock(word_size);
 915   if (word_size < SmallBlocks::small_block_max_size()) {
 916     small_blocks()->return_block(free_chunk, word_size);
 917   } else {
 918     dictionary()->return_chunk(free_chunk);
 919   }
 920   log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
 921             SIZE_FORMAT, p2i(free_chunk), word_size);
 922 }
 923 
 924 MetaWord* BlockFreelist::get_block(size_t word_size) {
 925   assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
 926 
 927   // Try small_blocks first.
 928   if (word_size < SmallBlocks::small_block_max_size()) {
 929     // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
 930     // this space manager.
 931     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
 932     if (new_block != NULL) {
 933       log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
 934               p2i(new_block), word_size);
 935       return new_block;
 936     }
 937   }
 938 
 939   if (word_size < BlockFreelist::min_dictionary_size()) {
 940     // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
 941     return NULL;
 942   }
 943 
 944   Metablock* free_block = dictionary()->get_chunk(word_size);
 945   if (free_block == NULL) {
 946     return NULL;
 947   }
 948 
 949   const size_t block_size = free_block->size();
 950   if (block_size > WasteMultiplier * word_size) {
 951     return_block((MetaWord*)free_block, block_size);
 952     return NULL;
 953   }
 954 
 955   MetaWord* new_block = (MetaWord*)free_block;
 956   assert(block_size >= word_size, "Incorrect size of block from freelist");
 957   const size_t unused = block_size - word_size;
 958   if (unused >= SmallBlocks::small_block_min_size()) {
 959     return_block(new_block + word_size, unused);
 960   }
 961 
 962   log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
 963             p2i(new_block), word_size);
 964   return new_block;
 965 }
 966 
 967 void BlockFreelist::print_on(outputStream* st) const {
 968   dictionary()->print_free_lists(st);
 969   if (_small_blocks != NULL) {
 970     _small_blocks->print_on(st);
 971   }
 972 }
 973 
 974 // VirtualSpaceNode methods
 975 
 976 VirtualSpaceNode::~VirtualSpaceNode() {
 977   _rs.release();
 978 #ifdef ASSERT
 979   size_t word_size = sizeof(*this) / BytesPerWord;
 980   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
 981 #endif
 982 }
 983 
 984 size_t VirtualSpaceNode::used_words_in_vs() const {
 985   return pointer_delta(top(), bottom(), sizeof(MetaWord));
 986 }
 987 
 988 // Space committed in the VirtualSpace
 989 size_t VirtualSpaceNode::capacity_words_in_vs() const {
 990   return pointer_delta(end(), bottom(), sizeof(MetaWord));
 991 }
 992 
 993 size_t VirtualSpaceNode::free_words_in_vs() const {
 994   return pointer_delta(end(), top(), sizeof(MetaWord));
 995 }
 996 
 997 // Allocates the chunk from the virtual space only.
 998 // This interface is also used internally for debugging.  Not all
 999 // chunks removed here are necessarily used for allocation.
1000 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1001   // Bottom of the new chunk
1002   MetaWord* chunk_limit = top();
1003   assert(chunk_limit != NULL, "Not safe to call this method");
1004 
1005   // The virtual spaces are always expanded by the
1006   // commit granularity to enforce the following condition.
1007   // Without this the is_available check will not work correctly.
1008   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1009       "The committed memory doesn't match the expanded memory.");
1010 
1011   if (!is_available(chunk_word_size)) {
1012     LogTarget(Debug, gc, metaspace, freelist) lt;
1013     if (lt.is_enabled()) {
1014       LogStream ls(lt);
1015       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1016       // Dump some information about the virtual space that is nearly full
1017       print_on(&ls);
1018     }
1019     return NULL;
1020   }
1021 
1022   // Take the space (bump top on the current virtual space).
1023   inc_top(chunk_word_size);
1024 
1025   // Initialize the chunk
1026   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
1027   return result;
1028 }
1029 
1030 
1031 // Expand the virtual space (commit more of the reserved space)
1032 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1033   size_t min_bytes = min_words * BytesPerWord;
1034   size_t preferred_bytes = preferred_words * BytesPerWord;
1035 
1036   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1037 
1038   if (uncommitted < min_bytes) {
1039     return false;
1040   }
1041 
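       // Commit the preferred amount, capped by what remains uncommitted in this node.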
1042   size_t commit = MIN2(preferred_bytes, uncommitted);
1043   bool result = virtual_space()->expand_by(commit, false);
1044 
1045   assert(result, "Failed to commit memory");
1046 
1047   return result;
1048 }
1049 
1050 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1051   assert_lock_strong(SpaceManager::expand_lock());
1052   Metachunk* result = take_from_committed(chunk_word_size);
1053   if (result != NULL) {
1054     inc_container_count();
1055   }
1056   return result;
1057 }
1058 
1059 bool VirtualSpaceNode::initialize() {
1060 
1061   if (!_rs.is_reserved()) {
1062     return false;
1063   }
1064 
1065   // These are necessary restrictions to make sure that the virtual space always
1066   // grows in steps of Metaspace::commit_alignment(). If both base and size are
1067   // aligned, only the middle alignment of the VirtualSpace is used.
1068   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
1069   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
1070 
1071   // ReservedSpaces marked as special will have the entire memory
1072   // pre-committed. Setting a committed size will make sure that
1073   // committed_size and actual_committed_size agree.
1074   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1075 
1076   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1077                                             Metaspace::commit_alignment());
1078   if (result) {
1079     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1080         "Checking that the pre-committed memory was registered by the VirtualSpace");
1081 
1082     set_top((MetaWord*)virtual_space()->low());
1083     set_reserved(MemRegion((HeapWord*)_rs.base(),
1084                  (HeapWord*)(_rs.base() + _rs.size())));
1085 
1086     assert(reserved()->start() == (HeapWord*) _rs.base(),
1087            "Reserved start was not set properly " PTR_FORMAT
1088            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1089     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1090            "Reserved size was not set properly " SIZE_FORMAT
1091            " != " SIZE_FORMAT, reserved()->word_size(),
1092            _rs.size() / BytesPerWord);
1093   }
1094 
1095   return result;
1096 }
1097 
1098 void VirtualSpaceNode::print_on(outputStream* st) const {
1099   size_t used = used_words_in_vs();
1100   size_t capacity = capacity_words_in_vs();
1101   VirtualSpace* vs = virtual_space();
1102   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
1103            "[" PTR_FORMAT ", " PTR_FORMAT ", "
1104            PTR_FORMAT ", " PTR_FORMAT ")",
1105            p2i(vs), capacity / K,
1106            capacity == 0 ? 0 : used * 100 / capacity,
1107            p2i(bottom()), p2i(top()), p2i(end()),
1108            p2i(vs->high_boundary()));
1109 }
1110 
1111 #ifdef ASSERT
1112 void VirtualSpaceNode::mangle() {
1113   size_t word_size = capacity_words_in_vs();
1114   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1115 }
1116 #endif // ASSERT
1117 
1118 // VirtualSpaceList methods
1119 // Space allocated from the VirtualSpace
1120 
1121 VirtualSpaceList::~VirtualSpaceList() {
1122   VirtualSpaceListIterator iter(virtual_space_list());
1123   while (iter.repeat()) {
1124     VirtualSpaceNode* vsl = iter.get_next();
1125     delete vsl;
1126   }
1127 }
1128 
1129 void VirtualSpaceList::inc_reserved_words(size_t v) {
1130   assert_lock_strong(SpaceManager::expand_lock());
1131   _reserved_words = _reserved_words + v;
1132 }
1133 void VirtualSpaceList::dec_reserved_words(size_t v) {
1134   assert_lock_strong(SpaceManager::expand_lock());
1135   _reserved_words = _reserved_words - v;
1136 }
1137 
1138 #define assert_committed_below_limit()                        \
1139   assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
1140          "Too much committed memory. Committed: " SIZE_FORMAT \
1141          " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1142          MetaspaceAux::committed_bytes(), MaxMetaspaceSize);
1143 
1144 void VirtualSpaceList::inc_committed_words(size_t v) {
1145   assert_lock_strong(SpaceManager::expand_lock());
1146   _committed_words = _committed_words + v;
1147 
1148   assert_committed_below_limit();
1149 }
1150 void VirtualSpaceList::dec_committed_words(size_t v) {
1151   assert_lock_strong(SpaceManager::expand_lock());
1152   _committed_words = _committed_words - v;
1153 
1154   assert_committed_below_limit();
1155 }
1156 
1157 void VirtualSpaceList::inc_virtual_space_count() {
1158   assert_lock_strong(SpaceManager::expand_lock());
1159   _virtual_space_count++;
1160 }
1161 void VirtualSpaceList::dec_virtual_space_count() {
1162   assert_lock_strong(SpaceManager::expand_lock());
1163   _virtual_space_count--;
1164 }
1165 
1166 void ChunkManager::remove_chunk(Metachunk* chunk) {
1167   size_t word_size = chunk->word_size();
1168   ChunkIndex index = list_index(word_size);
1169   if (index != HumongousIndex) {
1170     free_chunks(index)->remove_chunk(chunk);
1171   } else {
1172     humongous_dictionary()->remove_chunk(chunk);
1173   }
1174 
1175   // Chunk has been removed from the chunks free list, update counters.
1176   account_for_removed_chunk(chunk);
1177 }
1178 
1179 // Walk the list of VirtualSpaceNodes and delete
1180 // nodes with a 0 container_count.  Remove Metachunks in
1181 // the node from their respective freelists.
1182 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1183   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
1184   assert_lock_strong(SpaceManager::expand_lock());
1185   // Don't use a VirtualSpaceListIterator because this
1186   // list is being changed and a straightforward use of an iterator is not safe.
1187   VirtualSpaceNode* purged_vsl = NULL;
1188   VirtualSpaceNode* prev_vsl = virtual_space_list();
1189   VirtualSpaceNode* next_vsl = prev_vsl;
1190   while (next_vsl != NULL) {
1191     VirtualSpaceNode* vsl = next_vsl;
1192     DEBUG_ONLY(vsl->verify_container_count();)
1193     next_vsl = vsl->next();
1194     // Don't free the current virtual space since it will likely
1195     // be needed soon.
1196     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1197       // Unlink it from the list
1198       if (prev_vsl == vsl) {
1199         // This is the case of the current node being the first node.
1200         assert(vsl == virtual_space_list(), "Expected to be the first node");
1201         set_virtual_space_list(vsl->next());
1202       } else {
1203         prev_vsl->set_next(vsl->next());
1204       }
1205 
1206       vsl->purge(chunk_manager);
1207       dec_reserved_words(vsl->reserved_words());
1208       dec_committed_words(vsl->committed_words());
1209       dec_virtual_space_count();
1210       purged_vsl = vsl;
1211       delete vsl;
1212     } else {
1213       prev_vsl = vsl;
1214     }
1215   }
1216 #ifdef ASSERT
1217   if (purged_vsl != NULL) {
1218     // List should be stable enough to use an iterator here.
1219     VirtualSpaceListIterator iter(virtual_space_list());
1220     while (iter.repeat()) {
1221       VirtualSpaceNode* vsl = iter.get_next();
1222       assert(vsl != purged_vsl, "Purge of vsl failed");
1223     }
1224   }
1225 #endif
1226 }
1227 
1228 
1229 // This function looks at the mmap regions in the metaspace without locking.
1230 // The chunks are added with store ordering and are not deleted except
1231 // at class unloading time during a safepoint.
1232 bool VirtualSpaceList::contains(const void* ptr) {
1233   // List should be stable enough to use an iterator here because removing virtual
1234   // space nodes is only allowed at a safepoint.
1235   VirtualSpaceListIterator iter(virtual_space_list());
1236   while (iter.repeat()) {
1237     VirtualSpaceNode* vsn = iter.get_next();
1238     if (vsn->contains(ptr)) {
1239       return true;
1240     }
1241   }
1242   return false;
1243 }
1244 
1245 void VirtualSpaceList::retire_current_virtual_space() {
1246   assert_lock_strong(SpaceManager::expand_lock());
1247 
1248   VirtualSpaceNode* vsn = current_virtual_space();
1249 
1250   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
1251                                   Metaspace::chunk_manager_metadata();
1252 
1253   vsn->retire(cm);
1254 }
1255 
1256 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
1257   DEBUG_ONLY(verify_container_count();)
1258   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
1259     ChunkIndex index = (ChunkIndex)i;
1260     size_t chunk_size = chunk_manager->size_by_index(index);
1261 
1262     while (free_words_in_vs() >= chunk_size) {
1263       Metachunk* chunk = get_chunk_vs(chunk_size);
1264       assert(chunk != NULL, "allocation should have been successful");
1265 
1266       chunk_manager->return_single_chunk(index, chunk);
1267     }
1268     DEBUG_ONLY(verify_container_count();)
1269   }
1270   assert(free_words_in_vs() == 0, "should be empty now");
1271 }
1272 
1273 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
1274                                    _is_class(false),
1275                                    _virtual_space_list(NULL),
1276                                    _current_virtual_space(NULL),
1277                                    _reserved_words(0),
1278                                    _committed_words(0),
1279                                    _virtual_space_count(0) {
1280   MutexLockerEx cl(SpaceManager::expand_lock(),
1281                    Mutex::_no_safepoint_check_flag);
1282   create_new_virtual_space(word_size);
1283 }
1284 
1285 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
1286                                    _is_class(true),
1287                                    _virtual_space_list(NULL),
1288                                    _current_virtual_space(NULL),
1289                                    _reserved_words(0),
1290                                    _committed_words(0),
1291                                    _virtual_space_count(0) {
1292   MutexLockerEx cl(SpaceManager::expand_lock(),
1293                    Mutex::_no_safepoint_check_flag);
1294   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
1295   bool succeeded = class_entry->initialize();
1296   if (succeeded) {
1297     link_vs(class_entry);
1298   }
1299 }
1300 
1301 size_t VirtualSpaceList::free_bytes() {
1302   return virtual_space_list()->free_words_in_vs() * BytesPerWord;
1303 }
1304 
1305 // Allocate another meta virtual space and add it to the list.
1306 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
1307   assert_lock_strong(SpaceManager::expand_lock());
1308 
1309   if (is_class()) {
1310     assert(false, "We currently don't support more than one VirtualSpace for"
1311                   " the compressed class space. The initialization of the"
1312                   " CCS uses another code path and should not hit this path.");
1313     return false;
1314   }
1315 
1316   if (vs_word_size == 0) {
1317     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
1318     return false;
1319   }
1320 
1321   // Reserve the space
1322   size_t vs_byte_size = vs_word_size * BytesPerWord;
1323   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
1324 
1325   // Allocate the meta virtual space and initialize it.
1326   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
1327   if (!new_entry->initialize()) {
1328     delete new_entry;
1329     return false;
1330   } else {
1331     assert(new_entry->reserved_words() == vs_word_size,
1332         "Reserved memory size differs from requested memory size");
1333     // ensure lock-free iteration sees fully initialized node
1334     OrderAccess::storestore();
1335     link_vs(new_entry);
1336     return true;
1337   }
1338 }
1339 
1340 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1341   if (virtual_space_list() == NULL) {
1342       set_virtual_space_list(new_entry);
1343   } else {
1344     current_virtual_space()->set_next(new_entry);
1345   }
1346   set_current_virtual_space(new_entry);
1347   inc_reserved_words(new_entry->reserved_words());
1348   inc_committed_words(new_entry->committed_words());
1349   inc_virtual_space_count();
1350 #ifdef ASSERT
1351   new_entry->mangle();
1352 #endif
1353   LogTarget(Trace, gc, metaspace) lt;
1354   if (lt.is_enabled()) {
1355     LogStream ls(lt);
1356     VirtualSpaceNode* vsl = current_virtual_space();
1357     ResourceMark rm;
1358     vsl->print_on(&ls);
1359   }
1360 }
1361 
1362 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1363                                       size_t min_words,
1364                                       size_t preferred_words) {
1365   size_t before = node->committed_words();
1366 
1367   bool result = node->expand_by(min_words, preferred_words);
1368 
1369   size_t after = node->committed_words();
1370 
1371   // after and before can be the same if the memory was pre-committed.
1372   assert(after >= before, "Inconsistency");
1373   inc_committed_words(after - before);
1374 
1375   return result;
1376 }
1377 
1378 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
1379   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
1380   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
1381   assert(min_words <= preferred_words, "Invalid arguments");
1382 
1383   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
1384     return  false;
1385   }
1386 
1387   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
1388   if (allowed_expansion_words < min_words) {
1389     return false;
1390   }
1391 
1392   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
1393 
1394   // Commit more memory from the current virtual space.
1395   bool vs_expanded = expand_node_by(current_virtual_space(),
1396                                     min_words,
1397                                     max_expansion_words);
1398   if (vs_expanded) {
1399     return true;
1400   }
1401   retire_current_virtual_space();
1402 
1403   // Get another virtual space.
1404   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
1405   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
1406 
1407   if (create_new_virtual_space(grow_vs_words)) {
1408     if (current_virtual_space()->is_pre_committed()) {
1409       // The memory was pre-committed, so we are done here.
1410       assert(min_words <= current_virtual_space()->committed_words(),
1411           "The new VirtualSpace was pre-committed, so it"
1412           " should be large enough to fit the alloc request.");
1413       return true;
1414     }
1415 
1416     return expand_node_by(current_virtual_space(),
1417                           min_words,
1418                           max_expansion_words);
1419   }
1420 
1421   return false;
1422 }
1423 
1424 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
1425 
1426   // Allocate a chunk out of the current virtual space.
1427   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
1428 
1429   if (next != NULL) {
1430     return next;
1431   }
1432 
1433   // The expand amount is currently only determined by the requested sizes
1434   // and not how much committed memory is left in the current virtual space.
1435 
1436   size_t min_word_size       = align_up(chunk_word_size,              Metaspace::commit_alignment_words());
1437   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
1438   if (min_word_size >= preferred_word_size) {
1439     // Can happen when humongous chunks are allocated.
1440     preferred_word_size = min_word_size;
1441   }
1442 
1443   bool expanded = expand_by(min_word_size, preferred_word_size);
1444   if (expanded) {
1445     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
1446     assert(next != NULL, "The allocation was expected to succeed after the expansion");
1447   }
1448 
1449   return next;
1450 }
1451 
1452 void VirtualSpaceList::print_on(outputStream* st) const {
1453   VirtualSpaceListIterator iter(virtual_space_list());
1454   while (iter.repeat()) {
1455     VirtualSpaceNode* node = iter.get_next();
1456     node->print_on(st);
1457   }
1458 }
1459 
1460 // MetaspaceGC methods
1461 
1462 // VM_CollectForMetadataAllocation is the vm operation used to GC metadata.
1463 // Within the VM operation, after the GC, the attempt to allocate the metadata
1464 // should succeed.  If the GC did not free enough space for the metaspace
1465 // allocation, the HWM is increased so that another virtual space will be
1466 // allocated for the metadata.  With the perm gen, the increase in the perm
1467 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
1468 // metaspace policy uses those as the small and large steps for the HWM.
1469 //
1470 // After the GC the compute_new_size() for MetaspaceGC is called to
1471 // resize the capacity of the metaspaces.  The current implementation
1472 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
1473 // to resize the Java heap by some GCs.  New flags can be implemented
1474 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
1475 // free space is desirable in the metaspace capacity to decide how much
1476 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
1477 // free space is desirable in the metaspace capacity before decreasing
1478 // the HWM.
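     //
     // As an illustration of the ratio arithmetic in compute_new_size() below
     // (hypothetical flag values, not necessarily the defaults): with
     // MinMetaspaceFreeRatio=40 the maximum used percentage is 60%, so a
     // used_after_gc of 60M gives a minimum desired capacity of
     // 60M / 0.60 = 100M; with MaxMetaspaceFreeRatio=70 the minimum used
     // percentage is 30%, so the same usage gives a maximum desired capacity
     // of 60M / 0.30 = 200M.  The HWM is only raised while it is below the
     // former and only considered for shrinking while it is above the latter.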
1479 
1480 // Calculate the amount to increase the high water mark (HWM).
1481 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1482 // another expansion is not requested too soon.  If that is not
1483 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
1484 // If that is still not enough, expand by the size of the allocation
1485 // plus some.
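     // For example (hypothetical flag values of MinMetaspaceExpansion=256K and
     // MaxMetaspaceExpansion=4M): a request whose commit-aligned size is at
     // most 256K becomes 256K; a 1M request falls between the two bounds and
     // becomes 4M; a 6M request exceeds MaxMetaspaceExpansion and becomes the
     // aligned 6M plus the 256K minimum.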
1486 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
1487   size_t min_delta = MinMetaspaceExpansion;
1488   size_t max_delta = MaxMetaspaceExpansion;
1489   size_t delta = align_up(bytes, Metaspace::commit_alignment());
1490 
1491   if (delta <= min_delta) {
1492     delta = min_delta;
1493   } else if (delta <= max_delta) {
1494     // Don't want to hit the high water mark on the next
1495     // allocation so make the delta greater than just enough
1496     // for this allocation.
1497     delta = max_delta;
1498   } else {
1499     // This allocation is large but the next ones are probably not
1500     // so increase by the minimum.
1501     delta = delta + min_delta;
1502   }
1503 
1504   assert_is_aligned(delta, Metaspace::commit_alignment());
1505 
1506   return delta;
1507 }
1508 
1509 size_t MetaspaceGC::capacity_until_GC() {
1510   size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
1511   assert(value >= MetaspaceSize, "Not initialized properly?");
1512   return value;
1513 }
1514 
1515 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
1516   assert_is_aligned(v, Metaspace::commit_alignment());
1517 
1518   size_t capacity_until_GC = (size_t) _capacity_until_GC;
1519   size_t new_value = capacity_until_GC + v;
1520 
1521   if (new_value < capacity_until_GC) {
1522     // The addition wrapped around, set new_value to aligned max value.
1523     new_value = align_down(max_uintx, Metaspace::commit_alignment());
1524   }
1525 
1526   intptr_t expected = (intptr_t) capacity_until_GC;
1527   intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);
1528 
1529   if (expected != actual) {
1530     return false;
1531   }
1532 
1533   if (new_cap_until_GC != NULL) {
1534     *new_cap_until_GC = new_value;
1535   }
1536   if (old_cap_until_GC != NULL) {
1537     *old_cap_until_GC = capacity_until_GC;
1538   }
1539   return true;
1540 }
1541 
1542 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
1543   assert_is_aligned(v, Metaspace::commit_alignment());
1544 
1545   return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
1546 }
1547 
1548 void MetaspaceGC::initialize() {
1549   // Set the high-water mark to MaxMetaspaceSize during VM initialization since
1550   // we can't do a GC during initialization.
1551   _capacity_until_GC = MaxMetaspaceSize;
1552 }
1553 
1554 void MetaspaceGC::post_initialize() {
1555   // Reset the high-water mark once the VM initialization is done.
1556   _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
1557 }
1558 
1559 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
1560   // Check if the compressed class space is full.
1561   if (is_class && Metaspace::using_class_space()) {
1562     size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
1563     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
1564       return false;
1565     }
1566   }
1567 
1568   // Check if the user has imposed a limit on the metaspace memory.
1569   size_t committed_bytes = MetaspaceAux::committed_bytes();
1570   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
1571     return false;
1572   }
1573 
1574   return true;
1575 }
1576 
1577 size_t MetaspaceGC::allowed_expansion() {
1578   size_t committed_bytes = MetaspaceAux::committed_bytes();
1579   size_t capacity_until_gc = capacity_until_GC();
1580 
1581   assert(capacity_until_gc >= committed_bytes,
1582          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
1583          capacity_until_gc, committed_bytes);
1584 
1585   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
1586   size_t left_until_GC = capacity_until_gc - committed_bytes;
1587   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
1588 
1589   return left_to_commit / BytesPerWord;
1590 }
1591 
1592 void MetaspaceGC::compute_new_size() {
1593   assert(_shrink_factor <= 100, "invalid shrink factor");
1594   uint current_shrink_factor = _shrink_factor;
1595   _shrink_factor = 0;
1596 
1597   // Using committed_bytes() for used_after_gc is an overestimation, since the
1598   // chunk free lists are included in committed_bytes() and the memory in an
1599   // un-fragmented chunk free list is available for future allocations.
1600   // However, if the chunk free lists becomes fragmented, then the memory may
1601   // not be available for future allocations and the memory is therefore "in use".
1602   // Including the chunk free lists in the definition of "in use" is therefore
1603   // necessary. Not including the chunk free lists can cause capacity_until_GC to
1604   // shrink below committed_bytes() and this has caused serious bugs in the past.
1605   const size_t used_after_gc = MetaspaceAux::committed_bytes();
1606   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1607 
1608   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1609   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1610 
1611   const double min_tmp = used_after_gc / maximum_used_percentage;
1612   size_t minimum_desired_capacity =
1613     (size_t)MIN2(min_tmp, double(max_uintx));
1614   // Don't shrink less than the initial generation size
1615   minimum_desired_capacity = MAX2(minimum_desired_capacity,
1616                                   MetaspaceSize);
1617 
1618   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
1619   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
1620                            minimum_free_percentage, maximum_used_percentage);
1621   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
1622 
1623 
1624   size_t shrink_bytes = 0;
1625   if (capacity_until_GC < minimum_desired_capacity) {
1626     // The current high-water mark is below the minimum desired
1627     // capacity, so increase the HWM.
1628     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1629     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
1630     // Don't expand unless it's significant
1631     if (expand_bytes >= MinMetaspaceExpansion) {
1632       size_t new_capacity_until_GC = 0;
1633       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1634       assert(succeeded, "Should always successfully increment HWM when at safepoint");
1635 
1636       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1637                                                new_capacity_until_GC,
1638                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
1639       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
1640                                minimum_desired_capacity / (double) K,
1641                                expand_bytes / (double) K,
1642                                MinMetaspaceExpansion / (double) K,
1643                                new_capacity_until_GC / (double) K);
1644     }
1645     return;
1646   }
1647 
1648   // No expansion, now see if we want to shrink
1649   // We would never want to shrink more than this
1650   assert(capacity_until_GC >= minimum_desired_capacity,
1651          SIZE_FORMAT " >= " SIZE_FORMAT,
1652          capacity_until_GC, minimum_desired_capacity);
1653   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1654 
1655   // Should shrinking be considered?
1656   if (MaxMetaspaceFreeRatio < 100) {
1657     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1658     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1659     const double max_tmp = used_after_gc / minimum_used_percentage;
1660     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1661     maximum_desired_capacity = MAX2(maximum_desired_capacity,
1662                                     MetaspaceSize);
1663     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
1664                              maximum_free_percentage, minimum_used_percentage);
1665     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
1666                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
1667 
1668     assert(minimum_desired_capacity <= maximum_desired_capacity,
1669            "sanity check");
1670 
1671     if (capacity_until_GC > maximum_desired_capacity) {
1672       // Capacity too large, compute shrinking size
1673       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1674       // We don't want shrink all the way back to initSize if people call
1675       // System.gc(), because some programs do that between "phases" and then
1676       // we'd just have to grow the metaspace again for the next phase.  So we
1677       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1678       // on the third call, and 100% by the fourth call.  But if we recompute
1679       // size without shrinking, it goes back to 0%.
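           // As an illustration of the factor progression: with an excess of
           // 10M over the maximum desired capacity, factors of 0, 10, 40 and
           // 100 shrink by 0M, 1M, 4M and 10M respectively (before commit
           // alignment).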
1680       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1681 
1682       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
1683 
1684       assert(shrink_bytes <= max_shrink_bytes,
1685              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1686              shrink_bytes, max_shrink_bytes);
1687       if (current_shrink_factor == 0) {
1688         _shrink_factor = 10;
1689       } else {
1690         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1691       }
1692       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
1693                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
1694       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
1695                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
1696     }
1697   }
1698 
1699   // Don't shrink unless it's significant
1700   if (shrink_bytes >= MinMetaspaceExpansion &&
1701       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1702     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1703     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1704                                              new_capacity_until_GC,
1705                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
1706   }
1707 }
1708 
1709 // Metadebug methods
1710 
1711 void Metadebug::init_allocation_fail_alot_count() {
1712   if (MetadataAllocationFailALot) {
1713     _allocation_fail_alot_count =
1714       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1715   }
1716 }
1717 
1718 #ifdef ASSERT
1719 bool Metadebug::test_metadata_failure() {
1720   if (MetadataAllocationFailALot &&
1721       Threads::is_vm_complete()) {
1722     if (_allocation_fail_alot_count > 0) {
1723       _allocation_fail_alot_count--;
1724     } else {
1725       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
1726       init_allocation_fail_alot_count();
1727       return true;
1728     }
1729   }
1730   return false;
1731 }
1732 #endif
1733 
1734 // ChunkManager methods
1735 
1736 size_t ChunkManager::free_chunks_total_words() {
1737   return _free_chunks_total;
1738 }
1739 
1740 size_t ChunkManager::free_chunks_total_bytes() {
1741   return free_chunks_total_words() * BytesPerWord;
1742 }
1743 
1744 // Update internal accounting after a chunk was added
1745 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
1746   assert_lock_strong(SpaceManager::expand_lock());
1747   _free_chunks_count ++;
1748   _free_chunks_total += c->word_size();
1749 }
1750 
1751 // Update internal accounting after a chunk was removed
1752 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
1753   assert_lock_strong(SpaceManager::expand_lock());
1754   assert(_free_chunks_count >= 1,
1755     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
1756   assert(_free_chunks_total >= c->word_size(),
1757     "ChunkManager::_free_chunks_total: about to go negative "
1758      "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
1759   _free_chunks_count --;
1760   _free_chunks_total -= c->word_size();
1761 }
1762 
1763 size_t ChunkManager::free_chunks_count() {
1764 #ifdef ASSERT
1765   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1766     MutexLockerEx cl(SpaceManager::expand_lock(),
1767                      Mutex::_no_safepoint_check_flag);
1768     // This lock is only needed in debug because the verification
1769     // of the _free_chunks_totals walks the list of free chunks
1770     slow_locked_verify_free_chunks_count();
1771   }
1772 #endif
1773   return _free_chunks_count;
1774 }
1775 
1776 ChunkIndex ChunkManager::list_index(size_t size) {
1777   if (size_by_index(SpecializedIndex) == size) {
1778     return SpecializedIndex;
1779   }
1780   if (size_by_index(SmallIndex) == size) {
1781     return SmallIndex;
1782   }
1783   const size_t med_size = size_by_index(MediumIndex);
1784   if (med_size == size) {
1785     return MediumIndex;
1786   }
1787 
1788   assert(size > med_size, "Not a humongous chunk");
1789   return HumongousIndex;
1790 }
1791 
1792 size_t ChunkManager::size_by_index(ChunkIndex index) {
1793   index_bounds_check(index);
1794   assert(index != HumongousIndex, "Do not call for humongous chunks.");
1795   return free_chunks(index)->size();
1796 }
1797 
1798 void ChunkManager::locked_verify_free_chunks_total() {
1799   assert_lock_strong(SpaceManager::expand_lock());
1800   assert(sum_free_chunks() == _free_chunks_total,
1801          "_free_chunks_total " SIZE_FORMAT " is not the"
1802          " same as sum " SIZE_FORMAT, _free_chunks_total,
1803          sum_free_chunks());
1804 }
1805 
1806 void ChunkManager::verify_free_chunks_total() {
1807   MutexLockerEx cl(SpaceManager::expand_lock(),
1808                      Mutex::_no_safepoint_check_flag);
1809   locked_verify_free_chunks_total();
1810 }
1811 
1812 void ChunkManager::locked_verify_free_chunks_count() {
1813   assert_lock_strong(SpaceManager::expand_lock());
1814   assert(sum_free_chunks_count() == _free_chunks_count,
1815          "_free_chunks_count " SIZE_FORMAT " is not the"
1816          " same as sum " SIZE_FORMAT, _free_chunks_count,
1817          sum_free_chunks_count());
1818 }
1819 
1820 void ChunkManager::verify_free_chunks_count() {
1821 #ifdef ASSERT
1822   MutexLockerEx cl(SpaceManager::expand_lock(),
1823                      Mutex::_no_safepoint_check_flag);
1824   locked_verify_free_chunks_count();
1825 #endif
1826 }
1827 
1828 void ChunkManager::verify() {
1829   MutexLockerEx cl(SpaceManager::expand_lock(),
1830                      Mutex::_no_safepoint_check_flag);
1831   locked_verify();
1832 }
1833 
1834 void ChunkManager::locked_verify() {
1835   locked_verify_free_chunks_count();
1836   locked_verify_free_chunks_total();
1837 }
1838 
1839 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1840   assert_lock_strong(SpaceManager::expand_lock());
1841   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1842                 _free_chunks_total, _free_chunks_count);
1843 }
1844 
1845 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1846   assert_lock_strong(SpaceManager::expand_lock());
1847   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1848                 sum_free_chunks(), sum_free_chunks_count());
1849 }
1850 
1851 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1852   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
1853          "Bad index: %d", (int)index);
1854 
1855   return &_free_chunks[index];
1856 }
1857 
1858 // These methods, which sum the free chunk lists, are used by printing
1859 // methods that run in product builds.
1860 size_t ChunkManager::sum_free_chunks() {
1861   assert_lock_strong(SpaceManager::expand_lock());
1862   size_t result = 0;
1863   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1864     ChunkList* list = free_chunks(i);
1865 
1866     if (list == NULL) {
1867       continue;
1868     }
1869 
1870     result = result + list->count() * list->size();
1871   }
1872   result = result + humongous_dictionary()->total_size();
1873   return result;
1874 }
1875 
1876 size_t ChunkManager::sum_free_chunks_count() {
1877   assert_lock_strong(SpaceManager::expand_lock());
1878   size_t count = 0;
1879   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1880     ChunkList* list = free_chunks(i);
1881     if (list == NULL) {
1882       continue;
1883     }
1884     count = count + list->count();
1885   }
1886   count = count + humongous_dictionary()->total_free_blocks();
1887   return count;
1888 }
1889 
1890 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1891   ChunkIndex index = list_index(word_size);
1892   assert(index < HumongousIndex, "No humongous list");
1893   return free_chunks(index);
1894 }
1895 
1896 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1897   assert_lock_strong(SpaceManager::expand_lock());
1898 
1899   slow_locked_verify();
1900 
1901   Metachunk* chunk = NULL;
1902   if (list_index(word_size) != HumongousIndex) {
1903     ChunkList* free_list = find_free_chunks_list(word_size);
1904     assert(free_list != NULL, "Sanity check");
1905 
1906     chunk = free_list->head();
1907 
1908     if (chunk == NULL) {
1909       return NULL;
1910     }
1911 
1912     // Remove the chunk as the head of the list.
1913     free_list->remove_chunk(chunk);
1914 
1915     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1916                                        p2i(free_list), p2i(chunk), chunk->word_size());
1917   } else {
1918     chunk = humongous_dictionary()->get_chunk(word_size);
1919 
1920     if (chunk == NULL) {
1921       return NULL;
1922     }
1923 
1924     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
1925                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
1926   }
1927 
1928   // Chunk has been removed from the chunk manager; update counters.
1929   account_for_removed_chunk(chunk);
1930 
1931   // Remove it from the links to this freelist
1932   chunk->set_next(NULL);
1933   chunk->set_prev(NULL);
1934 #ifdef ASSERT
1935   // Chunk is no longer on any freelist. Setting this to false makes
1936   // container_count_slow() work.
1937   chunk->set_is_tagged_free(false);
1938 #endif
1939   chunk->container()->inc_container_count();
1940 
1941   slow_locked_verify();
1942   return chunk;
1943 }
1944 
1945 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1946   assert_lock_strong(SpaceManager::expand_lock());
1947   slow_locked_verify();
1948 
1949   // Take from the beginning of the list
1950   Metachunk* chunk = free_chunks_get(word_size);
1951   if (chunk == NULL) {
1952     return NULL;
1953   }
1954 
1955   assert((word_size <= chunk->word_size()) ||
1956          (list_index(chunk->word_size()) == HumongousIndex),
1957          "Non-humongous variable sized chunk");
1958   LogTarget(Debug, gc, metaspace, freelist) lt;
1959   if (lt.is_enabled()) {
1960     size_t list_count;
1961     if (list_index(word_size) < HumongousIndex) {
1962       ChunkList* list = find_free_chunks_list(word_size);
1963       list_count = list->count();
1964     } else {
1965       list_count = humongous_dictionary()->total_count();
1966     }
1967     LogStream ls(lt);
1968     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1969              p2i(this), p2i(chunk), chunk->word_size(), list_count);
1970     ResourceMark rm;
1971     locked_print_free_chunks(&ls);
1972   }
1973 
1974   return chunk;
1975 }
1976 
1977 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
1978   assert_lock_strong(SpaceManager::expand_lock());
1979   assert(chunk != NULL, "Expected chunk.");
1980   assert(chunk->container() != NULL, "Container should have been set.");
1981   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
1982   index_bounds_check(index);
1983 
1984   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
1985   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
1986   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
1987   NOT_PRODUCT(chunk->mangle(badMetaWordVal);)
1988 
1989   if (index != HumongousIndex) {
1990     // Return non-humongous chunk to freelist.
1991     ChunkList* list = free_chunks(index);
1992     assert(list->size() == chunk->word_size(), "Wrong chunk type.");
1993     list->return_chunk_at_head(chunk);
1994     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
1995         chunk_size_name(index), p2i(chunk));
1996   } else {
1997     // Return humongous chunk to dictionary.
1998     assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
1999     assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
2000            "Humongous chunk has wrong alignment.");
2001     _humongous_dictionary.return_chunk(chunk);
2002     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
2003         chunk_size_name(index), p2i(chunk), chunk->word_size());
2004   }
2005   chunk->container()->dec_container_count();
2006   DEBUG_ONLY(chunk->set_is_tagged_free(true);)
2007 
2008   // Chunk has been added; update counters.
2009   account_for_added_chunk(chunk);
2010 
2011 }
2012 
2013 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
2014   index_bounds_check(index);
2015   if (chunks == NULL) {
2016     return;
2017   }
2018   LogTarget(Trace, gc, metaspace, freelist) log;
2019   if (log.is_enabled()) { // tracing
2020     log.print("returning list of %s chunks...", chunk_size_name(index));
2021   }
2022   unsigned num_chunks_returned = 0;
2023   size_t size_chunks_returned = 0;
2024   Metachunk* cur = chunks;
2025   while (cur != NULL) {
2026     // Capture the next link before it is changed
2027     // by the call to return_chunk_at_head();
2028     Metachunk* next = cur->next();
2029     if (log.is_enabled()) { // tracing
2030       num_chunks_returned ++;
2031       size_chunks_returned += cur->word_size();
2032     }
2033     return_single_chunk(index, cur);
2034     cur = next;
2035   }
2036   if (log.is_enabled()) { // tracing
2037     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
2038         num_chunks_returned, chunk_size_name(index), size_chunks_returned);
2039     if (index != HumongousIndex) {
2040       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
2041     } else {
2042       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
2043     }
2044   }
2045 }
2046 
2047 void ChunkManager::print_on(outputStream* out) const {
2048   const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics(out);
2049 }
2050 
2051 // SpaceManager methods
2052 
2053 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
2054   size_t chunk_sizes[] = {
2055       specialized_chunk_size(is_class_space),
2056       small_chunk_size(is_class_space),
2057       medium_chunk_size(is_class_space)
2058   };
2059 
2060   // Adjust up to one of the fixed chunk sizes ...
2061   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
2062     if (requested <= chunk_sizes[i]) {
2063       return chunk_sizes[i];
2064     }
2065   }
2066 
2067   // ... or return the size as a humongous chunk.
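       // (For example, a request just above the small chunk size is rounded up
       // to the medium chunk size, while a request larger than the medium chunk
       // size is returned unchanged and treated as humongous.)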
2068   return requested;
2069 }
2070 
2071 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
2072   return adjust_initial_chunk_size(requested, is_class());
2073 }
2074 
2075 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
2076   size_t requested;
2077 
2078   if (is_class()) {
2079     switch (type) {
2080     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
2081     case Metaspace::ROMetaspaceType:         requested = ClassSpecializedChunk; break;
2082     case Metaspace::ReadWriteMetaspaceType:  requested = ClassSpecializedChunk; break;
2083     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
2084     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
2085     default:                                 requested = ClassSmallChunk; break;
2086     }
2087   } else {
2088     switch (type) {
2089     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
2090     case Metaspace::ROMetaspaceType:         requested = SharedReadOnlySize / wordSize; break;
2091     case Metaspace::ReadWriteMetaspaceType:  requested = SharedReadWriteSize / wordSize; break;
2092     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
2093     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
2094     default:                                 requested = SmallChunk; break;
2095     }
2096   }
2097 
2098   // Adjust to one of the fixed chunk sizes (unless humongous)
2099   const size_t adjusted = adjust_initial_chunk_size(requested);
2100 
2101   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
2102          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
2103 
2104   return adjusted;
2105 }
2106 
2107 size_t SpaceManager::sum_free_in_chunks_in_use() const {
2108   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2109   size_t free = 0;
2110   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2111     Metachunk* chunk = chunks_in_use(i);
2112     while (chunk != NULL) {
2113       free += chunk->free_word_size();
2114       chunk = chunk->next();
2115     }
2116   }
2117   return free;
2118 }
2119 
2120 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
2121   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2122   size_t result = 0;
2123   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2124     result += sum_waste_in_chunks_in_use(i);
2125   }
2126 
2127   return result;
2128 }
2129 
2130 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
2131   size_t result = 0;
2132   Metachunk* chunk = chunks_in_use(index);
2133   // Count the free space in all the chunks but not the
2134   // current chunk from which allocations are still being done.
2135   while (chunk != NULL) {
2136     if (chunk != current_chunk()) {
2137       result += chunk->free_word_size();
2138     }
2139     chunk = chunk->next();
2140   }
2141   return result;
2142 }
2143 
2144 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
2145   // For CMS use "allocated_chunks_words()", which does not need the
2146   // Metaspace lock.  For the other collectors sum over the
2147   // lists.  Use both methods as a check that "allocated_chunks_words()"
2148   // is correct.  That is, sum_capacity_in_chunks_in_use() is too expensive
2149   // to use in the product, so allocated_chunks_words() should be used there,
2150   // but allow for checking that allocated_chunks_words() returns the same
2151   // value as sum_capacity_in_chunks_in_use(), which is the definitive
2152   // answer.
2153   if (UseConcMarkSweepGC) {
2154     return allocated_chunks_words();
2155   } else {
2156     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2157     size_t sum = 0;
2158     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2159       Metachunk* chunk = chunks_in_use(i);
2160       while (chunk != NULL) {
2161         sum += chunk->word_size();
2162         chunk = chunk->next();
2163       }
2164     }
2165     return sum;
2166   }
2167 }
2168 
2169 size_t SpaceManager::sum_count_in_chunks_in_use() {
2170   size_t count = 0;
2171   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2172     count = count + sum_count_in_chunks_in_use(i);
2173   }
2174 
2175   return count;
2176 }
2177 
2178 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
2179   size_t count = 0;
2180   Metachunk* chunk = chunks_in_use(i);
2181   while (chunk != NULL) {
2182     count++;
2183     chunk = chunk->next();
2184   }
2185   return count;
2186 }
2187 
2188 
2189 size_t SpaceManager::sum_used_in_chunks_in_use() const {
2190   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2191   size_t used = 0;
2192   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2193     Metachunk* chunk = chunks_in_use(i);
2194     while (chunk != NULL) {
2195       used += chunk->used_word_size();
2196       chunk = chunk->next();
2197     }
2198   }
2199   return used;
2200 }
2201 
2202 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
2203 
2204   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2205     Metachunk* chunk = chunks_in_use(i);
2206     st->print("SpaceManager: %s " PTR_FORMAT,
2207                  chunk_size_name(i), p2i(chunk));
2208     if (chunk != NULL) {
2209       st->print_cr(" free " SIZE_FORMAT,
2210                    chunk->free_word_size());
2211     } else {
2212       st->cr();
2213     }
2214   }
2215 
2216   chunk_manager()->locked_print_free_chunks(st);
2217   chunk_manager()->locked_print_sum_free_chunks(st);
2218 }
2219 
2220 size_t SpaceManager::calc_chunk_size(size_t word_size) {
2221 
2222   // Decide between a small chunk and a medium chunk.  Up to
2223   // _small_chunk_limit small chunks can be allocated.
2224   // After that a medium chunk is preferred.
2225   size_t chunk_word_size;
2226   if (chunks_in_use(MediumIndex) == NULL &&
2227       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2228     chunk_word_size = (size_t) small_chunk_size();
2229     if (word_size + Metachunk::overhead() > small_chunk_size()) {
2230       chunk_word_size = medium_chunk_size();
2231     }
2232   } else {
2233     chunk_word_size = medium_chunk_size();
2234   }
2235 
2236   // Might still need a humongous chunk.  Enforce
2237   // humongous allocation sizes to be aligned up to
2238   // the smallest chunk size.
2239   size_t if_humongous_sized_chunk =
2240     align_up(word_size + Metachunk::overhead(),
2241                   smallest_chunk_size());
2242   chunk_word_size =
2243     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2244 
2245   assert(!SpaceManager::is_humongous(word_size) ||
2246          chunk_word_size == if_humongous_sized_chunk,
2247          "Size calculation is wrong, word_size " SIZE_FORMAT
2248          " chunk_word_size " SIZE_FORMAT,
2249          word_size, chunk_word_size);
2250   Log(gc, metaspace, alloc) log;
2251   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
2252     log.debug("Metadata humongous allocation:");
2253     log.debug("  word_size " PTR_FORMAT, word_size);
2254     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
2255     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
2256   }
2257   return chunk_word_size;
2258 }
2259 
2260 void SpaceManager::track_metaspace_memory_usage() {
2261   if (is_init_completed()) {
2262     if (is_class()) {
2263       MemoryService::track_compressed_class_memory_usage();
2264     }
2265     MemoryService::track_metaspace_memory_usage();
2266   }
2267 }
2268 
2269 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2270   assert(vs_list()->current_virtual_space() != NULL,
2271          "Should have been set");
2272   assert(current_chunk() == NULL ||
2273          current_chunk()->allocate(word_size) == NULL,
2274          "Don't need to expand");
2275   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2276 
2277   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2278     size_t words_left = 0;
2279     size_t words_used = 0;
2280     if (current_chunk() != NULL) {
2281       words_left = current_chunk()->free_word_size();
2282       words_used = current_chunk()->used_word_size();
2283     }
2284     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
2285                                        word_size, words_used, words_left);
2286   }
2287 
2288   // Get another chunk
2289   size_t chunk_word_size = calc_chunk_size(word_size);
2290   Metachunk* next = get_new_chunk(chunk_word_size);
2291 
2292   MetaWord* mem = NULL;
2293 
2294   // If a chunk was available, add it to the in-use chunk list
2295   // and do an allocation from it.
2296   if (next != NULL) {
2297     // Add to this manager's list of chunks in use.
2298     add_chunk(next, false);
2299     mem = next->allocate(word_size);
2300   }
2301 
2302   // Track metaspace memory usage statistic.
2303   track_metaspace_memory_usage();
2304 
2305   return mem;
2306 }
2307 
2308 void SpaceManager::print_on(outputStream* st) const {
2309 
2310   for (ChunkIndex i = ZeroIndex;
2311        i < NumberOfInUseLists ;
2312        i = next_chunk_index(i) ) {
2313     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
2314                  p2i(chunks_in_use(i)),
2315                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2316   }
2317   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2318                " Humongous " SIZE_FORMAT,
2319                sum_waste_in_chunks_in_use(SmallIndex),
2320                sum_waste_in_chunks_in_use(MediumIndex),
2321                sum_waste_in_chunks_in_use(HumongousIndex));
2322   // block free lists
2323   if (block_freelists() != NULL) {
2324     st->print_cr("total in block free lists " SIZE_FORMAT,
2325       block_freelists()->total_size());
2326   }
2327 }
2328 
2329 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2330                            Mutex* lock) :
2331   _mdtype(mdtype),
2332   _allocated_blocks_words(0),
2333   _allocated_chunks_words(0),
2334   _allocated_chunks_count(0),
2335   _block_freelists(NULL),
2336   _lock(lock)
2337 {
2338   initialize();
2339 }
2340 
2341 void SpaceManager::inc_size_metrics(size_t words) {
2342   assert_lock_strong(SpaceManager::expand_lock());
2343   // Total of allocated Metachunks and allocated Metachunks count
2344   // for each SpaceManager
2345   _allocated_chunks_words = _allocated_chunks_words + words;
2346   _allocated_chunks_count++;
2347   // Global total of capacity in allocated Metachunks
2348   MetaspaceAux::inc_capacity(mdtype(), words);
2349   // Global total of allocated Metablocks.
2350   // used_words_slow() includes the overhead in each
2351   // Metachunk so include it in the used when the
2352   // Metachunk is first added (so only added once per
2353   // Metachunk).
2354   MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2355 }
2356 
2357 void SpaceManager::inc_used_metrics(size_t words) {
2358   // Add to the per SpaceManager total
2359   Atomic::add_ptr(words, &_allocated_blocks_words);
2360   // Add to the global total
2361   MetaspaceAux::inc_used(mdtype(), words);
2362 }
2363 
2364 void SpaceManager::dec_total_from_size_metrics() {
2365   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2366   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2367   // Also deduct the overhead per Metachunk
2368   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2369 }
2370 
2371 void SpaceManager::initialize() {
2372   Metadebug::init_allocation_fail_alot_count();
2373   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2374     _chunks_in_use[i] = NULL;
2375   }
2376   _current_chunk = NULL;
2377   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
2378 }
2379 
2380 SpaceManager::~SpaceManager() {
2381   // This call takes this->_lock, which can't be done while holding expand_lock()
2382   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2383          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2384          " allocated_chunks_words() " SIZE_FORMAT,
2385          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2386 
2387   MutexLockerEx fcl(SpaceManager::expand_lock(),
2388                     Mutex::_no_safepoint_check_flag);
2389 
2390   chunk_manager()->slow_locked_verify();
2391 
2392   dec_total_from_size_metrics();
2393 
2394   Log(gc, metaspace, freelist) log;
2395   if (log.is_trace()) {
2396     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
2397     ResourceMark rm;
2398     LogStream ls(log.trace());
2399     locked_print_chunks_in_use_on(&ls);
2400     if (block_freelists() != NULL) {
2401       block_freelists()->print_on(&ls);
2402     }
2403   }
2404 
2405   // Add all the chunks in use by this space manager
2406   // to the global list of free chunks.
2407 
2408   // Follow each list of chunks-in-use and add them to the
2409   // free lists.  Each list is NULL terminated.
2410 
2411   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
2412     Metachunk* chunks = chunks_in_use(i);
2413     chunk_manager()->return_chunk_list(i, chunks);
2414     set_chunks_in_use(i, NULL);
2415   }
2416 
2417   chunk_manager()->slow_locked_verify();
2418 
2419   if (_block_freelists != NULL) {
2420     delete _block_freelists;
2421   }
2422 }
2423 
2424 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2425   assert_lock_strong(_lock);
2426   // Allocations and deallocations are in raw_word_size
2427   size_t raw_word_size = get_allocation_word_size(word_size);
2428   // Lazily create a block_freelist
2429   if (block_freelists() == NULL) {
2430     _block_freelists = new BlockFreelist();
2431   }
2432   block_freelists()->return_block(p, raw_word_size);
2433 }
2434 
2435 // Adds a chunk to the list of chunks in use.
2436 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2437 
2438   assert(new_chunk != NULL, "Should not be NULL");
2439   assert(new_chunk->next() == NULL, "Should not be on a list");
2440 
2441   new_chunk->reset_empty();
2442 
2443   // Find the correct list and set the current
2444   // chunk for that list.
2445   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
2446 
2447   if (index != HumongousIndex) {
2448     retire_current_chunk();
2449     set_current_chunk(new_chunk);
2450     new_chunk->set_next(chunks_in_use(index));
2451     set_chunks_in_use(index, new_chunk);
2452   } else {
2453     // For null class loader data and DumpSharedSpaces, the first chunk isn't
2454     // small, so the small chunk list will be null.  Link this first chunk
2455     // as the current chunk.
2456     if (make_current) {
2457       // Set as the current chunk but otherwise treat as a humongous chunk.
2458       set_current_chunk(new_chunk);
2459     }
2460     // Link at head.  The _current_chunk only points to a humongous chunk
2461     // for the null class loader metaspace (class and data virtual space
2462     // managers); it never points into the tail of the humongous
2463     // chunks list.
2464     new_chunk->set_next(chunks_in_use(HumongousIndex));
2465     set_chunks_in_use(HumongousIndex, new_chunk);
2466 
2467     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2468   }
2469 
2470   // Add to the running sum of capacity
2471   inc_size_metrics(new_chunk->word_size());
2472 
2473   assert(new_chunk->is_empty(), "Not ready for reuse");
2474   Log(gc, metaspace, freelist) log;
2475   if (log.is_trace()) {
2476     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT, sum_count_in_chunks_in_use());
2477     ResourceMark rm;
2478     LogStream ls(log.trace());
2479     new_chunk->print_on(&ls);
2480     chunk_manager()->locked_print_free_chunks(&ls);
2481   }
2482 }
2483 
2484 void SpaceManager::retire_current_chunk() {
2485   if (current_chunk() != NULL) {
2486     size_t remaining_words = current_chunk()->free_word_size();
2487     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
2488       MetaWord* ptr = current_chunk()->allocate(remaining_words);
2489       deallocate(ptr, remaining_words);
2490       inc_used_metrics(remaining_words);
2491     }
2492   }
2493 }
2494 
2495 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
2496   // Get a chunk from the chunk freelist
2497   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
2498 
2499   if (next == NULL) {
2500     next = vs_list()->get_new_chunk(chunk_word_size,
2501                                     medium_chunk_bunch());
2502   }
2503 
2504   Log(gc, metaspace, alloc) log;
2505   if (log.is_debug() && next != NULL &&
2506       SpaceManager::is_humongous(next->word_size())) {
2507     log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());
2508   }
2509 
2510   return next;
2511 }
2512 
2513 /*
2514  * The policy is to allocate up to _small_chunk_limit small chunks
2515  * after which only medium chunks are allocated.  This is done to
2516  * reduce fragmentation.  In some cases, this can result in a lot
2517  * of small chunks being allocated to the point where it's not
2518  * possible to expand.  If this happens, there may be no medium chunks
2519  * available and OOME would be thrown.  Instead of doing that,
2520  * if the allocation request size fits in a small chunk, an attempt
2521  * will be made to allocate a small chunk.
2522  */
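     // For example, when expansion fails for a request that would fit in a
     // small chunk, a caller can fall back to get_small_chunk_and_allocate()
     // below; it only takes a chunk that is already on the free list, so it
     // can succeed even when no new chunk can be carved out of a virtual space.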
2523 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
2524   size_t raw_word_size = get_allocation_word_size(word_size);
2525 
2526   if (raw_word_size + Metachunk::overhead() > small_chunk_size()) {
2527     return NULL;
2528   }
2529 
2530   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2531   MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag);
2532 
2533   Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size());
2534 
2535   MetaWord* mem = NULL;
2536 
2537   if (chunk != NULL) {
2538     // Add chunk to the in-use chunk list and do an allocation from it.
2539     // Add to this manager's list of chunks in use.
2540     add_chunk(chunk, false);
2541     mem = chunk->allocate(raw_word_size);
2542 
2543     inc_used_metrics(raw_word_size);
2544 
2545     // Track metaspace memory usage statistic.
2546     track_metaspace_memory_usage();
2547   }
2548 
2549   return mem;
2550 }
2551 
2552 MetaWord* SpaceManager::allocate(size_t word_size) {
2553   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2554   size_t raw_word_size = get_allocation_word_size(word_size);
2555   BlockFreelist* fl = block_freelists();
2556   MetaWord* p = NULL;
2557   // Allocation from the dictionary is expensive in the sense that
2558   // the dictionary has to be searched for a size.  Don't allocate
2559   // from the dictionary until it starts to get fat.  Is this
2560   // a reasonable policy?  Maybe a skinny dictionary is fast enough
2561   // for allocations.  Do some profiling.  JJJ
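       // Concretely: blocks are only taken from the freelist dictionary once
       // its total size has grown past allocation_from_dictionary_limit; until
       // then the dictionary is bypassed and allocate_work() is used directly.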
2562   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
2563     p = fl->get_block(raw_word_size);
2564   }
2565   if (p == NULL) {
2566     p = allocate_work(raw_word_size);
2567   }
2568 
2569   return p;
2570 }
2571 
2572 // Returns the address of space allocated for "word_size".
2573 // This method does not know about blocks (Metablocks).
2574 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2575   assert_lock_strong(_lock);
2576 #ifdef ASSERT
2577   if (Metadebug::test_metadata_failure()) {
2578     return NULL;
2579   }
2580 #endif
2581   // Is there space in the current chunk?
2582   MetaWord* result = NULL;
2583 
2584   // For DumpSharedSpaces, only allocate out of the current chunk which is
2585   // never null because we gave it the size we wanted.   Caller reports out
2586   // of memory if this returns null.
2587   if (DumpSharedSpaces) {
2588     assert(current_chunk() != NULL, "should never happen");
2589     inc_used_metrics(word_size);
2590     return current_chunk()->allocate(word_size); // caller handles null result
2591   }
2592 
2593   if (current_chunk() != NULL) {
2594     result = current_chunk()->allocate(word_size);
2595   }
2596 
2597   if (result == NULL) {
2598     result = grow_and_allocate(word_size);
2599   }
2600 
2601   if (result != NULL) {
2602     inc_used_metrics(word_size);
2603     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2604            "Head of the list is being allocated");
2605   }
2606 
2607   return result;
2608 }
2609 
2610 void SpaceManager::verify() {
2611   // If there are blocks in the dictionary, then
2612   // verification of chunks does not work since
2613   // being in the dictionary alters a chunk.
2614   if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
2615     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2616       Metachunk* curr = chunks_in_use(i);
2617       while (curr != NULL) {
2618         curr->verify();
2619         verify_chunk_size(curr);
2620         curr = curr->next();
2621       }
2622     }
2623   }
2624 }
2625 
2626 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2627   assert(is_humongous(chunk->word_size()) ||
2628          chunk->word_size() == medium_chunk_size() ||
2629          chunk->word_size() == small_chunk_size() ||
2630          chunk->word_size() == specialized_chunk_size(),
2631          "Chunk size is wrong");
2632   return;
2633 }
2634 
2635 #ifdef ASSERT
2636 void SpaceManager::verify_allocated_blocks_words() {
2637   // Verification is only guaranteed at a safepoint.
2638   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2639     "Verification can fail if the application is running");
2640   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2641          "allocation total is not consistent " SIZE_FORMAT
2642          " vs " SIZE_FORMAT,
2643          allocated_blocks_words(), sum_used_in_chunks_in_use());
2644 }
2645 
2646 #endif
2647 
2648 void SpaceManager::dump(outputStream* const out) const {
2649   size_t curr_total = 0;
2650   size_t waste = 0;
2651   uint i = 0;
2652   size_t used = 0;
2653   size_t capacity = 0;
2654 
2655   // Add up statistics for all chunks in this SpaceManager.
2656   for (ChunkIndex index = ZeroIndex;
2657        index < NumberOfInUseLists;
2658        index = next_chunk_index(index)) {
2659     for (Metachunk* curr = chunks_in_use(index);
2660          curr != NULL;
2661          curr = curr->next()) {
2662       out->print("%d) ", i++);
2663       curr->print_on(out);
2664       curr_total += curr->word_size();
2665       used += curr->used_word_size();
2666       capacity += curr->word_size();
2667       waste += curr->free_word_size() + curr->overhead();
2668     }
2669   }
2670 
2671   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2672     if (block_freelists() != NULL) block_freelists()->print_on(out);
2673   }
2674 
2675   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2676   // Free space isn't wasted.
2677   waste -= free;
2678 
2679   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2680                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2681                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2682 }
2683 
2684 // MetaspaceAux
2685 
2686 
2687 size_t MetaspaceAux::_capacity_words[] = {0, 0};
2688 size_t MetaspaceAux::_used_words[] = {0, 0};
2689 
2690 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2691   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2692   return list == NULL ? 0 : list->free_bytes();
2693 }
2694 
2695 size_t MetaspaceAux::free_bytes() {
2696   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2697 }
2698 
2699 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2700   assert_lock_strong(SpaceManager::expand_lock());
2701   assert(words <= capacity_words(mdtype),
2702          "About to decrement below 0: words " SIZE_FORMAT
2703          " is greater than _capacity_words[%u] " SIZE_FORMAT,
2704          words, mdtype, capacity_words(mdtype));
2705   _capacity_words[mdtype] -= words;
2706 }
2707 
2708 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2709   assert_lock_strong(SpaceManager::expand_lock());
2710   // Needs to be atomic
2711   _capacity_words[mdtype] += words;
2712 }
2713 
2714 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2715   assert(words <= used_words(mdtype),
2716          "About to decrement below 0: words " SIZE_FORMAT
2717          " is greater than _used_words[%u] " SIZE_FORMAT,
2718          words, mdtype, used_words(mdtype));
2719   // For CMS, deallocation of the Metaspaces occurs during the
2720   // sweep, which is a concurrent phase.  Protection by the expand_lock()
2721   // is not enough since allocation is on a per Metaspace basis
2722   // and protected by the Metaspace lock.
2723   jlong minus_words = (jlong) - (jlong) words;
2724   Atomic::add_ptr(minus_words, &_used_words[mdtype]);
2725 }
2726 
2727 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2728   // _used_words tracks allocations for
2729   // each piece of metadata.  Those allocations are
2730   // generally done concurrently by different application
2731   // threads so must be done atomically.
2732   Atomic::add_ptr(words, &_used_words[mdtype]);
2733 }
2734 
2735 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2736   size_t used = 0;
2737   ClassLoaderDataGraphMetaspaceIterator iter;
2738   while (iter.repeat()) {
2739     Metaspace* msp = iter.get_next();
2740     // Sum allocated_blocks_words for each metaspace
2741     if (msp != NULL) {
2742       used += msp->used_words_slow(mdtype);
2743     }
2744   }
2745   return used * BytesPerWord;
2746 }
2747 
2748 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2749   size_t free = 0;
2750   ClassLoaderDataGraphMetaspaceIterator iter;
2751   while (iter.repeat()) {
2752     Metaspace* msp = iter.get_next();
2753     if (msp != NULL) {
2754       free += msp->free_words_slow(mdtype);
2755     }
2756   }
2757   return free * BytesPerWord;
2758 }
2759 
2760 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2761   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2762     return 0;
2763   }
2764   // Don't count the space in the freelists.  That space will be
2765   // added to the capacity calculation as needed.
2766   size_t capacity = 0;
2767   ClassLoaderDataGraphMetaspaceIterator iter;
2768   while (iter.repeat()) {
2769     Metaspace* msp = iter.get_next();
2770     if (msp != NULL) {
2771       capacity += msp->capacity_words_slow(mdtype);
2772     }
2773   }
2774   return capacity * BytesPerWord;
2775 }
2776 
2777 size_t MetaspaceAux::capacity_bytes_slow() {
2778 #ifdef PRODUCT
2779   // Use capacity_bytes() in PRODUCT instead of this function.
2780   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2781 #endif
2782   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2783   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2784   assert(capacity_bytes() == class_capacity + non_class_capacity,
2785          "bad accounting: capacity_bytes() " SIZE_FORMAT
2786          " class_capacity + non_class_capacity " SIZE_FORMAT
2787          " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2788          capacity_bytes(), class_capacity + non_class_capacity,
2789          class_capacity, non_class_capacity);
2790 
2791   return class_capacity + non_class_capacity;
2792 }
2793 
2794 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2795   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2796   return list == NULL ? 0 : list->reserved_bytes();
2797 }
2798 
2799 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2800   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2801   return list == NULL ? 0 : list->committed_bytes();
2802 }
2803 
2804 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2805 
2806 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2807   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2808   if (chunk_manager == NULL) {
2809     return 0;
2810   }
2811   chunk_manager->slow_verify();
2812   return chunk_manager->free_chunks_total_words();
2813 }
2814 
2815 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2816   return free_chunks_total_words(mdtype) * BytesPerWord;
2817 }
2818 
2819 size_t MetaspaceAux::free_chunks_total_words() {
2820   return free_chunks_total_words(Metaspace::ClassType) +
2821          free_chunks_total_words(Metaspace::NonClassType);
2822 }
2823 
2824 size_t MetaspaceAux::free_chunks_total_bytes() {
2825   return free_chunks_total_words() * BytesPerWord;
2826 }
2827 
2828 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2829   return Metaspace::get_chunk_manager(mdtype) != NULL;
2830 }
2831 
2832 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
2833   if (!has_chunk_free_list(mdtype)) {
2834     return MetaspaceChunkFreeListSummary();
2835   }
2836 
2837   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
2838   return cm->chunk_free_list_summary();
2839 }
2840 
2841 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2842   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
2843                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
2844 }
2845 
2846 void MetaspaceAux::print_on(outputStream* out) {
2847   Metaspace::MetadataType nct = Metaspace::NonClassType;
2848 
2849   out->print_cr(" Metaspace       "
2850                 "used "      SIZE_FORMAT "K, "
2851                 "capacity "  SIZE_FORMAT "K, "
2852                 "committed " SIZE_FORMAT "K, "
2853                 "reserved "  SIZE_FORMAT "K",
2854                 used_bytes()/K,
2855                 capacity_bytes()/K,
2856                 committed_bytes()/K,
2857                 reserved_bytes()/K);
2858 
2859   if (Metaspace::using_class_space()) {
2860     Metaspace::MetadataType ct = Metaspace::ClassType;
2861     out->print_cr("  class space    "
2862                   "used "      SIZE_FORMAT "K, "
2863                   "capacity "  SIZE_FORMAT "K, "
2864                   "committed " SIZE_FORMAT "K, "
2865                   "reserved "  SIZE_FORMAT "K",
2866                   used_bytes(ct)/K,
2867                   capacity_bytes(ct)/K,
2868                   committed_bytes(ct)/K,
2869                   reserved_bytes(ct)/K);
2870   }
2871 }
2872 
2873 // Print information for class space and data space separately.
2874 // This is almost the same as above.
2875 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2876   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2877   size_t capacity_bytes = capacity_bytes_slow(mdtype);
2878   size_t used_bytes = used_bytes_slow(mdtype);
2879   size_t free_bytes = free_bytes_slow(mdtype);
2880   size_t used_and_free = used_bytes + free_bytes +
2881                            free_chunks_capacity_bytes;
2882   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
2883              "K + unused in chunks " SIZE_FORMAT "K  + "
2884              " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2885              "K  capacity in allocated chunks " SIZE_FORMAT "K",
2886              used_bytes / K,
2887              free_bytes / K,
2888              free_chunks_capacity_bytes / K,
2889              used_and_free / K,
2890              capacity_bytes / K);
2891   // Accounting can only be correct if we got the values during a safepoint
2892   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2893 }
2894 
2895 // Print total fragmentation for class metaspaces
2896 void MetaspaceAux::print_class_waste(outputStream* out) {
2897   assert(Metaspace::using_class_space(), "class metaspace not used");
2898   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2899   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2900   ClassLoaderDataGraphMetaspaceIterator iter;
2901   while (iter.repeat()) {
2902     Metaspace* msp = iter.get_next();
2903     if (msp != NULL) {
2904       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2905       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2906       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2907       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2908       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2909       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2910       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2911     }
2912   }
2913   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2914                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2915                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2916                 "large count " SIZE_FORMAT,
2917                 cls_specialized_count, cls_specialized_waste,
2918                 cls_small_count, cls_small_waste,
2919                 cls_medium_count, cls_medium_waste, cls_humongous_count);
2920 }
2921 
2922 // Print total fragmentation for data and class metaspaces separately
2923 void MetaspaceAux::print_waste(outputStream* out) {
2924   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2925   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2926 
2927   ClassLoaderDataGraphMetaspaceIterator iter;
2928   while (iter.repeat()) {
2929     Metaspace* msp = iter.get_next();
2930     if (msp != NULL) {
2931       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2932       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2933       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2934       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2935       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2936       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2937       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2938     }
2939   }
2940   out->print_cr("Total fragmentation waste (words) doesn't count free space");
2941   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2942                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2943                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2944                         "large count " SIZE_FORMAT,
2945              specialized_count, specialized_waste, small_count,
2946              small_waste, medium_count, medium_waste, humongous_count);
2947   if (Metaspace::using_class_space()) {
2948     print_class_waste(out);
2949   }
2950 }
2951 
2952 // Dump global metaspace things from the end of ClassLoaderDataGraph
2953 void MetaspaceAux::dump(outputStream* out) {
2954   out->print_cr("All Metaspace:");
2955   out->print("data space: "); print_on(out, Metaspace::NonClassType);
2956   out->print("class space: "); print_on(out, Metaspace::ClassType);
2957   print_waste(out);
2958 }
2959 
2960 void MetaspaceAux::verify_free_chunks() {
2961   Metaspace::chunk_manager_metadata()->verify();
2962   if (Metaspace::using_class_space()) {
2963     Metaspace::chunk_manager_class()->verify();
2964   }
2965 }
2966 
2967 void MetaspaceAux::verify_capacity() {
2968 #ifdef ASSERT
2969   size_t running_sum_capacity_bytes = capacity_bytes();
2970   // Check that the running capacity sum matches the value computed the slow way
2971   size_t capacity_in_use_bytes = capacity_bytes_slow();
2972   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2973          "capacity_words() * BytesPerWord " SIZE_FORMAT
2974          " capacity_bytes_slow()" SIZE_FORMAT,
2975          running_sum_capacity_bytes, capacity_in_use_bytes);
2976   for (Metaspace::MetadataType i = Metaspace::ClassType;
2977        i < Metaspace::MetadataTypeCount;
2978        i = (Metaspace::MetadataType)(i + 1)) {
2979     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2980     assert(capacity_bytes(i) == capacity_in_use_bytes,
2981            "capacity_bytes(%u) " SIZE_FORMAT
2982            " capacity_bytes_slow(%u)" SIZE_FORMAT,
2983            i, capacity_bytes(i), i, capacity_in_use_bytes);
2984   }
2985 #endif
2986 }
2987 
2988 void MetaspaceAux::verify_used() {
2989 #ifdef ASSERT
2990   size_t running_sum_used_bytes = used_bytes();
2991   // Check that the running used sum matches the value computed the slow way
2992   size_t used_in_use_bytes = used_bytes_slow();
2993   assert(used_bytes() == used_in_use_bytes,
2994          "used_bytes() " SIZE_FORMAT
2995          " used_bytes_slow()" SIZE_FORMAT,
2996          used_bytes(), used_in_use_bytes);
2997   for (Metaspace::MetadataType i = Metaspace::ClassType;
2998        i < Metaspace::MetadataTypeCount;
2999        i = (Metaspace::MetadataType)(i + 1)) {
3000     size_t used_in_use_bytes = used_bytes_slow(i);
3001     assert(used_bytes(i) == used_in_use_bytes,
3002            "used_bytes(%u) " SIZE_FORMAT
3003            " used_bytes_slow(%u)" SIZE_FORMAT,
3004            i, used_bytes(i), i, used_in_use_bytes);
3005   }
3006 #endif
3007 }
3008 
3009 void MetaspaceAux::verify_metrics() {
3010   verify_capacity();
3011   verify_used();
3012 }
3013 
3014 
3015 // Metaspace methods
3016 
3017 size_t Metaspace::_first_chunk_word_size = 0;
3018 size_t Metaspace::_first_class_chunk_word_size = 0;
3019 
3020 size_t Metaspace::_commit_alignment = 0;
3021 size_t Metaspace::_reserve_alignment = 0;
3022 
3023 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
3024   initialize(lock, type);
3025 }
3026 
3027 Metaspace::~Metaspace() {
3028   delete _vsm;
3029   if (using_class_space()) {
3030     delete _class_vsm;
3031   }
3032 }
3033 
3034 VirtualSpaceList* Metaspace::_space_list = NULL;
3035 VirtualSpaceList* Metaspace::_class_space_list = NULL;
3036 
3037 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
3038 ChunkManager* Metaspace::_chunk_manager_class = NULL;
3039 
3040 #define VIRTUALSPACEMULTIPLIER 2
3041 
3042 #ifdef _LP64
3043 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
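// Note: UnscaledClassSpaceMax is 4G, the largest range a 32-bit narrow klass
// pointer can cover with a shift of 0; shifted left by LogKlassAlignmentInBytes
// it gives the limit used below when deciding whether a zero base is possible.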
3044 
3045 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
3046   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
3047   // narrow_klass_base is the lower of the metaspace base and the cds base
3048   // (if cds is enabled).  The narrow_klass_shift depends on the distance
3049   // between the lower base and higher address.
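  // Illustrative example: a 1G class space reserved at 2G with CDS off gives
  // higher_address = 3G. That is below the 32G klass_encoding_max, so the
  // base becomes 0, and since 3G <= UnscaledClassSpaceMax the shift stays 0
  // as well (zero-based, unscaled encoding).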
3050   address lower_base;
3051   address higher_address;
3052 #if INCLUDE_CDS
3053   if (UseSharedSpaces) {
3054     higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
3055                           (address)(metaspace_base + compressed_class_space_size()));
3056     lower_base = MIN2(metaspace_base, cds_base);
3057   } else
3058 #endif
3059   {
3060     higher_address = metaspace_base + compressed_class_space_size();
3061     lower_base = metaspace_base;
3062 
3063     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
3064     // If compressed class space fits in lower 32G, we don't need a base.
3065     if (higher_address <= (address)klass_encoding_max) {
3066       lower_base = 0; // Effectively lower base is zero.
3067     }
3068   }
3069 
3070   Universe::set_narrow_klass_base(lower_base);
3071 
3072   if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
3073     Universe::set_narrow_klass_shift(0);
3074   } else {
3075     assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
3076     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
3077   }
3078   AOTLoader::set_narrow_klass_shift();
3079 }
3080 
3081 #if INCLUDE_CDS
3082 // Return TRUE if the specified metaspace_base and cds_base are close enough
3083 // to work with compressed klass pointers.
3084 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
3085   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
3086   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3087   address lower_base = MIN2((address)metaspace_base, cds_base);
3088   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
3089                                 (address)(metaspace_base + compressed_class_space_size()));
3090   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
3091 }
3092 #endif
3093 
3094 // Try to allocate the metaspace at the requested addr.
3095 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
3096   assert(using_class_space(), "called improperly");
3097   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3098   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
3099          "Metaspace size is too big");
3100   assert_is_aligned(requested_addr, _reserve_alignment);
3101   assert_is_aligned(cds_base, _reserve_alignment);
3102   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
3103 
3104   // Don't use large pages for the class space.
3105   bool large_pages = false;
3106 
3107 #if !(defined(AARCH64) || defined(AIX))
3108   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
3109                                              _reserve_alignment,
3110                                              large_pages,
3111                                              requested_addr);
3112 #else // AARCH64 || AIX
3113   ReservedSpace metaspace_rs;
3114 
3115   // Our compressed klass pointers may fit nicely into the lower 32
3116   // bits.
3117   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
3118     metaspace_rs = ReservedSpace(compressed_class_space_size(),
3119                                  _reserve_alignment,
3120                                  large_pages,
3121                                  requested_addr);
3122   }
3123 
3124   if (! metaspace_rs.is_reserved()) {
3125     // Aarch64: Try to align metaspace so that we can decode a compressed
3126     // klass with a single MOVK instruction.  We can do this iff the
3127     // compressed class base is a multiple of 4G.
3128     // Aix: Search for a place where we can find memory. If we need to load
3129     // the base, 4G alignment is helpful, too.
3130     size_t increment = AARCH64_ONLY(4*)G;
3131     for (char *a = align_up(requested_addr, increment);
3132          a < (char*)(1024*G);
3133          a += increment) {
3134       if (a == (char *)(32*G)) {
3135         // Go faster from here on. Zero-based is no longer possible.
3136         increment = 4*G;
3137       }
3138 
3139 #if INCLUDE_CDS
3140       if (UseSharedSpaces
3141           && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
3142         // We failed to find an aligned base that will reach.  Fall
3143         // back to using our requested addr.
3144         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3145                                      _reserve_alignment,
3146                                      large_pages,
3147                                      requested_addr);
3148         break;
3149       }
3150 #endif
3151 
3152       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3153                                    _reserve_alignment,
3154                                    large_pages,
3155                                    a);
3156       if (metaspace_rs.is_reserved())
3157         break;
3158     }
3159   }
3160 
3161 #endif // AARCH64 || AIX
3162 
3163   if (!metaspace_rs.is_reserved()) {
3164 #if INCLUDE_CDS
3165     if (UseSharedSpaces) {
3166       size_t increment = align_up(1*G, _reserve_alignment);
3167 
3168       // Keep trying to allocate the metaspace, increasing the requested_addr
3169       // by 1GB each time, until we reach an address that will no longer allow
3170       // use of CDS with compressed klass pointers.
3171       char *addr = requested_addr;
3172       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3173              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3174         addr = addr + increment;
3175         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3176                                      _reserve_alignment, large_pages, addr);
3177       }
3178     }
3179 #endif
3180     // If no successful allocation then try to allocate the space anywhere.  If
3181     // that fails then OOM doom.  At this point we cannot try allocating the
3182     // metaspace as if UseCompressedClassPointers is off because too much
3183     // initialization has happened that depends on UseCompressedClassPointers.
3184     // So, UseCompressedClassPointers cannot be turned off at this point.
3185     if (!metaspace_rs.is_reserved()) {
3186       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3187                                    _reserve_alignment, large_pages);
3188       if (!metaspace_rs.is_reserved()) {
3189         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
3190                                               compressed_class_space_size()));
3191       }
3192     }
3193   }
3194 
3195   // If we got here then the metaspace got allocated.
3196   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3197 
3198 #if INCLUDE_CDS
3199   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3200   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3201     FileMapInfo::stop_sharing_and_unmap(
3202         "Could not allocate metaspace at a compatible address");
3203   }
3204 #endif
3205   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3206                                   UseSharedSpaces ? (address)cds_base : 0);
3207 
3208   initialize_class_space(metaspace_rs);
3209 
3210   LogTarget(Trace, gc, metaspace) lt;
3211   if (lt.is_enabled()) {
3212     ResourceMark rm;
3213     LogStream ls(lt);
3214     print_compressed_class_space(&ls, requested_addr);
3215   }
3216 }
3217 
3218 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3219   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3220                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3221   if (_class_space_list != NULL) {
3222     address base = (address)_class_space_list->current_virtual_space()->bottom();
3223     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3224                  compressed_class_space_size(), p2i(base));
3225     if (requested_addr != 0) {
3226       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3227     }
3228     st->cr();
3229   }
3230 }
3231 
3232 // For UseCompressedClassPointers the class space is reserved above the top of
3233 // the Java heap.  The argument passed in is at the base of the compressed space.
3234 void Metaspace::initialize_class_space(ReservedSpace rs) {
3235   // The reserved space size may be bigger because of alignment, esp with UseLargePages
3236   assert(rs.size() >= CompressedClassSpaceSize,
3237          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
3238   assert(using_class_space(), "Must be using class space");
3239   _class_space_list = new VirtualSpaceList(rs);
3240   _chunk_manager_class = new ChunkManager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3241 
3242   if (!_class_space_list->initialization_succeeded()) {
3243     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3244   }
3245 }
3246 
3247 #endif
3248 
3249 void Metaspace::ergo_initialize() {
3250   if (DumpSharedSpaces) {
3251     // Using large pages when dumping the shared archive is currently not implemented.
3252     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3253   }
3254 
3255   size_t page_size = os::vm_page_size();
3256   if (UseLargePages && UseLargePagesInMetaspace) {
3257     page_size = os::large_page_size();
3258   }
3259 
3260   _commit_alignment  = page_size;
3261   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
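  // For example, with 4K pages and a 64K allocation granularity (typical on
  // Windows) this yields a 4K commit alignment and a 64K reserve alignment;
  // the metaspace sizing flags below are aligned down to these values.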
3262 
3263   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since that would
3264   // lose track of whether MaxMetaspaceSize was set on the command line or not.
3265   // This information is needed later to conform to the specification of the
3266   // java.lang.management.MemoryUsage API.
3267   //
3268   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3269   // globals.hpp to the aligned value, but this is not possible, since the
3270   // alignment depends on other flags being parsed.
3271   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3272 
3273   if (MetaspaceSize > MaxMetaspaceSize) {
3274     MetaspaceSize = MaxMetaspaceSize;
3275   }
3276 
3277   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
3278 
3279   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3280 
3281   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3282   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3283 
3284   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3285   set_compressed_class_space_size(CompressedClassSpaceSize);
3286 }
3287 
3288 void Metaspace::global_initialize() {
3289   MetaspaceGC::initialize();
3290 
3291   // Initialize the alignment for shared spaces.
3292   int max_alignment = os::vm_allocation_granularity();
3293   size_t cds_total = 0;
3294 
3295   MetaspaceShared::set_max_alignment(max_alignment);
3296 
3297   if (DumpSharedSpaces) {
3298 #if INCLUDE_CDS
3299     MetaspaceShared::estimate_regions_size();
3300 
3301     SharedReadOnlySize  = align_up(SharedReadOnlySize,  max_alignment);
3302     SharedReadWriteSize = align_up(SharedReadWriteSize, max_alignment);
3303     SharedMiscDataSize  = align_up(SharedMiscDataSize,  max_alignment);
3304     SharedMiscCodeSize  = align_up(SharedMiscCodeSize,  max_alignment);
3305 
3306     // Initialize with the sum of the shared space sizes.  The read-only
3307     // and read-write metaspace chunks will be allocated out of this, and the
3308     // remainder is the misc code and data chunks.
3309     cds_total = FileMapInfo::shared_spaces_size();
3310     cds_total = align_up(cds_total, _reserve_alignment);
3311     _space_list = new VirtualSpaceList(cds_total/wordSize);
3312     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3313 
3314     if (!_space_list->initialization_succeeded()) {
3315       vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3316     }
3317 
3318 #ifdef _LP64
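    // When dumping, the narrow klass shift is forced to 0 (see below), so the
    // archive plus the compressed class space must fit into the 4G range that
    // can be addressed without scaling.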
3319     if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3320       vm_exit_during_initialization("Unable to dump shared archive.",
3321           err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3322                   SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3323                   "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(),
3324                   cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3325     }
3326 
3327     // Set the compressed klass pointer base so that decoding of these pointers works
3328     // properly when creating the shared archive.
3329     assert(UseCompressedOops && UseCompressedClassPointers,
3330       "UseCompressedOops and UseCompressedClassPointers must be set");
3331     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3332     log_develop_trace(gc, metaspace)("Setting narrow_klass_base to Address: " PTR_FORMAT,
3333                                      p2i(_space_list->current_virtual_space()->bottom()));
3334 
3335     Universe::set_narrow_klass_shift(0);
3336 #endif // _LP64
3337 #endif // INCLUDE_CDS
3338   } else {
3339 #if INCLUDE_CDS
3340     if (UseSharedSpaces) {
3341       // If using shared space, open the file that contains the shared space
3342       // and map in the memory before initializing the rest of metaspace (so
3343       // the addresses don't conflict)
3344       address cds_address = NULL;
3345       FileMapInfo* mapinfo = new FileMapInfo();
3346 
3347       // Open the shared archive file, read and validate the header. If
3348       // initialization fails, shared spaces [UseSharedSpaces] are
3349       // disabled and the file is closed.
3350       // The shared spaces are also mapped in at this point.
3351       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3352         cds_total = FileMapInfo::shared_spaces_size();
3353         cds_address = (address)mapinfo->header()->region_addr(0);
3354 #ifdef _LP64
3355         if (using_class_space()) {
3356           char* cds_end = (char*)(cds_address + cds_total);
3357           cds_end = align_up(cds_end, _reserve_alignment);
3358           // If UseCompressedClassPointers is set then allocate the metaspace area
3359           // above the heap and above the CDS area (if it exists).
3360           allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3361           // Map the shared string space after the compressed class pointers
3362           // have been set up, because its mapping relies on that setup.
3363           mapinfo->map_string_regions();
3364         }
3365 #endif // _LP64
3366       } else {
3367         assert(!mapinfo->is_open() && !UseSharedSpaces,
3368                "archive file not closed or shared spaces not disabled.");
3369       }
3370     }
3371 #endif // INCLUDE_CDS
3372 
3373 #ifdef _LP64
3374     if (!UseSharedSpaces && using_class_space()) {
3375       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3376       allocate_metaspace_compressed_klass_ptrs(base, 0);
3377     }
3378 #endif // _LP64
3379 
3380     // Initialize these before initializing the VirtualSpaceList
3381     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3382     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3383     // Make the first class chunk bigger than a medium chunk so it's not put
3384     // on the medium chunk list.  The next chunk will be small and progress
3385     // from there.  This size was determined by running -version.
3386     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3387                                        (CompressedClassSpaceSize/BytesPerWord)*2);
3388     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
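    // With the default 1G CompressedClassSpaceSize the MIN2 above picks
    // MediumChunk * 6; only a very small class space would make the
    // CompressedClassSpaceSize-derived term the smaller one.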
3389     // Arbitrarily set the initial virtual space to a multiple
3390     // of the boot class loader size.
3391     size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3392     word_size = align_up(word_size, Metaspace::reserve_alignment_words());
3393 
3394     // Initialize the list of virtual spaces.
3395     _space_list = new VirtualSpaceList(word_size);
3396     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3397 
3398     if (!_space_list->initialization_succeeded()) {
3399       vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3400     }
3401   }
3402 
3403   _tracer = new MetaspaceTracer();
3404 }
3405 
3406 void Metaspace::post_initialize() {
3407   MetaspaceGC::post_initialize();
3408 }
3409 
3410 void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) {
3411   Metachunk* chunk = get_initialization_chunk(type, mdtype);
3412   if (chunk != NULL) {
3413     // Add to this manager's list of chunks in use and current_chunk().
3414     get_space_manager(mdtype)->add_chunk(chunk, true);
3415   }
3416 }
3417 
3418 Metachunk* Metaspace::get_initialization_chunk(MetaspaceType type, MetadataType mdtype) {
3419   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
3420 
3421   // Get a chunk from the chunk freelist
3422   Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3423 
3424   if (chunk == NULL) {
3425     chunk = get_space_list(mdtype)->get_new_chunk(chunk_word_size,
3426                                                   get_space_manager(mdtype)->medium_chunk_bunch());
3427   }
3428 
3429   // When dumping the shared archive, report an error if the allocation failed.
3430   if (DumpSharedSpaces && chunk == NULL) {
3431     report_insufficient_metaspace(MetaspaceAux::committed_bytes() + chunk_word_size * BytesPerWord);
3432   }
3433 
3434   return chunk;
3435 }
3436 
3437 void Metaspace::verify_global_initialization() {
3438   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
3439   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
3440 
3441   if (using_class_space()) {
3442     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
3443     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
3444   }
3445 }
3446 
3447 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3448   verify_global_initialization();
3449 
3450   // Allocate SpaceManager for metadata objects.
3451   _vsm = new SpaceManager(NonClassType, lock);
3452 
3453   if (using_class_space()) {
3454     // Allocate SpaceManager for classes.
3455     _class_vsm = new SpaceManager(ClassType, lock);
3456   }
3457 
3458   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3459 
3460   // Allocate chunk for metadata objects
3461   initialize_first_chunk(type, NonClassType);
3462 
3463   // Allocate chunk for class metadata objects
3464   if (using_class_space()) {
3465     initialize_first_chunk(type, ClassType);
3466   }
3467 
3468   _alloc_record_head = NULL;
3469   _alloc_record_tail = NULL;
3470 }
3471 
3472 size_t Metaspace::align_word_size_up(size_t word_size) {
3473   size_t byte_size = word_size * wordSize;
3474   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3475 }
3476 
3477 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3478   // DumpSharedSpaces doesn't use class metadata area (yet)
3479   // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3480   if (is_class_space_allocation(mdtype)) {
3481     return  class_vsm()->allocate(word_size);
3482   } else {
3483     return  vsm()->allocate(word_size);
3484   }
3485 }
3486 
3487 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3488   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3489   assert(delta_bytes > 0, "Must be");
3490 
3491   size_t before = 0;
3492   size_t after = 0;
3493   MetaWord* res;
3494   bool incremented;
3495 
3496   // Each thread increments the HWM at most once. Even if the thread fails to increment
3497   // the HWM, an allocation is still attempted. This is because another thread must then
3498   // have incremented the HWM and therefore the allocation might still succeed.
3499   do {
3500     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
3501     res = allocate(word_size, mdtype);
3502   } while (!incremented && res == NULL);
3503 
3504   if (incremented) {
3505     tracer()->report_gc_threshold(before, after,
3506                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3507     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
3508   }
3509 
3510   return res;
3511 }
3512 
3513 // Space allocated in the Metaspace.  This may
3514 // span several metadata virtual spaces.
3515 char* Metaspace::bottom() const {
3516   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3517   return (char*)vsm()->current_chunk()->bottom();
3518 }
3519 
3520 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3521   if (mdtype == ClassType) {
3522     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3523   } else {
3524     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3525   }
3526 }
3527 
3528 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3529   if (mdtype == ClassType) {
3530     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3531   } else {
3532     return vsm()->sum_free_in_chunks_in_use();
3533   }
3534 }
3535 
3536 // Space capacity in the Metaspace.  It includes
3537 // space in the list of chunks from which allocations
3538 // have been made.  It does not include space in the global freelist
3539 // or the space available in the dictionary, which
3540 // is already counted in some chunk.
3541 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3542   if (mdtype == ClassType) {
3543     return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3544   } else {
3545     return vsm()->sum_capacity_in_chunks_in_use();
3546   }
3547 }
3548 
3549 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3550   return used_words_slow(mdtype) * BytesPerWord;
3551 }
3552 
3553 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3554   return capacity_words_slow(mdtype) * BytesPerWord;
3555 }
3556 
3557 size_t Metaspace::allocated_blocks_bytes() const {
3558   return vsm()->allocated_blocks_bytes() +
3559       (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
3560 }
3561 
3562 size_t Metaspace::allocated_chunks_bytes() const {
3563   return vsm()->allocated_chunks_bytes() +
3564       (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
3565 }
3566 
3567 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3568   assert(!SafepointSynchronize::is_at_safepoint()
3569          || Thread::current()->is_VM_thread(), "should be the VM thread");
3570 
3571   if (DumpSharedSpaces && log_is_enabled(Info, cds)) {
3572     record_deallocation(ptr, vsm()->get_allocation_word_size(word_size));
3573   }
3574 
3575   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3576 
3577   if (is_class && using_class_space()) {
3578     class_vsm()->deallocate(ptr, word_size);
3579   } else {
3580     vsm()->deallocate(ptr, word_size);
3581   }
3582 }
3583 
3584 
3585 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3586                               bool read_only, MetaspaceObj::Type type, TRAPS) {
3587   if (HAS_PENDING_EXCEPTION) {
3588     assert(false, "Should not allocate with exception pending");
3589     return NULL;  // caller does a CHECK_NULL too
3590   }
3591 
3592   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3593         "ClassLoaderData::the_null_class_loader_data() should have been used.");
3594 
3595   // Allocate in metaspaces without taking out a lock, because it deadlocks
3596   // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
3597   // to revisit this for application class data sharing.
3598   if (DumpSharedSpaces) {
3599     assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3600     Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3601     MetaWord* result = space->allocate(word_size, NonClassType);
3602     if (result == NULL) {
3603       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3604     }
3605     if (log_is_enabled(Info, cds)) {
3606       space->record_allocation(result, type, space->vsm()->get_allocation_word_size(word_size));
3607     }
3608 
3609     // Zero initialize.
3610     Copy::fill_to_words((HeapWord*)result, word_size, 0);
3611 
3612     return result;
3613   }
3614 
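  // Regular (non-dumping) path: try to allocate from the loader's metaspace;
  // on failure ask the collector policy to satisfy the allocation (possibly
  // triggering a GC), then fall back to a fresh small chunk, and finally
  // report an out-of-memory condition.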
3615   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3616 
3617   // Try to allocate metadata.
3618   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3619 
3620   if (result == NULL) {
3621     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
3622 
3623     // Allocation failed.
3624     if (is_init_completed()) {
3625       // Only start a GC if the bootstrapping has completed.
3626 
3627       // Try to clean out some memory and retry.
3628       result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3629           loader_data, word_size, mdtype);
3630     }
3631   }
3632 
3633   if (result == NULL) {
3634     SpaceManager* sm;
3635     if (is_class_space_allocation(mdtype)) {
3636       sm = loader_data->metaspace_non_null()->class_vsm();
3637     } else {
3638       sm = loader_data->metaspace_non_null()->vsm();
3639     }
3640 
3641     result = sm->get_small_chunk_and_allocate(word_size);
3642 
3643     if (result == NULL) {
3644       report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3645     }
3646   }
3647 
3648   // Zero initialize.
3649   Copy::fill_to_words((HeapWord*)result, word_size, 0);
3650 
3651   return result;
3652 }
3653 
3654 size_t Metaspace::class_chunk_size(size_t word_size) {
3655   assert(using_class_space(), "Has to use class space");
3656   return class_vsm()->calc_chunk_size(word_size);
3657 }
3658 
3659 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3660   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3661 
3662   // If result is still null, we are out of memory.
3663   Log(gc, metaspace, freelist) log;
3664   if (log.is_info()) {
3665     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
3666              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
3667     ResourceMark rm;
3668     LogStream ls(log.info());
3669     if (loader_data->metaspace_or_null() != NULL) {
3670       loader_data->dump(&ls);
3671     }
3672     MetaspaceAux::dump(&ls);
3673   }
3674 
3675   bool out_of_compressed_class_space = false;
3676   if (is_class_space_allocation(mdtype)) {
3677     Metaspace* metaspace = loader_data->metaspace_non_null();
3678     out_of_compressed_class_space =
3679       MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3680       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3681       CompressedClassSpaceSize;
3682   }
3683 
3684   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3685   const char* space_string = out_of_compressed_class_space ?
3686     "Compressed class space" : "Metaspace";
3687 
3688   report_java_out_of_memory(space_string);
3689 
3690   if (JvmtiExport::should_post_resource_exhausted()) {
3691     JvmtiExport::post_resource_exhausted(
3692         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3693         space_string);
3694   }
3695 
3696   if (!is_init_completed()) {
3697     vm_exit_during_initialization("OutOfMemoryError", space_string);
3698   }
3699 
3700   if (out_of_compressed_class_space) {
3701     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3702   } else {
3703     THROW_OOP(Universe::out_of_memory_error_metaspace());
3704   }
3705 }
3706 
3707 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
3708   switch (mdtype) {
3709     case Metaspace::ClassType: return "Class";
3710     case Metaspace::NonClassType: return "Metadata";
3711     default:
3712       assert(false, "Got bad mdtype: %d", (int) mdtype);
3713       return NULL;
3714   }
3715 }
3716 
3717 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3718   assert(DumpSharedSpaces, "sanity");
3719 
3720   int byte_size = (int)word_size * wordSize;
3721   AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
3722 
3723   if (_alloc_record_head == NULL) {
3724     _alloc_record_head = _alloc_record_tail = rec;
3725   } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
3726     _alloc_record_tail->_next = rec;
3727     _alloc_record_tail = rec;
3728   } else {
3729     // slow linear search, but this doesn't happen that often, and only when dumping
3730     for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
3731       if (old->_ptr == ptr) {
3732         assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
3733         int remain_bytes = old->_byte_size - byte_size;
3734         assert(remain_bytes >= 0, "sanity");
3735         old->_type = type;
3736 
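        // Reuse the previously freed record for this allocation. If the freed
        // block was larger than the new allocation, split it: the old record
        // keeps the reused prefix and the new record tracks the remaining
        // tail as still-deallocated space.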
3737         if (remain_bytes == 0) {
3738           delete(rec);
3739         } else {
3740           address remain_ptr = address(ptr) + byte_size;
3741           rec->_ptr = remain_ptr;
3742           rec->_byte_size = remain_bytes;
3743           rec->_type = MetaspaceObj::DeallocatedType;
3744           rec->_next = old->_next;
3745           old->_byte_size = byte_size;
3746           old->_next = rec;
3747         }
3748         return;
3749       }
3750     }
3751     assert(0, "reallocating a freed pointer that was not recorded");
3752   }
3753 }
3754 
3755 void Metaspace::record_deallocation(void* ptr, size_t word_size) {
3756   assert(DumpSharedSpaces, "sanity");
3757 
3758   for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3759     if (rec->_ptr == ptr) {
3760       assert(rec->_byte_size == (int)word_size * wordSize, "sanity");
3761       rec->_type = MetaspaceObj::DeallocatedType;
3762       return;
3763     }
3764   }
3765 
3766   assert(0, "deallocating a pointer that was not recorded");
3767 }
3768 
3769 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3770   assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3771 
3772   address last_addr = (address)bottom();
3773 
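  // Visit every recorded allocation; any gap before a record (relative to the
  // previously visited address) and the tail up to the used top are reported
  // as UnknownType space.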
3774   for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3775     address ptr = rec->_ptr;
3776     if (last_addr < ptr) {
3777       closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3778     }
3779     closure->doit(ptr, rec->_type, rec->_byte_size);
3780     last_addr = ptr + rec->_byte_size;
3781   }
3782 
3783   address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3784   if (last_addr < top) {
3785     closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3786   }
3787 }
3788 
3789 void Metaspace::purge(MetadataType mdtype) {
3790   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3791 }
3792 
3793 void Metaspace::purge() {
3794   MutexLockerEx cl(SpaceManager::expand_lock(),
3795                    Mutex::_no_safepoint_check_flag);
3796   purge(NonClassType);
3797   if (using_class_space()) {
3798     purge(ClassType);
3799   }
3800 }
3801 
3802 void Metaspace::print_on(outputStream* out) const {
3803   // Print both class virtual space counts and metaspace.
3804   if (Verbose) {
3805     vsm()->print_on(out);
3806     if (using_class_space()) {
3807       class_vsm()->print_on(out);
3808     }
3809   }
3810 }
3811 
3812 bool Metaspace::contains(const void* ptr) {
3813   if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
3814     return true;
3815   }
3816   return contains_non_shared(ptr);
3817 }
3818 
3819 bool Metaspace::contains_non_shared(const void* ptr) {
3820   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
3821      return true;
3822   }
3823 
3824   return get_space_list(NonClassType)->contains(ptr);
3825 }
3826 
3827 void Metaspace::verify() {
3828   vsm()->verify();
3829   if (using_class_space()) {
3830     class_vsm()->verify();
3831   }
3832 }
3833 
3834 void Metaspace::dump(outputStream* const out) const {
3835   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
3836   vsm()->dump(out);
3837   if (using_class_space()) {
3838     out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
3839     class_vsm()->dump(out);
3840   }
3841 }
3842 
3843 /////////////// Unit tests ///////////////
3844 
3845 #ifndef PRODUCT
3846 
3847 class TestMetaspaceAuxTest : AllStatic {
3848  public:
3849   static void test_reserved() {
3850     size_t reserved = MetaspaceAux::reserved_bytes();
3851 
3852     assert(reserved > 0, "assert");
3853 
3854     size_t committed  = MetaspaceAux::committed_bytes();
3855     assert(committed <= reserved, "assert");
3856 
3857     size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3858     assert(reserved_metadata > 0, "assert");
3859     assert(reserved_metadata <= reserved, "assert");
3860 
3861     if (UseCompressedClassPointers) {
3862       size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3863       assert(reserved_class > 0, "assert");
3864       assert(reserved_class < reserved, "assert");
3865     }
3866   }
3867 
3868   static void test_committed() {
3869     size_t committed = MetaspaceAux::committed_bytes();
3870 
3871     assert(committed > 0, "assert");
3872 
3873     size_t reserved  = MetaspaceAux::reserved_bytes();
3874     assert(committed <= reserved, "assert");
3875 
3876     size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3877     assert(committed_metadata > 0, "assert");
3878     assert(committed_metadata <= committed, "assert");
3879 
3880     if (UseCompressedClassPointers) {
3881       size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3882       assert(committed_class > 0, "assert");
3883       assert(committed_class < committed, "assert");
3884     }
3885   }
3886 
3887   static void test_virtual_space_list_large_chunk() {
3888     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3889     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3890     // Use a size larger than VirtualSpaceSize (256K) and add a page so that the
3891     // size is _not_ vm_allocation_granularity aligned on Windows.
3892     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3893     large_size += (os::vm_page_size()/BytesPerWord);
3894     vs_list->get_new_chunk(large_size, 0);
3895   }
3896 
3897   static void test() {
3898     test_reserved();
3899     test_committed();
3900     test_virtual_space_list_large_chunk();
3901   }
3902 };
3903 
3904 void TestMetaspaceAux_test() {
3905   TestMetaspaceAuxTest::test();
3906 }
3907 
3908 class TestVirtualSpaceNodeTest {
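  // Helper: greedily split a word count into medium, small and specialized
  // chunks. For example, with the non-class chunk sizes used in this file
  // (MediumChunk = 8K words, SmallChunk = 512, SpecializedChunk = 128),
  // 9K words becomes one medium chunk plus two small chunks.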
3909   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3910                                           size_t& num_small_chunks,
3911                                           size_t& num_specialized_chunks) {
3912     num_medium_chunks = words_left / MediumChunk;
3913     words_left = words_left % MediumChunk;
3914 
3915     num_small_chunks = words_left / SmallChunk;
3916     words_left = words_left % SmallChunk;
3917     // how many specialized chunks can we get?
3918     num_specialized_chunks = words_left / SpecializedChunk;
3919     assert(words_left % SpecializedChunk == 0, "should be nothing left");
3920   }
3921 
3922  public:
3923   static void test() {
3924     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3925     const size_t vsn_test_size_words = MediumChunk  * 4;
3926     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3927 
3928     // The chunk sizes must be multiples of each other, or this will fail
3929     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3930     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3931 
3932     { // No committed memory in VSN
3933       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3934       VirtualSpaceNode vsn(vsn_test_size_bytes);
3935       vsn.initialize();
3936       vsn.retire(&cm);
3937       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3938     }
3939 
3940     { // All of VSN is committed, half is used by chunks
3941       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3942       VirtualSpaceNode vsn(vsn_test_size_bytes);
3943       vsn.initialize();
3944       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3945       vsn.get_chunk_vs(MediumChunk);
3946       vsn.get_chunk_vs(MediumChunk);
3947       vsn.retire(&cm);
3948       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3949       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3950     }
3951 
3952     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3953     // This doesn't work for systems with vm_page_size >= 16K.
3954     if (page_chunks < MediumChunk) {
3955       // 4 pages of VSN is committed, some is used by chunks
3956       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3957       VirtualSpaceNode vsn(vsn_test_size_bytes);
3958 
3959       vsn.initialize();
3960       vsn.expand_by(page_chunks, page_chunks);
3961       vsn.get_chunk_vs(SmallChunk);
3962       vsn.get_chunk_vs(SpecializedChunk);
3963       vsn.retire(&cm);
3964 
3965       // committed - used = words left to retire
3966       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3967 
3968       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3969       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3970 
3971       assert(num_medium_chunks == 0, "should not get any medium chunks");
3972       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3973       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3974     }
3975 
3976     { // Half of VSN is committed, a humongous chunk is used
3977       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3978       VirtualSpaceNode vsn(vsn_test_size_bytes);
3979       vsn.initialize();
3980       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3981       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3982       vsn.retire(&cm);
3983 
3984       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3985       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3986       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3987 
3988       assert(num_medium_chunks == 0, "should not get any medium chunks");
3989       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3990       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3991     }
3992 
3993   }
3994 
3995 #define assert_is_available_positive(word_size) \
3996   assert(vsn.is_available(word_size), \
3997          #word_size ": " PTR_FORMAT " bytes were not available in " \
3998          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3999          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
4000 
4001 #define assert_is_available_negative(word_size) \
4002   assert(!vsn.is_available(word_size), \
4003          #word_size ": " PTR_FORMAT " bytes should not be available in " \
4004          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
4005          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
4006 
4007   static void test_is_available_positive() {
4008     // Reserve some memory.
4009     VirtualSpaceNode vsn(os::vm_allocation_granularity());
4010     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4011 
4012     // Commit some memory.
4013     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4014     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4015     assert(expanded, "Failed to commit");
4016 
4017     // Check that is_available accepts the committed size.
4018     assert_is_available_positive(commit_word_size);
4019 
4020     // Check that is_available accepts half the committed size.
4021     size_t expand_word_size = commit_word_size / 2;
4022     assert_is_available_positive(expand_word_size);
4023   }
4024 
4025   static void test_is_available_negative() {
4026     // Reserve some memory.
4027     VirtualSpaceNode vsn(os::vm_allocation_granularity());
4028     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4029 
4030     // Commit some memory.
4031     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4032     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4033     assert(expanded, "Failed to commit");
4034 
4035     // Check that is_available doesn't accept a too large size.
4036     size_t two_times_commit_word_size = commit_word_size * 2;
4037     assert_is_available_negative(two_times_commit_word_size);
4038   }
4039 
4040   static void test_is_available_overflow() {
4041     // Reserve some memory.
4042     VirtualSpaceNode vsn(os::vm_allocation_granularity());
4043     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4044 
4045     // Commit some memory.
4046     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4047     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4048     assert(expanded, "Failed to commit");
4049 
4050     // Calculate a size that will overflow the virtual space size.
4051     void* virtual_space_max = (void*)(uintptr_t)-1;
4052     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
4053     size_t overflow_size = bottom_to_max + BytesPerWord;
4054     size_t overflow_word_size = overflow_size / BytesPerWord;
4055 
4056     // Check that is_available can handle the overflow.
4057     assert_is_available_negative(overflow_word_size);
4058   }
4059 
4060   static void test_is_available() {
4061     TestVirtualSpaceNodeTest::test_is_available_positive();
4062     TestVirtualSpaceNodeTest::test_is_available_negative();
4063     TestVirtualSpaceNodeTest::test_is_available_overflow();
4064   }
4065 };
4066 
4067 void TestVirtualSpaceNode_test() {
4068   TestVirtualSpaceNodeTest::test();
4069   TestVirtualSpaceNodeTest::test_is_available();
4070 }
4071 
4072 // The following test is placed here instead of a gtest / unittest file
4073 // because the ChunkManager class is only available in this file.
4074 void ChunkManager_test_list_index() {
4075   ChunkManager manager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
4076 
4077   // Test previous bug where a query for a humongous class metachunk
4078   // incorrectly matched the non-class medium metachunk size.
4079   {
4080     assert(MediumChunk > ClassMediumChunk, "Precondition for test");
4081 
4082     ChunkIndex index = manager.list_index(MediumChunk);
4083 
4084     assert(index == HumongousIndex,
4085            "Requested size is larger than ClassMediumChunk,"
4086            " so should return HumongousIndex. Got index: %d", (int)index);
4087   }
4088 
4089   // Check the specified sizes as well.
4090   {
4091     ChunkIndex index = manager.list_index(ClassSpecializedChunk);
4092     assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
4093   }
4094   {
4095     ChunkIndex index = manager.list_index(ClassSmallChunk);
4096     assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
4097   }
4098   {
4099     ChunkIndex index = manager.list_index(ClassMediumChunk);
4100     assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
4101   }
4102   {
4103     ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
4104     assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index);
4105   }
4106 }
4107 
4108 #endif // !PRODUCT
4109 
4110 #ifdef ASSERT
4111 
4112 // ChunkManagerReturnTest stresses taking/returning chunks from the ChunkManager. It takes and
4113 // returns chunks from/to the ChunkManager while keeping track of the expected ChunkManager
4114 // content.
4115 class ChunkManagerReturnTestImpl {
4116 
4117   VirtualSpaceNode _vsn;
4118   ChunkManager _cm;
4119 
4120   // The expected content of the chunk manager.
4121   unsigned _chunks_in_chunkmanager;
4122   size_t _words_in_chunkmanager;
4123 
4124   // A fixed size pool of chunks. Chunks may be in the chunk manager (free) or not (in use).
4125   static const int num_chunks = 256;
4126   Metachunk* _pool[num_chunks];
4127 
4128   // Helper, return a random position into the chunk pool.
4129   static int get_random_position() {
4130     return os::random() % num_chunks;
4131   }
4132 
4133   // Asserts that ChunkManager counters match expectations.
4134   void assert_counters() {
4135     assert(_vsn.container_count() == num_chunks - _chunks_in_chunkmanager, "vsn counter mismatch.");
4136     assert(_cm.free_chunks_count() == _chunks_in_chunkmanager, "cm counter mismatch.");
4137     assert(_cm.free_chunks_total_words() == _words_in_chunkmanager, "cm counter mismatch.");
4138   }
4139 
4140   // Get a random chunk size. Equal chance to get spec/med/small chunk size or
4141   // a humongous chunk size. The latter itself is random in the range of [med+spec..4*med).
4142   size_t get_random_chunk_size() {
4143     const size_t sizes [] = { SpecializedChunk, SmallChunk, MediumChunk };
4144     const int rand = os::random() % 4;
4145     if (rand < 3) {
4146       return sizes[rand];
4147     } else {
4148       // Note: this affects the max. size of space (see _vsn initialization in ctor).
4149       return align_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk);
4150     }
4151   }
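
       // For illustration only (derived from the branch above, and assuming MediumChunk is a
       // multiple of SpecializedChunk): the humongous case yields, at its extremes,
       //   align_up(MediumChunk + 1, SpecializedChunk)                         == MediumChunk + SpecializedChunk
       //   align_up(MediumChunk + 1 + (MediumChunk * 4 - 1), SpecializedChunk) == MediumChunk * 5
       // which is why the virtual space in the constructor is sized with a factor of
       // MediumChunk * 5 per pool slot.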
4152 
4153   // Starting at pool index <start>+1, find the next chunk tagged as either free or in use, depending
4154   // on <is_free>. Search wraps. Returns its position, or -1 if no matching chunk was found.
4155   int next_matching_chunk(int start, bool is_free) const {
4156     assert(start >= 0 && start < num_chunks, "invalid parameter");
4157     int pos = start;
4158     do {
4159       if (++pos == num_chunks) {
4160         pos = 0;
4161       }
4162       if (_pool[pos]->is_tagged_free() == is_free) {
4163         return pos;
4164       }
4165     } while (pos != start);
4166     return -1;
4167   }
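
       // For illustration: with num_chunks == 256 and start == 250, positions are probed in the
       // order 251, 252, ..., 255, 0, 1, ..., 250 (the start position itself is probed last),
       // and -1 is returned only if none of them matches <is_free>.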
4168 
4169   // A structure to keep information about a chunk list, including which
4170   // chunks are part of it. This is needed to remember the chunks of a list
4171   // we are about to return to the ChunkManager, because the original list will be destroyed.
4172   struct AChunkList {
4173     Metachunk* head;
4174     Metachunk* all[num_chunks];
4175     size_t size;
4176     int num;
4177     ChunkIndex index;
4178   };
4179 
4180   // Assemble, from the in-use chunks (not in the chunk manager) in the pool,
4181   // a random chunk list of max. length <list_size> of chunks with the same
4182   // ChunkIndex (chunk size).
4183   // Returns false if no list could be assembled; otherwise the list is returned in
4184   // the <out> structure. The returned list may be shorter than <list_size>.
4185   bool assemble_random_chunklist(AChunkList* out, int list_size) {
4186     // Choose a random in-use chunk from the pool...
4187     const int headpos = next_matching_chunk(get_random_position(), false);
4188     if (headpos == -1) {
4189       return false;
4190     }
4191     Metachunk* const head = _pool[headpos];
4192     out->all[0] = head;
4193     assert(head->is_tagged_free() == false, "Chunk state mismatch");
4194     // ...then, starting from there, chain up to list_size - 1 other in-use
4195     // chunks of the same index onto it.
4196     const ChunkIndex index = _cm.list_index(head->word_size());
4197     int num_added = 1;
4198     size_t size_added = head->word_size();
4199     int pos = headpos;
4200     Metachunk* tail = head;
4201     do {
4202       pos = next_matching_chunk(pos, false);
4203       if (pos != headpos) {
4204         Metachunk* c = _pool[pos];
4205         assert(c->is_tagged_free() == false, "Chunk state mismatch");
4206         if (index == _cm.list_index(c->word_size())) {
4207           tail->set_next(c);
4208           c->set_prev(tail);
4209           tail = c;
4210           out->all[num_added] = c;
4211           num_added ++;
4212           size_added += c->word_size();
4213         }
4214       }
4215     } while (num_added < list_size && pos != headpos);
4216     out->head = head;
4217     out->index = index;
4218     out->size = size_added;
4219     out->num = num_added;
4220     return true;
4221   }
4222 
4223   // Take a single random chunk from the ChunkManager.
4224   bool take_single_random_chunk_from_chunkmanager() {
4225     assert_counters();
4226     _cm.locked_verify();
4227     int pos = next_matching_chunk(get_random_position(), true);
4228     if (pos == -1) {
4229       return false;
4230     }
4231     Metachunk* c = _pool[pos];
4232     assert(c->is_tagged_free(), "Chunk state mismatch");
4233     // Note: instead of using ChunkManager::remove_chunk on this one chunk, we call
4234     // ChunkManager::free_chunks_get() with this chunk's word size. We really want
4235     // to exercise ChunkManager::free_chunks_get() because that one gets called for
4236     // normal chunk allocation.
4237     Metachunk* c2 = _cm.free_chunks_get(c->word_size());
4238     assert(c2 != NULL, "Unexpected.");
4239     assert(!c2->is_tagged_free(), "Chunk state mismatch");
4240     assert(c2->next() == NULL && c2->prev() == NULL, "Chunk should be outside of a list.");
4241     _chunks_in_chunkmanager --;
4242     _words_in_chunkmanager -= c->word_size();
4243     assert_counters();
4244     _cm.locked_verify();
4245     return true;
4246   }
4247 
4248   // Returns a single random chunk to the chunk manager. Returns false if that
4249   // was not possible (all chunks are already in the chunk manager).
4250   bool return_single_random_chunk_to_chunkmanager() {
4251     assert_counters();
4252     _cm.locked_verify();
4253     int pos = next_matching_chunk(get_random_position(), false);
4254     if (pos == -1) {
4255       return false;
4256     }
4257     Metachunk* c = _pool[pos];
4258     assert(c->is_tagged_free() == false, "wrong chunk information");
4259     _cm.return_single_chunk(_cm.list_index(c->word_size()), c);
4260     _chunks_in_chunkmanager ++;
4261     _words_in_chunkmanager += c->word_size();
4262     assert(c->is_tagged_free() == true, "wrong chunk information");
4263     assert_counters();
4264     _cm.locked_verify();
4265     return true;
4266   }
4267 
4268   // Return a random chunk list to the chunk manager. Returns the length of the
4269   // returned list.
4270   int return_random_chunk_list_to_chunkmanager(int list_size) {
4271     assert_counters();
4272     _cm.locked_verify();
4273     AChunkList aChunkList;
4274     if (!assemble_random_chunklist(&aChunkList, list_size)) {
4275       return 0;
4276     }
4277     // Before the chunks are returned, they should be tagged in use.
4278     for (int i = 0; i < aChunkList.num; i ++) {
4279       assert(!aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4280     }
4281     _cm.return_chunk_list(aChunkList.index, aChunkList.head);
4282     _chunks_in_chunkmanager += aChunkList.num;
4283     _words_in_chunkmanager += aChunkList.size;
4284     // After all chunks are returned, check that they are now tagged free.
4285     for (int i = 0; i < aChunkList.num; i ++) {
4286       assert(aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4287     }
4288     assert_counters();
4289     _cm.locked_verify();
4290     return aChunkList.num;
4291   }
4292 
4293 public:
4294 
4295   ChunkManagerReturnTestImpl()
4296     : _vsn(align_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
4297     , _cm(SpecializedChunk, SmallChunk, MediumChunk)
4298     , _chunks_in_chunkmanager(0)
4299     , _words_in_chunkmanager(0)
4300   {
4301     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4302     // Allocate virtual space and carve out random chunks, keeping them in the _pool. These chunks count
4303     // as "in use" because they have not yet been added to any chunk manager.
4304     _vsn.initialize();
4305     _vsn.expand_by(_vsn.reserved_words(), _vsn.reserved_words());
4306     for (int i = 0; i < num_chunks; i ++) {
4307       const size_t size = get_random_chunk_size();
4308       _pool[i] = _vsn.get_chunk_vs(size);
4309       assert(_pool[i] != NULL, "allocation failed");
4310     }
4311     assert_counters();
4312     _cm.locked_verify();
4313   }
4314 
4315   // Test entry point.
4316   // Return some chunks to the chunk manager (return phase). Take some chunks out (take phase). Repeat.
4317   // Chunks are chosen randomly. The number of chunks to return or take is also chosen randomly, but is
4318   // affected by the <phase_length_factor> argument: a factor of 0.0 will cause the test to quickly alternate
4319   // between returning and taking, whereas a factor of 1.0 will take/return all chunks from/to the
4320   // chunk manager, thereby emptying or filling it completely.
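       // For example (illustrative arithmetic only, using num_chunks == 256 and the -4..+3 jitter applied below):
       //   phase_length_factor == 0.0  ->  average_phase_length == 0,   phase lengths of 1..3 chunk moves
       //   phase_length_factor == 0.5  ->  average_phase_length == 128, phase lengths of roughly 124..131 moves
       //   phase_length_factor == 1.0  ->  average_phase_length == 256, phase lengths of roughly 252..259 moves
       // A phase also ends early if no matching chunk could be moved at all.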
4321   void do_test(float phase_length_factor) {
4322     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4323     assert_counters();
4324     // Execute n operations, an operation being the move of a single chunk to or from the chunk manager.
4325     const int num_max_ops = num_chunks * 100;
4326     int num_ops = num_max_ops;
4327     const int average_phase_length = (int)(phase_length_factor * num_chunks);
4328     int num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
4329     bool return_phase = true;
4330     while (num_ops > 0) {
4331       int chunks_moved = 0;
4332       if (return_phase) {
4333         // Randomly switch between returning a single chunk or a random length chunk list.
4334         if (os::random() % 2 == 0) {
4335           if (return_single_random_chunk_to_chunkmanager()) {
4336             chunks_moved = 1;
4337           }
4338         } else {
4339           const int list_length = MAX2(1, (os::random() % num_ops_until_switch));
4340           chunks_moved = return_random_chunk_list_to_chunkmanager(list_length);
4341         }
4342       } else {
4343         // Breathe out: take a single chunk back from the chunk manager.
4344         if (take_single_random_chunk_from_chunkmanager()) {
4345           chunks_moved = 1;
4346         }
4347       }
4348       num_ops -= chunks_moved;
4349       num_ops_until_switch -= chunks_moved;
4350       if (chunks_moved == 0 || num_ops_until_switch <= 0) {
4351         return_phase = !return_phase;
4352         num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
4353       }
4354     }
4355   }
4356 };
4357 
4358 void* setup_chunkmanager_returntests() {
4359   ChunkManagerReturnTestImpl* p = new ChunkManagerReturnTestImpl();
4360   return p;
4361 }
4362 
4363 void teardown_chunkmanager_returntests(void* p) {
4364   delete (ChunkManagerReturnTestImpl*) p;
4365 }
4366 
4367 void run_chunkmanager_returntests(void* p, float phase_length) {
4368   ChunkManagerReturnTestImpl* test = (ChunkManagerReturnTestImpl*) p;
4369   test->do_test(phase_length);
4370 }
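
     // Sketch of how these hooks could be driven from outside this file. The wrapper name and the
     // gtest-style macro below are illustrative only, not taken from this file; only the three hook
     // functions above are assumed.
     //
     //   TEST_VM(ChunkManagerReturnTest, stress) {
     //     void* impl = setup_chunkmanager_returntests();
     //     run_chunkmanager_returntests(impl, 0.0f);   // rapid alternation between phases
     //     run_chunkmanager_returntests(impl, 0.5f);   // medium-length phases
     //     run_chunkmanager_returntests(impl, 1.0f);   // completely drain/fill the chunk manager
     //     teardown_chunkmanager_returntests(impl);
     //   }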
4371 
4372 // The following test is placed here instead of a gtest / unittest file
4373 // because the SpaceManager class is only available in this file.
4374 class SpaceManagerTest : AllStatic {
4375   friend void SpaceManager_test_adjust_initial_chunk_size();
4376 
4377   static void test_adjust_initial_chunk_size(bool is_class) {
4378     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
4379     const size_t normal   = SpaceManager::small_chunk_size(is_class);
4380     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
4381 
4382 #define test_adjust_initial_chunk_size(value, expected, is_class_value)          \
4383     do {                                                                         \
4384       size_t v = value;                                                          \
4385       size_t e = expected;                                                       \
4386       size_t a = SpaceManager::adjust_initial_chunk_size(v, (is_class_value)); \
4387       assert(a == e, "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, a);      \
4388     } while (0)
4389 
4390     // Smallest (specialized)
4391     test_adjust_initial_chunk_size(1,            smallest, is_class);
4392     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
4393     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
4394 
4395     // Small
4396     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
4397     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
4398     test_adjust_initial_chunk_size(normal,       normal, is_class);
4399 
4400     // Medium
4401     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
4402     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
4403     test_adjust_initial_chunk_size(medium,     medium, is_class);
4404 
4405     // Humongous
4406     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
4407 
4408 #undef test_adjust_initial_chunk_size
4409   }
4410 
4411   static void test_adjust_initial_chunk_size() {
4412     test_adjust_initial_chunk_size(false);
4413     test_adjust_initial_chunk_size(true);
4414   }
4415 };
4416 
4417 void SpaceManager_test_adjust_initial_chunk_size() {
4418   SpaceManagerTest::test_adjust_initial_chunk_size();
4419 }
4420 
4421 #endif // ASSERT