1 /*
   2  * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "gc/shared/gcLocker.hpp"
  29 #include "logging/log.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/binaryTreeDictionary.hpp"
  32 #include "memory/filemap.hpp"
  33 #include "memory/freeList.hpp"
  34 #include "memory/metachunk.hpp"
  35 #include "memory/metaspace.hpp"
  36 #include "memory/metaspaceGCThresholdUpdater.hpp"
  37 #include "memory/metaspaceShared.hpp"
  38 #include "memory/metaspaceTracer.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "memory/universe.hpp"
  41 #include "runtime/atomic.hpp"
  42 #include "runtime/globals.hpp"
  43 #include "runtime/init.hpp"
  44 #include "runtime/java.hpp"
  45 #include "runtime/mutex.hpp"
  46 #include "runtime/orderAccess.inline.hpp"
  47 #include "services/memTracker.hpp"
  48 #include "services/memoryService.hpp"
  49 #include "utilities/copy.hpp"
  50 #include "utilities/debug.hpp"
  51 #include "utilities/macros.hpp"
  52 
  53 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  54 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
  55 
  56 // Set this constant to enable slow integrity checking of the free chunk lists
  57 const bool metaspace_slow_verify = false;
  58 
  59 size_t const allocation_from_dictionary_limit = 4 * K;
  60 
  61 MetaWord* last_allocated = 0;
  62 
  63 size_t Metaspace::_compressed_class_space_size;
  64 const MetaspaceTracer* Metaspace::_tracer = NULL;
  65 
  66 // Used in declarations in SpaceManager and ChunkManager
  67 enum ChunkIndex {
  68   ZeroIndex = 0,
  69   SpecializedIndex = ZeroIndex,
  70   SmallIndex = SpecializedIndex + 1,
  71   MediumIndex = SmallIndex + 1,
  72   HumongousIndex = MediumIndex + 1,
  73   NumberOfFreeLists = 3,
  74   NumberOfInUseLists = 4
  75 };
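// Note: only the first three indexes (specialized, small, medium) have
// dedicated free lists (NumberOfFreeLists == 3); humongous chunks are kept in
// a dictionary instead, which is why NumberOfInUseLists == 4 (see ChunkManager
// below).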
  76 
  77 // Helper, returns a descriptive name for the given index.
  78 static const char* chunk_size_name(ChunkIndex index) {
  79   switch (index) {
  80     case SpecializedIndex:
  81       return "specialized";
  82     case SmallIndex:
  83       return "small";
  84     case MediumIndex:
  85       return "medium";
  86     case HumongousIndex:
  87       return "humongous";
  88     default:
  89       return "Invalid index";
  90   }
  91 }
  92 
  93 enum ChunkSizes {    // in words.
  94   ClassSpecializedChunk = 128,
  95   SpecializedChunk = 128,
  96   ClassSmallChunk = 256,
  97   SmallChunk = 512,
  98   ClassMediumChunk = 4 * K,
  99   MediumChunk = 8 * K
 100 };
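// Illustrative example (assuming a 64-bit VM where BytesPerWord == 8): the
// word counts above correspond to SpecializedChunk = 1 KB, SmallChunk = 4 KB
// and MediumChunk = 64 KB, with the class space variants ClassSmallChunk = 2 KB
// and ClassMediumChunk = 32 KB.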
 101 
 102 static ChunkIndex next_chunk_index(ChunkIndex i) {
 103   assert(i < NumberOfInUseLists, "Out of bounds");
 104   return (ChunkIndex) (i+1);
 105 }
 106 
 107 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 108 uint MetaspaceGC::_shrink_factor = 0;
 109 bool MetaspaceGC::_should_concurrent_collect = false;
 110 
 111 typedef class FreeList<Metachunk> ChunkList;
 112 
 113 // Manages the global free lists of chunks.
 114 class ChunkManager : public CHeapObj<mtInternal> {
 115   friend class TestVirtualSpaceNodeTest;
 116 
 117   // Free list of chunks of different sizes.
 118   //   SpecializedChunk
 119   //   SmallChunk
 120   //   MediumChunk
 121   ChunkList _free_chunks[NumberOfFreeLists];
 122 
 123   // Return non-humongous chunk list by its index.
 124   ChunkList* free_chunks(ChunkIndex index);
 125 
 126   // Returns non-humongous chunk list for the given chunk word size.
 127   ChunkList* find_free_chunks_list(size_t word_size);
 128 
 129   //   HumongousChunk
 130   ChunkTreeDictionary _humongous_dictionary;
 131 
 132   // Returns the humongous chunk dictionary.
 133   ChunkTreeDictionary* humongous_dictionary() {
 134     return &_humongous_dictionary;
 135   }
 136 
 137   // Size, in metaspace words, of all chunks managed by this ChunkManager
 138   size_t _free_chunks_total;
 139   // Number of chunks in this ChunkManager
 140   size_t _free_chunks_count;
 141 
 142   // Update counters after a chunk was added or removed.
 143   void account_for_added_chunk(const Metachunk* c);
 144   void account_for_removed_chunk(const Metachunk* c);
 145 
 146   // Debug support
 147 
 148   size_t sum_free_chunks();
 149   size_t sum_free_chunks_count();
 150 
 151   void locked_verify_free_chunks_total();
 152   void slow_locked_verify_free_chunks_total() {
 153     if (metaspace_slow_verify) {
 154       locked_verify_free_chunks_total();
 155     }
 156   }
 157   void locked_verify_free_chunks_count();
 158   void slow_locked_verify_free_chunks_count() {
 159     if (metaspace_slow_verify) {
 160       locked_verify_free_chunks_count();
 161     }
 162   }
 163   void verify_free_chunks_count();
 164 
 165  public:
 166 
 167   ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
 168       : _free_chunks_total(0), _free_chunks_count(0) {
 169     _free_chunks[SpecializedIndex].set_size(specialized_size);
 170     _free_chunks[SmallIndex].set_size(small_size);
 171     _free_chunks[MediumIndex].set_size(medium_size);
 172   }
 173 
 174   // Allocate (remove) a chunk from the global freelist.
 175   Metachunk* chunk_freelist_allocate(size_t word_size);
 176 
 177   // Map a size to a list index assuming that there are lists
 178   // for special, small, medium, and humongous chunks.
 179   ChunkIndex list_index(size_t size);
 180 
 181   // Map a given index to the chunk size.
 182   size_t size_by_index(ChunkIndex index);
 183 
 184   // Take a chunk from the ChunkManager. The chunk is expected to be in
 185   // the chunk manager (the freelist if non-humongous, the dictionary if
 186   // humongous).
 187   void remove_chunk(Metachunk* chunk);
 188 
 189   // Return a single chunk of type index to the ChunkManager.
 190   void return_single_chunk(ChunkIndex index, Metachunk* chunk);
 191 
 192   // Add the simple linked list of chunks to the freelist of chunks
 193   // of type index.
 194   void return_chunk_list(ChunkIndex index, Metachunk* chunk);
 195 
 196   // Total of the space in the free chunks list
 197   size_t free_chunks_total_words();
 198   size_t free_chunks_total_bytes();
 199 
 200   // Number of chunks in the free chunks list
 201   size_t free_chunks_count();
 202 
 203   // Remove from a list by size.  Selects list based on size of chunk.
 204   Metachunk* free_chunks_get(size_t chunk_word_size);
 205 
 206 #define index_bounds_check(index)                                         \
 207   assert(index == SpecializedIndex ||                                     \
 208          index == SmallIndex ||                                           \
 209          index == MediumIndex ||                                          \
 210          index == HumongousIndex, "Bad index: %d", (int) index)
 211 
 212   size_t num_free_chunks(ChunkIndex index) const {
 213     index_bounds_check(index);
 214 
 215     if (index == HumongousIndex) {
 216       return _humongous_dictionary.total_free_blocks();
 217     }
 218 
 219     ssize_t count = _free_chunks[index].count();
 220     return count == -1 ? 0 : (size_t) count;
 221   }
 222 
 223   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 224     index_bounds_check(index);
 225 
 226     size_t word_size = 0;
 227     if (index == HumongousIndex) {
 228       word_size = _humongous_dictionary.total_size();
 229     } else {
 230       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 231       word_size = size_per_chunk_in_words * num_free_chunks(index);
 232     }
 233 
 234     return word_size * BytesPerWord;
 235   }
 236 
 237   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 238     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 239                                          num_free_chunks(SmallIndex),
 240                                          num_free_chunks(MediumIndex),
 241                                          num_free_chunks(HumongousIndex),
 242                                          size_free_chunks_in_bytes(SpecializedIndex),
 243                                          size_free_chunks_in_bytes(SmallIndex),
 244                                          size_free_chunks_in_bytes(MediumIndex),
 245                                          size_free_chunks_in_bytes(HumongousIndex));
 246   }
 247 
 248   // Debug support
 249   void verify();
 250   void slow_verify() {
 251     if (metaspace_slow_verify) {
 252       verify();
 253     }
 254   }
 255   void locked_verify();
 256   void slow_locked_verify() {
 257     if (metaspace_slow_verify) {
 258       locked_verify();
 259     }
 260   }
 261   void verify_free_chunks_total();
 262 
 263   void locked_print_free_chunks(outputStream* st);
 264   void locked_print_sum_free_chunks(outputStream* st);
 265 
 266   void print_on(outputStream* st) const;
 267 };
 268 
 269 class SmallBlocks : public CHeapObj<mtClass> {
 270   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 271   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 272 
 273  private:
 274   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 275 
 276   FreeList<Metablock>& list_at(size_t word_size) {
 277     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 278     return _small_lists[word_size - _small_block_min_size];
 279   }
 280 
 281  public:
 282   SmallBlocks() {
 283     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 284       uint k = i - _small_block_min_size;
 285       _small_lists[k].set_size(i);
 286     }
 287   }
 288 
 289   size_t total_size() const {
 290     size_t result = 0;
 291     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 292       uint k = i - _small_block_min_size;
 293       result = result + _small_lists[k].count() * _small_lists[k].size();
 294     }
 295     return result;
 296   }
 297 
 298   static uint small_block_max_size() { return _small_block_max_size; }
 299   static uint small_block_min_size() { return _small_block_min_size; }
 300 
 301   MetaWord* get_block(size_t word_size) {
 302     if (list_at(word_size).count() > 0) {
 303       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 304       return new_block;
 305     } else {
 306       return NULL;
 307     }
 308   }
 309   void return_block(Metablock* free_chunk, size_t word_size) {
 310     list_at(word_size).return_chunk_at_head(free_chunk, false);
 311     assert(list_at(word_size).count() > 0, "Should have a chunk");
 312   }
 313 
 314   void print_on(outputStream* st) const {
 315     st->print_cr("SmallBlocks:");
 316     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 317       uint k = i - _small_block_min_size;
 318       st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
 319     }
 320   }
 321 };
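// Note: SmallBlocks keeps one FreeList per block size in the half-open range
// [_small_block_min_size, _small_block_max_size) words; a returned block of N
// words lands in _small_lists[N - _small_block_min_size], so list_at() is a
// constant-time lookup.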
 322 
 323 // Used to manage the free list of Metablocks (a block corresponds
 324 // to the allocation of a quantum of metadata).
 325 class BlockFreelist : public CHeapObj<mtClass> {
 326   BlockTreeDictionary* const _dictionary;
 327   SmallBlocks* _small_blocks;
 328 
 329   // Only allocate and split from freelist if the size of the allocation
 330   // is at least 1/4th the size of the available block.
 331   const static int WasteMultiplier = 4;
 332 
 333   // Accessors
 334   BlockTreeDictionary* dictionary() const { return _dictionary; }
 335   SmallBlocks* small_blocks() {
 336     if (_small_blocks == NULL) {
 337       _small_blocks = new SmallBlocks();
 338     }
 339     return _small_blocks;
 340   }
 341 
 342  public:
 343   BlockFreelist();
 344   ~BlockFreelist();
 345 
 346   // Get and return a block to the free list
 347   MetaWord* get_block(size_t word_size);
 348   void return_block(MetaWord* p, size_t word_size);
 349 
 350   size_t total_size() const  {
 351     size_t result = dictionary()->total_size();
 352     if (_small_blocks != NULL) {
 353       result = result + _small_blocks->total_size();
 354     }
 355     return result;
 356   }
 357 
 358   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 359   void print_on(outputStream* st) const;
 360 };
 361 
 362 // A VirtualSpaceList node.
 363 class VirtualSpaceNode : public CHeapObj<mtClass> {
 364   friend class VirtualSpaceList;
 365 
 366   // Link to next VirtualSpaceNode
 367   VirtualSpaceNode* _next;
 368 
 369   // total in the VirtualSpace
 370   MemRegion _reserved;
 371   ReservedSpace _rs;
 372   VirtualSpace _virtual_space;
 373   MetaWord* _top;
 374   // count of chunks contained in this VirtualSpace
 375   uintx _container_count;
 376 
 377   // Convenience functions to access the _virtual_space
 378   char* low()  const { return virtual_space()->low(); }
 379   char* high() const { return virtual_space()->high(); }
 380 
 381   // The first Metachunk will be allocated at the bottom of the
 382   // VirtualSpace
 383   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 384 
 385   // Committed but unused space in the virtual space
 386   size_t free_words_in_vs() const;
 387  public:
 388 
 389   VirtualSpaceNode(size_t byte_size);
 390   VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
 391   ~VirtualSpaceNode();
 392 
 393   // Convenience functions for logical bottom and end
 394   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 395   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 396 
 397   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 398 
 399   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 400   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 401 
 402   bool is_pre_committed() const { return _virtual_space.special(); }
 403 
 404   // _top is the address of the next available space in _virtual_space.
 405   // Accessors
 406   VirtualSpaceNode* next() { return _next; }
 407   void set_next(VirtualSpaceNode* v) { _next = v; }
 408 
 409   void set_reserved(MemRegion const v) { _reserved = v; }
 410   void set_top(MetaWord* v) { _top = v; }
 411 
 412   // Accessors
 413   MemRegion* reserved() { return &_reserved; }
 414   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 415 
 416   // Returns true if "word_size" is available in the VirtualSpace
 417   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 418 
 419   MetaWord* top() const { return _top; }
 420   void inc_top(size_t word_size) { _top += word_size; }
 421 
 422   uintx container_count() { return _container_count; }
 423   void inc_container_count();
 424   void dec_container_count();
 425 #ifdef ASSERT
 426   uintx container_count_slow();
 427   void verify_container_count();
 428 #endif
 429 
 430   // used and capacity in this single entry in the list
 431   size_t used_words_in_vs() const;
 432   size_t capacity_words_in_vs() const;
 433 
 434   bool initialize();
 435 
 436   // get space from the virtual space
 437   Metachunk* take_from_committed(size_t chunk_word_size);
 438 
 439   // Allocate a chunk from the virtual space and return it.
 440   Metachunk* get_chunk_vs(size_t chunk_word_size);
 441 
 442   // Expands/shrinks the committed space in a virtual space.  Delegates
 443   // to Virtualspace
 444   bool expand_by(size_t min_words, size_t preferred_words);
 445 
 446   // In preparation for deleting this node, remove all the chunks
 447   // in the node from any freelist.
 448   void purge(ChunkManager* chunk_manager);
 449 
 450   // If an allocation doesn't fit in the current node a new node is created.
 451   // Allocate chunks out of the remaining committed space in this node
 452   // to avoid wasting that memory.
 453   // This always adds up because all the chunk sizes are multiples of
 454   // the smallest chunk size.
 455   void retire(ChunkManager* chunk_manager);
 456 
 457 #ifdef ASSERT
 458   // Debug support
 459   void mangle();
 460 #endif
 461 
 462   void print_on(outputStream* st) const;
 463 };
 464 
 465 #define assert_is_aligned(value, alignment)                  \
 466   assert(is_aligned((value), (alignment)),                   \
 467          SIZE_FORMAT_HEX " is not aligned to "               \
 468          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 469 
 470 // Decide if large pages should be committed when the memory is reserved.
 471 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 472   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 473     size_t words = bytes / BytesPerWord;
 474     bool is_class = false; // We never reserve large pages for the class space.
 475     if (MetaspaceGC::can_expand(words, is_class) &&
 476         MetaspaceGC::allowed_expansion() >= words) {
 477       return true;
 478     }
 479   }
 480 
 481   return false;
 482 }
 483 
 484 // byte_size is the size of the associated VirtualSpace.
 485 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
 486   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 487 
 488 #if INCLUDE_CDS
 489   // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
 490   // a configurable address, generally at the top of the Java heap so other
 491   // memory addresses don't conflict.
 492   if (DumpSharedSpaces) {
 493     bool large_pages = false; // No large pages when dumping the CDS archive.
 494     char* shared_base = align_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
 495 
 496     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
 497     if (_rs.is_reserved()) {
 498       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
 499     } else {
 500       // Get a mmap region anywhere if the SharedBaseAddress fails.
 501       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 502     }
 503     if (!_rs.is_reserved()) {
 504       vm_exit_during_initialization("Unable to allocate memory for shared space",
 505         err_msg(SIZE_FORMAT " bytes.", bytes));
 506     }
 507     MetaspaceShared::initialize_shared_rs(&_rs);
 508   } else
 509 #endif
 510   {
 511     bool large_pages = should_commit_large_pages_when_reserving(bytes);
 512 
 513     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 514   }
 515 
 516   if (_rs.is_reserved()) {
 517     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 518     assert(_rs.size() != 0, "Catch if we get a 0 size");
 519     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
 520     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
 521 
 522     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 523   }
 524 }
 525 
 526 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 527   Metachunk* chunk = first_chunk();
 528   Metachunk* invalid_chunk = (Metachunk*) top();
 529   while (chunk < invalid_chunk ) {
 530     assert(chunk->is_tagged_free(), "Should be tagged free");
 531     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 532     chunk_manager->remove_chunk(chunk);
 533     assert(chunk->next() == NULL &&
 534            chunk->prev() == NULL,
 535            "Was not removed from its list");
 536     chunk = (Metachunk*) next;
 537   }
 538 }
 539 
 540 #ifdef ASSERT
 541 uintx VirtualSpaceNode::container_count_slow() {
 542   uintx count = 0;
 543   Metachunk* chunk = first_chunk();
 544   Metachunk* invalid_chunk = (Metachunk*) top();
 545   while (chunk < invalid_chunk ) {
 546     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 547     // Don't count the chunks on the free lists.  Those are
 548     // still part of the VirtualSpaceNode but not currently
 549     // counted.
 550     if (!chunk->is_tagged_free()) {
 551       count++;
 552     }
 553     chunk = (Metachunk*) next;
 554   }
 555   return count;
 556 }
 557 #endif
 558 
 559 // List of VirtualSpaces for metadata allocation.
 560 class VirtualSpaceList : public CHeapObj<mtClass> {
 561   friend class VirtualSpaceNode;
 562 
 563   enum VirtualSpaceSizes {
 564     VirtualSpaceSize = 256 * K
 565   };
 566 
 567   // Head of the list
 568   VirtualSpaceNode* _virtual_space_list;
 569   // virtual space currently being used for allocations
 570   VirtualSpaceNode* _current_virtual_space;
 571 
 572   // Is this VirtualSpaceList used for the compressed class space
 573   bool _is_class;
 574 
 575   // Sum of reserved and committed memory in the virtual spaces
 576   size_t _reserved_words;
 577   size_t _committed_words;
 578 
 579   // Number of virtual spaces
 580   size_t _virtual_space_count;
 581 
 582   ~VirtualSpaceList();
 583 
 584   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
 585 
 586   void set_virtual_space_list(VirtualSpaceNode* v) {
 587     _virtual_space_list = v;
 588   }
 589   void set_current_virtual_space(VirtualSpaceNode* v) {
 590     _current_virtual_space = v;
 591   }
 592 
 593   void link_vs(VirtualSpaceNode* new_entry);
 594 
 595   // Get another virtual space and add it to the list.  This
 596   // is typically prompted by a failed attempt to allocate a chunk
 597   // and is typically followed by the allocation of a chunk.
 598   bool create_new_virtual_space(size_t vs_word_size);
 599 
 600   // Chunk up the unused committed space in the current
 601   // virtual space and add the chunks to the free list.
 602   void retire_current_virtual_space();
 603 
 604  public:
 605   VirtualSpaceList(size_t word_size);
 606   VirtualSpaceList(ReservedSpace rs);
 607 
 608   size_t free_bytes();
 609 
 610   Metachunk* get_new_chunk(size_t chunk_word_size,
 611                            size_t suggested_commit_granularity);
 612 
 613   bool expand_node_by(VirtualSpaceNode* node,
 614                       size_t min_words,
 615                       size_t preferred_words);
 616 
 617   bool expand_by(size_t min_words,
 618                  size_t preferred_words);
 619 
 620   VirtualSpaceNode* current_virtual_space() {
 621     return _current_virtual_space;
 622   }
 623 
 624   bool is_class() const { return _is_class; }
 625 
 626   bool initialization_succeeded() { return _virtual_space_list != NULL; }
 627 
 628   size_t reserved_words()  { return _reserved_words; }
 629   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
 630   size_t committed_words() { return _committed_words; }
 631   size_t committed_bytes() { return committed_words() * BytesPerWord; }
 632 
 633   void inc_reserved_words(size_t v);
 634   void dec_reserved_words(size_t v);
 635   void inc_committed_words(size_t v);
 636   void dec_committed_words(size_t v);
 637   void inc_virtual_space_count();
 638   void dec_virtual_space_count();
 639 
 640   bool contains(const void* ptr);
 641 
 642   // Unlink empty VirtualSpaceNodes and free them.
 643   void purge(ChunkManager* chunk_manager);
 644 
 645   void print_on(outputStream* st) const;
 646 
 647   class VirtualSpaceListIterator : public StackObj {
 648     VirtualSpaceNode* _virtual_spaces;
 649    public:
 650     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
 651       _virtual_spaces(virtual_spaces) {}
 652 
 653     bool repeat() {
 654       return _virtual_spaces != NULL;
 655     }
 656 
 657     VirtualSpaceNode* get_next() {
 658       VirtualSpaceNode* result = _virtual_spaces;
 659       if (_virtual_spaces != NULL) {
 660         _virtual_spaces = _virtual_spaces->next();
 661       }
 662       return result;
 663     }
 664   };
 665 };
 666 
 667 class Metadebug : AllStatic {
 668   // Debugging support for Metaspaces
 669   static int _allocation_fail_alot_count;
 670 
 671  public:
 672 
 673   static void init_allocation_fail_alot_count();
 674 #ifdef ASSERT
 675   static bool test_metadata_failure();
 676 #endif
 677 };
 678 
 679 int Metadebug::_allocation_fail_alot_count = 0;
 680 
 681 //  SpaceManager - used by Metaspace to handle allocations
 682 class SpaceManager : public CHeapObj<mtClass> {
 683   friend class Metaspace;
 684   friend class Metadebug;
 685 
 686  private:
 687 
 688   // protects allocations
 689   Mutex* const _lock;
 690 
 691   // Type of metadata allocated.
 692   Metaspace::MetadataType _mdtype;
 693 
 694   // List of chunks in use by this SpaceManager.  Allocations
 695   // are done from the current chunk.  The list is used for deallocating
 696   // chunks when the SpaceManager is freed.
 697   Metachunk* _chunks_in_use[NumberOfInUseLists];
 698   Metachunk* _current_chunk;
 699 
 700   // Maximum number of small chunks to allocate to a SpaceManager
 701   static uint const _small_chunk_limit;
 702 
 703   // Sum of all space in allocated chunks
 704   size_t _allocated_blocks_words;
 705 
 706   // Sum of all allocated chunks
 707   size_t _allocated_chunks_words;
 708   size_t _allocated_chunks_count;
 709 
 710   // Free lists of blocks are per SpaceManager since they
 711   // are assumed to be in chunks in use by the SpaceManager
 712   // and all chunks in use by a SpaceManager are freed when
 713   // the class loader using the SpaceManager is collected.
 714   BlockFreelist* _block_freelists;
 715 
 716   // protects virtualspace and chunk expansions
 717   static const char*  _expand_lock_name;
 718   static const int    _expand_lock_rank;
 719   static Mutex* const _expand_lock;
 720 
 721  private:
 722   // Accessors
 723   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
 724   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
 725     _chunks_in_use[index] = v;
 726   }
 727 
 728   BlockFreelist* block_freelists() const { return _block_freelists; }
 729 
 730   Metaspace::MetadataType mdtype() { return _mdtype; }
 731 
 732   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
 733   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
 734 
 735   Metachunk* current_chunk() const { return _current_chunk; }
 736   void set_current_chunk(Metachunk* v) {
 737     _current_chunk = v;
 738   }
 739 
 740   Metachunk* find_current_chunk(size_t word_size);
 741 
 742   // Add chunk to the list of chunks in use
 743   void add_chunk(Metachunk* v, bool make_current);
 744   void retire_current_chunk();
 745 
 746   Mutex* lock() const { return _lock; }
 747 
 748  protected:
 749   void initialize();
 750 
 751  public:
 752   SpaceManager(Metaspace::MetadataType mdtype,
 753                Mutex* lock);
 754   ~SpaceManager();
 755 
 756   enum ChunkMultiples {
 757     MediumChunkMultiple = 4
 758   };
 759 
 760   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
 761   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
 762   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
 763 
 764   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
 765 
 766   // Accessors
 767   bool is_class() const { return _mdtype == Metaspace::ClassType; }
 768 
 769   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
 770   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
 771   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
 772 
 773   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
 774 
 775   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
 776 
 777   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
 778   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
 779   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
 780   size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
 781   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
 782 
 783   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
 784 
 785   static Mutex* expand_lock() { return _expand_lock; }
 786 
 787   // Increment the per Metaspace and global running sums for Metachunks
 788   // by the given size.  This is used when a Metachunk is added to
 789   // the in-use list.
 790   void inc_size_metrics(size_t words);
 791   // Increment the per Metaspace and global running sums for Metablocks by the
 792   // given size.  This is used when a Metablock is allocated.
 793   void inc_used_metrics(size_t words);
 794   // Delete the portion of the running sums for this SpaceManager. That is,
 795   // the global running sums for the Metachunks and Metablocks are
 796   // decremented for all the Metachunks in-use by this SpaceManager.
 797   void dec_total_from_size_metrics();
 798 
 799   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
 800   // or return the unadjusted size if the requested size is humongous.
 801   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
 802   size_t adjust_initial_chunk_size(size_t requested) const;
 803 
 804   // Get the initial chunks size for this metaspace type.
 805   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
 806 
 807   size_t sum_capacity_in_chunks_in_use() const;
 808   size_t sum_used_in_chunks_in_use() const;
 809   size_t sum_free_in_chunks_in_use() const;
 810   size_t sum_waste_in_chunks_in_use() const;
 811   size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
 812 
 813   size_t sum_count_in_chunks_in_use();
 814   size_t sum_count_in_chunks_in_use(ChunkIndex i);
 815 
 816   Metachunk* get_new_chunk(size_t chunk_word_size);
 817 
 818   // Block allocation and deallocation.
 819   // Allocates a block from the current chunk
 820   MetaWord* allocate(size_t word_size);
 821   // Allocates a block from a small chunk
 822   MetaWord* get_small_chunk_and_allocate(size_t word_size);
 823 
 824   // Helper for allocations
 825   MetaWord* allocate_work(size_t word_size);
 826 
 827   // Returns a block to the per manager freelist
 828   void deallocate(MetaWord* p, size_t word_size);
 829 
 830   // Based on the allocation size and a minimum chunk size,
 831   // returns the chunk size to use (for expanding space for chunk allocation).
 832   size_t calc_chunk_size(size_t allocation_word_size);
 833 
 834   // Called when an allocation from the current chunk fails.
 835   // Gets a new chunk (may require getting a new virtual space),
 836   // and allocates from that chunk.
 837   MetaWord* grow_and_allocate(size_t word_size);
 838 
 839   // Notify memory usage to MemoryService.
 840   void track_metaspace_memory_usage();
 841 
 842   // debugging support.
 843 
 844   void dump(outputStream* const out) const;
 845   void print_on(outputStream* st) const;
 846   void locked_print_chunks_in_use_on(outputStream* st) const;
 847 
 848   void verify();
 849   void verify_chunk_size(Metachunk* chunk);
 850 #ifdef ASSERT
 851   void verify_allocated_blocks_words();
 852 #endif
 853 
 854   // This adjusts the given size to be at least the minimum allocation size in
 855   // words for data in metaspace.  Essentially the minimum size is currently 3 words.
 856   size_t get_allocation_word_size(size_t word_size) {
 857     size_t byte_size = word_size * BytesPerWord;
 858 
 859     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
 860     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
 861 
 862     size_t raw_word_size = raw_bytes_size / BytesPerWord;
 863     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
 864 
 865     return raw_word_size;
 866   }
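  // Illustrative example (assuming a 64-bit VM where sizeof(Metablock) is
  // 24 bytes and Metachunk::object_alignment() is 8 bytes): a request of
  // word_size == 1 gives byte_size == 8, raw_bytes_size == 24 after the MAX2
  // and alignment steps, and a returned raw_word_size of 3 words, matching the
  // "3 words" minimum mentioned above.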
 867 };
 868 
 869 uint const SpaceManager::_small_chunk_limit = 4;
 870 
 871 const char* SpaceManager::_expand_lock_name =
 872   "SpaceManager chunk allocation lock";
 873 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
 874 Mutex* const SpaceManager::_expand_lock =
 875   new Mutex(SpaceManager::_expand_lock_rank,
 876             SpaceManager::_expand_lock_name,
 877             Mutex::_allow_vm_block_flag,
 878             Monitor::_safepoint_check_never);
 879 
 880 void VirtualSpaceNode::inc_container_count() {
 881   assert_lock_strong(SpaceManager::expand_lock());
 882   _container_count++;
 883 }
 884 
 885 void VirtualSpaceNode::dec_container_count() {
 886   assert_lock_strong(SpaceManager::expand_lock());
 887   _container_count--;
 888 }
 889 
 890 #ifdef ASSERT
 891 void VirtualSpaceNode::verify_container_count() {
 892   assert(_container_count == container_count_slow(),
 893          "Inconsistency in container_count _container_count " UINTX_FORMAT
 894          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
 895 }
 896 #endif
 897 
 898 // BlockFreelist methods
 899 
 900 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
 901 
 902 BlockFreelist::~BlockFreelist() {
 903   delete _dictionary;
 904   if (_small_blocks != NULL) {
 905     delete _small_blocks;
 906   }
 907 }
 908 
 909 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
 910   assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
 911 
 912   Metablock* free_chunk = ::new (p) Metablock(word_size);
 913   if (word_size < SmallBlocks::small_block_max_size()) {
 914     small_blocks()->return_block(free_chunk, word_size);
 915   } else {
 916     dictionary()->return_chunk(free_chunk);
 917   }
 918   log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
 919             SIZE_FORMAT, p2i(free_chunk), word_size);
 920 }
 921 
 922 MetaWord* BlockFreelist::get_block(size_t word_size) {
 923   assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
 924 
 925   // Try small_blocks first.
 926   if (word_size < SmallBlocks::small_block_max_size()) {
 927     // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
 928     // this space manager.
 929     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
 930     if (new_block != NULL) {
 931       log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
 932               p2i(new_block), word_size);
 933       return new_block;
 934     }
 935   }
 936 
 937   if (word_size < BlockFreelist::min_dictionary_size()) {
 938     // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
 939     return NULL;
 940   }
 941 
 942   Metablock* free_block =
 943     dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
 944   if (free_block == NULL) {
 945     return NULL;
 946   }
 947 
 948   const size_t block_size = free_block->size();
 949   if (block_size > WasteMultiplier * word_size) {
 950     return_block((MetaWord*)free_block, block_size);
 951     return NULL;
 952   }
 953 
 954   MetaWord* new_block = (MetaWord*)free_block;
 955   assert(block_size >= word_size, "Incorrect size of block from freelist");
 956   const size_t unused = block_size - word_size;
 957   if (unused >= SmallBlocks::small_block_min_size()) {
 958     return_block(new_block + word_size, unused);
 959   }
 960 
 961   log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
 962             p2i(new_block), word_size);
 963   return new_block;
 964 }
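// Note on the policy above: with WasteMultiplier == 4, a request of 100 words
// will not reuse a free block larger than 400 words; when a block is split,
// the unused tail is returned to the free list only if it is at least
// small_block_min_size() words, otherwise it remains as unusable "dark matter"
// inside the returned block.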
 965 
 966 void BlockFreelist::print_on(outputStream* st) const {
 967   dictionary()->print_free_lists(st);
 968   if (_small_blocks != NULL) {
 969     _small_blocks->print_on(st);
 970   }
 971 }
 972 
 973 // VirtualSpaceNode methods
 974 
 975 VirtualSpaceNode::~VirtualSpaceNode() {
 976   _rs.release();
 977 #ifdef ASSERT
 978   size_t word_size = sizeof(*this) / BytesPerWord;
 979   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
 980 #endif
 981 }
 982 
 983 size_t VirtualSpaceNode::used_words_in_vs() const {
 984   return pointer_delta(top(), bottom(), sizeof(MetaWord));
 985 }
 986 
 987 // Space committed in the VirtualSpace
 988 size_t VirtualSpaceNode::capacity_words_in_vs() const {
 989   return pointer_delta(end(), bottom(), sizeof(MetaWord));
 990 }
 991 
 992 size_t VirtualSpaceNode::free_words_in_vs() const {
 993   return pointer_delta(end(), top(), sizeof(MetaWord));
 994 }
 995 
 996 // Allocates the chunk from the virtual space only.
 997 // This interface is also used internally for debugging.  Not all
 998 // chunks removed here are necessarily used for allocation.
 999 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1000   // Bottom of the new chunk
1001   MetaWord* chunk_limit = top();
1002   assert(chunk_limit != NULL, "Not safe to call this method");
1003 
1004   // The virtual spaces are always expanded by the
1005   // commit granularity to enforce the following condition.
1006   // Without this the is_available check will not work correctly.
1007   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1008       "The committed memory doesn't match the expanded memory.");
1009 
1010   if (!is_available(chunk_word_size)) {
1011     Log(gc, metaspace, freelist) log;
1012     log.debug("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1013     // Dump some information about the virtual space that is nearly full
1014     ResourceMark rm;
1015     print_on(log.debug_stream());
1016     return NULL;
1017   }
1018 
1019   // Take the space  (bump top on the current virtual space).
1020   inc_top(chunk_word_size);
1021 
1022   // Initialize the chunk
1023   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
1024   return result;
1025 }
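// Note: take_from_committed() is a simple bump-pointer allocation within the
// committed part of the node; the new chunk is constructed at the old top()
// and top() is advanced by chunk_word_size.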
1026 
1027 
1028 // Expand the virtual space (commit more of the reserved space)
1029 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1030   size_t min_bytes = min_words * BytesPerWord;
1031   size_t preferred_bytes = preferred_words * BytesPerWord;
1032 
1033   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1034 
1035   if (uncommitted < min_bytes) {
1036     return false;
1037   }
1038 
1039   size_t commit = MIN2(preferred_bytes, uncommitted);
1040   bool result = virtual_space()->expand_by(commit, false);
1041 
1042   assert(result, "Failed to commit memory");
1043 
1044   return result;
1045 }
1046 
1047 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1048   assert_lock_strong(SpaceManager::expand_lock());
1049   Metachunk* result = take_from_committed(chunk_word_size);
1050   if (result != NULL) {
1051     inc_container_count();
1052   }
1053   return result;
1054 }
1055 
1056 bool VirtualSpaceNode::initialize() {
1057 
1058   if (!_rs.is_reserved()) {
1059     return false;
1060   }
1061 
1062   // These are necessary restrictions to make sure that the virtual space always
1063   // grows in steps of Metaspace::commit_alignment(). If both base and size are
1064   // aligned only the middle alignment of the VirtualSpace is used.
1065   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
1066   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
1067 
1068   // ReservedSpaces marked as special will have the entire memory
1069   // pre-committed. Setting a committed size will make sure that
1070   // committed_size and actual_committed_size agree.
1071   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1072 
1073   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1074                                             Metaspace::commit_alignment());
1075   if (result) {
1076     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1077         "Checking that the pre-committed memory was registered by the VirtualSpace");
1078 
1079     set_top((MetaWord*)virtual_space()->low());
1080     set_reserved(MemRegion((HeapWord*)_rs.base(),
1081                  (HeapWord*)(_rs.base() + _rs.size())));
1082 
1083     assert(reserved()->start() == (HeapWord*) _rs.base(),
1084            "Reserved start was not set properly " PTR_FORMAT
1085            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1086     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1087            "Reserved size was not set properly " SIZE_FORMAT
1088            " != " SIZE_FORMAT, reserved()->word_size(),
1089            _rs.size() / BytesPerWord);
1090   }
1091 
1092   return result;
1093 }
1094 
1095 void VirtualSpaceNode::print_on(outputStream* st) const {
1096   size_t used = used_words_in_vs();
1097   size_t capacity = capacity_words_in_vs();
1098   VirtualSpace* vs = virtual_space();
1099   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
1100            "[" PTR_FORMAT ", " PTR_FORMAT ", "
1101            PTR_FORMAT ", " PTR_FORMAT ")",
1102            p2i(vs), capacity / K,
1103            capacity == 0 ? 0 : used * 100 / capacity,
1104            p2i(bottom()), p2i(top()), p2i(end()),
1105            p2i(vs->high_boundary()));
1106 }
1107 
1108 #ifdef ASSERT
1109 void VirtualSpaceNode::mangle() {
1110   size_t word_size = capacity_words_in_vs();
1111   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1112 }
1113 #endif // ASSERT
1114 
1115 // VirtualSpaceList methods
1116 // Space allocated from the VirtualSpace
1117 
1118 VirtualSpaceList::~VirtualSpaceList() {
1119   VirtualSpaceListIterator iter(virtual_space_list());
1120   while (iter.repeat()) {
1121     VirtualSpaceNode* vsl = iter.get_next();
1122     delete vsl;
1123   }
1124 }
1125 
1126 void VirtualSpaceList::inc_reserved_words(size_t v) {
1127   assert_lock_strong(SpaceManager::expand_lock());
1128   _reserved_words = _reserved_words + v;
1129 }
1130 void VirtualSpaceList::dec_reserved_words(size_t v) {
1131   assert_lock_strong(SpaceManager::expand_lock());
1132   _reserved_words = _reserved_words - v;
1133 }
1134 
1135 #define assert_committed_below_limit()                        \
1136   assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
1137          "Too much committed memory. Committed: " SIZE_FORMAT \
1138          " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1139          MetaspaceAux::committed_bytes(), MaxMetaspaceSize);
1140 
1141 void VirtualSpaceList::inc_committed_words(size_t v) {
1142   assert_lock_strong(SpaceManager::expand_lock());
1143   _committed_words = _committed_words + v;
1144 
1145   assert_committed_below_limit();
1146 }
1147 void VirtualSpaceList::dec_committed_words(size_t v) {
1148   assert_lock_strong(SpaceManager::expand_lock());
1149   _committed_words = _committed_words - v;
1150 
1151   assert_committed_below_limit();
1152 }
1153 
1154 void VirtualSpaceList::inc_virtual_space_count() {
1155   assert_lock_strong(SpaceManager::expand_lock());
1156   _virtual_space_count++;
1157 }
1158 void VirtualSpaceList::dec_virtual_space_count() {
1159   assert_lock_strong(SpaceManager::expand_lock());
1160   _virtual_space_count--;
1161 }
1162 
1163 void ChunkManager::remove_chunk(Metachunk* chunk) {
1164   size_t word_size = chunk->word_size();
1165   ChunkIndex index = list_index(word_size);
1166   if (index != HumongousIndex) {
1167     free_chunks(index)->remove_chunk(chunk);
1168   } else {
1169     humongous_dictionary()->remove_chunk(chunk);
1170   }
1171 
1172   // Chunk has been removed from the chunks free list, update counters.
1173   account_for_removed_chunk(chunk);
1174 }
1175 
1176 // Walk the list of VirtualSpaceNodes and delete
1177 // nodes with a 0 container_count.  Remove Metachunks in
1178 // the node from their respective freelists.
1179 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1180   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
1181   assert_lock_strong(SpaceManager::expand_lock());
1182   // Don't use a VirtualSpaceListIterator because this
1183   // list is being changed and a straightforward use of an iterator is not safe.
1184   VirtualSpaceNode* purged_vsl = NULL;
1185   VirtualSpaceNode* prev_vsl = virtual_space_list();
1186   VirtualSpaceNode* next_vsl = prev_vsl;
1187   while (next_vsl != NULL) {
1188     VirtualSpaceNode* vsl = next_vsl;
1189     DEBUG_ONLY(vsl->verify_container_count();)
1190     next_vsl = vsl->next();
1191     // Don't free the current virtual space since it will likely
1192     // be needed soon.
1193     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1194       // Unlink it from the list
1195       if (prev_vsl == vsl) {
1196         // This is the case of the current node being the first node.
1197         assert(vsl == virtual_space_list(), "Expected to be the first node");
1198         set_virtual_space_list(vsl->next());
1199       } else {
1200         prev_vsl->set_next(vsl->next());
1201       }
1202 
1203       vsl->purge(chunk_manager);
1204       dec_reserved_words(vsl->reserved_words());
1205       dec_committed_words(vsl->committed_words());
1206       dec_virtual_space_count();
1207       purged_vsl = vsl;
1208       delete vsl;
1209     } else {
1210       prev_vsl = vsl;
1211     }
1212   }
1213 #ifdef ASSERT
1214   if (purged_vsl != NULL) {
1215     // List should be stable enough to use an iterator here.
1216     VirtualSpaceListIterator iter(virtual_space_list());
1217     while (iter.repeat()) {
1218       VirtualSpaceNode* vsl = iter.get_next();
1219       assert(vsl != purged_vsl, "Purge of vsl failed");
1220     }
1221   }
1222 #endif
1223 }
1224 
1225 
1226 // This function looks at the mmap regions in the metaspace without locking.
1227 // The chunks are added with store ordering and not deleted except at
1228 // unloading time during a safepoint.
1229 bool VirtualSpaceList::contains(const void* ptr) {
1230   // List should be stable enough to use an iterator here because removing virtual
1231   // space nodes is only allowed at a safepoint.
1232   VirtualSpaceListIterator iter(virtual_space_list());
1233   while (iter.repeat()) {
1234     VirtualSpaceNode* vsn = iter.get_next();
1235     if (vsn->contains(ptr)) {
1236       return true;
1237     }
1238   }
1239   return false;
1240 }
1241 
1242 void VirtualSpaceList::retire_current_virtual_space() {
1243   assert_lock_strong(SpaceManager::expand_lock());
1244 
1245   VirtualSpaceNode* vsn = current_virtual_space();
1246 
1247   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
1248                                   Metaspace::chunk_manager_metadata();
1249 
1250   vsn->retire(cm);
1251 }
1252 
1253 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
1254   DEBUG_ONLY(verify_container_count();)
1255   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
1256     ChunkIndex index = (ChunkIndex)i;
1257     size_t chunk_size = chunk_manager->size_by_index(index);
1258 
1259     while (free_words_in_vs() >= chunk_size) {
1260       Metachunk* chunk = get_chunk_vs(chunk_size);
1261       assert(chunk != NULL, "allocation should have been successful");
1262 
1263       chunk_manager->return_single_chunk(index, chunk);
1264     }
1265     DEBUG_ONLY(verify_container_count();)
1266   }
1267   assert(free_words_in_vs() == 0, "should be empty now");
1268 }
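// Illustrative example (using the non-class chunk sizes defined above: 8*K,
// 512 and 128 words): retiring a node with 10*K free words hands the chunk
// manager one medium chunk (8*K words) and four small chunks (4 * 512 words),
// leaving no free words behind; this always works out because every chunk size
// is a multiple of the smallest chunk size.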
1269 
1270 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
1271                                    _is_class(false),
1272                                    _virtual_space_list(NULL),
1273                                    _current_virtual_space(NULL),
1274                                    _reserved_words(0),
1275                                    _committed_words(0),
1276                                    _virtual_space_count(0) {
1277   MutexLockerEx cl(SpaceManager::expand_lock(),
1278                    Mutex::_no_safepoint_check_flag);
1279   create_new_virtual_space(word_size);
1280 }
1281 
1282 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
1283                                    _is_class(true),
1284                                    _virtual_space_list(NULL),
1285                                    _current_virtual_space(NULL),
1286                                    _reserved_words(0),
1287                                    _committed_words(0),
1288                                    _virtual_space_count(0) {
1289   MutexLockerEx cl(SpaceManager::expand_lock(),
1290                    Mutex::_no_safepoint_check_flag);
1291   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
1292   bool succeeded = class_entry->initialize();
1293   if (succeeded) {
1294     link_vs(class_entry);
1295   }
1296 }
1297 
1298 size_t VirtualSpaceList::free_bytes() {
1299   return virtual_space_list()->free_words_in_vs() * BytesPerWord;
1300 }
1301 
1302 // Allocate another meta virtual space and add it to the list.
1303 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
1304   assert_lock_strong(SpaceManager::expand_lock());
1305 
1306   if (is_class()) {
1307     assert(false, "We currently don't support more than one VirtualSpace for"
1308                   " the compressed class space. The initialization of the"
1309                   " CCS uses another code path and should not hit this path.");
1310     return false;
1311   }
1312 
1313   if (vs_word_size == 0) {
1314     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
1315     return false;
1316   }
1317 
1318   // Reserve the space
1319   size_t vs_byte_size = vs_word_size * BytesPerWord;
1320   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
1321 
1322   // Allocate the meta virtual space and initialize it.
1323   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
1324   if (!new_entry->initialize()) {
1325     delete new_entry;
1326     return false;
1327   } else {
1328     assert(new_entry->reserved_words() == vs_word_size,
1329         "Reserved memory size differs from requested memory size");
1330     // ensure lock-free iteration sees fully initialized node
1331     OrderAccess::storestore();
1332     link_vs(new_entry);
1333     return true;
1334   }
1335 }
1336 
1337 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1338   if (virtual_space_list() == NULL) {
1339       set_virtual_space_list(new_entry);
1340   } else {
1341     current_virtual_space()->set_next(new_entry);
1342   }
1343   set_current_virtual_space(new_entry);
1344   inc_reserved_words(new_entry->reserved_words());
1345   inc_committed_words(new_entry->committed_words());
1346   inc_virtual_space_count();
1347 #ifdef ASSERT
1348   new_entry->mangle();
1349 #endif
1350   if (log_is_enabled(Trace, gc, metaspace)) {
1351     Log(gc, metaspace) log;
1352     VirtualSpaceNode* vsl = current_virtual_space();
1353     ResourceMark rm;
1354     vsl->print_on(log.trace_stream());
1355   }
1356 }
1357 
1358 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1359                                       size_t min_words,
1360                                       size_t preferred_words) {
1361   size_t before = node->committed_words();
1362 
1363   bool result = node->expand_by(min_words, preferred_words);
1364 
1365   size_t after = node->committed_words();
1366 
1367   // after and before can be the same if the memory was pre-committed.
1368   assert(after >= before, "Inconsistency");
1369   inc_committed_words(after - before);
1370 
1371   return result;
1372 }
1373 
1374 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
1375   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
1376   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
1377   assert(min_words <= preferred_words, "Invalid arguments");
1378 
1379   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
1380     return  false;
1381   }
1382 
1383   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
1384   if (allowed_expansion_words < min_words) {
1385     return false;
1386   }
1387 
1388   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
1389 
1390   // Commit more memory from the current virtual space.
1391   bool vs_expanded = expand_node_by(current_virtual_space(),
1392                                     min_words,
1393                                     max_expansion_words);
1394   if (vs_expanded) {
1395     return true;
1396   }
1397   retire_current_virtual_space();
1398 
1399   // Get another virtual space.
1400   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
1401   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
1402 
1403   if (create_new_virtual_space(grow_vs_words)) {
1404     if (current_virtual_space()->is_pre_committed()) {
1405       // The memory was pre-committed, so we are done here.
1406       assert(min_words <= current_virtual_space()->committed_words(),
1407           "The new VirtualSpace was pre-committed, so it "
1408           "should be large enough to fit the alloc request.");
1409       return true;
1410     }
1411 
1412     return expand_node_by(current_virtual_space(),
1413                           min_words,
1414                           max_expansion_words);
1415   }
1416 
1417   return false;
1418 }
1419 
1420 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
1421 
1422   // Allocate a chunk out of the current virtual space.
1423   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
1424 
1425   if (next != NULL) {
1426     return next;
1427   }
1428 
1429   // The expand amount is currently only determined by the requested sizes
1430   // and not how much committed memory is left in the current virtual space.
1431 
1432   size_t min_word_size       = align_up(chunk_word_size,              Metaspace::commit_alignment_words());
1433   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
1434   if (min_word_size >= preferred_word_size) {
1435     // Can happen when humongous chunks are allocated.
1436     preferred_word_size = min_word_size;
1437   }
1438 
1439   bool expanded = expand_by(min_word_size, preferred_word_size);
1440   if (expanded) {
1441     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
1442     assert(next != NULL, "The allocation was expected to succeed after the expansion");
1443   }
1444 
1445    return next;
1446 }
1447 
1448 void VirtualSpaceList::print_on(outputStream* st) const {
1449   VirtualSpaceListIterator iter(virtual_space_list());
1450   while (iter.repeat()) {
1451     VirtualSpaceNode* node = iter.get_next();
1452     node->print_on(st);
1453   }
1454 }
1455 
1456 // MetaspaceGC methods
1457 
1458 // VM_CollectForMetadataAllocation is the vm operation used to GC.
1459 // Within the VM operation after the GC the attempt to allocate the metadata
1460 // should succeed.  If the GC did not free enough space for the metaspace
1461 // allocation, the HWM is increased so that another virtual space will be
1462 // allocated for the metadata.  With the perm gen, increases in the perm
1463 // gen size were bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
1464 // metaspace policy uses those as the small and large steps for the HWM.
1465 //
1466 // After the GC the compute_new_size() for MetaspaceGC is called to
1467 // resize the capacity of the metaspaces.  The current implementation
1468 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio, analogous
1469 // to the flags some GCs use to resize the Java heap.  New flags can be implemented
1470 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
1471 // free space is desirable in the metaspace capacity to decide how much
1472 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
1473 // free space is desirable in the metaspace capacity before decreasing
1474 // the HWM.
1475 
1476 // Calculate the amount to increase the high water mark (HWM).
1477 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1478 // another expansion is not requested too soon.  If that is not
1479 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
1480 // If that is still not enough, expand by the size of the allocation
1481 // plus some.
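     // A minimal worked example (the values below are purely illustrative
     // assumptions, e.g. MinMetaspaceExpansion = 256K, MaxMetaspaceExpansion = 4M
     // and a commit alignment of 64K; the real values are flag- and
     // platform-dependent):
     //   bytes = 100K -> aligned up to 128K, <= 256K, so delta = 256K
     //   bytes = 1M   -> aligned to 1M, between 256K and 4M, so delta = 4M
     //   bytes = 10M  -> aligned to 10M, larger than 4M, so delta = 10M + 256K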
1482 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
1483   size_t min_delta = MinMetaspaceExpansion;
1484   size_t max_delta = MaxMetaspaceExpansion;
1485   size_t delta = align_up(bytes, Metaspace::commit_alignment());
1486 
1487   if (delta <= min_delta) {
1488     delta = min_delta;
1489   } else if (delta <= max_delta) {
1490     // Don't want to hit the high water mark on the next
1491     // allocation so make the delta greater than just enough
1492     // for this allocation.
1493     delta = max_delta;
1494   } else {
1495     // This allocation is large but the next ones are probably not
1496     // so increase by the minimum.
1497     delta = delta + min_delta;
1498   }
1499 
1500   assert_is_aligned(delta, Metaspace::commit_alignment());
1501 
1502   return delta;
1503 }
1504 
1505 size_t MetaspaceGC::capacity_until_GC() {
1506   size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
1507   assert(value >= MetaspaceSize, "Not initialized properly?");
1508   return value;
1509 }
1510 
1511 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
1512   assert_is_aligned(v, Metaspace::commit_alignment());
1513 
1514   size_t capacity_until_GC = (size_t) _capacity_until_GC;
1515   size_t new_value = capacity_until_GC + v;
1516 
1517   if (new_value < capacity_until_GC) {
1518     // The addition wrapped around, set new_value to aligned max value.
1519     new_value = align_down(max_uintx, Metaspace::commit_alignment());
1520   }
1521 
1522   intptr_t expected = (intptr_t) capacity_until_GC;
1523   intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);
1524 
1525   if (expected != actual) {
1526     return false;
1527   }
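       // Returning false above means another thread won the race and has already
       // published a new _capacity_until_GC.  A hypothetical caller (sketch only,
       // names illustrative, not copied from an actual call site) would typically
       // re-read capacity_until_GC() and decide whether its request is now covered
       // before retrying:
       //
       //   while (!MetaspaceGC::inc_capacity_until_GC(delta, &after, &before)) {
       //     // lost the race; recheck capacity_until_GC() and possibly retry
       //   }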
1528 
1529   if (new_cap_until_GC != NULL) {
1530     *new_cap_until_GC = new_value;
1531   }
1532   if (old_cap_until_GC != NULL) {
1533     *old_cap_until_GC = capacity_until_GC;
1534   }
1535   return true;
1536 }
1537 
1538 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
1539   assert_is_aligned(v, Metaspace::commit_alignment());
1540 
1541   return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
1542 }
1543 
1544 void MetaspaceGC::initialize() {
1545   // Set the high-water mark to MaxMetaspaceSize during VM initialization since
1546   // we can't do a GC during initialization.
1547   _capacity_until_GC = MaxMetaspaceSize;
1548 }
1549 
1550 void MetaspaceGC::post_initialize() {
1551   // Reset the high-water mark once the VM initialization is done.
1552   _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
1553 }
1554 
1555 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
1556   // Check if the compressed class space is full.
1557   if (is_class && Metaspace::using_class_space()) {
1558     size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
1559     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
1560       return false;
1561     }
1562   }
1563 
1564   // Check if the user has imposed a limit on the metaspace memory.
1565   size_t committed_bytes = MetaspaceAux::committed_bytes();
1566   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
1567     return false;
1568   }
1569 
1570   return true;
1571 }
1572 
1573 size_t MetaspaceGC::allowed_expansion() {
1574   size_t committed_bytes = MetaspaceAux::committed_bytes();
1575   size_t capacity_until_gc = capacity_until_GC();
1576 
1577   assert(capacity_until_gc >= committed_bytes,
1578          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
1579          capacity_until_gc, committed_bytes);
1580 
1581   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
1582   size_t left_until_GC = capacity_until_gc - committed_bytes;
1583   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
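       // For illustration with made-up numbers: committed_bytes = 60M,
       // capacity_until_gc = 80M and MaxMetaspaceSize = 100M give left_until_GC = 20M
       // and left_until_max = 40M, so at most 20M (returned in words) can still be
       // committed before a GC is needed.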
1584 
1585   return left_to_commit / BytesPerWord;
1586 }
1587 
1588 void MetaspaceGC::compute_new_size() {
1589   assert(_shrink_factor <= 100, "invalid shrink factor");
1590   uint current_shrink_factor = _shrink_factor;
1591   _shrink_factor = 0;
1592 
1593   // Using committed_bytes() for used_after_gc is an overestimation, since the
1594   // chunk free lists are included in committed_bytes() and the memory in an
1595   // un-fragmented chunk free list is available for future allocations.
1596   // However, if the chunk free lists become fragmented, then the memory may
1597   // not be available for future allocations and the memory is therefore "in use".
1598   // Including the chunk free lists in the definition of "in use" is therefore
1599   // necessary. Not including the chunk free lists can cause capacity_until_GC to
1600   // shrink below committed_bytes() and this has caused serious bugs in the past.
1601   const size_t used_after_gc = MetaspaceAux::committed_bytes();
1602   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1603 
1604   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1605   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1606 
1607   const double min_tmp = used_after_gc / maximum_used_percentage;
1608   size_t minimum_desired_capacity =
1609     (size_t)MIN2(min_tmp, double(max_uintx));
1610   // Don't shrink less than the initial generation size
1611   minimum_desired_capacity = MAX2(minimum_desired_capacity,
1612                                   MetaspaceSize);
1613 
1614   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
1615   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
1616                            minimum_free_percentage, maximum_used_percentage);
1617   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
1618 
1619 
1620   size_t shrink_bytes = 0;
1621   if (capacity_until_GC < minimum_desired_capacity) {
1622     // If we have less capacity below the metaspace HWM, then
1623     // increment the HWM.
1624     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1625     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
1626     // Don't expand unless it's significant
1627     if (expand_bytes >= MinMetaspaceExpansion) {
1628       size_t new_capacity_until_GC = 0;
1629       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1630       assert(succeeded, "Should always successfully increment HWM when at safepoint");
1631 
1632       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1633                                                new_capacity_until_GC,
1634                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
1635       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
1636                                minimum_desired_capacity / (double) K,
1637                                expand_bytes / (double) K,
1638                                MinMetaspaceExpansion / (double) K,
1639                                new_capacity_until_GC / (double) K);
1640     }
1641     return;
1642   }
1643 
1644   // No expansion, now see if we want to shrink
1645   // We would never want to shrink more than this
1646   assert(capacity_until_GC >= minimum_desired_capacity,
1647          SIZE_FORMAT " >= " SIZE_FORMAT,
1648          capacity_until_GC, minimum_desired_capacity);
1649   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1650 
1651   // Should shrinking be considered?
1652   if (MaxMetaspaceFreeRatio < 100) {
1653     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1654     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1655     const double max_tmp = used_after_gc / minimum_used_percentage;
1656     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1657     maximum_desired_capacity = MAX2(maximum_desired_capacity,
1658                                     MetaspaceSize);
1659     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
1660                              maximum_free_percentage, minimum_used_percentage);
1661     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
1662                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
1663 
1664     assert(minimum_desired_capacity <= maximum_desired_capacity,
1665            "sanity check");
1666 
1667     if (capacity_until_GC > maximum_desired_capacity) {
1668       // Capacity too large, compute shrinking size
1669       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1670       // We don't want to shrink all the way back to initSize if people call
1671       // System.gc(), because some programs do that between "phases" and then
1672       // we'd just have to grow the metaspace again for the next phase.  So we
1673       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1674       // on the third call, and 100% by the fourth call.  But if we recompute
1675       // size without shrinking, it goes back to 0%.
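           // Worked example with purely illustrative numbers: if the excess over
           // maximum_desired_capacity stays at 100M across consecutive shrinking
           // recomputations, the first releases nothing (factor 0), the second 10M
           // (factor 10), the third 40M (factor 40) and the fourth the full 100M
           // (factor 100).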
1676       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1677 
1678       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
1679 
1680       assert(shrink_bytes <= max_shrink_bytes,
1681              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1682              shrink_bytes, max_shrink_bytes);
1683       if (current_shrink_factor == 0) {
1684         _shrink_factor = 10;
1685       } else {
1686         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1687       }
1688       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
1689                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
1690       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
1691                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
1692     }
1693   }
1694 
1695   // Don't shrink unless it's significant
1696   if (shrink_bytes >= MinMetaspaceExpansion &&
1697       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1698     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1699     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1700                                              new_capacity_until_GC,
1701                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
1702   }
1703 }
1704 
1705 // Metadebug methods
1706 
1707 void Metadebug::init_allocation_fail_alot_count() {
1708   if (MetadataAllocationFailALot) {
1709     _allocation_fail_alot_count =
1710       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1711   }
1712 }
1713 
1714 #ifdef ASSERT
1715 bool Metadebug::test_metadata_failure() {
1716   if (MetadataAllocationFailALot &&
1717       Threads::is_vm_complete()) {
1718     if (_allocation_fail_alot_count > 0) {
1719       _allocation_fail_alot_count--;
1720     } else {
1721       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
1722       init_allocation_fail_alot_count();
1723       return true;
1724     }
1725   }
1726   return false;
1727 }
1728 #endif
1729 
1730 // ChunkManager methods
1731 
1732 size_t ChunkManager::free_chunks_total_words() {
1733   return _free_chunks_total;
1734 }
1735 
1736 size_t ChunkManager::free_chunks_total_bytes() {
1737   return free_chunks_total_words() * BytesPerWord;
1738 }
1739 
1740 // Update internal accounting after a chunk was added
1741 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
1742   assert_lock_strong(SpaceManager::expand_lock());
1743   _free_chunks_count ++;
1744   _free_chunks_total += c->word_size();
1745 }
1746 
1747 // Update internal accounting after a chunk was removed
1748 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
1749   assert_lock_strong(SpaceManager::expand_lock());
1750   assert(_free_chunks_count >= 1,
1751     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
1752   assert(_free_chunks_total >= c->word_size(),
1753     "ChunkManager::_free_chunks_total: about to go negative "
1754      "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
1755   _free_chunks_count --;
1756   _free_chunks_total -= c->word_size();
1757 }
1758 
1759 size_t ChunkManager::free_chunks_count() {
1760 #ifdef ASSERT
1761   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1762     MutexLockerEx cl(SpaceManager::expand_lock(),
1763                      Mutex::_no_safepoint_check_flag);
1764     // This lock is only needed in debug because the verification
1765     // of the _free_chunks_totals walks the list of free chunks
1766     slow_locked_verify_free_chunks_count();
1767   }
1768 #endif
1769   return _free_chunks_count;
1770 }
1771 
1772 ChunkIndex ChunkManager::list_index(size_t size) {
1773   if (size_by_index(SpecializedIndex) == size) {
1774     return SpecializedIndex;
1775   }
1776   if (size_by_index(SmallIndex) == size) {
1777     return SmallIndex;
1778   }
1779   const size_t med_size = size_by_index(MediumIndex);
1780   if (med_size == size) {
1781     return MediumIndex;
1782   }
1783 
1784   assert(size > med_size, "Not a humongous chunk");
1785   return HumongousIndex;
1786 }
1787 
1788 size_t ChunkManager::size_by_index(ChunkIndex index) {
1789   index_bounds_check(index);
1790   assert(index != HumongousIndex, "Do not call for humongous chunks.");
1791   return free_chunks(index)->size();
1792 }
1793 
1794 void ChunkManager::locked_verify_free_chunks_total() {
1795   assert_lock_strong(SpaceManager::expand_lock());
1796   assert(sum_free_chunks() == _free_chunks_total,
1797          "_free_chunks_total " SIZE_FORMAT " is not the"
1798          " same as sum " SIZE_FORMAT, _free_chunks_total,
1799          sum_free_chunks());
1800 }
1801 
1802 void ChunkManager::verify_free_chunks_total() {
1803   MutexLockerEx cl(SpaceManager::expand_lock(),
1804                      Mutex::_no_safepoint_check_flag);
1805   locked_verify_free_chunks_total();
1806 }
1807 
1808 void ChunkManager::locked_verify_free_chunks_count() {
1809   assert_lock_strong(SpaceManager::expand_lock());
1810   assert(sum_free_chunks_count() == _free_chunks_count,
1811          "_free_chunks_count " SIZE_FORMAT " is not the"
1812          " same as sum " SIZE_FORMAT, _free_chunks_count,
1813          sum_free_chunks_count());
1814 }
1815 
1816 void ChunkManager::verify_free_chunks_count() {
1817 #ifdef ASSERT
1818   MutexLockerEx cl(SpaceManager::expand_lock(),
1819                      Mutex::_no_safepoint_check_flag);
1820   locked_verify_free_chunks_count();
1821 #endif
1822 }
1823 
1824 void ChunkManager::verify() {
1825   MutexLockerEx cl(SpaceManager::expand_lock(),
1826                      Mutex::_no_safepoint_check_flag);
1827   locked_verify();
1828 }
1829 
1830 void ChunkManager::locked_verify() {
1831   locked_verify_free_chunks_count();
1832   locked_verify_free_chunks_total();
1833 }
1834 
1835 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1836   assert_lock_strong(SpaceManager::expand_lock());
1837   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1838                 _free_chunks_total, _free_chunks_count);
1839 }
1840 
1841 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1842   assert_lock_strong(SpaceManager::expand_lock());
1843   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1844                 sum_free_chunks(), sum_free_chunks_count());
1845 }
1846 
1847 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1848   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
1849          "Bad index: %d", (int)index);
1850 
1851   return &_free_chunks[index];
1852 }
1853 
1854 // These methods, which sum over the free chunk lists, are used by printing
1855 // code that runs in product builds.
1856 size_t ChunkManager::sum_free_chunks() {
1857   assert_lock_strong(SpaceManager::expand_lock());
1858   size_t result = 0;
1859   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1860     ChunkList* list = free_chunks(i);
1861 
1862     if (list == NULL) {
1863       continue;
1864     }
1865 
1866     result = result + list->count() * list->size();
1867   }
1868   result = result + humongous_dictionary()->total_size();
1869   return result;
1870 }
1871 
1872 size_t ChunkManager::sum_free_chunks_count() {
1873   assert_lock_strong(SpaceManager::expand_lock());
1874   size_t count = 0;
1875   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1876     ChunkList* list = free_chunks(i);
1877     if (list == NULL) {
1878       continue;
1879     }
1880     count = count + list->count();
1881   }
1882   count = count + humongous_dictionary()->total_free_blocks();
1883   return count;
1884 }
1885 
1886 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1887   ChunkIndex index = list_index(word_size);
1888   assert(index < HumongousIndex, "No humongous list");
1889   return free_chunks(index);
1890 }
1891 
1892 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1893   assert_lock_strong(SpaceManager::expand_lock());
1894 
1895   slow_locked_verify();
1896 
1897   Metachunk* chunk = NULL;
1898   if (list_index(word_size) != HumongousIndex) {
1899     ChunkList* free_list = find_free_chunks_list(word_size);
1900     assert(free_list != NULL, "Sanity check");
1901 
1902     chunk = free_list->head();
1903 
1904     if (chunk == NULL) {
1905       return NULL;
1906     }
1907 
1908     // Remove the chunk as the head of the list.
1909     free_list->remove_chunk(chunk);
1910 
1911     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1912                                        p2i(free_list), p2i(chunk), chunk->word_size());
1913   } else {
1914     chunk = humongous_dictionary()->get_chunk(
1915       word_size,
1916       FreeBlockDictionary<Metachunk>::atLeast);
1917 
1918     if (chunk == NULL) {
1919       return NULL;
1920     }
1921 
1922     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
1923                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
1924   }
1925 
1926   // Chunk has been removed from the chunk manager; update counters.
1927   account_for_removed_chunk(chunk);
1928 
1929   // Remove it from the links to this freelist
1930   chunk->set_next(NULL);
1931   chunk->set_prev(NULL);
1932 #ifdef ASSERT
1933   // Chunk is no longer on any freelist. Setting the tag to false makes
1934   // container_count_slow() work.
1935   chunk->set_is_tagged_free(false);
1936 #endif
1937   chunk->container()->inc_container_count();
1938 
1939   slow_locked_verify();
1940   return chunk;
1941 }
1942 
1943 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1944   assert_lock_strong(SpaceManager::expand_lock());
1945   slow_locked_verify();
1946 
1947   // Take from the beginning of the list
1948   Metachunk* chunk = free_chunks_get(word_size);
1949   if (chunk == NULL) {
1950     return NULL;
1951   }
1952 
1953   assert((word_size <= chunk->word_size()) ||
1954          (list_index(chunk->word_size()) == HumongousIndex),
1955          "Non-humongous variable sized chunk");
1956   Log(gc, metaspace, freelist) log;
1957   if (log.is_debug()) {
1958     size_t list_count;
1959     if (list_index(word_size) < HumongousIndex) {
1960       ChunkList* list = find_free_chunks_list(word_size);
1961       list_count = list->count();
1962     } else {
1963       list_count = humongous_dictionary()->total_count();
1964     }
1965     log.debug("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1966                p2i(this), p2i(chunk), chunk->word_size(), list_count);
1967     ResourceMark rm;
1968     locked_print_free_chunks(log.debug_stream());
1969   }
1970 
1971   return chunk;
1972 }
1973 
1974 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
1975   assert_lock_strong(SpaceManager::expand_lock());
1976   assert(chunk != NULL, "Expected chunk.");
1977   assert(chunk->container() != NULL, "Container should have been set.");
1978   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
1979   index_bounds_check(index);
1980 
1981   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
1982   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
1983   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
1984   NOT_PRODUCT(chunk->mangle(badMetaWordVal);)
1985 
1986   if (index != HumongousIndex) {
1987     // Return non-humongous chunk to freelist.
1988     ChunkList* list = free_chunks(index);
1989     assert(list->size() == chunk->word_size(), "Wrong chunk type.");
1990     list->return_chunk_at_head(chunk);
1991     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
1992         chunk_size_name(index), p2i(chunk));
1993   } else {
1994     // Return humongous chunk to dictionary.
1995     assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
1996     assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
1997            "Humongous chunk has wrong alignment.");
1998     _humongous_dictionary.return_chunk(chunk);
1999     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
2000         chunk_size_name(index), p2i(chunk), chunk->word_size());
2001   }
2002   chunk->container()->dec_container_count();
2003   DEBUG_ONLY(chunk->set_is_tagged_free(true);)
2004 
2005   // Chunk has been added; update counters.
2006   account_for_added_chunk(chunk);
2007 
2008 }
2009 
2010 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
2011   index_bounds_check(index);
2012   if (chunks == NULL) {
2013     return;
2014   }
2015   LogTarget(Trace, gc, metaspace, freelist) log;
2016   if (log.is_enabled()) { // tracing
2017     log.print("returning list of %s chunks...", chunk_size_name(index));
2018   }
2019   unsigned num_chunks_returned = 0;
2020   size_t size_chunks_returned = 0;
2021   Metachunk* cur = chunks;
2022   while (cur != NULL) {
2023     // Capture the next link before it is changed
2024     // by the call to return_chunk_at_head();
2025     Metachunk* next = cur->next();
2026     if (log.is_enabled()) { // tracing
2027       num_chunks_returned ++;
2028       size_chunks_returned += cur->word_size();
2029     }
2030     return_single_chunk(index, cur);
2031     cur = next;
2032   }
2033   if (log.is_enabled()) { // tracing
2034     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
2035         num_chunks_returned, chunk_size_name(index), size_chunks_returned);
2036     if (index != HumongousIndex) {
2037       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
2038     } else {
2039       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
2040     }
2041   }
2042 }
2043 
2044 void ChunkManager::print_on(outputStream* out) const {
2045   const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics(out);
2046 }
2047 
2048 // SpaceManager methods
2049 
2050 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
2051   size_t chunk_sizes[] = {
2052       specialized_chunk_size(is_class_space),
2053       small_chunk_size(is_class_space),
2054       medium_chunk_size(is_class_space)
2055   };
2056 
2057   // Adjust up to one of the fixed chunk sizes ...
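       // For illustration (assuming, purely as an example, non-class chunk sizes of
       // 128, 512 and 8192 words): a request of 300 words is adjusted up to 512,
       // while a request of 9000 words falls through the loop and stays humongous.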
2058   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
2059     if (requested <= chunk_sizes[i]) {
2060       return chunk_sizes[i];
2061     }
2062   }
2063 
2064   // ... or return the size as a humongous chunk.
2065   return requested;
2066 }
2067 
2068 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
2069   return adjust_initial_chunk_size(requested, is_class());
2070 }
2071 
2072 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
2073   size_t requested;
2074 
2075   if (is_class()) {
2076     switch (type) {
2077     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
2078     case Metaspace::ROMetaspaceType:         requested = ClassSpecializedChunk; break;
2079     case Metaspace::ReadWriteMetaspaceType:  requested = ClassSpecializedChunk; break;
2080     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
2081     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
2082     default:                                 requested = ClassSmallChunk; break;
2083     }
2084   } else {
2085     switch (type) {
2086     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
2087     case Metaspace::ROMetaspaceType:         requested = SharedReadOnlySize / wordSize; break;
2088     case Metaspace::ReadWriteMetaspaceType:  requested = SharedReadWriteSize / wordSize; break;
2089     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
2090     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
2091     default:                                 requested = SmallChunk; break;
2092     }
2093   }
2094 
2095   // Adjust to one of the fixed chunk sizes (unless humongous)
2096   const size_t adjusted = adjust_initial_chunk_size(requested);
2097 
2098   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
2099          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
2100 
2101   return adjusted;
2102 }
2103 
2104 size_t SpaceManager::sum_free_in_chunks_in_use() const {
2105   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2106   size_t free = 0;
2107   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2108     Metachunk* chunk = chunks_in_use(i);
2109     while (chunk != NULL) {
2110       free += chunk->free_word_size();
2111       chunk = chunk->next();
2112     }
2113   }
2114   return free;
2115 }
2116 
2117 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
2118   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2119   size_t result = 0;
2120   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2121     result += sum_waste_in_chunks_in_use(i);
2122   }
2123 
2124   return result;
2125 }
2126 
2127 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
2128   size_t result = 0;
2129   Metachunk* chunk = chunks_in_use(index);
2130   // Count the free space in all the chunks except the
2131   // current chunk, from which allocations are still being done.
2132   while (chunk != NULL) {
2133     if (chunk != current_chunk()) {
2134       result += chunk->free_word_size();
2135     }
2136     chunk = chunk->next();
2137   }
2138   return result;
2139 }
2140 
2141 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
2142   // For CMS use "allocated_chunks_words()", which does not need the
2143   // Metaspace lock.  For the other collectors sum over the chunk
2144   // lists.  Using both methods allows "allocated_chunks_words()" to be
2145   // cross-checked: sum_capacity_in_chunks_in_use() is too expensive
2146   // to use in the product, so allocated_chunks_words() should be used
2147   // instead, but we still want to verify that allocated_chunks_words()
2148   // returns the same value as sum_capacity_in_chunks_in_use(), which is
2149   // the definitive answer.
2150   if (UseConcMarkSweepGC) {
2151     return allocated_chunks_words();
2152   } else {
2153     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2154     size_t sum = 0;
2155     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2156       Metachunk* chunk = chunks_in_use(i);
2157       while (chunk != NULL) {
2158         sum += chunk->word_size();
2159         chunk = chunk->next();
2160       }
2161     }
2162     return sum;
2163   }
2164 }
2165 
2166 size_t SpaceManager::sum_count_in_chunks_in_use() {
2167   size_t count = 0;
2168   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2169     count = count + sum_count_in_chunks_in_use(i);
2170   }
2171 
2172   return count;
2173 }
2174 
2175 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
2176   size_t count = 0;
2177   Metachunk* chunk = chunks_in_use(i);
2178   while (chunk != NULL) {
2179     count++;
2180     chunk = chunk->next();
2181   }
2182   return count;
2183 }
2184 
2185 
2186 size_t SpaceManager::sum_used_in_chunks_in_use() const {
2187   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2188   size_t used = 0;
2189   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2190     Metachunk* chunk = chunks_in_use(i);
2191     while (chunk != NULL) {
2192       used += chunk->used_word_size();
2193       chunk = chunk->next();
2194     }
2195   }
2196   return used;
2197 }
2198 
2199 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
2200 
2201   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2202     Metachunk* chunk = chunks_in_use(i);
2203     st->print("SpaceManager: %s " PTR_FORMAT,
2204                  chunk_size_name(i), p2i(chunk));
2205     if (chunk != NULL) {
2206       st->print_cr(" free " SIZE_FORMAT,
2207                    chunk->free_word_size());
2208     } else {
2209       st->cr();
2210     }
2211   }
2212 
2213   chunk_manager()->locked_print_free_chunks(st);
2214   chunk_manager()->locked_print_sum_free_chunks(st);
2215 }
2216 
2217 size_t SpaceManager::calc_chunk_size(size_t word_size) {
2218 
2219   // Decide between a small chunk and a medium chunk.  Up to
2220   // _small_chunk_limit small chunks can be allocated.
2221   // After that a medium chunk is preferred.
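       // For illustration (assuming, as an example, a 512-word small chunk, an
       // 8192-word medium chunk and a small chunk limit of 4): small requests are
       // given small chunks until four small chunks are in use or a medium chunk
       // already exists, or until word_size plus the Metachunk overhead no longer
       // fits in a small chunk; after that a medium chunk is handed out instead.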
2222   size_t chunk_word_size;
2223   if (chunks_in_use(MediumIndex) == NULL &&
2224       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2225     chunk_word_size = (size_t) small_chunk_size();
2226     if (word_size + Metachunk::overhead() > small_chunk_size()) {
2227       chunk_word_size = medium_chunk_size();
2228     }
2229   } else {
2230     chunk_word_size = medium_chunk_size();
2231   }
2232 
2233   // Might still need a humongous chunk.  Enforce
2234   // humongous allocation sizes to be aligned up to
2235   // the smallest chunk size.
2236   size_t if_humongous_sized_chunk =
2237     align_up(word_size + Metachunk::overhead(),
2238                   smallest_chunk_size());
2239   chunk_word_size =
2240     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2241 
2242   assert(!SpaceManager::is_humongous(word_size) ||
2243          chunk_word_size == if_humongous_sized_chunk,
2244          "Size calculation is wrong, word_size " SIZE_FORMAT
2245          " chunk_word_size " SIZE_FORMAT,
2246          word_size, chunk_word_size);
2247   Log(gc, metaspace, alloc) log;
2248   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
2249     log.debug("Metadata humongous allocation:");
2250     log.debug("  word_size " SIZE_FORMAT, word_size);
2251     log.debug("  chunk_word_size " SIZE_FORMAT, chunk_word_size);
2252     log.debug("    chunk overhead " SIZE_FORMAT, Metachunk::overhead());
2253   }
2254   return chunk_word_size;
2255 }
2256 
2257 void SpaceManager::track_metaspace_memory_usage() {
2258   if (is_init_completed()) {
2259     if (is_class()) {
2260       MemoryService::track_compressed_class_memory_usage();
2261     }
2262     MemoryService::track_metaspace_memory_usage();
2263   }
2264 }
2265 
2266 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2267   assert(vs_list()->current_virtual_space() != NULL,
2268          "Should have been set");
2269   assert(current_chunk() == NULL ||
2270          current_chunk()->allocate(word_size) == NULL,
2271          "Don't need to expand");
2272   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2273 
2274   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2275     size_t words_left = 0;
2276     size_t words_used = 0;
2277     if (current_chunk() != NULL) {
2278       words_left = current_chunk()->free_word_size();
2279       words_used = current_chunk()->used_word_size();
2280     }
2281     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
2282                                        word_size, words_used, words_left);
2283   }
2284 
2285   // Get another chunk
2286   size_t chunk_word_size = calc_chunk_size(word_size);
2287   Metachunk* next = get_new_chunk(chunk_word_size);
2288 
2289   MetaWord* mem = NULL;
2290 
2291   // If a chunk was available, add it to the in-use chunk list
2292   // and do an allocation from it.
2293   if (next != NULL) {
2294     // Add to this manager's list of chunks in use.
2295     add_chunk(next, false);
2296     mem = next->allocate(word_size);
2297   }
2298 
2299   // Track metaspace memory usage statistic.
2300   track_metaspace_memory_usage();
2301 
2302   return mem;
2303 }
2304 
2305 void SpaceManager::print_on(outputStream* st) const {
2306 
2307   for (ChunkIndex i = ZeroIndex;
2308        i < NumberOfInUseLists;
2309        i = next_chunk_index(i)) {
2310     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
2311                  p2i(chunks_in_use(i)),
2312                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2313   }
2314   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2315                " Humongous " SIZE_FORMAT,
2316                sum_waste_in_chunks_in_use(SmallIndex),
2317                sum_waste_in_chunks_in_use(MediumIndex),
2318                sum_waste_in_chunks_in_use(HumongousIndex));
2319   // block free lists
2320   if (block_freelists() != NULL) {
2321     st->print_cr("total in block free lists " SIZE_FORMAT,
2322       block_freelists()->total_size());
2323   }
2324 }
2325 
2326 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2327                            Mutex* lock) :
2328   _mdtype(mdtype),
2329   _allocated_blocks_words(0),
2330   _allocated_chunks_words(0),
2331   _allocated_chunks_count(0),
2332   _block_freelists(NULL),
2333   _lock(lock)
2334 {
2335   initialize();
2336 }
2337 
2338 void SpaceManager::inc_size_metrics(size_t words) {
2339   assert_lock_strong(SpaceManager::expand_lock());
2340   // Total of allocated Metachunks and allocated Metachunks count
2341   // for each SpaceManager
2342   _allocated_chunks_words = _allocated_chunks_words + words;
2343   _allocated_chunks_count++;
2344   // Global total of capacity in allocated Metachunks
2345   MetaspaceAux::inc_capacity(mdtype(), words);
2346   // Global total of allocated Metablocks.
2347   // used_words_slow() includes the overhead in each
2348   // Metachunk so include it in the used when the
2349   // Metachunk is first added (so only added once per
2350   // Metachunk).
2351   MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2352 }
2353 
2354 void SpaceManager::inc_used_metrics(size_t words) {
2355   // Add to the per SpaceManager total
2356   Atomic::add_ptr(words, &_allocated_blocks_words);
2357   // Add to the global total
2358   MetaspaceAux::inc_used(mdtype(), words);
2359 }
2360 
2361 void SpaceManager::dec_total_from_size_metrics() {
2362   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2363   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2364   // Also deduct the overhead per Metachunk
2365   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2366 }
2367 
2368 void SpaceManager::initialize() {
2369   Metadebug::init_allocation_fail_alot_count();
2370   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2371     _chunks_in_use[i] = NULL;
2372   }
2373   _current_chunk = NULL;
2374   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
2375 }
2376 
2377 SpaceManager::~SpaceManager() {
2378   // This call takes this->_lock, which can't be done while holding the expand_lock()
2379   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2380          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2381          " allocated_chunks_words() " SIZE_FORMAT,
2382          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2383 
2384   MutexLockerEx fcl(SpaceManager::expand_lock(),
2385                     Mutex::_no_safepoint_check_flag);
2386 
2387   chunk_manager()->slow_locked_verify();
2388 
2389   dec_total_from_size_metrics();
2390 
2391   Log(gc, metaspace, freelist) log;
2392   if (log.is_trace()) {
2393     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
2394     ResourceMark rm;
2395     locked_print_chunks_in_use_on(log.trace_stream());
2396     if (block_freelists() != NULL) {
2397       block_freelists()->print_on(log.trace_stream());
2398     }
2399   }
2400 
2401   // Add all the chunks in use by this space manager
2402   // to the global list of free chunks.
2403 
2404   // Follow each list of chunks-in-use and add them to the
2405   // free lists.  Each list is NULL terminated.
2406 
2407   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
2408     Metachunk* chunks = chunks_in_use(i);
2409     chunk_manager()->return_chunk_list(i, chunks);
2410     set_chunks_in_use(i, NULL);
2411   }
2412 
2413   chunk_manager()->slow_locked_verify();
2414 
2415   if (_block_freelists != NULL) {
2416     delete _block_freelists;
2417   }
2418 }
2419 
2420 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2421   assert_lock_strong(_lock);
2422   // Allocations and deallocations are in raw_word_size
2423   size_t raw_word_size = get_allocation_word_size(word_size);
2424   // Lazily create a block_freelist
2425   if (block_freelists() == NULL) {
2426     _block_freelists = new BlockFreelist();
2427   }
2428   block_freelists()->return_block(p, raw_word_size);
2429 }
2430 
2431 // Adds a chunk to the list of chunks in use.
2432 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2433 
2434   assert(new_chunk != NULL, "Should not be NULL");
2435   assert(new_chunk->next() == NULL, "Should not be on a list");
2436 
2437   new_chunk->reset_empty();
2438 
2439   // Find the correct list and set the current
2440   // chunk for that list.
2441   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
2442 
2443   if (index != HumongousIndex) {
2444     retire_current_chunk();
2445     set_current_chunk(new_chunk);
2446     new_chunk->set_next(chunks_in_use(index));
2447     set_chunks_in_use(index, new_chunk);
2448   } else {
2449     // For null class loader data and DumpSharedSpaces, the first chunk isn't
2450     // small, so the small chunk list will be null.  Link this first chunk as
2451     // the current chunk.
2452     if (make_current) {
2453       // Set as the current chunk but otherwise treat as a humongous chunk.
2454       set_current_chunk(new_chunk);
2455     }
2456     // Link at head.  The _current_chunk only points to a humongous chunk for
2457     // the null class loader metaspace (class and data virtual space managers),
2458     // and since new humongous chunks are linked at the head it will not point
2459     // into the tail of the humongous chunks list.
2460     new_chunk->set_next(chunks_in_use(HumongousIndex));
2461     set_chunks_in_use(HumongousIndex, new_chunk);
2462 
2463     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2464   }
2465 
2466   // Add to the running sum of capacity
2467   inc_size_metrics(new_chunk->word_size());
2468 
2469   assert(new_chunk->is_empty(), "Not ready for reuse");
2470   Log(gc, metaspace, freelist) log;
2471   if (log.is_trace()) {
2472     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT, sum_count_in_chunks_in_use());
2473     ResourceMark rm;
2474     outputStream* out = log.trace_stream();
2475     new_chunk->print_on(out);
2476     chunk_manager()->locked_print_free_chunks(out);
2477   }
2478 }
2479 
2480 void SpaceManager::retire_current_chunk() {
2481   if (current_chunk() != NULL) {
2482     size_t remaining_words = current_chunk()->free_word_size();
2483     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
2484       MetaWord* ptr = current_chunk()->allocate(remaining_words);
2485       deallocate(ptr, remaining_words);
2486       inc_used_metrics(remaining_words);
2487     }
2488   }
2489 }
2490 
2491 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
2492   // Get a chunk from the chunk freelist
2493   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
2494 
2495   if (next == NULL) {
2496     next = vs_list()->get_new_chunk(chunk_word_size,
2497                                     medium_chunk_bunch());
2498   }
2499 
2500   Log(gc, metaspace, alloc) log;
2501   if (log.is_debug() && next != NULL &&
2502       SpaceManager::is_humongous(next->word_size())) {
2503     log.debug("  new humongous chunk word size " SIZE_FORMAT, next->word_size());
2504   }
2505 
2506   return next;
2507 }
2508 
2509 /*
2510  * The policy is to allocate up to _small_chunk_limit small chunks
2511  * after which only medium chunks are allocated.  This is done to
2512  * reduce fragmentation.  In some cases, this can result in a lot
2513  * of small chunks being allocated to the point where it's not
2514  * possible to expand.  If this happens, there may be no medium chunks
2515  * available and OOME would be thrown.  Instead of doing that,
2516  * if the allocation request size fits in a small chunk, an attempt
2517  * will be made to allocate a small chunk.
2518  */
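     // For illustration (assuming, as an example, a 512-word small chunk): a
     // request of a few dozen words that cannot be satisfied by expanding the
     // virtual space can still be served from a free small chunk here, whereas a
     // request that does not fit in small_chunk_size() minus the Metachunk
     // overhead immediately returns NULL below.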
2519 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
2520   size_t raw_word_size = get_allocation_word_size(word_size);
2521 
2522   if (raw_word_size + Metachunk::overhead() > small_chunk_size()) {
2523     return NULL;
2524   }
2525 
2526   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2527   MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag);
2528 
2529   Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size());
2530 
2531   MetaWord* mem = NULL;
2532 
2533   if (chunk != NULL) {
2534     // Add chunk to the in-use chunk list and do an allocation from it.
2535     // Add to this manager's list of chunks in use.
2536     add_chunk(chunk, false);
2537     mem = chunk->allocate(raw_word_size);
2538 
2539     inc_used_metrics(raw_word_size);
2540 
2541     // Track metaspace memory usage statistic.
2542     track_metaspace_memory_usage();
2543   }
2544 
2545   return mem;
2546 }
2547 
2548 MetaWord* SpaceManager::allocate(size_t word_size) {
2549   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2550   size_t raw_word_size = get_allocation_word_size(word_size);
2551   BlockFreelist* fl =  block_freelists();
2552   MetaWord* p = NULL;
2553   // Allocation from the dictionary is expensive in the sense that
2554   // the dictionary has to be searched for a size.  Don't allocate
2555   // from the dictionary until it starts to get fat.  Is this
2556   // a reasonable policy?  Maybe a skinny dictionary is fast enough
2557   // for allocations.  Do some profiling.  JJJ
2558   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
2559     p = fl->get_block(raw_word_size);
2560   }
2561   if (p == NULL) {
2562     p = allocate_work(raw_word_size);
2563   }
2564 
2565   return p;
2566 }
2567 
2568 // Returns the address of the space allocated for "word_size".
2569 // This method does not know about blocks (Metablocks)
2570 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2571   assert_lock_strong(_lock);
2572 #ifdef ASSERT
2573   if (Metadebug::test_metadata_failure()) {
2574     return NULL;
2575   }
2576 #endif
2577   // Is there space in the current chunk?
2578   MetaWord* result = NULL;
2579 
2580   // For DumpSharedSpaces, only allocate out of the current chunk which is
2581 // never null because we gave it the size we wanted.  Caller reports out
2582   // of memory if this returns null.
2583   if (DumpSharedSpaces) {
2584     assert(current_chunk() != NULL, "should never happen");
2585     inc_used_metrics(word_size);
2586     return current_chunk()->allocate(word_size); // caller handles null result
2587   }
2588 
2589   if (current_chunk() != NULL) {
2590     result = current_chunk()->allocate(word_size);
2591   }
2592 
2593   if (result == NULL) {
2594     result = grow_and_allocate(word_size);
2595   }
2596 
2597   if (result != NULL) {
2598     inc_used_metrics(word_size);
2599     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2600            "Head of the list is being allocated");
2601   }
2602 
2603   return result;
2604 }
2605 
2606 void SpaceManager::verify() {
2607   // If there are blocks in the dictionary, then
2608   // verification of chunks does not work since
2609   // being in the dictionary alters a chunk.
2610   if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
2611     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2612       Metachunk* curr = chunks_in_use(i);
2613       while (curr != NULL) {
2614         curr->verify();
2615         verify_chunk_size(curr);
2616         curr = curr->next();
2617       }
2618     }
2619   }
2620 }
2621 
2622 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2623   assert(is_humongous(chunk->word_size()) ||
2624          chunk->word_size() == medium_chunk_size() ||
2625          chunk->word_size() == small_chunk_size() ||
2626          chunk->word_size() == specialized_chunk_size(),
2627          "Chunk size is wrong");
2628   return;
2629 }
2630 
2631 #ifdef ASSERT
2632 void SpaceManager::verify_allocated_blocks_words() {
2633   // Verification is only guaranteed at a safepoint.
2634   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2635     "Verification can fail if the application is running");
2636   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2637          "allocation total is not consistent " SIZE_FORMAT
2638          " vs " SIZE_FORMAT,
2639          allocated_blocks_words(), sum_used_in_chunks_in_use());
2640 }
2641 
2642 #endif
2643 
2644 void SpaceManager::dump(outputStream* const out) const {
2645   size_t curr_total = 0;
2646   size_t waste = 0;
2647   uint i = 0;
2648   size_t used = 0;
2649   size_t capacity = 0;
2650 
2651   // Add up statistics for all chunks in this SpaceManager.
2652   for (ChunkIndex index = ZeroIndex;
2653        index < NumberOfInUseLists;
2654        index = next_chunk_index(index)) {
2655     for (Metachunk* curr = chunks_in_use(index);
2656          curr != NULL;
2657          curr = curr->next()) {
2658       out->print("%d) ", i++);
2659       curr->print_on(out);
2660       curr_total += curr->word_size();
2661       used += curr->used_word_size();
2662       capacity += curr->word_size();
2663       waste += curr->free_word_size() + curr->overhead();
2664     }
2665   }
2666 
2667   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2668     if (block_freelists() != NULL) block_freelists()->print_on(out);
2669   }
2670 
2671   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2672   // Free space isn't wasted.
2673   waste -= free;
2674 
2675   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2676                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2677                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2678 }
2679 
2680 // MetaspaceAux
2681 
2682 
2683 size_t MetaspaceAux::_capacity_words[] = {0, 0};
2684 size_t MetaspaceAux::_used_words[] = {0, 0};
2685 
2686 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2687   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2688   return list == NULL ? 0 : list->free_bytes();
2689 }
2690 
2691 size_t MetaspaceAux::free_bytes() {
2692   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2693 }
2694 
2695 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2696   assert_lock_strong(SpaceManager::expand_lock());
2697   assert(words <= capacity_words(mdtype),
2698          "About to decrement below 0: words " SIZE_FORMAT
2699          " is greater than _capacity_words[%u] " SIZE_FORMAT,
2700          words, mdtype, capacity_words(mdtype));
2701   _capacity_words[mdtype] -= words;
2702 }
2703 
2704 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2705   assert_lock_strong(SpaceManager::expand_lock());
2706   // Needs to be atomic
2707   _capacity_words[mdtype] += words;
2708 }
2709 
2710 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2711   assert(words <= used_words(mdtype),
2712          "About to decrement below 0: words " SIZE_FORMAT
2713          " is greater than _used_words[%u] " SIZE_FORMAT,
2714          words, mdtype, used_words(mdtype));
2715   // For CMS deallocation of the Metaspaces occurs during the
2716   // sweep which is a concurrent phase.  Protection by the expand_lock()
2717   // is not enough since allocation is on a per Metaspace basis
2718   // and protected by the Metaspace lock.
2719   jlong minus_words = (jlong) - (jlong) words;
2720   Atomic::add_ptr(minus_words, &_used_words[mdtype]);
2721 }
2722 
2723 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2724   // _used_words tracks allocations for
2725   // each piece of metadata.  Those allocations are
2726   // generally done concurrently by different application
2727   // threads so must be done atomically.
2728   Atomic::add_ptr(words, &_used_words[mdtype]);
2729 }
2730 
2731 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2732   size_t used = 0;
2733   ClassLoaderDataGraphMetaspaceIterator iter;
2734   while (iter.repeat()) {
2735     Metaspace* msp = iter.get_next();
2736     // Sum allocated_blocks_words for each metaspace
2737     if (msp != NULL) {
2738       used += msp->used_words_slow(mdtype);
2739     }
2740   }
2741   return used * BytesPerWord;
2742 }
2743 
2744 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2745   size_t free = 0;
2746   ClassLoaderDataGraphMetaspaceIterator iter;
2747   while (iter.repeat()) {
2748     Metaspace* msp = iter.get_next();
2749     if (msp != NULL) {
2750       free += msp->free_words_slow(mdtype);
2751     }
2752   }
2753   return free * BytesPerWord;
2754 }
2755 
2756 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2757   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2758     return 0;
2759   }
2760   // Don't count the space in the freelists.  That space will be
2761   // added to the capacity calculation as needed.
2762   size_t capacity = 0;
2763   ClassLoaderDataGraphMetaspaceIterator iter;
2764   while (iter.repeat()) {
2765     Metaspace* msp = iter.get_next();
2766     if (msp != NULL) {
2767       capacity += msp->capacity_words_slow(mdtype);
2768     }
2769   }
2770   return capacity * BytesPerWord;
2771 }
2772 
2773 size_t MetaspaceAux::capacity_bytes_slow() {
2774 #ifdef PRODUCT
2775   // Use capacity_bytes() in PRODUCT instead of this function.
2776   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2777 #endif
2778   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2779   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2780   assert(capacity_bytes() == class_capacity + non_class_capacity,
2781          "bad accounting: capacity_bytes() " SIZE_FORMAT
2782          " class_capacity + non_class_capacity " SIZE_FORMAT
2783          " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2784          capacity_bytes(), class_capacity + non_class_capacity,
2785          class_capacity, non_class_capacity);
2786 
2787   return class_capacity + non_class_capacity;
2788 }
2789 
2790 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2791   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2792   return list == NULL ? 0 : list->reserved_bytes();
2793 }
2794 
2795 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2796   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2797   return list == NULL ? 0 : list->committed_bytes();
2798 }
2799 
2800 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2801 
2802 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2803   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2804   if (chunk_manager == NULL) {
2805     return 0;
2806   }
2807   chunk_manager->slow_verify();
2808   return chunk_manager->free_chunks_total_words();
2809 }
2810 
2811 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2812   return free_chunks_total_words(mdtype) * BytesPerWord;
2813 }
2814 
2815 size_t MetaspaceAux::free_chunks_total_words() {
2816   return free_chunks_total_words(Metaspace::ClassType) +
2817          free_chunks_total_words(Metaspace::NonClassType);
2818 }
2819 
2820 size_t MetaspaceAux::free_chunks_total_bytes() {
2821   return free_chunks_total_words() * BytesPerWord;
2822 }
2823 
2824 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2825   return Metaspace::get_chunk_manager(mdtype) != NULL;
2826 }
2827 
2828 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
2829   if (!has_chunk_free_list(mdtype)) {
2830     return MetaspaceChunkFreeListSummary();
2831   }
2832 
2833   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
2834   return cm->chunk_free_list_summary();
2835 }
2836 
2837 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2838   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
2839                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
2840 }
2841 
2842 void MetaspaceAux::print_on(outputStream* out) {
2843   Metaspace::MetadataType nct = Metaspace::NonClassType;
2844 
2845   out->print_cr(" Metaspace       "
2846                 "used "      SIZE_FORMAT "K, "
2847                 "capacity "  SIZE_FORMAT "K, "
2848                 "committed " SIZE_FORMAT "K, "
2849                 "reserved "  SIZE_FORMAT "K",
2850                 used_bytes()/K,
2851                 capacity_bytes()/K,
2852                 committed_bytes()/K,
2853                 reserved_bytes()/K);
2854 
2855   if (Metaspace::using_class_space()) {
2856     Metaspace::MetadataType ct = Metaspace::ClassType;
2857     out->print_cr("  class space    "
2858                   "used "      SIZE_FORMAT "K, "
2859                   "capacity "  SIZE_FORMAT "K, "
2860                   "committed " SIZE_FORMAT "K, "
2861                   "reserved "  SIZE_FORMAT "K",
2862                   used_bytes(ct)/K,
2863                   capacity_bytes(ct)/K,
2864                   committed_bytes(ct)/K,
2865                   reserved_bytes(ct)/K);
2866   }
2867 }
2868 
2869 // Print information for class space and data space separately.
2870 // This is almost the same as above.
2871 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2872   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2873   size_t capacity_bytes = capacity_bytes_slow(mdtype);
2874   size_t used_bytes = used_bytes_slow(mdtype);
2875   size_t free_bytes = free_bytes_slow(mdtype);
2876   size_t used_and_free = used_bytes + free_bytes +
2877                            free_chunks_capacity_bytes;
2878   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
2879              "K + unused in chunks " SIZE_FORMAT "K  + "
2880              " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2881              "K  capacity in allocated chunks " SIZE_FORMAT "K",
2882              used_bytes / K,
2883              free_bytes / K,
2884              free_chunks_capacity_bytes / K,
2885              used_and_free / K,
2886              capacity_bytes / K);
2887   // Accounting can only be correct if we got the values during a safepoint
2888   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2889 }
2890 
2891 // Print total fragmentation for class metaspaces
2892 void MetaspaceAux::print_class_waste(outputStream* out) {
2893   assert(Metaspace::using_class_space(), "class metaspace not used");
2894   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2895   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2896   ClassLoaderDataGraphMetaspaceIterator iter;
2897   while (iter.repeat()) {
2898     Metaspace* msp = iter.get_next();
2899     if (msp != NULL) {
2900       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2901       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2902       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2903       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2904       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2905       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2906       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2907     }
2908   }
2909   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2910                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2911                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2912                 "large count " SIZE_FORMAT,
2913                 cls_specialized_count, cls_specialized_waste,
2914                 cls_small_count, cls_small_waste,
2915                 cls_medium_count, cls_medium_waste, cls_humongous_count);
2916 }
2917 
2918 // Print total fragmentation for data and class metaspaces separately
2919 void MetaspaceAux::print_waste(outputStream* out) {
2920   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2921   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2922 
2923   ClassLoaderDataGraphMetaspaceIterator iter;
2924   while (iter.repeat()) {
2925     Metaspace* msp = iter.get_next();
2926     if (msp != NULL) {
2927       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2928       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2929       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2930       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2931       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2932       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2933       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2934     }
2935   }
2936   out->print_cr("Total fragmentation waste (words) doesn't count free space");
2937   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2938                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2939                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2940                         "large count " SIZE_FORMAT,
2941              specialized_count, specialized_waste, small_count,
2942              small_waste, medium_count, medium_waste, humongous_count);
2943   if (Metaspace::using_class_space()) {
2944     print_class_waste(out);
2945   }
2946 }
2947 
2948 // Dump global metaspace things from the end of ClassLoaderDataGraph
2949 void MetaspaceAux::dump(outputStream* out) {
2950   out->print_cr("All Metaspace:");
2951   out->print("data space: "); print_on(out, Metaspace::NonClassType);
2952   out->print("class space: "); print_on(out, Metaspace::ClassType);
2953   print_waste(out);
2954 }
2955 
2956 void MetaspaceAux::verify_free_chunks() {
2957   Metaspace::chunk_manager_metadata()->verify();
2958   if (Metaspace::using_class_space()) {
2959     Metaspace::chunk_manager_class()->verify();
2960   }
2961 }
2962 
2963 void MetaspaceAux::verify_capacity() {
2964 #ifdef ASSERT
2965   size_t running_sum_capacity_bytes = capacity_bytes();
2966   // Verify the running capacity sum against the capacity recomputed by walking all metaspaces.
2967   size_t capacity_in_use_bytes = capacity_bytes_slow();
2968   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2969          "capacity_words() * BytesPerWord " SIZE_FORMAT
2970          " capacity_bytes_slow() " SIZE_FORMAT,
2971          running_sum_capacity_bytes, capacity_in_use_bytes);
2972   for (Metaspace::MetadataType i = Metaspace::ClassType;
2973        i < Metaspace::MetadataTypeCount;
2974        i = (Metaspace::MetadataType)(i + 1)) {
2975     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2976     assert(capacity_bytes(i) == capacity_in_use_bytes,
2977            "capacity_bytes(%u) " SIZE_FORMAT
2978            " capacity_bytes_slow(%u) " SIZE_FORMAT,
2979            i, capacity_bytes(i), i, capacity_in_use_bytes);
2980   }
2981 #endif
2982 }
2983 
2984 void MetaspaceAux::verify_used() {
2985 #ifdef ASSERT
2986   size_t running_sum_used_bytes = used_bytes();
2987   // Verify the running sum of used bytes against the value recomputed by walking all metaspaces.
2988   size_t used_in_use_bytes = used_bytes_slow();
2989   assert(used_bytes() == used_in_use_bytes,
2990          "used_bytes() " SIZE_FORMAT
2991          " used_bytes_slow() " SIZE_FORMAT,
2992          used_bytes(), used_in_use_bytes);
2993   for (Metaspace::MetadataType i = Metaspace::ClassType;
2994        i < Metaspace::MetadataTypeCount;
2995        i = (Metaspace::MetadataType)(i + 1)) {
2996     size_t used_in_use_bytes = used_bytes_slow(i);
2997     assert(used_bytes(i) == used_in_use_bytes,
2998            "used_bytes(%u) " SIZE_FORMAT
2999            " used_bytes_slow(%u) " SIZE_FORMAT,
3000            i, used_bytes(i), i, used_in_use_bytes);
3001   }
3002 #endif
3003 }
3004 
3005 void MetaspaceAux::verify_metrics() {
3006   verify_capacity();
3007   verify_used();
3008 }
3009 
3010 
3011 // Metaspace methods
3012 
3013 size_t Metaspace::_first_chunk_word_size = 0;
3014 size_t Metaspace::_first_class_chunk_word_size = 0;
3015 
3016 size_t Metaspace::_commit_alignment = 0;
3017 size_t Metaspace::_reserve_alignment = 0;
3018 
3019 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
3020   initialize(lock, type);
3021 }
3022 
3023 Metaspace::~Metaspace() {
3024   delete _vsm;
3025   if (using_class_space()) {
3026     delete _class_vsm;
3027   }
3028 }
3029 
3030 VirtualSpaceList* Metaspace::_space_list = NULL;
3031 VirtualSpaceList* Metaspace::_class_space_list = NULL;
3032 
3033 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
3034 ChunkManager* Metaspace::_chunk_manager_class = NULL;
3035 
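     // Factor by which the boot class loader's first chunk size is multiplied to size
     // the initial metadata virtual space (see global_initialize() below).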
3036 #define VIRTUALSPACEMULTIPLIER 2
3037 
3038 #ifdef _LP64
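     // Upper limit for unscaled (shift == 0) narrow klass encoding: a 32-bit narrow
     // klass pointer can address at most max_juint + 1 bytes (4G).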
3039 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
3040 
3041 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
3042   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
3043   // narrow_klass_base is the lower of the metaspace base and the cds base
3044   // (if cds is enabled).  The narrow_klass_shift depends on the distance
3045   // between the lower base and higher address.
3046   address lower_base;
3047   address higher_address;
3048 #if INCLUDE_CDS
3049   if (UseSharedSpaces) {
3050     higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
3051                           (address)(metaspace_base + compressed_class_space_size()));
3052     lower_base = MIN2(metaspace_base, cds_base);
3053   } else
3054 #endif
3055   {
3056     higher_address = metaspace_base + compressed_class_space_size();
3057     lower_base = metaspace_base;
3058 
3059     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
3060     // If compressed class space fits in lower 32G, we don't need a base.
3061     if (higher_address <= (address)klass_encoding_max) {
3062       lower_base = 0; // Effectively lower base is zero.
3063     }
3064   }
3065 
3066   Universe::set_narrow_klass_base(lower_base);
3067 
3068   if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
3069     Universe::set_narrow_klass_shift(0);
3070   } else {
3071     assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
3072     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
3073   }
3074   AOTLoader::set_narrow_klass_shift();
3075 }
3076 
3077 #if INCLUDE_CDS
3078 // Return TRUE if the specified metaspace_base and cds_base are close enough
3079 // to work with compressed klass pointers.
3080 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
3081   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
3082   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3083   address lower_base = MIN2((address)metaspace_base, cds_base);
3084   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
3085                                 (address)(metaspace_base + compressed_class_space_size()));
3086   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
3087 }
3088 #endif
3089 
3090 // Try to allocate the metaspace at the requested addr.
3091 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
3092   assert(using_class_space(), "called improperly");
3093   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3094   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
3095          "Metaspace size is too big");
3096   assert_is_aligned(requested_addr, _reserve_alignment);
3097   assert_is_aligned(cds_base, _reserve_alignment);
3098   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
3099 
3100   // Don't use large pages for the class space.
3101   bool large_pages = false;
3102 
3103 #if !(defined(AARCH64) || defined(AIX))
3104   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
3105                                              _reserve_alignment,
3106                                              large_pages,
3107                                              requested_addr);
3108 #else // AARCH64 || AIX
3109   ReservedSpace metaspace_rs;
3110 
3111   // Our compressed klass pointers may fit nicely into the lower 32
3112   // bits.
3113   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
3114     metaspace_rs = ReservedSpace(compressed_class_space_size(),
3115                                  _reserve_alignment,
3116                                  large_pages,
3117                                  requested_addr);
3118   }
3119 
3120   if (!metaspace_rs.is_reserved()) {
3121     // Aarch64: Try to align metaspace so that we can decode a compressed
3122     // klass with a single MOVK instruction.  We can do this iff the
3123     // compressed class base is a multiple of 4G.
3124     // Aix: Search for a place where we can find memory. If we need to load
3125     // the base, 4G alignment is helpful, too.
3126     size_t increment = AARCH64_ONLY(4*)G;
3127     for (char *a = align_up(requested_addr, increment);
3128          a < (char*)(1024*G);
3129          a += increment) {
3130       if (a == (char *)(32*G)) {
3131         // Go faster from here on. Zero-based is no longer possible.
3132         increment = 4*G;
3133       }
3134 
3135 #if INCLUDE_CDS
3136       if (UseSharedSpaces
3137           && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
3138         // We failed to find an aligned base that will reach.  Fall
3139         // back to using our requested addr.
3140         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3141                                      _reserve_alignment,
3142                                      large_pages,
3143                                      requested_addr);
3144         break;
3145       }
3146 #endif
3147 
3148       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3149                                    _reserve_alignment,
3150                                    large_pages,
3151                                    a);
3152       if (metaspace_rs.is_reserved())
3153         break;
3154     }
3155   }
3156 
3157 #endif // AARCH64 || AIX
3158 
3159   if (!metaspace_rs.is_reserved()) {
3160 #if INCLUDE_CDS
3161     if (UseSharedSpaces) {
3162       size_t increment = align_up(1*G, _reserve_alignment);
3163 
3164       // Keep trying to allocate the metaspace, increasing the requested_addr
3165       // by 1GB each time, until we reach an address that will no longer allow
3166       // use of CDS with compressed klass pointers.
3167       char *addr = requested_addr;
3168       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3169              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3170         addr = addr + increment;
3171         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3172                                      _reserve_alignment, large_pages, addr);
3173       }
3174     }
3175 #endif
3176     // If no successful allocation then try to allocate the space anywhere.  If
3177     // that fails then OOM doom.  At this point we cannot try allocating the
3178     // metaspace as if UseCompressedClassPointers is off because too much
3179     // initialization has happened that depends on UseCompressedClassPointers.
3180     // So, UseCompressedClassPointers cannot be turned off at this point.
3181     if (!metaspace_rs.is_reserved()) {
3182       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3183                                    _reserve_alignment, large_pages);
3184       if (!metaspace_rs.is_reserved()) {
3185         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
3186                                               compressed_class_space_size()));
3187       }
3188     }
3189   }
3190 
3191   // If we got here then the metaspace got allocated.
3192   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3193 
3194 #if INCLUDE_CDS
3195   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3196   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3197     FileMapInfo::stop_sharing_and_unmap(
3198         "Could not allocate metaspace at a compatible address");
3199   }
3200 #endif
3201   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3202                                   UseSharedSpaces ? (address)cds_base : 0);
3203 
3204   initialize_class_space(metaspace_rs);
3205 
3206   if (log_is_enabled(Trace, gc, metaspace)) {
3207     Log(gc, metaspace) log;
3208     ResourceMark rm;
3209     print_compressed_class_space(log.trace_stream(), requested_addr);
3210   }
3211 }
3212 
3213 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3214   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3215                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3216   if (_class_space_list != NULL) {
3217     address base = (address)_class_space_list->current_virtual_space()->bottom();
3218     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3219                  compressed_class_space_size(), p2i(base));
3220     if (requested_addr != 0) {
3221       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3222     }
3223     st->cr();
3224   }
3225 }
3226 
3227 // For UseCompressedClassPointers the class space is reserved above the top of
3228 // the Java heap.  The argument passed in is at the base of the compressed space.
3229 void Metaspace::initialize_class_space(ReservedSpace rs) {
3230   // The reserved space size may be bigger because of alignment, especially with UseLargePages.
3231   assert(rs.size() >= CompressedClassSpaceSize,
3232          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
3233   assert(using_class_space(), "Must be using class space");
3234   _class_space_list = new VirtualSpaceList(rs);
3235   _chunk_manager_class = new ChunkManager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3236 
3237   if (!_class_space_list->initialization_succeeded()) {
3238     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3239   }
3240 }
3241 
3242 #endif
3243 
3244 void Metaspace::ergo_initialize() {
3245   if (DumpSharedSpaces) {
3246     // Using large pages when dumping the shared archive is currently not implemented.
3247     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3248   }
3249 
3250   size_t page_size = os::vm_page_size();
3251   if (UseLargePages && UseLargePagesInMetaspace) {
3252     page_size = os::large_page_size();
3253   }
3254 
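       // Commit metaspace memory in units of the chosen page size; reserve it in units
       // of at least the OS allocation granularity.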
3255   _commit_alignment  = page_size;
3256   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
3257 
3258   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since that would
3259   // clobber the record of whether MaxMetaspaceSize was set on the command line.
3260   // This information is needed later to conform to the specification of the
3261   // java.lang.management.MemoryUsage API.
3262   //
3263   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3264   // globals.hpp to the aligned value, but this is not possible, since the
3265   // alignment depends on other flags being parsed.
3266   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3267 
3268   if (MetaspaceSize > MaxMetaspaceSize) {
3269     MetaspaceSize = MaxMetaspaceSize;
3270   }
3271 
3272   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
3273 
3274   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3275 
3276   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3277   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3278 
3279   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3280   set_compressed_class_space_size(CompressedClassSpaceSize);
3281 }
3282 
3283 void Metaspace::global_initialize() {
3284   MetaspaceGC::initialize();
3285 
3286   // Initialize the alignment for shared spaces.
3287   int max_alignment = os::vm_allocation_granularity();
3288   size_t cds_total = 0;
3289 
3290   MetaspaceShared::set_max_alignment(max_alignment);
3291 
3292   if (DumpSharedSpaces) {
3293 #if INCLUDE_CDS
3294     MetaspaceShared::estimate_regions_size();
3295 
3296     SharedReadOnlySize  = align_up(SharedReadOnlySize,  max_alignment);
3297     SharedReadWriteSize = align_up(SharedReadWriteSize, max_alignment);
3298     SharedMiscDataSize  = align_up(SharedMiscDataSize,  max_alignment);
3299     SharedMiscCodeSize  = align_up(SharedMiscCodeSize,  max_alignment);
3300 
3301     // Initialize with the sum of the shared space sizes.  The read-only
3302     // and read-write metaspace chunks will be allocated out of this, and the
3303     // remainder is the misc code and data chunks.
3304     cds_total = FileMapInfo::shared_spaces_size();
3305     cds_total = align_up(cds_total, _reserve_alignment);
3306     _space_list = new VirtualSpaceList(cds_total/wordSize);
3307     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3308 
3309     if (!_space_list->initialization_succeeded()) {
3310       vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3311     }
3312 
3313 #ifdef _LP64
3314     if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3315       vm_exit_during_initialization("Unable to dump shared archive.",
3316           err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3317                   SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3318                   "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(),
3319                   cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3320     }
3321 
3322     // Set the compressed klass pointer base so that decoding of these pointers works
3323     // properly when creating the shared archive.
3324     assert(UseCompressedOops && UseCompressedClassPointers,
3325       "UseCompressedOops and UseCompressedClassPointers must be set");
3326     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3327     log_develop_trace(gc, metaspace)("Setting narrow_klass_base to Address: " PTR_FORMAT,
3328                                      p2i(_space_list->current_virtual_space()->bottom()));
3329 
3330     Universe::set_narrow_klass_shift(0);
3331 #endif // _LP64
3332 #endif // INCLUDE_CDS
3333   } else {
3334 #if INCLUDE_CDS
3335     if (UseSharedSpaces) {
3336       // If using shared space, open the file that contains the shared space
3337       // and map in the memory before initializing the rest of metaspace (so
3338       // the addresses don't conflict)
3339       address cds_address = NULL;
3340       FileMapInfo* mapinfo = new FileMapInfo();
3341 
3342       // Open the shared archive file, read and validate the header. If
3343       // initialization fails, shared spaces [UseSharedSpaces] are
3344       // disabled and the file is closed.
3345       // The spaces are also mapped in now.
3346       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3347         cds_total = FileMapInfo::shared_spaces_size();
3348         cds_address = (address)mapinfo->header()->region_addr(0);
3349 #ifdef _LP64
3350         if (using_class_space()) {
3351           char* cds_end = (char*)(cds_address + cds_total);
3352           cds_end = align_up(cds_end, _reserve_alignment);
3353           // If UseCompressedClassPointers is set then allocate the metaspace area
3354           // above the heap and above the CDS area (if it exists).
3355           allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3356           // Map the shared string space after the compressed class pointers have
3357           // been set up, because it relies on that setup to work.
3358           mapinfo->map_string_regions();
3359         }
3360 #endif // _LP64
3361       } else {
3362         assert(!mapinfo->is_open() && !UseSharedSpaces,
3363                "archive file not closed or shared spaces not disabled.");
3364       }
3365     }
3366 #endif // INCLUDE_CDS
3367 
3368 #ifdef _LP64
3369     if (!UseSharedSpaces && using_class_space()) {
3370       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3371       allocate_metaspace_compressed_klass_ptrs(base, 0);
3372     }
3373 #endif // _LP64
3374 
3375     // Initialize these before initializing the VirtualSpaceList
3376     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3377     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3378     // Make the first class chunk bigger than a medium chunk so it's not put
3379     // on the medium chunk list.  The next chunk will be small and progress
3380     // from there.  This size was determined empirically by running -version.
3381     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3382                                        (CompressedClassSpaceSize/BytesPerWord)*2);
3383     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3384     // Arbitrarily set the initial virtual space to a multiple
3385     // of the boot class loader size.
3386     size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3387     word_size = align_up(word_size, Metaspace::reserve_alignment_words());
3388 
3389     // Initialize the list of virtual spaces.
3390     _space_list = new VirtualSpaceList(word_size);
3391     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3392 
3393     if (!_space_list->initialization_succeeded()) {
3394       vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3395     }
3396   }
3397 
3398   _tracer = new MetaspaceTracer();
3399 }
3400 
3401 void Metaspace::post_initialize() {
3402   MetaspaceGC::post_initialize();
3403 }
3404 
3405 void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) {
3406   Metachunk* chunk = get_initialization_chunk(type, mdtype);
3407   if (chunk != NULL) {
3408     // Add to this manager's list of chunks in use and current_chunk().
3409     get_space_manager(mdtype)->add_chunk(chunk, true);
3410   }
3411 }
3412 
3413 Metachunk* Metaspace::get_initialization_chunk(MetaspaceType type, MetadataType mdtype) {
3414   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
3415 
3416   // Get a chunk from the chunk freelist
3417   Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3418 
3419   if (chunk == NULL) {
3420     chunk = get_space_list(mdtype)->get_new_chunk(chunk_word_size,
3421                                                   get_space_manager(mdtype)->medium_chunk_bunch());
3422   }
3423 
3424   // When dumping the shared archive, report an error if the allocation has failed.
3425   if (DumpSharedSpaces && chunk == NULL) {
3426     report_insufficient_metaspace(MetaspaceAux::committed_bytes() + chunk_word_size * BytesPerWord);
3427   }
3428 
3429   return chunk;
3430 }
3431 
3432 void Metaspace::verify_global_initialization() {
3433   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
3434   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
3435 
3436   if (using_class_space()) {
3437     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
3438     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
3439   }
3440 }
3441 
3442 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3443   verify_global_initialization();
3444 
3445   // Allocate SpaceManager for metadata objects.
3446   _vsm = new SpaceManager(NonClassType, lock);
3447 
3448   if (using_class_space()) {
3449     // Allocate SpaceManager for classes.
3450     _class_vsm = new SpaceManager(ClassType, lock);
3451   }
3452 
3453   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3454 
3455   // Allocate chunk for metadata objects
3456   initialize_first_chunk(type, NonClassType);
3457 
3458   // Allocate chunk for class metadata objects
3459   if (using_class_space()) {
3460     initialize_first_chunk(type, ClassType);
3461   }
3462 
3463   _alloc_record_head = NULL;
3464   _alloc_record_tail = NULL;
3465 }
3466 
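     // Round a word size up so that the corresponding byte size is aligned to the
     // ReservedSpace allocation alignment.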
3467 size_t Metaspace::align_word_size_up(size_t word_size) {
3468   size_t byte_size = word_size * wordSize;
3469   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3470 }
3471 
3472 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3473   // DumpSharedSpaces doesn't use class metadata area (yet)
3474   // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3475   if (is_class_space_allocation(mdtype)) {
3476     return  class_vsm()->allocate(word_size);
3477   } else {
3478     return  vsm()->allocate(word_size);
3479   }
3480 }
3481 
3482 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3483   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3484   assert(delta_bytes > 0, "Must be");
3485 
3486   size_t before = 0;
3487   size_t after = 0;
3488   MetaWord* res;
3489   bool incremented;
3490 
3491   // Each thread increments the HWM at most once. Even if the thread fails to increment
3492   // the HWM, an allocation is still attempted. This is because another thread must then
3493   // have incremented the HWM and therefore the allocation might still succeed.
3494   do {
3495     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
3496     res = allocate(word_size, mdtype);
3497   } while (!incremented && res == NULL);
3498 
3499   if (incremented) {
3500     tracer()->report_gc_threshold(before, after,
3501                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3502     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
3503   }
3504 
3505   return res;
3506 }
3507 
3508 // Space allocated in the Metaspace.  This may
3509 // span several metadata virtual spaces.
3510 char* Metaspace::bottom() const {
3511   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3512   return (char*)vsm()->current_chunk()->bottom();
3513 }
3514 
3515 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3516   if (mdtype == ClassType) {
3517     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3518   } else {
3519     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3520   }
3521 }
3522 
3523 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3524   if (mdtype == ClassType) {
3525     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3526   } else {
3527     return vsm()->sum_free_in_chunks_in_use();
3528   }
3529 }
3530 
3531 // Space capacity in the Metaspace.  It includes
3532 // space in the list of chunks from which allocations
3533 // have been made.  It does not include space in the global freelist,
3534 // nor the space available in the dictionary, which
3535 // is already counted in some chunk.
3536 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3537   if (mdtype == ClassType) {
3538     return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3539   } else {
3540     return vsm()->sum_capacity_in_chunks_in_use();
3541   }
3542 }
3543 
3544 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3545   return used_words_slow(mdtype) * BytesPerWord;
3546 }
3547 
3548 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3549   return capacity_words_slow(mdtype) * BytesPerWord;
3550 }
3551 
3552 size_t Metaspace::allocated_blocks_bytes() const {
3553   return vsm()->allocated_blocks_bytes() +
3554       (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
3555 }
3556 
3557 size_t Metaspace::allocated_chunks_bytes() const {
3558   return vsm()->allocated_chunks_bytes() +
3559       (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
3560 }
3561 
3562 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3563   assert(!SafepointSynchronize::is_at_safepoint()
3564          || Thread::current()->is_VM_thread(), "should be the VM thread");
3565 
3566   if (DumpSharedSpaces && log_is_enabled(Info, cds)) {
3567     record_deallocation(ptr, vsm()->get_allocation_word_size(word_size));
3568   }
3569 
3570   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3571 
3572   if (is_class && using_class_space()) {
3573     class_vsm()->deallocate(ptr, word_size);
3574   } else {
3575     vsm()->deallocate(ptr, word_size);
3576   }
3577 }
3578 
3579 
3580 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3581                               bool read_only, MetaspaceObj::Type type, TRAPS) {
3582   if (HAS_PENDING_EXCEPTION) {
3583     assert(false, "Should not allocate with exception pending");
3584     return NULL;  // caller does a CHECK_NULL too
3585   }
3586 
3587   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3588         "ClassLoaderData::the_null_class_loader_data() should have been used.");
3589 
3590   // Allocate in metaspaces without taking out a lock, because it deadlocks
3591   // with the SymbolTable_lock.  Dumping is single-threaded for now.  We'll have
3592   // to revisit this for application class data sharing.
3593   if (DumpSharedSpaces) {
3594     assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3595     Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3596     MetaWord* result = space->allocate(word_size, NonClassType);
3597     if (result == NULL) {
3598       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3599     }
3600     if (log_is_enabled(Info, cds)) {
3601       space->record_allocation(result, type, space->vsm()->get_allocation_word_size(word_size));
3602     }
3603 
3604     // Zero initialize.
3605     Copy::fill_to_words((HeapWord*)result, word_size, 0);
3606 
3607     return result;
3608   }
3609 
3610   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3611 
3612   // Try to allocate metadata.
3613   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3614 
3615   if (result == NULL) {
3616     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
3617 
3618     // Allocation failed.
3619     if (is_init_completed()) {
3620       // Only start a GC if the bootstrapping has completed.
3621 
3622       // Try to clean out some memory and retry.
3623       result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3624           loader_data, word_size, mdtype);
3625     }
3626   }
3627 
3628   if (result == NULL) {
3629     SpaceManager* sm;
3630     if (is_class_space_allocation(mdtype)) {
3631       sm = loader_data->metaspace_non_null()->class_vsm();
3632     } else {
3633       sm = loader_data->metaspace_non_null()->vsm();
3634     }
3635 
3636     result = sm->get_small_chunk_and_allocate(word_size);
3637 
3638     if (result == NULL) {
3639       report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3640     }
3641   }
3642 
3643   // Zero initialize.
3644   Copy::fill_to_words((HeapWord*)result, word_size, 0);
3645 
3646   return result;
3647 }
3648 
3649 size_t Metaspace::class_chunk_size(size_t word_size) {
3650   assert(using_class_space(), "Has to use class space");
3651   return class_vsm()->calc_chunk_size(word_size);
3652 }
3653 
3654 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3655   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3656 
3657   // If result is still null, we are out of memory.
3658   Log(gc, metaspace, freelist) log;
3659   if (log.is_info()) {
3660     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
3661              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
3662     ResourceMark rm;
3663     outputStream* out = log.info_stream();
3664     if (loader_data->metaspace_or_null() != NULL) {
3665       loader_data->dump(out);
3666     }
3667     MetaspaceAux::dump(out);
3668   }
3669 
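       // If the failed allocation was for the class space, check whether committing the
       // needed chunk would push committed class space past CompressedClassSpaceSize,
       // i.e. whether the compressed class space itself is exhausted.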
3670   bool out_of_compressed_class_space = false;
3671   if (is_class_space_allocation(mdtype)) {
3672     Metaspace* metaspace = loader_data->metaspace_non_null();
3673     out_of_compressed_class_space =
3674       MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3675       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3676       CompressedClassSpaceSize;
3677   }
3678 
3679   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3680   const char* space_string = out_of_compressed_class_space ?
3681     "Compressed class space" : "Metaspace";
3682 
3683   report_java_out_of_memory(space_string);
3684 
3685   if (JvmtiExport::should_post_resource_exhausted()) {
3686     JvmtiExport::post_resource_exhausted(
3687         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3688         space_string);
3689   }
3690 
3691   if (!is_init_completed()) {
3692     vm_exit_during_initialization("OutOfMemoryError", space_string);
3693   }
3694 
3695   if (out_of_compressed_class_space) {
3696     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3697   } else {
3698     THROW_OOP(Universe::out_of_memory_error_metaspace());
3699   }
3700 }
3701 
3702 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
3703   switch (mdtype) {
3704     case Metaspace::ClassType: return "Class";
3705     case Metaspace::NonClassType: return "Metadata";
3706     default:
3707       assert(false, "Got bad mdtype: %d", (int) mdtype);
3708       return NULL;
3709   }
3710 }
3711 
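     // Record an allocation made while dumping the shared archive.  An allocation that
     // directly follows the current tail record is appended; otherwise it must reuse a
     // previously deallocated record, which is split if it is only partially reused.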
3712 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3713   assert(DumpSharedSpaces, "sanity");
3714 
3715   int byte_size = (int)word_size * wordSize;
3716   AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
3717 
3718   if (_alloc_record_head == NULL) {
3719     _alloc_record_head = _alloc_record_tail = rec;
3720   } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
3721     _alloc_record_tail->_next = rec;
3722     _alloc_record_tail = rec;
3723   } else {
3724     // slow linear search, but this doesn't happen that often, and only when dumping
3725     for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
3726       if (old->_ptr == ptr) {
3727         assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
3728         int remain_bytes = old->_byte_size - byte_size;
3729         assert(remain_bytes >= 0, "sanity");
3730         old->_type = type;
3731 
3732         if (remain_bytes == 0) {
3733           delete(rec);
3734         } else {
3735           address remain_ptr = address(ptr) + byte_size;
3736           rec->_ptr = remain_ptr;
3737           rec->_byte_size = remain_bytes;
3738           rec->_type = MetaspaceObj::DeallocatedType;
3739           rec->_next = old->_next;
3740           old->_byte_size = byte_size;
3741           old->_next = rec;
3742         }
3743         return;
3744       }
3745     }
3746     assert(0, "reallocating a freed pointer that was not recorded");
3747   }
3748 }
3749 
3750 void Metaspace::record_deallocation(void* ptr, size_t word_size) {
3751   assert(DumpSharedSpaces, "sanity");
3752 
3753   for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3754     if (rec->_ptr == ptr) {
3755       assert(rec->_byte_size == (int)word_size * wordSize, "sanity");
3756       rec->_type = MetaspaceObj::DeallocatedType;
3757       return;
3758     }
3759   }
3760 
3761   assert(0, "deallocating a pointer that was not recorded");
3762 }
3763 
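     // Walk the allocation records in address order, reporting any unrecorded gaps
     // (including the space after the last record) as MetaspaceObj::UnknownType.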
3764 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3765   assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3766 
3767   address last_addr = (address)bottom();
3768 
3769   for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3770     address ptr = rec->_ptr;
3771     if (last_addr < ptr) {
3772       closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3773     }
3774     closure->doit(ptr, rec->_type, rec->_byte_size);
3775     last_addr = ptr + rec->_byte_size;
3776   }
3777 
3778   address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3779   if (last_addr < top) {
3780     closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3781   }
3782 }
3783 
3784 void Metaspace::purge(MetadataType mdtype) {
3785   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3786 }
3787 
3788 void Metaspace::purge() {
3789   MutexLockerEx cl(SpaceManager::expand_lock(),
3790                    Mutex::_no_safepoint_check_flag);
3791   purge(NonClassType);
3792   if (using_class_space()) {
3793     purge(ClassType);
3794   }
3795 }
3796 
3797 void Metaspace::print_on(outputStream* out) const {
3798   // Print both class virtual space counts and metaspace.
3799   if (Verbose) {
3800     vsm()->print_on(out);
3801     if (using_class_space()) {
3802       class_vsm()->print_on(out);
3803     }
3804   }
3805 }
3806 
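     // A pointer is regarded as metaspace metadata if it lies in the shared (CDS)
     // space or in one of the global metadata virtual space lists.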
3807 bool Metaspace::contains(const void* ptr) {
3808   if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
3809     return true;
3810   }
3811   return contains_non_shared(ptr);
3812 }
3813 
3814 bool Metaspace::contains_non_shared(const void* ptr) {
3815   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
3816      return true;
3817   }
3818 
3819   return get_space_list(NonClassType)->contains(ptr);
3820 }
3821 
3822 void Metaspace::verify() {
3823   vsm()->verify();
3824   if (using_class_space()) {
3825     class_vsm()->verify();
3826   }
3827 }
3828 
3829 void Metaspace::dump(outputStream* const out) const {
3830   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
3831   vsm()->dump(out);
3832   if (using_class_space()) {
3833     out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
3834     class_vsm()->dump(out);
3835   }
3836 }
3837 
3838 /////////////// Unit tests ///////////////
3839 
3840 #ifndef PRODUCT
3841 
3842 class TestMetaspaceAuxTest : AllStatic {
3843  public:
3844   static void test_reserved() {
3845     size_t reserved = MetaspaceAux::reserved_bytes();
3846 
3847     assert(reserved > 0, "assert");
3848 
3849     size_t committed  = MetaspaceAux::committed_bytes();
3850     assert(committed <= reserved, "assert");
3851 
3852     size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3853     assert(reserved_metadata > 0, "assert");
3854     assert(reserved_metadata <= reserved, "assert");
3855 
3856     if (UseCompressedClassPointers) {
3857       size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3858       assert(reserved_class > 0, "assert");
3859       assert(reserved_class < reserved, "assert");
3860     }
3861   }
3862 
3863   static void test_committed() {
3864     size_t committed = MetaspaceAux::committed_bytes();
3865 
3866     assert(committed > 0, "assert");
3867 
3868     size_t reserved  = MetaspaceAux::reserved_bytes();
3869     assert(committed <= reserved, "assert");
3870 
3871     size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3872     assert(committed_metadata > 0, "assert");
3873     assert(committed_metadata <= committed, "assert");
3874 
3875     if (UseCompressedClassPointers) {
3876       size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3877       assert(committed_class > 0, "assert");
3878       assert(committed_class < committed, "assert");
3879     }
3880   }
3881 
3882   static void test_virtual_space_list_large_chunk() {
3883     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3884     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3885     // Use a size larger than VirtualSpaceSize (256k) and add one page so that it is _not_
3886     // vm_allocation_granularity aligned on Windows.
3887     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3888     large_size += (os::vm_page_size()/BytesPerWord);
3889     vs_list->get_new_chunk(large_size, 0);
3890   }
3891 
3892   static void test() {
3893     test_reserved();
3894     test_committed();
3895     test_virtual_space_list_large_chunk();
3896   }
3897 };
3898 
3899 void TestMetaspaceAux_test() {
3900   TestMetaspaceAuxTest::test();
3901 }
3902 
3903 class TestVirtualSpaceNodeTest {
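       // Greedily split words_left into medium, then small, then specialized chunks.
       // The chunk sizes divide each other evenly (see the STATIC_ASSERTs in test()),
       // so nothing should be left over.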
3904   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3905                                           size_t& num_small_chunks,
3906                                           size_t& num_specialized_chunks) {
3907     num_medium_chunks = words_left / MediumChunk;
3908     words_left = words_left % MediumChunk;
3909 
3910     num_small_chunks = words_left / SmallChunk;
3911     words_left = words_left % SmallChunk;
3912     // how many specialized chunks can we get?
3913     num_specialized_chunks = words_left / SpecializedChunk;
3914     assert(words_left % SpecializedChunk == 0, "should be nothing left");
3915   }
3916 
3917  public:
3918   static void test() {
3919     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3920     const size_t vsn_test_size_words = MediumChunk  * 4;
3921     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3922 
3923     // The chunk sizes must be multiples of each other, or this will fail
3924     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3925     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3926 
3927     { // No committed memory in VSN
3928       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3929       VirtualSpaceNode vsn(vsn_test_size_bytes);
3930       vsn.initialize();
3931       vsn.retire(&cm);
3932       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3933     }
3934 
3935     { // All of VSN is committed, half is used by chunks
3936       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3937       VirtualSpaceNode vsn(vsn_test_size_bytes);
3938       vsn.initialize();
3939       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3940       vsn.get_chunk_vs(MediumChunk);
3941       vsn.get_chunk_vs(MediumChunk);
3942       vsn.retire(&cm);
3943       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3944       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3945     }
3946 
3947     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3948     // This doesn't work for systems with vm_page_size >= 16K.
3949     if (page_chunks < MediumChunk) {
3950       // 4 pages of VSN is committed, some is used by chunks
3951       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3952       VirtualSpaceNode vsn(vsn_test_size_bytes);
3953 
3954       vsn.initialize();
3955       vsn.expand_by(page_chunks, page_chunks);
3956       vsn.get_chunk_vs(SmallChunk);
3957       vsn.get_chunk_vs(SpecializedChunk);
3958       vsn.retire(&cm);
3959 
3960       // committed - used = words left to retire
3961       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3962 
3963       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3964       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3965 
3966       assert(num_medium_chunks == 0, "should not get any medium chunks");
3967       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3968       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3969     }
3970 
3971     { // Half of VSN is committed, a humongous chunk is used
3972       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3973       VirtualSpaceNode vsn(vsn_test_size_bytes);
3974       vsn.initialize();
3975       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3976       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3977       vsn.retire(&cm);
3978 
3979       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3980       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3981       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3982 
3983       assert(num_medium_chunks == 0, "should not get any medium chunks");
3984       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3985       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3986     }
3987 
3988   }
3989 
3990 #define assert_is_available_positive(word_size) \
3991   assert(vsn.is_available(word_size), \
3992          #word_size ": " PTR_FORMAT " bytes were not available in " \
3993          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3994          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
3995 
3996 #define assert_is_available_negative(word_size) \
3997   assert(!vsn.is_available(word_size), \
3998          #word_size ": " PTR_FORMAT " bytes should not be available in " \
3999          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
4000          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
4001 
4002   static void test_is_available_positive() {
4003     // Reserve some memory.
4004     VirtualSpaceNode vsn(os::vm_allocation_granularity());
4005     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4006 
4007     // Commit some memory.
4008     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4009     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4010     assert(expanded, "Failed to commit");
4011 
4012     // Check that is_available accepts the committed size.
4013     assert_is_available_positive(commit_word_size);
4014 
4015     // Check that is_available accepts half the committed size.
4016     size_t expand_word_size = commit_word_size / 2;
4017     assert_is_available_positive(expand_word_size);
4018   }
4019 
4020   static void test_is_available_negative() {
4021     // Reserve some memory.
4022     VirtualSpaceNode vsn(os::vm_allocation_granularity());
4023     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4024 
4025     // Commit some memory.
4026     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4027     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4028     assert(expanded, "Failed to commit");
4029 
4030     // Check that is_available doesn't accept a too large size.
4031     size_t two_times_commit_word_size = commit_word_size * 2;
4032     assert_is_available_negative(two_times_commit_word_size);
4033   }
4034 
4035   static void test_is_available_overflow() {
4036     // Reserve some memory.
4037     VirtualSpaceNode vsn(os::vm_allocation_granularity());
4038     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4039 
4040     // Commit some memory.
4041     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4042     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4043     assert(expanded, "Failed to commit");
4044 
4045     // Calculate a size that will overflow the virtual space size.
4046     void* virtual_space_max = (void*)(uintptr_t)-1;
4047     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
4048     size_t overflow_size = bottom_to_max + BytesPerWord;
4049     size_t overflow_word_size = overflow_size / BytesPerWord;
4050 
4051     // Check that is_available can handle the overflow.
4052     assert_is_available_negative(overflow_word_size);
4053   }
4054 
4055   static void test_is_available() {
4056     TestVirtualSpaceNodeTest::test_is_available_positive();
4057     TestVirtualSpaceNodeTest::test_is_available_negative();
4058     TestVirtualSpaceNodeTest::test_is_available_overflow();
4059   }
4060 };
4061 
4062 void TestVirtualSpaceNode_test() {
4063   TestVirtualSpaceNodeTest::test();
4064   TestVirtualSpaceNodeTest::test_is_available();
4065 }
4066 
4067 // The following test is placed here instead of a gtest / unittest file
4068 // because the ChunkManager class is only available in this file.
4069 void ChunkManager_test_list_index() {
4070   ChunkManager manager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
4071 
4072   // Test a previous bug where a query for a humongous class metachunk
4073   // incorrectly matched the non-class medium metachunk size.
4074   {
4075     assert(MediumChunk > ClassMediumChunk, "Precondition for test");
4076 
4077     ChunkIndex index = manager.list_index(MediumChunk);
4078 
4079     assert(index == HumongousIndex,
4080            "Requested size is larger than ClassMediumChunk,"
4081            " so should return HumongousIndex. Got index: %d", (int)index);
4082   }
4083 
4084   // Check the specified sizes as well.
4085   {
4086     ChunkIndex index = manager.list_index(ClassSpecializedChunk);
4087     assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
4088   }
4089   {
4090     ChunkIndex index = manager.list_index(ClassSmallChunk);
4091     assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
4092   }
4093   {
4094     ChunkIndex index = manager.list_index(ClassMediumChunk);
4095     assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
4096   }
4097   {
4098     ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
4099     assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index);
4100   }
4101 }
4102 
4103 #endif // !PRODUCT
4104 
4105 #ifdef ASSERT
4106 
4107 // ChunkManagerReturnTest stresses taking/returning chunks from the ChunkManager. It takes and
4108 // returns chunks from/to the ChunkManager while keeping track of the expected ChunkManager
4109 // content.
4110 class ChunkManagerReturnTestImpl {
4111 
4112   VirtualSpaceNode _vsn;
4113   ChunkManager _cm;
4114 
4115   // The expected content of the chunk manager.
4116   unsigned _chunks_in_chunkmanager;
4117   size_t _words_in_chunkmanager;
4118 
4119   // A fixed size pool of chunks. Chunks may be in the chunk manager (free) or not (in use).
4120   static const int num_chunks = 256;
4121   Metachunk* _pool[num_chunks];
4122 
4123   // Helper, return a random position into the chunk pool.
4124   static int get_random_position() {
4125     return os::random() % num_chunks;
4126   }
4127 
4128   // Asserts that ChunkManager counters match expectations.
4129   void assert_counters() {
4130     assert(_vsn.container_count() == num_chunks - _chunks_in_chunkmanager, "vsn counter mismatch.");
4131     assert(_cm.free_chunks_count() == _chunks_in_chunkmanager, "cm counter mismatch.");
4132     assert(_cm.free_chunks_total_words() == _words_in_chunkmanager, "cm counter mismatch.");
4133   }
4134 
4135   // Get a random chunk size. Equal chance to get spec/med/small chunk size or
4136   // a humongous chunk size. The latter itself is random in the range of [med+spec..4*med).
4137   size_t get_random_chunk_size() {
4138     const size_t sizes [] = { SpecializedChunk, SmallChunk, MediumChunk };
4139     const int rand = os::random() % 4;
4140     if (rand < 3) {
4141       return sizes[rand];
4142     } else {
4143       // Note: this affects the max. size of space (see _vsn initialization in ctor).
4144       return align_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk);
4145     }
4146   }
4147 
4148   // Starting at pool index <start>+1, find the next chunk tagged as either free or in use, depending
4149   // on <is_free>. Search wraps. Returns its position, or -1 if no matching chunk was found.
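  // For illustration: if num_chunks were 8 and start were 5, positions would be probed in the
  // order 6, 7, 0, 1, 2, 3, 4, 5; the first chunk whose is_tagged_free() equals <is_free> is
  // returned, or -1 after one full wrap without a match.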
4150   int next_matching_chunk(int start, bool is_free) const {
4151     assert(start >= 0 && start < num_chunks, "invalid parameter");
4152     int pos = start;
4153     do {
4154       if (++pos == num_chunks) {
4155         pos = 0;
4156       }
4157       if (_pool[pos]->is_tagged_free() == is_free) {
4158         return pos;
4159       }
4160     } while (pos != start);
4161     return -1;
4162   }
4163 
  // A structure to keep information about a chunk list, including which
  // chunks are part of it. This is needed to remember the contents of a chunk list
  // we are about to return to the ChunkManager, because the original list will be destroyed.
4167   struct AChunkList {
    Metachunk* head;             // head of the chained chunk list
    Metachunk* all[num_chunks];  // all chunks in the list, in the order they were chained
    size_t size;                 // total word size of all chunks in the list
    int num;                     // number of chunks in the list
    ChunkIndex index;            // common ChunkIndex (chunk size class) of all chunks in the list
4173   };
4174 
  // Assemble, from the in-use chunks (those not in the chunk manager) in the pool,
  // a random chunk list of at most <list_size> chunks, all with the same
  // ChunkIndex (chunk size).
  // Returns false if no such list can be assembled. The list is returned in the <out>
  // structure; it may contain fewer than <list_size> chunks.
4180   bool assemble_random_chunklist(AChunkList* out, int list_size) {
4181     // Choose a random in-use chunk from the pool...
4182     const int headpos = next_matching_chunk(get_random_position(), false);
4183     if (headpos == -1) {
4184       return false;
4185     }
4186     Metachunk* const head = _pool[headpos];
4187     out->all[0] = head;
4188     assert(head->is_tagged_free() == false, "Chunk state mismatch");
    // ...then, starting from there, chain up to list_size - 1 other in-use chunks
    // of the same index onto it.
4191     const ChunkIndex index = _cm.list_index(head->word_size());
4192     int num_added = 1;
4193     size_t size_added = head->word_size();
4194     int pos = headpos;
4195     Metachunk* tail = head;
4196     do {
4197       pos = next_matching_chunk(pos, false);
4198       if (pos != headpos) {
4199         Metachunk* c = _pool[pos];
4200         assert(c->is_tagged_free() == false, "Chunk state mismatch");
4201         if (index == _cm.list_index(c->word_size())) {
4202           tail->set_next(c);
4203           c->set_prev(tail);
4204           tail = c;
4205           out->all[num_added] = c;
4206           num_added ++;
4207           size_added += c->word_size();
4208         }
4209       }
4210     } while (num_added < list_size && pos != headpos);
4211     out->head = head;
4212     out->index = index;
4213     out->size = size_added;
4214     out->num = num_added;
4215     return true;
4216   }
4217 
4218   // Take a single random chunk from the ChunkManager.
4219   bool take_single_random_chunk_from_chunkmanager() {
4220     assert_counters();
4221     _cm.locked_verify();
4222     int pos = next_matching_chunk(get_random_position(), true);
4223     if (pos == -1) {
4224       return false;
4225     }
4226     Metachunk* c = _pool[pos];
4227     assert(c->is_tagged_free(), "Chunk state mismatch");
4228     // Note: instead of using ChunkManager::remove_chunk on this one chunk, we call
4229     // ChunkManager::free_chunks_get() with this chunk's word size. We really want
4230     // to exercise ChunkManager::free_chunks_get() because that one gets called for
4231     // normal chunk allocation.
4232     Metachunk* c2 = _cm.free_chunks_get(c->word_size());
4233     assert(c2 != NULL, "Unexpected.");
4234     assert(!c2->is_tagged_free(), "Chunk state mismatch");
4235     assert(c2->next() == NULL && c2->prev() == NULL, "Chunk should be outside of a list.");
4236     _chunks_in_chunkmanager --;
4237     _words_in_chunkmanager -= c->word_size();
4238     assert_counters();
4239     _cm.locked_verify();
4240     return true;
4241   }
4242 
4243   // Returns a single random chunk to the chunk manager. Returns false if that
4244   // was not possible (all chunks are already in the chunk manager).
4245   bool return_single_random_chunk_to_chunkmanager() {
4246     assert_counters();
4247     _cm.locked_verify();
4248     int pos = next_matching_chunk(get_random_position(), false);
4249     if (pos == -1) {
4250       return false;
4251     }
4252     Metachunk* c = _pool[pos];
4253     assert(c->is_tagged_free() == false, "wrong chunk information");
4254     _cm.return_single_chunk(_cm.list_index(c->word_size()), c);
4255     _chunks_in_chunkmanager ++;
4256     _words_in_chunkmanager += c->word_size();
4257     assert(c->is_tagged_free() == true, "wrong chunk information");
4258     assert_counters();
4259     _cm.locked_verify();
4260     return true;
4261   }
4262 
4263   // Return a random chunk list to the chunk manager. Returns the length of the
4264   // returned list.
4265   int return_random_chunk_list_to_chunkmanager(int list_size) {
4266     assert_counters();
4267     _cm.locked_verify();
4268     AChunkList aChunkList;
4269     if (!assemble_random_chunklist(&aChunkList, list_size)) {
4270       return 0;
4271     }
    // Before the chunks are returned, they should be tagged as in use.
4273     for (int i = 0; i < aChunkList.num; i ++) {
4274       assert(!aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4275     }
4276     _cm.return_chunk_list(aChunkList.index, aChunkList.head);
4277     _chunks_in_chunkmanager += aChunkList.num;
4278     _words_in_chunkmanager += aChunkList.size;
4279     // After all chunks are returned, check that they are now tagged free.
4280     for (int i = 0; i < aChunkList.num; i ++) {
4281       assert(aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4282     }
4283     assert_counters();
4284     _cm.locked_verify();
4285     return aChunkList.num;
4286   }
4287 
4288 public:
4289 
4290   ChunkManagerReturnTestImpl()
4291     : _vsn(align_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
4292     , _cm(SpecializedChunk, SmallChunk, MediumChunk)
4293     , _chunks_in_chunkmanager(0)
4294     , _words_in_chunkmanager(0)
4295   {
4296     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    // Allocate virtual space and carve out random chunks, keeping them in the _pool. These chunks
    // count as "in use", because they have not yet been added to any chunk manager.
4299     _vsn.initialize();
4300     _vsn.expand_by(_vsn.reserved_words(), _vsn.reserved_words());
4301     for (int i = 0; i < num_chunks; i ++) {
4302       const size_t size = get_random_chunk_size();
4303       _pool[i] = _vsn.get_chunk_vs(size);
4304       assert(_pool[i] != NULL, "allocation failed");
4305     }
4306     assert_counters();
4307     _cm.locked_verify();
4308   }
4309 
4310   // Test entry point.
  // Return some chunks to the chunk manager (return phase). Take some chunks out (take phase). Repeat.
  // Chunks are chosen randomly. The number of chunks to return or take per phase is also chosen randomly,
  // but is affected by the <phase_length_factor> argument: a factor of 0.0 will cause the test to quickly
  // alternate between returning and taking, whereas a factor of 1.0 will take/return all chunks from/to
  // the chunk manager, thereby emptying or filling it completely.
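  // For example, with num_chunks == 256 and phase_length_factor == 0.5, each phase moves
  // roughly 128 chunks (plus or minus a small random jitter) before switching direction.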
4316   void do_test(float phase_length_factor) {
4317     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4318     assert_counters();
    // Execute n operations, each operation being the move of a single chunk to/from the chunk manager.
4320     const int num_max_ops = num_chunks * 100;
4321     int num_ops = num_max_ops;
4322     const int average_phase_length = (int)(phase_length_factor * num_chunks);
4323     int num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
4324     bool return_phase = true;
4325     while (num_ops > 0) {
4326       int chunks_moved = 0;
4327       if (return_phase) {
        // Randomly choose between returning a single chunk and returning a random-length chunk list.
4329         if (os::random() % 2 == 0) {
4330           if (return_single_random_chunk_to_chunkmanager()) {
4331             chunks_moved = 1;
4332           }
4333         } else {
4334           const int list_length = MAX2(1, (os::random() % num_ops_until_switch));
4335           chunks_moved = return_random_chunk_list_to_chunkmanager(list_length);
4336         }
4337       } else {
        // Breathe out: take a single chunk out of the chunk manager.
4339         if (take_single_random_chunk_from_chunkmanager()) {
4340           chunks_moved = 1;
4341         }
4342       }
4343       num_ops -= chunks_moved;
4344       num_ops_until_switch -= chunks_moved;
4345       if (chunks_moved == 0 || num_ops_until_switch <= 0) {
4346         return_phase = !return_phase;
4347         num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
4348       }
4349     }
4350   }
4351 };
4352 
4353 void* setup_chunkmanager_returntests() {
4354   ChunkManagerReturnTestImpl* p = new ChunkManagerReturnTestImpl();
4355   return p;
4356 }
4357 
4358 void teardown_chunkmanager_returntests(void* p) {
4359   delete (ChunkManagerReturnTestImpl*) p;
4360 }
4361 
4362 void run_chunkmanager_returntests(void* p, float phase_length) {
4363   ChunkManagerReturnTestImpl* test = (ChunkManagerReturnTestImpl*) p;
4364   test->do_test(phase_length);
4365 }
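// A minimal usage sketch for the three hooks above (hypothetical external harness code, for
// example a gtest wrapper living in a separate test file; everything other than the three
// hook names is an assumption):
//
//   void* t = setup_chunkmanager_returntests();
//   for (float f = 0.0f; f <= 1.0f; f += 0.1f) {
//     run_chunkmanager_returntests(t, f);
//   }
//   teardown_chunkmanager_returntests(t);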
4366 
// The following test is placed here instead of a gtest / unittest file
// because the SpaceManager class is only available in this file.
4369 class SpaceManagerTest : AllStatic {
4370   friend void SpaceManager_test_adjust_initial_chunk_size();
4371 
4372   static void test_adjust_initial_chunk_size(bool is_class) {
4373     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
4374     const size_t normal   = SpaceManager::small_chunk_size(is_class);
4375     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
4376 
4377 #define test_adjust_initial_chunk_size(value, expected, is_class_value)          \
4378     do {                                                                         \
4379       size_t v = value;                                                          \
4380       size_t e = expected;                                                       \
      size_t a = SpaceManager::adjust_initial_chunk_size(v, (is_class_value));   \
      assert(a == e, "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, a);       \
4383     } while (0)
4384 
4385     // Smallest (specialized)
4386     test_adjust_initial_chunk_size(1,            smallest, is_class);
4387     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
4388     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
4389 
4390     // Small
4391     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
4392     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
4393     test_adjust_initial_chunk_size(normal,       normal, is_class);
4394 
4395     // Medium
4396     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
4397     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
4398     test_adjust_initial_chunk_size(medium,     medium, is_class);
4399 
4400     // Humongous
4401     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
4402 
4403 #undef test_adjust_initial_chunk_size
4404   }
4405 
4406   static void test_adjust_initial_chunk_size() {
4407     test_adjust_initial_chunk_size(false);
4408     test_adjust_initial_chunk_size(true);
4409   }
4410 };
4411 
4412 void SpaceManager_test_adjust_initial_chunk_size() {
4413   SpaceManagerTest::test_adjust_initial_chunk_size();
4414 }
4415 
4416 #endif // ASSERT