1 /*
   2  * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "gc/shared/gcLocker.hpp"
  29 #include "logging/log.hpp"
  30 #include "logging/logStream.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/binaryTreeDictionary.hpp"
  33 #include "memory/filemap.hpp"
  34 #include "memory/freeList.hpp"
  35 #include "memory/metachunk.hpp"
  36 #include "memory/metaspace.hpp"
  37 #include "memory/metaspaceGCThresholdUpdater.hpp"
  38 #include "memory/metaspaceShared.hpp"
  39 #include "memory/metaspaceTracer.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "memory/universe.hpp"
  42 #include "runtime/atomic.hpp"
  43 #include "runtime/globals.hpp"
  44 #include "runtime/init.hpp"
  45 #include "runtime/java.hpp"
  46 #include "runtime/mutex.hpp"
  47 #include "runtime/orderAccess.inline.hpp"
  48 #include "services/memTracker.hpp"
  49 #include "services/memoryService.hpp"
  50 #include "utilities/align.hpp"
  51 #include "utilities/copy.hpp"
  52 #include "utilities/debug.hpp"
  53 #include "utilities/macros.hpp"
  54 
  55 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  56 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
  57 
  58 // Set this constant to enable slow integrity checking of the free chunk lists
  59 const bool metaspace_slow_verify = false;
  60 
  61 size_t const allocation_from_dictionary_limit = 4 * K;
  62 
  63 MetaWord* last_allocated = 0;
  64 
  65 size_t Metaspace::_compressed_class_space_size;
  66 const MetaspaceTracer* Metaspace::_tracer = NULL;
  67 
  68 DEBUG_ONLY(bool Metaspace::_frozen = false;)
  69 
  70 // Used in declarations in SpaceManager and ChunkManager
  71 enum ChunkIndex {
  72   ZeroIndex = 0,
  73   SpecializedIndex = ZeroIndex,
  74   SmallIndex = SpecializedIndex + 1,
  75   MediumIndex = SmallIndex + 1,
  76   HumongousIndex = MediumIndex + 1,
  77   NumberOfFreeLists = 3,
  78   NumberOfInUseLists = 4
  79 };
  80 
  81 // Helper, returns a descriptive name for the given index.
  82 static const char* chunk_size_name(ChunkIndex index) {
  83   switch (index) {
  84     case SpecializedIndex:
  85       return "specialized";
  86     case SmallIndex:
  87       return "small";
  88     case MediumIndex:
  89       return "medium";
  90     case HumongousIndex:
  91       return "humongous";
  92     default:
  93       return "Invalid index";
  94   }
  95 }
  96 
  97 enum ChunkSizes {    // in words.
  98   ClassSpecializedChunk = 128,
  99   SpecializedChunk = 128,
 100   ClassSmallChunk = 256,
 101   SmallChunk = 512,
 102   ClassMediumChunk = 4 * K,
 103   MediumChunk = 8 * K
 104 };
 105 
 106 static ChunkIndex next_chunk_index(ChunkIndex i) {
 107   assert(i < NumberOfInUseLists, "Out of bound");
 108   return (ChunkIndex) (i+1);
 109 }
 110 
 111 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 112 uint MetaspaceGC::_shrink_factor = 0;
 113 bool MetaspaceGC::_should_concurrent_collect = false;
 114 
 115 typedef class FreeList<Metachunk> ChunkList;
 116 
 117 // Manages the global free lists of chunks.
 118 class ChunkManager : public CHeapObj<mtInternal> {
 119   friend class TestVirtualSpaceNodeTest;
 120 
 121   // Free list of chunks of different sizes.
 122   //   SpecializedChunk
 123   //   SmallChunk
 124   //   MediumChunk
 125   ChunkList _free_chunks[NumberOfFreeLists];
 126 
 127   // Return non-humongous chunk list by its index.
 128   ChunkList* free_chunks(ChunkIndex index);
 129 
 130   // Returns non-humongous chunk list for the given chunk word size.
 131   ChunkList* find_free_chunks_list(size_t word_size);
 132 
 133   //   HumongousChunk
 134   ChunkTreeDictionary _humongous_dictionary;
 135 
 136   // Returns the humongous chunk dictionary.
 137   ChunkTreeDictionary* humongous_dictionary() {
 138     return &_humongous_dictionary;
 139   }
 140 
 141   // Size, in metaspace words, of all chunks managed by this ChunkManager
 142   size_t _free_chunks_total;
 143   // Number of chunks in this ChunkManager
 144   size_t _free_chunks_count;
 145 
 146   // Update counters after a chunk was added or removed removed.
 147   void account_for_added_chunk(const Metachunk* c);
 148   void account_for_removed_chunk(const Metachunk* c);
 149 
 150   // Debug support
 151 
 152   size_t sum_free_chunks();
 153   size_t sum_free_chunks_count();
 154 
 155   void locked_verify_free_chunks_total();
 156   void slow_locked_verify_free_chunks_total() {
 157     if (metaspace_slow_verify) {
 158       locked_verify_free_chunks_total();
 159     }
 160   }
 161   void locked_verify_free_chunks_count();
 162   void slow_locked_verify_free_chunks_count() {
 163     if (metaspace_slow_verify) {
 164       locked_verify_free_chunks_count();
 165     }
 166   }
 167   void verify_free_chunks_count();
 168 
 169   struct ChunkManagerStatistics {
 170     size_t num_by_type[NumberOfFreeLists];
 171     size_t single_size_by_type[NumberOfFreeLists];
 172     size_t total_size_by_type[NumberOfFreeLists];
 173     size_t num_humongous_chunks;
 174     size_t total_size_humongous_chunks;
 175   };
 176 
 177   void locked_get_statistics(ChunkManagerStatistics* stat) const;
 178   void get_statistics(ChunkManagerStatistics* stat) const;
 179   static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out);
 180 
 181  public:
 182 
 183   ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
 184       : _free_chunks_total(0), _free_chunks_count(0) {
 185     _free_chunks[SpecializedIndex].set_size(specialized_size);
 186     _free_chunks[SmallIndex].set_size(small_size);
 187     _free_chunks[MediumIndex].set_size(medium_size);
 188   }
 189 
 190   // add or delete (return) a chunk to the global freelist.
 191   Metachunk* chunk_freelist_allocate(size_t word_size);
 192 
 193   // Map a size to a list index assuming that there are lists
 194   // for special, small, medium, and humongous chunks.
 195   ChunkIndex list_index(size_t size);
 196 
 197   // Map a given index to the chunk size.
 198   size_t size_by_index(ChunkIndex index) const;
 199 
 200   // Take a chunk from the ChunkManager. The chunk is expected to be in
 201   // the chunk manager (the freelist if non-humongous, the dictionary if
 202   // humongous).
 203   void remove_chunk(Metachunk* chunk);
 204 
 205   // Return a single chunk of type index to the ChunkManager.
 206   void return_single_chunk(ChunkIndex index, Metachunk* chunk);
 207 
 208   // Add the simple linked list of chunks to the freelist of chunks
 209   // of type index.
 210   void return_chunk_list(ChunkIndex index, Metachunk* chunk);
 211 
 212   // Total of the space in the free chunks list
 213   size_t free_chunks_total_words();
 214   size_t free_chunks_total_bytes();
 215 
 216   // Number of chunks in the free chunks list
 217   size_t free_chunks_count();
 218 
 219   // Remove from a list by size.  Selects list based on size of chunk.
 220   Metachunk* free_chunks_get(size_t chunk_word_size);
 221 
 222 #define index_bounds_check(index)                                         \
 223   assert(index == SpecializedIndex ||                                     \
 224          index == SmallIndex ||                                           \
 225          index == MediumIndex ||                                          \
 226          index == HumongousIndex, "Bad index: %d", (int) index)
 227 
 228   size_t num_free_chunks(ChunkIndex index) const {
 229     index_bounds_check(index);
 230 
 231     if (index == HumongousIndex) {
 232       return _humongous_dictionary.total_free_blocks();
 233     }
 234 
 235     ssize_t count = _free_chunks[index].count();
 236     return count == -1 ? 0 : (size_t) count;
 237   }
 238 
 239   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 240     index_bounds_check(index);
 241 
 242     size_t word_size = 0;
 243     if (index == HumongousIndex) {
 244       word_size = _humongous_dictionary.total_size();
 245     } else {
 246       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 247       word_size = size_per_chunk_in_words * num_free_chunks(index);
 248     }
 249 
 250     return word_size * BytesPerWord;
 251   }
 252 
 253   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 254     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 255                                          num_free_chunks(SmallIndex),
 256                                          num_free_chunks(MediumIndex),
 257                                          num_free_chunks(HumongousIndex),
 258                                          size_free_chunks_in_bytes(SpecializedIndex),
 259                                          size_free_chunks_in_bytes(SmallIndex),
 260                                          size_free_chunks_in_bytes(MediumIndex),
 261                                          size_free_chunks_in_bytes(HumongousIndex));
 262   }
 263 
 264   // Debug support
 265   void verify();
 266   void slow_verify() {
 267     if (metaspace_slow_verify) {
 268       verify();
 269     }
 270   }
 271   void locked_verify();
 272   void slow_locked_verify() {
 273     if (metaspace_slow_verify) {
 274       locked_verify();
 275     }
 276   }
 277   void verify_free_chunks_total();
 278 
 279   void locked_print_free_chunks(outputStream* st);
 280   void locked_print_sum_free_chunks(outputStream* st);
 281 
 282   void print_on(outputStream* st) const;
 283 
 284   // Prints composition for both non-class and (if available)
 285   // class chunk manager.
 286   static void print_all_chunkmanagers(outputStream* out);
 287 };
 288 
 289 class SmallBlocks : public CHeapObj<mtClass> {
 290   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 291   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 292 
 293  private:
 294   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 295 
 296   FreeList<Metablock>& list_at(size_t word_size) {
 297     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 298     return _small_lists[word_size - _small_block_min_size];
 299   }
 300 
 301  public:
 302   SmallBlocks() {
 303     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 304       uint k = i - _small_block_min_size;
 305       _small_lists[k].set_size(i);
 306     }
 307   }
 308 
 309   size_t total_size() const {
 310     size_t result = 0;
 311     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 312       uint k = i - _small_block_min_size;
 313       result = result + _small_lists[k].count() * _small_lists[k].size();
 314     }
 315     return result;
 316   }
 317 
 318   static uint small_block_max_size() { return _small_block_max_size; }
 319   static uint small_block_min_size() { return _small_block_min_size; }
 320 
 321   MetaWord* get_block(size_t word_size) {
 322     if (list_at(word_size).count() > 0) {
 323       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 324       return new_block;
 325     } else {
 326       return NULL;
 327     }
 328   }
 329   void return_block(Metablock* free_chunk, size_t word_size) {
 330     list_at(word_size).return_chunk_at_head(free_chunk, false);
 331     assert(list_at(word_size).count() > 0, "Should have a chunk");
 332   }
 333 
 334   void print_on(outputStream* st) const {
 335     st->print_cr("SmallBlocks:");
 336     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 337       uint k = i - _small_block_min_size;
 338       st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
 339     }
 340   }
 341 };
 342 
 343 // Used to manage the free list of Metablocks (a block corresponds
 344 // to the allocation of a quantum of metadata).
 345 class BlockFreelist : public CHeapObj<mtClass> {
 346   BlockTreeDictionary* const _dictionary;
 347   SmallBlocks* _small_blocks;
 348 
 349   // Only allocate and split from freelist if the size of the allocation
 350   // is at least 1/4th the size of the available block.
 351   const static int WasteMultiplier = 4;
 352 
 353   // Accessors
 354   BlockTreeDictionary* dictionary() const { return _dictionary; }
 355   SmallBlocks* small_blocks() {
 356     if (_small_blocks == NULL) {
 357       _small_blocks = new SmallBlocks();
 358     }
 359     return _small_blocks;
 360   }
 361 
 362  public:
 363   BlockFreelist();
 364   ~BlockFreelist();
 365 
 366   // Get and return a block to the free list
 367   MetaWord* get_block(size_t word_size);
 368   void return_block(MetaWord* p, size_t word_size);
 369 
 370   size_t total_size() const  {
 371     size_t result = dictionary()->total_size();
 372     if (_small_blocks != NULL) {
 373       result = result + _small_blocks->total_size();
 374     }
 375     return result;
 376   }
 377 
 378   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 379   void print_on(outputStream* st) const;
 380 };
 381 
 382 // A VirtualSpaceList node.
 383 class VirtualSpaceNode : public CHeapObj<mtClass> {
 384   friend class VirtualSpaceList;
 385 
 386   // Link to next VirtualSpaceNode
 387   VirtualSpaceNode* _next;
 388 
 389   // total in the VirtualSpace
 390   MemRegion _reserved;
 391   ReservedSpace _rs;
 392   VirtualSpace _virtual_space;
 393   MetaWord* _top;
 394   // count of chunks contained in this VirtualSpace
 395   uintx _container_count;
 396 
 397   // Convenience functions to access the _virtual_space
 398   char* low()  const { return virtual_space()->low(); }
 399   char* high() const { return virtual_space()->high(); }
 400 
 401   // The first Metachunk will be allocated at the bottom of the
 402   // VirtualSpace
 403   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 404 
 405   // Committed but unused space in the virtual space
 406   size_t free_words_in_vs() const;
 407  public:
 408 
 409   VirtualSpaceNode(size_t byte_size);
 410   VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
 411   ~VirtualSpaceNode();
 412 
 413   // Convenience functions for logical bottom and end
 414   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 415   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 416 
 417   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 418 
 419   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 420   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 421 
 422   bool is_pre_committed() const { return _virtual_space.special(); }
 423 
 424   // address of next available space in _virtual_space;
 425   // Accessors
 426   VirtualSpaceNode* next() { return _next; }
 427   void set_next(VirtualSpaceNode* v) { _next = v; }
 428 
 429   void set_reserved(MemRegion const v) { _reserved = v; }
 430   void set_top(MetaWord* v) { _top = v; }
 431 
 432   // Accessors
 433   MemRegion* reserved() { return &_reserved; }
 434   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 435 
 436   // Returns true if "word_size" is available in the VirtualSpace
 437   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 438 
 439   MetaWord* top() const { return _top; }
 440   void inc_top(size_t word_size) { _top += word_size; }
 441 
 442   uintx container_count() { return _container_count; }
 443   void inc_container_count();
 444   void dec_container_count();
 445 #ifdef ASSERT
 446   uintx container_count_slow();
 447   void verify_container_count();
 448 #endif
 449 
 450   // used and capacity in this single entry in the list
 451   size_t used_words_in_vs() const;
 452   size_t capacity_words_in_vs() const;
 453 
 454   bool initialize();
 455 
 456   // get space from the virtual space
 457   Metachunk* take_from_committed(size_t chunk_word_size);
 458 
 459   // Allocate a chunk from the virtual space and return it.
 460   Metachunk* get_chunk_vs(size_t chunk_word_size);
 461 
 462   // Expands/shrinks the committed space in a virtual space.  Delegates
 463   // to Virtualspace
 464   bool expand_by(size_t min_words, size_t preferred_words);
 465 
 466   // In preparation for deleting this node, remove all the chunks
 467   // in the node from any freelist.
 468   void purge(ChunkManager* chunk_manager);
 469 
 470   // If an allocation doesn't fit in the current node a new node is created.
 471   // Allocate chunks out of the remaining committed space in this node
 472   // to avoid wasting that memory.
 473   // This always adds up because all the chunk sizes are multiples of
 474   // the smallest chunk size.
 475   void retire(ChunkManager* chunk_manager);
 476 
 477 #ifdef ASSERT
 478   // Debug support
 479   void mangle();
 480 #endif
 481 
 482   void print_on(outputStream* st) const;
 483 };
 484 
 485 #define assert_is_aligned(value, alignment)                  \
 486   assert(is_aligned((value), (alignment)),                   \
 487          SIZE_FORMAT_HEX " is not aligned to "               \
 488          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 489 
 490 // Decide if large pages should be committed when the memory is reserved.
 491 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 492   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 493     size_t words = bytes / BytesPerWord;
 494     bool is_class = false; // We never reserve large pages for the class space.
 495     if (MetaspaceGC::can_expand(words, is_class) &&
 496         MetaspaceGC::allowed_expansion() >= words) {
 497       return true;
 498     }
 499   }
 500 
 501   return false;
 502 }
 503 
 504   // byte_size is the size of the associated virtualspace.
 505 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
 506   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 507   bool large_pages = should_commit_large_pages_when_reserving(bytes);
 508   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 509 
 510   if (_rs.is_reserved()) {
 511     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 512     assert(_rs.size() != 0, "Catch if we get a 0 size");
 513     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
 514     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
 515 
 516     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 517   }
 518 }
 519 
 520 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 521   Metachunk* chunk = first_chunk();
 522   Metachunk* invalid_chunk = (Metachunk*) top();
 523   while (chunk < invalid_chunk ) {
 524     assert(chunk->is_tagged_free(), "Should be tagged free");
 525     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 526     chunk_manager->remove_chunk(chunk);
 527     assert(chunk->next() == NULL &&
 528            chunk->prev() == NULL,
 529            "Was not removed from its list");
 530     chunk = (Metachunk*) next;
 531   }
 532 }
 533 
 534 #ifdef ASSERT
 535 uintx VirtualSpaceNode::container_count_slow() {
 536   uintx count = 0;
 537   Metachunk* chunk = first_chunk();
 538   Metachunk* invalid_chunk = (Metachunk*) top();
 539   while (chunk < invalid_chunk ) {
 540     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 541     // Don't count the chunks on the free lists.  Those are
 542     // still part of the VirtualSpaceNode but not currently
 543     // counted.
 544     if (!chunk->is_tagged_free()) {
 545       count++;
 546     }
 547     chunk = (Metachunk*) next;
 548   }
 549   return count;
 550 }
 551 #endif
 552 
 553 // List of VirtualSpaces for metadata allocation.
 554 class VirtualSpaceList : public CHeapObj<mtClass> {
 555   friend class VirtualSpaceNode;
 556 
 557   enum VirtualSpaceSizes {
 558     VirtualSpaceSize = 256 * K
 559   };
 560 
 561   // Head of the list
 562   VirtualSpaceNode* _virtual_space_list;
 563   // virtual space currently being used for allocations
 564   VirtualSpaceNode* _current_virtual_space;
 565 
 566   // Is this VirtualSpaceList used for the compressed class space
 567   bool _is_class;
 568 
 569   // Sum of reserved and committed memory in the virtual spaces
 570   size_t _reserved_words;
 571   size_t _committed_words;
 572 
 573   // Number of virtual spaces
 574   size_t _virtual_space_count;
 575 
 576   ~VirtualSpaceList();
 577 
 578   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
 579 
 580   void set_virtual_space_list(VirtualSpaceNode* v) {
 581     _virtual_space_list = v;
 582   }
 583   void set_current_virtual_space(VirtualSpaceNode* v) {
 584     _current_virtual_space = v;
 585   }
 586 
 587   void link_vs(VirtualSpaceNode* new_entry);
 588 
 589   // Get another virtual space and add it to the list.  This
 590   // is typically prompted by a failed attempt to allocate a chunk
 591   // and is typically followed by the allocation of a chunk.
 592   bool create_new_virtual_space(size_t vs_word_size);
 593 
 594   // Chunk up the unused committed space in the current
 595   // virtual space and add the chunks to the free list.
 596   void retire_current_virtual_space();
 597 
 598  public:
 599   VirtualSpaceList(size_t word_size);
 600   VirtualSpaceList(ReservedSpace rs);
 601 
 602   size_t free_bytes();
 603 
 604   Metachunk* get_new_chunk(size_t chunk_word_size,
 605                            size_t suggested_commit_granularity);
 606 
 607   bool expand_node_by(VirtualSpaceNode* node,
 608                       size_t min_words,
 609                       size_t preferred_words);
 610 
 611   bool expand_by(size_t min_words,
 612                  size_t preferred_words);
 613 
 614   VirtualSpaceNode* current_virtual_space() {
 615     return _current_virtual_space;
 616   }
 617 
 618   bool is_class() const { return _is_class; }
 619 
 620   bool initialization_succeeded() { return _virtual_space_list != NULL; }
 621 
 622   size_t reserved_words()  { return _reserved_words; }
 623   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
 624   size_t committed_words() { return _committed_words; }
 625   size_t committed_bytes() { return committed_words() * BytesPerWord; }
 626 
 627   void inc_reserved_words(size_t v);
 628   void dec_reserved_words(size_t v);
 629   void inc_committed_words(size_t v);
 630   void dec_committed_words(size_t v);
 631   void inc_virtual_space_count();
 632   void dec_virtual_space_count();
 633 
 634   bool contains(const void* ptr);
 635 
 636   // Unlink empty VirtualSpaceNodes and free it.
 637   void purge(ChunkManager* chunk_manager);
 638 
 639   void print_on(outputStream* st) const;
 640 
 641   class VirtualSpaceListIterator : public StackObj {
 642     VirtualSpaceNode* _virtual_spaces;
 643    public:
 644     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
 645       _virtual_spaces(virtual_spaces) {}
 646 
 647     bool repeat() {
 648       return _virtual_spaces != NULL;
 649     }
 650 
 651     VirtualSpaceNode* get_next() {
 652       VirtualSpaceNode* result = _virtual_spaces;
 653       if (_virtual_spaces != NULL) {
 654         _virtual_spaces = _virtual_spaces->next();
 655       }
 656       return result;
 657     }
 658   };
 659 };
 660 
 661 class Metadebug : AllStatic {
 662   // Debugging support for Metaspaces
 663   static int _allocation_fail_alot_count;
 664 
 665  public:
 666 
 667   static void init_allocation_fail_alot_count();
 668 #ifdef ASSERT
 669   static bool test_metadata_failure();
 670 #endif
 671 };
 672 
 673 int Metadebug::_allocation_fail_alot_count = 0;
 674 
 675 //  SpaceManager - used by Metaspace to handle allocations
 676 class SpaceManager : public CHeapObj<mtClass> {
 677   friend class Metaspace;
 678   friend class Metadebug;
 679 
 680  private:
 681 
 682   // protects allocations
 683   Mutex* const _lock;
 684 
 685   // Type of metadata allocated.
 686   Metaspace::MetadataType _mdtype;
 687 
 688   // List of chunks in use by this SpaceManager.  Allocations
 689   // are done from the current chunk.  The list is used for deallocating
 690   // chunks when the SpaceManager is freed.
 691   Metachunk* _chunks_in_use[NumberOfInUseLists];
 692   Metachunk* _current_chunk;
 693 
 694   // Maximum number of small chunks to allocate to a SpaceManager
 695   static uint const _small_chunk_limit;
 696 
 697   // Sum of all space in allocated chunks
 698   size_t _allocated_blocks_words;
 699 
 700   // Sum of all allocated chunks
 701   size_t _allocated_chunks_words;
 702   size_t _allocated_chunks_count;
 703 
 704   // Free lists of blocks are per SpaceManager since they
 705   // are assumed to be in chunks in use by the SpaceManager
 706   // and all chunks in use by a SpaceManager are freed when
 707   // the class loader using the SpaceManager is collected.
 708   BlockFreelist* _block_freelists;
 709 
 710   // protects virtualspace and chunk expansions
 711   static const char*  _expand_lock_name;
 712   static const int    _expand_lock_rank;
 713   static Mutex* const _expand_lock;
 714 
 715  private:
 716   // Accessors
 717   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
 718   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
 719     _chunks_in_use[index] = v;
 720   }
 721 
 722   BlockFreelist* block_freelists() const { return _block_freelists; }
 723 
 724   Metaspace::MetadataType mdtype() { return _mdtype; }
 725 
 726   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
 727   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
 728 
 729   Metachunk* current_chunk() const { return _current_chunk; }
 730   void set_current_chunk(Metachunk* v) {
 731     _current_chunk = v;
 732   }
 733 
 734   Metachunk* find_current_chunk(size_t word_size);
 735 
 736   // Add chunk to the list of chunks in use
 737   void add_chunk(Metachunk* v, bool make_current);
 738   void retire_current_chunk();
 739 
 740   Mutex* lock() const { return _lock; }
 741 
 742  protected:
 743   void initialize();
 744 
 745  public:
 746   SpaceManager(Metaspace::MetadataType mdtype,
 747                Mutex* lock);
 748   ~SpaceManager();
 749 
 750   enum ChunkMultiples {
 751     MediumChunkMultiple = 4
 752   };
 753 
 754   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
 755   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
 756   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
 757 
 758   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
 759 
 760   // Accessors
 761   bool is_class() const { return _mdtype == Metaspace::ClassType; }
 762 
 763   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
 764   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
 765   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
 766 
 767   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
 768 
 769   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
 770 
 771   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
 772   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
 773   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
 774   size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
 775   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
 776 
 777   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
 778 
 779   static Mutex* expand_lock() { return _expand_lock; }
 780 
 781   // Increment the per Metaspace and global running sums for Metachunks
 782   // by the given size.  This is used when a Metachunk to added to
 783   // the in-use list.
 784   void inc_size_metrics(size_t words);
 785   // Increment the per Metaspace and global running sums Metablocks by the given
 786   // size.  This is used when a Metablock is allocated.
 787   void inc_used_metrics(size_t words);
 788   // Delete the portion of the running sums for this SpaceManager. That is,
 789   // the globals running sums for the Metachunks and Metablocks are
 790   // decremented for all the Metachunks in-use by this SpaceManager.
 791   void dec_total_from_size_metrics();
 792 
 793   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
 794   // or return the unadjusted size if the requested size is humongous.
 795   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
 796   size_t adjust_initial_chunk_size(size_t requested) const;
 797 
 798   // Get the initial chunks size for this metaspace type.
 799   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
 800 
 801   size_t sum_capacity_in_chunks_in_use() const;
 802   size_t sum_used_in_chunks_in_use() const;
 803   size_t sum_free_in_chunks_in_use() const;
 804   size_t sum_waste_in_chunks_in_use() const;
 805   size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
 806 
 807   size_t sum_count_in_chunks_in_use();
 808   size_t sum_count_in_chunks_in_use(ChunkIndex i);
 809 
 810   Metachunk* get_new_chunk(size_t chunk_word_size);
 811 
 812   // Block allocation and deallocation.
 813   // Allocates a block from the current chunk
 814   MetaWord* allocate(size_t word_size);
 815   // Allocates a block from a small chunk
 816   MetaWord* get_small_chunk_and_allocate(size_t word_size);
 817 
 818   // Helper for allocations
 819   MetaWord* allocate_work(size_t word_size);
 820 
 821   // Returns a block to the per manager freelist
 822   void deallocate(MetaWord* p, size_t word_size);
 823 
 824   // Based on the allocation size and a minimum chunk size,
 825   // returned chunk size (for expanding space for chunk allocation).
 826   size_t calc_chunk_size(size_t allocation_word_size);
 827 
 828   // Called when an allocation from the current chunk fails.
 829   // Gets a new chunk (may require getting a new virtual space),
 830   // and allocates from that chunk.
 831   MetaWord* grow_and_allocate(size_t word_size);
 832 
 833   // Notify memory usage to MemoryService.
 834   void track_metaspace_memory_usage();
 835 
 836   // debugging support.
 837 
 838   void dump(outputStream* const out) const;
 839   void print_on(outputStream* st) const;
 840   void locked_print_chunks_in_use_on(outputStream* st) const;
 841 
 842   void verify();
 843   void verify_chunk_size(Metachunk* chunk);
 844 #ifdef ASSERT
 845   void verify_allocated_blocks_words();
 846 #endif
 847 
 848   // This adjusts the size given to be greater than the minimum allocation size in
 849   // words for data in metaspace.  Esentially the minimum size is currently 3 words.
 850   size_t get_allocation_word_size(size_t word_size) {
 851     size_t byte_size = word_size * BytesPerWord;
 852 
 853     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
 854     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
 855 
 856     size_t raw_word_size = raw_bytes_size / BytesPerWord;
 857     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
 858 
 859     return raw_word_size;
 860   }
 861 };
 862 
 863 uint const SpaceManager::_small_chunk_limit = 4;
 864 
 865 const char* SpaceManager::_expand_lock_name =
 866   "SpaceManager chunk allocation lock";
 867 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
 868 Mutex* const SpaceManager::_expand_lock =
 869   new Mutex(SpaceManager::_expand_lock_rank,
 870             SpaceManager::_expand_lock_name,
 871             Mutex::_allow_vm_block_flag,
 872             Monitor::_safepoint_check_never);
 873 
 874 void VirtualSpaceNode::inc_container_count() {
 875   assert_lock_strong(SpaceManager::expand_lock());
 876   _container_count++;
 877 }
 878 
 879 void VirtualSpaceNode::dec_container_count() {
 880   assert_lock_strong(SpaceManager::expand_lock());
 881   _container_count--;
 882 }
 883 
 884 #ifdef ASSERT
 885 void VirtualSpaceNode::verify_container_count() {
 886   assert(_container_count == container_count_slow(),
 887          "Inconsistency in container_count _container_count " UINTX_FORMAT
 888          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
 889 }
 890 #endif
 891 
 892 // BlockFreelist methods
 893 
 894 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
 895 
 896 BlockFreelist::~BlockFreelist() {
 897   delete _dictionary;
 898   if (_small_blocks != NULL) {
 899     delete _small_blocks;
 900   }
 901 }
 902 
 903 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
 904   assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
 905 
 906   Metablock* free_chunk = ::new (p) Metablock(word_size);
 907   if (word_size < SmallBlocks::small_block_max_size()) {
 908     small_blocks()->return_block(free_chunk, word_size);
 909   } else {
 910   dictionary()->return_chunk(free_chunk);
 911 }
 912   log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
 913             SIZE_FORMAT, p2i(free_chunk), word_size);
 914 }
 915 
 916 MetaWord* BlockFreelist::get_block(size_t word_size) {
 917   assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
 918 
 919   // Try small_blocks first.
 920   if (word_size < SmallBlocks::small_block_max_size()) {
 921     // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
 922     // this space manager.
 923     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
 924     if (new_block != NULL) {
 925       log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
 926               p2i(new_block), word_size);
 927       return new_block;
 928     }
 929   }
 930 
 931   if (word_size < BlockFreelist::min_dictionary_size()) {
 932     // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
 933     return NULL;
 934   }
 935 
 936   Metablock* free_block = dictionary()->get_chunk(word_size);
 937   if (free_block == NULL) {
 938     return NULL;
 939   }
 940 
 941   const size_t block_size = free_block->size();
 942   if (block_size > WasteMultiplier * word_size) {
 943     return_block((MetaWord*)free_block, block_size);
 944     return NULL;
 945   }
 946 
 947   MetaWord* new_block = (MetaWord*)free_block;
 948   assert(block_size >= word_size, "Incorrect size of block from freelist");
 949   const size_t unused = block_size - word_size;
 950   if (unused >= SmallBlocks::small_block_min_size()) {
 951     return_block(new_block + word_size, unused);
 952   }
 953 
 954   log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
 955             p2i(new_block), word_size);
 956   return new_block;
 957 }
 958 
 959 void BlockFreelist::print_on(outputStream* st) const {
 960   dictionary()->print_free_lists(st);
 961   if (_small_blocks != NULL) {
 962     _small_blocks->print_on(st);
 963   }
 964 }
 965 
 966 // VirtualSpaceNode methods
 967 
 968 VirtualSpaceNode::~VirtualSpaceNode() {
 969   _rs.release();
 970 #ifdef ASSERT
 971   size_t word_size = sizeof(*this) / BytesPerWord;
 972   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
 973 #endif
 974 }
 975 
 976 size_t VirtualSpaceNode::used_words_in_vs() const {
 977   return pointer_delta(top(), bottom(), sizeof(MetaWord));
 978 }
 979 
 980 // Space committed in the VirtualSpace
 981 size_t VirtualSpaceNode::capacity_words_in_vs() const {
 982   return pointer_delta(end(), bottom(), sizeof(MetaWord));
 983 }
 984 
 985 size_t VirtualSpaceNode::free_words_in_vs() const {
 986   return pointer_delta(end(), top(), sizeof(MetaWord));
 987 }
 988 
 989 // Allocates the chunk from the virtual space only.
 990 // This interface is also used internally for debugging.  Not all
 991 // chunks removed here are necessarily used for allocation.
 992 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
 993   // Bottom of the new chunk
 994   MetaWord* chunk_limit = top();
 995   assert(chunk_limit != NULL, "Not safe to call this method");
 996 
 997   // The virtual spaces are always expanded by the
 998   // commit granularity to enforce the following condition.
 999   // Without this the is_available check will not work correctly.
1000   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1001       "The committed memory doesn't match the expanded memory.");
1002 
1003   if (!is_available(chunk_word_size)) {
1004     LogTarget(Debug, gc, metaspace, freelist) lt;
1005     if (lt.is_enabled()) {
1006       LogStream ls(lt);
1007       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1008       // Dump some information about the virtual space that is nearly full
1009       print_on(&ls);
1010     }
1011     return NULL;
1012   }
1013 
1014   // Take the space  (bump top on the current virtual space).
1015   inc_top(chunk_word_size);
1016 
1017   // Initialize the chunk
1018   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
1019   return result;
1020 }
1021 
1022 
1023 // Expand the virtual space (commit more of the reserved space)
1024 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1025   size_t min_bytes = min_words * BytesPerWord;
1026   size_t preferred_bytes = preferred_words * BytesPerWord;
1027 
1028   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1029 
1030   if (uncommitted < min_bytes) {
1031     return false;
1032   }
1033 
1034   size_t commit = MIN2(preferred_bytes, uncommitted);
1035   bool result = virtual_space()->expand_by(commit, false);
1036 
1037   assert(result, "Failed to commit memory");
1038 
1039   return result;
1040 }
1041 
1042 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1043   assert_lock_strong(SpaceManager::expand_lock());
1044   Metachunk* result = take_from_committed(chunk_word_size);
1045   if (result != NULL) {
1046     inc_container_count();
1047   }
1048   return result;
1049 }
1050 
1051 bool VirtualSpaceNode::initialize() {
1052 
1053   if (!_rs.is_reserved()) {
1054     return false;
1055   }
1056 
1057   // These are necessary restriction to make sure that the virtual space always
1058   // grows in steps of Metaspace::commit_alignment(). If both base and size are
1059   // aligned only the middle alignment of the VirtualSpace is used.
1060   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
1061   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
1062 
1063   // ReservedSpaces marked as special will have the entire memory
1064   // pre-committed. Setting a committed size will make sure that
1065   // committed_size and actual_committed_size agrees.
1066   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1067 
1068   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1069                                             Metaspace::commit_alignment());
1070   if (result) {
1071     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1072         "Checking that the pre-committed memory was registered by the VirtualSpace");
1073 
1074     set_top((MetaWord*)virtual_space()->low());
1075     set_reserved(MemRegion((HeapWord*)_rs.base(),
1076                  (HeapWord*)(_rs.base() + _rs.size())));
1077 
1078     assert(reserved()->start() == (HeapWord*) _rs.base(),
1079            "Reserved start was not set properly " PTR_FORMAT
1080            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1081     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1082            "Reserved size was not set properly " SIZE_FORMAT
1083            " != " SIZE_FORMAT, reserved()->word_size(),
1084            _rs.size() / BytesPerWord);
1085   }
1086 
1087   return result;
1088 }
1089 
1090 void VirtualSpaceNode::print_on(outputStream* st) const {
1091   size_t used = used_words_in_vs();
1092   size_t capacity = capacity_words_in_vs();
1093   VirtualSpace* vs = virtual_space();
1094   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
1095            "[" PTR_FORMAT ", " PTR_FORMAT ", "
1096            PTR_FORMAT ", " PTR_FORMAT ")",
1097            p2i(vs), capacity / K,
1098            capacity == 0 ? 0 : used * 100 / capacity,
1099            p2i(bottom()), p2i(top()), p2i(end()),
1100            p2i(vs->high_boundary()));
1101 }
1102 
1103 #ifdef ASSERT
1104 void VirtualSpaceNode::mangle() {
1105   size_t word_size = capacity_words_in_vs();
1106   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1107 }
1108 #endif // ASSERT
1109 
1110 // VirtualSpaceList methods
1111 // Space allocated from the VirtualSpace
1112 
1113 VirtualSpaceList::~VirtualSpaceList() {
1114   VirtualSpaceListIterator iter(virtual_space_list());
1115   while (iter.repeat()) {
1116     VirtualSpaceNode* vsl = iter.get_next();
1117     delete vsl;
1118   }
1119 }
1120 
1121 void VirtualSpaceList::inc_reserved_words(size_t v) {
1122   assert_lock_strong(SpaceManager::expand_lock());
1123   _reserved_words = _reserved_words + v;
1124 }
1125 void VirtualSpaceList::dec_reserved_words(size_t v) {
1126   assert_lock_strong(SpaceManager::expand_lock());
1127   _reserved_words = _reserved_words - v;
1128 }
1129 
1130 #define assert_committed_below_limit()                        \
1131   assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
1132          "Too much committed memory. Committed: " SIZE_FORMAT \
1133          " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1134          MetaspaceAux::committed_bytes(), MaxMetaspaceSize);
1135 
1136 void VirtualSpaceList::inc_committed_words(size_t v) {
1137   assert_lock_strong(SpaceManager::expand_lock());
1138   _committed_words = _committed_words + v;
1139 
1140   assert_committed_below_limit();
1141 }
1142 void VirtualSpaceList::dec_committed_words(size_t v) {
1143   assert_lock_strong(SpaceManager::expand_lock());
1144   _committed_words = _committed_words - v;
1145 
1146   assert_committed_below_limit();
1147 }
1148 
1149 void VirtualSpaceList::inc_virtual_space_count() {
1150   assert_lock_strong(SpaceManager::expand_lock());
1151   _virtual_space_count++;
1152 }
1153 void VirtualSpaceList::dec_virtual_space_count() {
1154   assert_lock_strong(SpaceManager::expand_lock());
1155   _virtual_space_count--;
1156 }
1157 
1158 void ChunkManager::remove_chunk(Metachunk* chunk) {
1159   size_t word_size = chunk->word_size();
1160   ChunkIndex index = list_index(word_size);
1161   if (index != HumongousIndex) {
1162     free_chunks(index)->remove_chunk(chunk);
1163   } else {
1164     humongous_dictionary()->remove_chunk(chunk);
1165   }
1166 
1167   // Chunk has been removed from the chunks free list, update counters.
1168   account_for_removed_chunk(chunk);
1169 }
1170 
1171 // Walk the list of VirtualSpaceNodes and delete
1172 // nodes with a 0 container_count.  Remove Metachunks in
1173 // the node from their respective freelists.
1174 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1175   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
1176   assert_lock_strong(SpaceManager::expand_lock());
1177   // Don't use a VirtualSpaceListIterator because this
1178   // list is being changed and a straightforward use of an iterator is not safe.
1179   VirtualSpaceNode* purged_vsl = NULL;
1180   VirtualSpaceNode* prev_vsl = virtual_space_list();
1181   VirtualSpaceNode* next_vsl = prev_vsl;
1182   while (next_vsl != NULL) {
1183     VirtualSpaceNode* vsl = next_vsl;
1184     DEBUG_ONLY(vsl->verify_container_count();)
1185     next_vsl = vsl->next();
1186     // Don't free the current virtual space since it will likely
1187     // be needed soon.
1188     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1189       // Unlink it from the list
1190       if (prev_vsl == vsl) {
1191         // This is the case of the current node being the first node.
1192         assert(vsl == virtual_space_list(), "Expected to be the first node");
1193         set_virtual_space_list(vsl->next());
1194       } else {
1195         prev_vsl->set_next(vsl->next());
1196       }
1197 
1198       vsl->purge(chunk_manager);
1199       dec_reserved_words(vsl->reserved_words());
1200       dec_committed_words(vsl->committed_words());
1201       dec_virtual_space_count();
1202       purged_vsl = vsl;
1203       delete vsl;
1204     } else {
1205       prev_vsl = vsl;
1206     }
1207   }
1208 #ifdef ASSERT
1209   if (purged_vsl != NULL) {
1210     // List should be stable enough to use an iterator here.
1211     VirtualSpaceListIterator iter(virtual_space_list());
1212     while (iter.repeat()) {
1213       VirtualSpaceNode* vsl = iter.get_next();
1214       assert(vsl != purged_vsl, "Purge of vsl failed");
1215     }
1216   }
1217 #endif
1218 }
1219 
1220 
1221 // This function looks at the mmap regions in the metaspace without locking.
1222 // The chunks are added with store ordering and not deleted except for at
1223 // unloading time during a safepoint.
1224 bool VirtualSpaceList::contains(const void* ptr) {
1225   // List should be stable enough to use an iterator here because removing virtual
1226   // space nodes is only allowed at a safepoint.
1227   VirtualSpaceListIterator iter(virtual_space_list());
1228   while (iter.repeat()) {
1229     VirtualSpaceNode* vsn = iter.get_next();
1230     if (vsn->contains(ptr)) {
1231       return true;
1232     }
1233   }
1234   return false;
1235 }
1236 
1237 void VirtualSpaceList::retire_current_virtual_space() {
1238   assert_lock_strong(SpaceManager::expand_lock());
1239 
1240   VirtualSpaceNode* vsn = current_virtual_space();
1241 
1242   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
1243                                   Metaspace::chunk_manager_metadata();
1244 
1245   vsn->retire(cm);
1246 }
1247 
1248 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
1249   DEBUG_ONLY(verify_container_count();)
1250   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
1251     ChunkIndex index = (ChunkIndex)i;
1252     size_t chunk_size = chunk_manager->size_by_index(index);
1253 
1254     while (free_words_in_vs() >= chunk_size) {
1255       Metachunk* chunk = get_chunk_vs(chunk_size);
1256       assert(chunk != NULL, "allocation should have been successful");
1257 
1258       chunk_manager->return_single_chunk(index, chunk);
1259     }
1260     DEBUG_ONLY(verify_container_count();)
1261   }
1262   assert(free_words_in_vs() == 0, "should be empty now");
1263 }
1264 
1265 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
1266                                    _is_class(false),
1267                                    _virtual_space_list(NULL),
1268                                    _current_virtual_space(NULL),
1269                                    _reserved_words(0),
1270                                    _committed_words(0),
1271                                    _virtual_space_count(0) {
1272   MutexLockerEx cl(SpaceManager::expand_lock(),
1273                    Mutex::_no_safepoint_check_flag);
1274   create_new_virtual_space(word_size);
1275 }
1276 
1277 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
1278                                    _is_class(true),
1279                                    _virtual_space_list(NULL),
1280                                    _current_virtual_space(NULL),
1281                                    _reserved_words(0),
1282                                    _committed_words(0),
1283                                    _virtual_space_count(0) {
1284   MutexLockerEx cl(SpaceManager::expand_lock(),
1285                    Mutex::_no_safepoint_check_flag);
1286   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
1287   bool succeeded = class_entry->initialize();
1288   if (succeeded) {
1289     link_vs(class_entry);
1290   }
1291 }
1292 
1293 size_t VirtualSpaceList::free_bytes() {
1294   return virtual_space_list()->free_words_in_vs() * BytesPerWord;
1295 }
1296 
1297 // Allocate another meta virtual space and add it to the list.
1298 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
1299   assert_lock_strong(SpaceManager::expand_lock());
1300 
1301   if (is_class()) {
1302     assert(false, "We currently don't support more than one VirtualSpace for"
1303                   " the compressed class space. The initialization of the"
1304                   " CCS uses another code path and should not hit this path.");
1305     return false;
1306   }
1307 
1308   if (vs_word_size == 0) {
1309     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
1310     return false;
1311   }
1312 
1313   // Reserve the space
1314   size_t vs_byte_size = vs_word_size * BytesPerWord;
1315   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
1316 
1317   // Allocate the meta virtual space and initialize it.
1318   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
1319   if (!new_entry->initialize()) {
1320     delete new_entry;
1321     return false;
1322   } else {
1323     assert(new_entry->reserved_words() == vs_word_size,
1324         "Reserved memory size differs from requested memory size");
1325     // ensure lock-free iteration sees fully initialized node
1326     OrderAccess::storestore();
1327     link_vs(new_entry);
1328     return true;
1329   }
1330 }
1331 
1332 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1333   if (virtual_space_list() == NULL) {
1334       set_virtual_space_list(new_entry);
1335   } else {
1336     current_virtual_space()->set_next(new_entry);
1337   }
1338   set_current_virtual_space(new_entry);
1339   inc_reserved_words(new_entry->reserved_words());
1340   inc_committed_words(new_entry->committed_words());
1341   inc_virtual_space_count();
1342 #ifdef ASSERT
1343   new_entry->mangle();
1344 #endif
1345   LogTarget(Trace, gc, metaspace) lt;
1346   if (lt.is_enabled()) {
1347     LogStream ls(lt);
1348     VirtualSpaceNode* vsl = current_virtual_space();
1349     ResourceMark rm;
1350     vsl->print_on(&ls);
1351   }
1352 }
1353 
1354 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1355                                       size_t min_words,
1356                                       size_t preferred_words) {
1357   size_t before = node->committed_words();
1358 
1359   bool result = node->expand_by(min_words, preferred_words);
1360 
1361   size_t after = node->committed_words();
1362 
1363   // after and before can be the same if the memory was pre-committed.
1364   assert(after >= before, "Inconsistency");
1365   inc_committed_words(after - before);
1366 
1367   return result;
1368 }
1369 
1370 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
1371   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
1372   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
1373   assert(min_words <= preferred_words, "Invalid arguments");
1374 
1375   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
1376     return  false;
1377   }
1378 
1379   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
1380   if (allowed_expansion_words < min_words) {
1381     return false;
1382   }
1383 
1384   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
1385 
1386   // Commit more memory from the the current virtual space.
1387   bool vs_expanded = expand_node_by(current_virtual_space(),
1388                                     min_words,
1389                                     max_expansion_words);
1390   if (vs_expanded) {
1391     return true;
1392   }
1393   retire_current_virtual_space();
1394 
1395   // Get another virtual space.
1396   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
1397   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
1398 
1399   if (create_new_virtual_space(grow_vs_words)) {
1400     if (current_virtual_space()->is_pre_committed()) {
1401       // The memory was pre-committed, so we are done here.
1402       assert(min_words <= current_virtual_space()->committed_words(),
1403           "The new VirtualSpace was pre-committed, so it"
1404           "should be large enough to fit the alloc request.");
1405       return true;
1406     }
1407 
1408     return expand_node_by(current_virtual_space(),
1409                           min_words,
1410                           max_expansion_words);
1411   }
1412 
1413   return false;
1414 }
1415 
1416 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
1417 
1418   // Allocate a chunk out of the current virtual space.
1419   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
1420 
1421   if (next != NULL) {
1422     return next;
1423   }
1424 
1425   // The expand amount is currently only determined by the requested sizes
1426   // and not how much committed memory is left in the current virtual space.
1427 
1428   size_t min_word_size       = align_up(chunk_word_size,              Metaspace::commit_alignment_words());
1429   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
1430   if (min_word_size >= preferred_word_size) {
1431     // Can happen when humongous chunks are allocated.
1432     preferred_word_size = min_word_size;
1433   }
1434 
1435   bool expanded = expand_by(min_word_size, preferred_word_size);
1436   if (expanded) {
1437     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
1438     assert(next != NULL, "The allocation was expected to succeed after the expansion");
1439   }
1440 
1441    return next;
1442 }
1443 
1444 void VirtualSpaceList::print_on(outputStream* st) const {
1445   VirtualSpaceListIterator iter(virtual_space_list());
1446   while (iter.repeat()) {
1447     VirtualSpaceNode* node = iter.get_next();
1448     node->print_on(st);
1449   }
1450 }
1451 
1452 // MetaspaceGC methods
1453 
1454 // VM_CollectForMetadataAllocation is the vm operation used to GC.
1455 // Within the VM operation after the GC the attempt to allocate the metadata
1456 // should succeed.  If the GC did not free enough space for the metaspace
1457 // allocation, the HWM is increased so that another virtualspace will be
1458 // allocated for the metadata.  With perm gen the increase in the perm
1459 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
1460 // metaspace policy uses those as the small and large steps for the HWM.
1461 //
1462 // After the GC the compute_new_size() for MetaspaceGC is called to
1463 // resize the capacity of the metaspaces.  The current implementation
1464 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
1465 // to resize the Java heap by some GC's.  New flags can be implemented
1466 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
1467 // free space is desirable in the metaspace capacity to decide how much
1468 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
1469 // free space is desirable in the metaspace capacity before decreasing
1470 // the HWM.
1471 
1472 // Calculate the amount to increase the high water mark (HWM).
1473 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1474 // another expansion is not requested too soon.  If that is not
1475 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
1476 // If that is still not enough, expand by the size of the allocation
1477 // plus some.
1478 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
1479   size_t min_delta = MinMetaspaceExpansion;
1480   size_t max_delta = MaxMetaspaceExpansion;
1481   size_t delta = align_up(bytes, Metaspace::commit_alignment());
1482 
1483   if (delta <= min_delta) {
1484     delta = min_delta;
1485   } else if (delta <= max_delta) {
1486     // Don't want to hit the high water mark on the next
1487     // allocation so make the delta greater than just enough
1488     // for this allocation.
1489     delta = max_delta;
1490   } else {
1491     // This allocation is large but the next ones are probably not
1492     // so increase by the minimum.
1493     delta = delta + min_delta;
1494   }
1495 
1496   assert_is_aligned(delta, Metaspace::commit_alignment());
1497 
1498   return delta;
1499 }
1500 
1501 size_t MetaspaceGC::capacity_until_GC() {
1502   size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
1503   assert(value >= MetaspaceSize, "Not initialized properly?");
1504   return value;
1505 }
1506 
1507 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
1508   assert_is_aligned(v, Metaspace::commit_alignment());
1509 
1510   size_t capacity_until_GC = (size_t) _capacity_until_GC;
1511   size_t new_value = capacity_until_GC + v;
1512 
1513   if (new_value < capacity_until_GC) {
1514     // The addition wrapped around, set new_value to aligned max value.
1515     new_value = align_down(max_uintx, Metaspace::commit_alignment());
1516   }
1517 
1518   intptr_t expected = (intptr_t) capacity_until_GC;
1519   intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);
1520 
1521   if (expected != actual) {
1522     return false;
1523   }
1524 
1525   if (new_cap_until_GC != NULL) {
1526     *new_cap_until_GC = new_value;
1527   }
1528   if (old_cap_until_GC != NULL) {
1529     *old_cap_until_GC = capacity_until_GC;
1530   }
1531   return true;
1532 }
1533 
1534 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
1535   assert_is_aligned(v, Metaspace::commit_alignment());
1536 
1537   return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
1538 }
1539 
1540 void MetaspaceGC::initialize() {
1541   // Set the high-water mark to MaxMetapaceSize during VM initializaton since
1542   // we can't do a GC during initialization.
1543   _capacity_until_GC = MaxMetaspaceSize;
1544 }
1545 
1546 void MetaspaceGC::post_initialize() {
1547   // Reset the high-water mark once the VM initialization is done.
1548   _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
1549 }
1550 
1551 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
1552   // Check if the compressed class space is full.
1553   if (is_class && Metaspace::using_class_space()) {
1554     size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
1555     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
1556       return false;
1557     }
1558   }
1559 
1560   // Check if the user has imposed a limit on the metaspace memory.
1561   size_t committed_bytes = MetaspaceAux::committed_bytes();
1562   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
1563     return false;
1564   }
1565 
1566   return true;
1567 }
1568 
1569 size_t MetaspaceGC::allowed_expansion() {
1570   size_t committed_bytes = MetaspaceAux::committed_bytes();
1571   size_t capacity_until_gc = capacity_until_GC();
1572 
1573   assert(capacity_until_gc >= committed_bytes,
1574          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
1575          capacity_until_gc, committed_bytes);
1576 
1577   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
1578   size_t left_until_GC = capacity_until_gc - committed_bytes;
1579   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
1580 
1581   return left_to_commit / BytesPerWord;
1582 }
1583 
1584 void MetaspaceGC::compute_new_size() {
1585   assert(_shrink_factor <= 100, "invalid shrink factor");
1586   uint current_shrink_factor = _shrink_factor;
1587   _shrink_factor = 0;
1588 
1589   // Using committed_bytes() for used_after_gc is an overestimation, since the
1590   // chunk free lists are included in committed_bytes() and the memory in an
1591   // un-fragmented chunk free list is available for future allocations.
1592   // However, if the chunk free lists becomes fragmented, then the memory may
1593   // not be available for future allocations and the memory is therefore "in use".
1594   // Including the chunk free lists in the definition of "in use" is therefore
1595   // necessary. Not including the chunk free lists can cause capacity_until_GC to
1596   // shrink below committed_bytes() and this has caused serious bugs in the past.
1597   const size_t used_after_gc = MetaspaceAux::committed_bytes();
1598   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1599 
1600   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1601   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1602 
1603   const double min_tmp = used_after_gc / maximum_used_percentage;
1604   size_t minimum_desired_capacity =
1605     (size_t)MIN2(min_tmp, double(max_uintx));
1606   // Don't shrink less than the initial generation size
1607   minimum_desired_capacity = MAX2(minimum_desired_capacity,
1608                                   MetaspaceSize);
1609 
1610   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
1611   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
1612                            minimum_free_percentage, maximum_used_percentage);
1613   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
1614 
1615 
1616   size_t shrink_bytes = 0;
1617   if (capacity_until_GC < minimum_desired_capacity) {
1618     // If we have less capacity below the metaspace HWM, then
1619     // increment the HWM.
1620     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1621     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
1622     // Don't expand unless it's significant
1623     if (expand_bytes >= MinMetaspaceExpansion) {
1624       size_t new_capacity_until_GC = 0;
1625       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1626       assert(succeeded, "Should always succesfully increment HWM when at safepoint");
1627 
1628       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1629                                                new_capacity_until_GC,
1630                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
1631       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
1632                                minimum_desired_capacity / (double) K,
1633                                expand_bytes / (double) K,
1634                                MinMetaspaceExpansion / (double) K,
1635                                new_capacity_until_GC / (double) K);
1636     }
1637     return;
1638   }
1639 
1640   // No expansion, now see if we want to shrink
1641   // We would never want to shrink more than this
1642   assert(capacity_until_GC >= minimum_desired_capacity,
1643          SIZE_FORMAT " >= " SIZE_FORMAT,
1644          capacity_until_GC, minimum_desired_capacity);
1645   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1646 
1647   // Should shrinking be considered?
1648   if (MaxMetaspaceFreeRatio < 100) {
1649     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1650     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1651     const double max_tmp = used_after_gc / minimum_used_percentage;
1652     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1653     maximum_desired_capacity = MAX2(maximum_desired_capacity,
1654                                     MetaspaceSize);
1655     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
1656                              maximum_free_percentage, minimum_used_percentage);
1657     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
1658                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
1659 
1660     assert(minimum_desired_capacity <= maximum_desired_capacity,
1661            "sanity check");
1662 
1663     if (capacity_until_GC > maximum_desired_capacity) {
1664       // Capacity too large, compute shrinking size
1665       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1666       // We don't want shrink all the way back to initSize if people call
1667       // System.gc(), because some programs do that between "phases" and then
1668       // we'd just have to grow the heap up again for the next phase.  So we
1669       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1670       // on the third call, and 100% by the fourth call.  But if we recompute
1671       // size without shrinking, it goes back to 0%.
1672       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1673 
1674       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
1675 
1676       assert(shrink_bytes <= max_shrink_bytes,
1677              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1678              shrink_bytes, max_shrink_bytes);
1679       if (current_shrink_factor == 0) {
1680         _shrink_factor = 10;
1681       } else {
1682         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1683       }
1684       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
1685                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
1686       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
1687                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
1688     }
1689   }
1690 
1691   // Don't shrink unless it's significant
1692   if (shrink_bytes >= MinMetaspaceExpansion &&
1693       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1694     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1695     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1696                                              new_capacity_until_GC,
1697                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
1698   }
1699 }
1700 
1701 // Metadebug methods
1702 
1703 void Metadebug::init_allocation_fail_alot_count() {
1704   if (MetadataAllocationFailALot) {
1705     _allocation_fail_alot_count =
1706       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1707   }
1708 }
1709 
1710 #ifdef ASSERT
1711 bool Metadebug::test_metadata_failure() {
1712   if (MetadataAllocationFailALot &&
1713       Threads::is_vm_complete()) {
1714     if (_allocation_fail_alot_count > 0) {
1715       _allocation_fail_alot_count--;
1716     } else {
1717       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
1718       init_allocation_fail_alot_count();
1719       return true;
1720     }
1721   }
1722   return false;
1723 }
1724 #endif
1725 
1726 // ChunkManager methods
1727 
1728 size_t ChunkManager::free_chunks_total_words() {
1729   return _free_chunks_total;
1730 }
1731 
1732 size_t ChunkManager::free_chunks_total_bytes() {
1733   return free_chunks_total_words() * BytesPerWord;
1734 }
1735 
1736 // Update internal accounting after a chunk was added
1737 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
1738   assert_lock_strong(SpaceManager::expand_lock());
1739   _free_chunks_count ++;
1740   _free_chunks_total += c->word_size();
1741 }
1742 
1743 // Update internal accounting after a chunk was removed
1744 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
1745   assert_lock_strong(SpaceManager::expand_lock());
1746   assert(_free_chunks_count >= 1,
1747     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
1748   assert(_free_chunks_total >= c->word_size(),
1749     "ChunkManager::_free_chunks_total: about to go negative"
1750      "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
1751   _free_chunks_count --;
1752   _free_chunks_total -= c->word_size();
1753 }
1754 
1755 size_t ChunkManager::free_chunks_count() {
1756 #ifdef ASSERT
1757   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1758     MutexLockerEx cl(SpaceManager::expand_lock(),
1759                      Mutex::_no_safepoint_check_flag);
1760     // This lock is only needed in debug because the verification
1761     // of the _free_chunks_totals walks the list of free chunks
1762     slow_locked_verify_free_chunks_count();
1763   }
1764 #endif
1765   return _free_chunks_count;
1766 }
1767 
1768 ChunkIndex ChunkManager::list_index(size_t size) {
1769   if (size_by_index(SpecializedIndex) == size) {
1770     return SpecializedIndex;
1771   }
1772   if (size_by_index(SmallIndex) == size) {
1773     return SmallIndex;
1774   }
1775   const size_t med_size = size_by_index(MediumIndex);
1776   if (med_size == size) {
1777     return MediumIndex;
1778   }
1779 
1780   assert(size > med_size, "Not a humongous chunk");
1781   return HumongousIndex;
1782 }
1783 
1784 size_t ChunkManager::size_by_index(ChunkIndex index) const {
1785   index_bounds_check(index);
1786   assert(index != HumongousIndex, "Do not call for humongous chunks.");
1787   return _free_chunks[index].size();
1788 }
1789 
1790 void ChunkManager::locked_verify_free_chunks_total() {
1791   assert_lock_strong(SpaceManager::expand_lock());
1792   assert(sum_free_chunks() == _free_chunks_total,
1793          "_free_chunks_total " SIZE_FORMAT " is not the"
1794          " same as sum " SIZE_FORMAT, _free_chunks_total,
1795          sum_free_chunks());
1796 }
1797 
1798 void ChunkManager::verify_free_chunks_total() {
1799   MutexLockerEx cl(SpaceManager::expand_lock(),
1800                      Mutex::_no_safepoint_check_flag);
1801   locked_verify_free_chunks_total();
1802 }
1803 
1804 void ChunkManager::locked_verify_free_chunks_count() {
1805   assert_lock_strong(SpaceManager::expand_lock());
1806   assert(sum_free_chunks_count() == _free_chunks_count,
1807          "_free_chunks_count " SIZE_FORMAT " is not the"
1808          " same as sum " SIZE_FORMAT, _free_chunks_count,
1809          sum_free_chunks_count());
1810 }
1811 
1812 void ChunkManager::verify_free_chunks_count() {
1813 #ifdef ASSERT
1814   MutexLockerEx cl(SpaceManager::expand_lock(),
1815                      Mutex::_no_safepoint_check_flag);
1816   locked_verify_free_chunks_count();
1817 #endif
1818 }
1819 
1820 void ChunkManager::verify() {
1821   MutexLockerEx cl(SpaceManager::expand_lock(),
1822                      Mutex::_no_safepoint_check_flag);
1823   locked_verify();
1824 }
1825 
1826 void ChunkManager::locked_verify() {
1827   locked_verify_free_chunks_count();
1828   locked_verify_free_chunks_total();
1829 }
1830 
1831 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1832   assert_lock_strong(SpaceManager::expand_lock());
1833   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1834                 _free_chunks_total, _free_chunks_count);
1835 }
1836 
1837 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1838   assert_lock_strong(SpaceManager::expand_lock());
1839   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1840                 sum_free_chunks(), sum_free_chunks_count());
1841 }
1842 
1843 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1844   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
1845          "Bad index: %d", (int)index);
1846 
1847   return &_free_chunks[index];
1848 }
1849 
1850 // These methods that sum the free chunk lists are used in printing
1851 // methods that are used in product builds.
1852 size_t ChunkManager::sum_free_chunks() {
1853   assert_lock_strong(SpaceManager::expand_lock());
1854   size_t result = 0;
1855   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1856     ChunkList* list = free_chunks(i);
1857 
1858     if (list == NULL) {
1859       continue;
1860     }
1861 
1862     result = result + list->count() * list->size();
1863   }
1864   result = result + humongous_dictionary()->total_size();
1865   return result;
1866 }
1867 
1868 size_t ChunkManager::sum_free_chunks_count() {
1869   assert_lock_strong(SpaceManager::expand_lock());
1870   size_t count = 0;
1871   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1872     ChunkList* list = free_chunks(i);
1873     if (list == NULL) {
1874       continue;
1875     }
1876     count = count + list->count();
1877   }
1878   count = count + humongous_dictionary()->total_free_blocks();
1879   return count;
1880 }
1881 
1882 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1883   ChunkIndex index = list_index(word_size);
1884   assert(index < HumongousIndex, "No humongous list");
1885   return free_chunks(index);
1886 }
1887 
1888 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1889   assert_lock_strong(SpaceManager::expand_lock());
1890 
1891   slow_locked_verify();
1892 
1893   Metachunk* chunk = NULL;
1894   if (list_index(word_size) != HumongousIndex) {
1895     ChunkList* free_list = find_free_chunks_list(word_size);
1896     assert(free_list != NULL, "Sanity check");
1897 
1898     chunk = free_list->head();
1899 
1900     if (chunk == NULL) {
1901       return NULL;
1902     }
1903 
1904     // Remove the chunk as the head of the list.
1905     free_list->remove_chunk(chunk);
1906 
1907     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1908                                        p2i(free_list), p2i(chunk), chunk->word_size());
1909   } else {
1910     chunk = humongous_dictionary()->get_chunk(word_size);
1911 
1912     if (chunk == NULL) {
1913       return NULL;
1914     }
1915 
1916     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
1917                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
1918   }
1919 
1920   // Chunk has been removed from the chunk manager; update counters.
1921   account_for_removed_chunk(chunk);
1922 
1923   // Remove it from the links to this freelist
1924   chunk->set_next(NULL);
1925   chunk->set_prev(NULL);
1926 #ifdef ASSERT
1927   // Chunk is no longer on any freelist. Setting to false make container_count_slow()
1928   // work.
1929   chunk->set_is_tagged_free(false);
1930 #endif
1931   chunk->container()->inc_container_count();
1932 
1933   slow_locked_verify();
1934   return chunk;
1935 }
1936 
1937 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1938   assert_lock_strong(SpaceManager::expand_lock());
1939   slow_locked_verify();
1940 
1941   // Take from the beginning of the list
1942   Metachunk* chunk = free_chunks_get(word_size);
1943   if (chunk == NULL) {
1944     return NULL;
1945   }
1946 
1947   assert((word_size <= chunk->word_size()) ||
1948          (list_index(chunk->word_size()) == HumongousIndex),
1949          "Non-humongous variable sized chunk");
1950   LogTarget(Debug, gc, metaspace, freelist) lt;
1951   if (lt.is_enabled()) {
1952     size_t list_count;
1953     if (list_index(word_size) < HumongousIndex) {
1954       ChunkList* list = find_free_chunks_list(word_size);
1955       list_count = list->count();
1956     } else {
1957       list_count = humongous_dictionary()->total_count();
1958     }
1959     LogStream ls(lt);
1960     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1961              p2i(this), p2i(chunk), chunk->word_size(), list_count);
1962     ResourceMark rm;
1963     locked_print_free_chunks(&ls);
1964   }
1965 
1966   return chunk;
1967 }
1968 
1969 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
1970   assert_lock_strong(SpaceManager::expand_lock());
1971   assert(chunk != NULL, "Expected chunk.");
1972   assert(chunk->container() != NULL, "Container should have been set.");
1973   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
1974   index_bounds_check(index);
1975 
1976   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
1977   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
1978   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
1979   NOT_PRODUCT(chunk->mangle(badMetaWordVal);)
1980 
1981   if (index != HumongousIndex) {
1982     // Return non-humongous chunk to freelist.
1983     ChunkList* list = free_chunks(index);
1984     assert(list->size() == chunk->word_size(), "Wrong chunk type.");
1985     list->return_chunk_at_head(chunk);
1986     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
1987         chunk_size_name(index), p2i(chunk));
1988   } else {
1989     // Return humongous chunk to dictionary.
1990     assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
1991     assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
1992            "Humongous chunk has wrong alignment.");
1993     _humongous_dictionary.return_chunk(chunk);
1994     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
1995         chunk_size_name(index), p2i(chunk), chunk->word_size());
1996   }
1997   chunk->container()->dec_container_count();
1998   DEBUG_ONLY(chunk->set_is_tagged_free(true);)
1999 
2000   // Chunk has been added; update counters.
2001   account_for_added_chunk(chunk);
2002 
2003 }
2004 
2005 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
2006   index_bounds_check(index);
2007   if (chunks == NULL) {
2008     return;
2009   }
2010   LogTarget(Trace, gc, metaspace, freelist) log;
2011   if (log.is_enabled()) { // tracing
2012     log.print("returning list of %s chunks...", chunk_size_name(index));
2013   }
2014   unsigned num_chunks_returned = 0;
2015   size_t size_chunks_returned = 0;
2016   Metachunk* cur = chunks;
2017   while (cur != NULL) {
2018     // Capture the next link before it is changed
2019     // by the call to return_chunk_at_head();
2020     Metachunk* next = cur->next();
2021     if (log.is_enabled()) { // tracing
2022       num_chunks_returned ++;
2023       size_chunks_returned += cur->word_size();
2024     }
2025     return_single_chunk(index, cur);
2026     cur = next;
2027   }
2028   if (log.is_enabled()) { // tracing
2029     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
2030         num_chunks_returned, chunk_size_name(index), size_chunks_returned);
2031     if (index != HumongousIndex) {
2032       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
2033     } else {
2034       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
2035     }
2036   }
2037 }
2038 
2039 void ChunkManager::print_on(outputStream* out) const {
2040   _humongous_dictionary.report_statistics(out);
2041 }
2042 
2043 void ChunkManager::locked_get_statistics(ChunkManagerStatistics* stat) const {
2044   assert_lock_strong(SpaceManager::expand_lock());
2045   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2046     stat->num_by_type[i] = num_free_chunks(i);
2047     stat->single_size_by_type[i] = size_by_index(i);
2048     stat->total_size_by_type[i] = size_free_chunks_in_bytes(i);
2049   }
2050   stat->num_humongous_chunks = num_free_chunks(HumongousIndex);
2051   stat->total_size_humongous_chunks = size_free_chunks_in_bytes(HumongousIndex);
2052 }
2053 
2054 void ChunkManager::get_statistics(ChunkManagerStatistics* stat) const {
2055   MutexLockerEx cl(SpaceManager::expand_lock(),
2056                    Mutex::_no_safepoint_check_flag);
2057   locked_get_statistics(stat);
2058 }
2059 
2060 void ChunkManager::print_statistics(const ChunkManagerStatistics* stat, outputStream* out) {
2061   size_t total = 0;
2062   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2063     out->print_cr("  " SIZE_FORMAT " %s (" SIZE_FORMAT " bytes) chunks, total " SIZE_FORMAT " bytes",
2064                  stat->num_by_type[i], chunk_size_name(i),
2065                  stat->single_size_by_type[i],
2066                  stat->total_size_by_type[i]);
2067     total += stat->total_size_by_type[i];
2068   }
2069   out->print_cr("  " SIZE_FORMAT " humongous chunks, total " SIZE_FORMAT " bytes",
2070                stat->num_humongous_chunks, stat->total_size_humongous_chunks);
2071   total += stat->total_size_humongous_chunks;
2072   out->print_cr("  total size: " SIZE_FORMAT ".", total);
2073 }
2074 
2075 void ChunkManager::print_all_chunkmanagers(outputStream* out) {
2076   // Note: keep lock protection only to retrieving statistics; keep printing
2077   // out of lock protection
2078   ChunkManagerStatistics stat;
2079   out->print_cr("Chunkmanager (non-class):");
2080   const ChunkManager* const non_class_cm = Metaspace::chunk_manager_metadata();
2081   if (non_class_cm != NULL) {
2082     non_class_cm->get_statistics(&stat);
2083     ChunkManager::print_statistics(&stat, out);
2084   } else {
2085     out->print_cr("unavailable.");
2086   }
2087   out->print_cr("Chunkmanager (class):");
2088   const ChunkManager* const class_cm = Metaspace::chunk_manager_class();
2089   if (class_cm != NULL) {
2090     class_cm->get_statistics(&stat);
2091     ChunkManager::print_statistics(&stat, out);
2092   } else {
2093     out->print_cr("unavailable.");
2094   }
2095 }
2096 
2097 // SpaceManager methods
2098 
2099 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
2100   size_t chunk_sizes[] = {
2101       specialized_chunk_size(is_class_space),
2102       small_chunk_size(is_class_space),
2103       medium_chunk_size(is_class_space)
2104   };
2105 
2106   // Adjust up to one of the fixed chunk sizes ...
2107   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
2108     if (requested <= chunk_sizes[i]) {
2109       return chunk_sizes[i];
2110     }
2111   }
2112 
2113   // ... or return the size as a humongous chunk.
2114   return requested;
2115 }
2116 
2117 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
2118   return adjust_initial_chunk_size(requested, is_class());
2119 }
2120 
2121 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
2122   size_t requested;
2123 
2124   if (is_class()) {
2125     switch (type) {
2126     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
2127     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
2128     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
2129     default:                                 requested = ClassSmallChunk; break;
2130     }
2131   } else {
2132     switch (type) {
2133     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
2134     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
2135     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
2136     default:                                 requested = SmallChunk; break;
2137     }
2138   }
2139 
2140   // Adjust to one of the fixed chunk sizes (unless humongous)
2141   const size_t adjusted = adjust_initial_chunk_size(requested);
2142 
2143   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
2144          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
2145 
2146   return adjusted;
2147 }
2148 
2149 size_t SpaceManager::sum_free_in_chunks_in_use() const {
2150   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2151   size_t free = 0;
2152   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2153     Metachunk* chunk = chunks_in_use(i);
2154     while (chunk != NULL) {
2155       free += chunk->free_word_size();
2156       chunk = chunk->next();
2157     }
2158   }
2159   return free;
2160 }
2161 
2162 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
2163   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2164   size_t result = 0;
2165   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2166    result += sum_waste_in_chunks_in_use(i);
2167   }
2168 
2169   return result;
2170 }
2171 
2172 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
2173   size_t result = 0;
2174   Metachunk* chunk = chunks_in_use(index);
2175   // Count the free space in all the chunk but not the
2176   // current chunk from which allocations are still being done.
2177   while (chunk != NULL) {
2178     if (chunk != current_chunk()) {
2179       result += chunk->free_word_size();
2180     }
2181     chunk = chunk->next();
2182   }
2183   return result;
2184 }
2185 
2186 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
2187   // For CMS use "allocated_chunks_words()" which does not need the
2188   // Metaspace lock.  For the other collectors sum over the
2189   // lists.  Use both methods as a check that "allocated_chunks_words()"
2190   // is correct.  That is, sum_capacity_in_chunks() is too expensive
2191   // to use in the product and allocated_chunks_words() should be used
2192   // but allow for  checking that allocated_chunks_words() returns the same
2193   // value as sum_capacity_in_chunks_in_use() which is the definitive
2194   // answer.
2195   if (UseConcMarkSweepGC) {
2196     return allocated_chunks_words();
2197   } else {
2198     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2199     size_t sum = 0;
2200     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2201       Metachunk* chunk = chunks_in_use(i);
2202       while (chunk != NULL) {
2203         sum += chunk->word_size();
2204         chunk = chunk->next();
2205       }
2206     }
2207   return sum;
2208   }
2209 }
2210 
2211 size_t SpaceManager::sum_count_in_chunks_in_use() {
2212   size_t count = 0;
2213   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2214     count = count + sum_count_in_chunks_in_use(i);
2215   }
2216 
2217   return count;
2218 }
2219 
2220 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
2221   size_t count = 0;
2222   Metachunk* chunk = chunks_in_use(i);
2223   while (chunk != NULL) {
2224     count++;
2225     chunk = chunk->next();
2226   }
2227   return count;
2228 }
2229 
2230 
2231 size_t SpaceManager::sum_used_in_chunks_in_use() const {
2232   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2233   size_t used = 0;
2234   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2235     Metachunk* chunk = chunks_in_use(i);
2236     while (chunk != NULL) {
2237       used += chunk->used_word_size();
2238       chunk = chunk->next();
2239     }
2240   }
2241   return used;
2242 }
2243 
2244 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
2245 
2246   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2247     Metachunk* chunk = chunks_in_use(i);
2248     st->print("SpaceManager: %s " PTR_FORMAT,
2249                  chunk_size_name(i), p2i(chunk));
2250     if (chunk != NULL) {
2251       st->print_cr(" free " SIZE_FORMAT,
2252                    chunk->free_word_size());
2253     } else {
2254       st->cr();
2255     }
2256   }
2257 
2258   chunk_manager()->locked_print_free_chunks(st);
2259   chunk_manager()->locked_print_sum_free_chunks(st);
2260 }
2261 
2262 size_t SpaceManager::calc_chunk_size(size_t word_size) {
2263 
2264   // Decide between a small chunk and a medium chunk.  Up to
2265   // _small_chunk_limit small chunks can be allocated.
2266   // After that a medium chunk is preferred.
2267   size_t chunk_word_size;
2268   if (chunks_in_use(MediumIndex) == NULL &&
2269       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2270     chunk_word_size = (size_t) small_chunk_size();
2271     if (word_size + Metachunk::overhead() > small_chunk_size()) {
2272       chunk_word_size = medium_chunk_size();
2273     }
2274   } else {
2275     chunk_word_size = medium_chunk_size();
2276   }
2277 
2278   // Might still need a humongous chunk.  Enforce
2279   // humongous allocations sizes to be aligned up to
2280   // the smallest chunk size.
2281   size_t if_humongous_sized_chunk =
2282     align_up(word_size + Metachunk::overhead(),
2283                   smallest_chunk_size());
2284   chunk_word_size =
2285     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2286 
2287   assert(!SpaceManager::is_humongous(word_size) ||
2288          chunk_word_size == if_humongous_sized_chunk,
2289          "Size calculation is wrong, word_size " SIZE_FORMAT
2290          " chunk_word_size " SIZE_FORMAT,
2291          word_size, chunk_word_size);
2292   Log(gc, metaspace, alloc) log;
2293   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
2294     log.debug("Metadata humongous allocation:");
2295     log.debug("  word_size " PTR_FORMAT, word_size);
2296     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
2297     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
2298   }
2299   return chunk_word_size;
2300 }
2301 
2302 void SpaceManager::track_metaspace_memory_usage() {
2303   if (is_init_completed()) {
2304     if (is_class()) {
2305       MemoryService::track_compressed_class_memory_usage();
2306     }
2307     MemoryService::track_metaspace_memory_usage();
2308   }
2309 }
2310 
2311 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2312   assert(vs_list()->current_virtual_space() != NULL,
2313          "Should have been set");
2314   assert(current_chunk() == NULL ||
2315          current_chunk()->allocate(word_size) == NULL,
2316          "Don't need to expand");
2317   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2318 
2319   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2320     size_t words_left = 0;
2321     size_t words_used = 0;
2322     if (current_chunk() != NULL) {
2323       words_left = current_chunk()->free_word_size();
2324       words_used = current_chunk()->used_word_size();
2325     }
2326     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
2327                                        word_size, words_used, words_left);
2328   }
2329 
2330   // Get another chunk
2331   size_t chunk_word_size = calc_chunk_size(word_size);
2332   Metachunk* next = get_new_chunk(chunk_word_size);
2333 
2334   MetaWord* mem = NULL;
2335 
2336   // If a chunk was available, add it to the in-use chunk list
2337   // and do an allocation from it.
2338   if (next != NULL) {
2339     // Add to this manager's list of chunks in use.
2340     add_chunk(next, false);
2341     mem = next->allocate(word_size);
2342   }
2343 
2344   // Track metaspace memory usage statistic.
2345   track_metaspace_memory_usage();
2346 
2347   return mem;
2348 }
2349 
2350 void SpaceManager::print_on(outputStream* st) const {
2351 
2352   for (ChunkIndex i = ZeroIndex;
2353        i < NumberOfInUseLists ;
2354        i = next_chunk_index(i) ) {
2355     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
2356                  p2i(chunks_in_use(i)),
2357                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2358   }
2359   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2360                " Humongous " SIZE_FORMAT,
2361                sum_waste_in_chunks_in_use(SmallIndex),
2362                sum_waste_in_chunks_in_use(MediumIndex),
2363                sum_waste_in_chunks_in_use(HumongousIndex));
2364   // block free lists
2365   if (block_freelists() != NULL) {
2366     st->print_cr("total in block free lists " SIZE_FORMAT,
2367       block_freelists()->total_size());
2368   }
2369 }
2370 
2371 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2372                            Mutex* lock) :
2373   _mdtype(mdtype),
2374   _allocated_blocks_words(0),
2375   _allocated_chunks_words(0),
2376   _allocated_chunks_count(0),
2377   _block_freelists(NULL),
2378   _lock(lock)
2379 {
2380   initialize();
2381 }
2382 
2383 void SpaceManager::inc_size_metrics(size_t words) {
2384   assert_lock_strong(SpaceManager::expand_lock());
2385   // Total of allocated Metachunks and allocated Metachunks count
2386   // for each SpaceManager
2387   _allocated_chunks_words = _allocated_chunks_words + words;
2388   _allocated_chunks_count++;
2389   // Global total of capacity in allocated Metachunks
2390   MetaspaceAux::inc_capacity(mdtype(), words);
2391   // Global total of allocated Metablocks.
2392   // used_words_slow() includes the overhead in each
2393   // Metachunk so include it in the used when the
2394   // Metachunk is first added (so only added once per
2395   // Metachunk).
2396   MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2397 }
2398 
2399 void SpaceManager::inc_used_metrics(size_t words) {
2400   // Add to the per SpaceManager total
2401   Atomic::add_ptr(words, &_allocated_blocks_words);
2402   // Add to the global total
2403   MetaspaceAux::inc_used(mdtype(), words);
2404 }
2405 
2406 void SpaceManager::dec_total_from_size_metrics() {
2407   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2408   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2409   // Also deduct the overhead per Metachunk
2410   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2411 }
2412 
2413 void SpaceManager::initialize() {
2414   Metadebug::init_allocation_fail_alot_count();
2415   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2416     _chunks_in_use[i] = NULL;
2417   }
2418   _current_chunk = NULL;
2419   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
2420 }
2421 
2422 SpaceManager::~SpaceManager() {
2423   // This call this->_lock which can't be done while holding expand_lock()
2424   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2425          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2426          " allocated_chunks_words() " SIZE_FORMAT,
2427          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2428 
2429   MutexLockerEx fcl(SpaceManager::expand_lock(),
2430                     Mutex::_no_safepoint_check_flag);
2431 
2432   chunk_manager()->slow_locked_verify();
2433 
2434   dec_total_from_size_metrics();
2435 
2436   Log(gc, metaspace, freelist) log;
2437   if (log.is_trace()) {
2438     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
2439     ResourceMark rm;
2440     LogStream ls(log.trace());
2441     locked_print_chunks_in_use_on(&ls);
2442     if (block_freelists() != NULL) {
2443       block_freelists()->print_on(&ls);
2444     }
2445   }
2446 
2447   // Add all the chunks in use by this space manager
2448   // to the global list of free chunks.
2449 
2450   // Follow each list of chunks-in-use and add them to the
2451   // free lists.  Each list is NULL terminated.
2452 
2453   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
2454     Metachunk* chunks = chunks_in_use(i);
2455     chunk_manager()->return_chunk_list(i, chunks);
2456     set_chunks_in_use(i, NULL);
2457   }
2458 
2459   chunk_manager()->slow_locked_verify();
2460 
2461   if (_block_freelists != NULL) {
2462     delete _block_freelists;
2463   }
2464 }
2465 
2466 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2467   assert_lock_strong(_lock);
2468   // Allocations and deallocations are in raw_word_size
2469   size_t raw_word_size = get_allocation_word_size(word_size);
2470   // Lazily create a block_freelist
2471   if (block_freelists() == NULL) {
2472     _block_freelists = new BlockFreelist();
2473   }
2474   block_freelists()->return_block(p, raw_word_size);
2475 }
2476 
2477 // Adds a chunk to the list of chunks in use.
2478 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2479 
2480   assert(new_chunk != NULL, "Should not be NULL");
2481   assert(new_chunk->next() == NULL, "Should not be on a list");
2482 
2483   new_chunk->reset_empty();
2484 
2485   // Find the correct list and and set the current
2486   // chunk for that list.
2487   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
2488 
2489   if (index != HumongousIndex) {
2490     retire_current_chunk();
2491     set_current_chunk(new_chunk);
2492     new_chunk->set_next(chunks_in_use(index));
2493     set_chunks_in_use(index, new_chunk);
2494   } else {
2495     // For null class loader data and DumpSharedSpaces, the first chunk isn't
2496     // small, so small will be null.  Link this first chunk as the current
2497     // chunk.
2498     if (make_current) {
2499       // Set as the current chunk but otherwise treat as a humongous chunk.
2500       set_current_chunk(new_chunk);
2501     }
2502     // Link at head.  The _current_chunk only points to a humongous chunk for
2503     // the null class loader metaspace (class and data virtual space managers)
2504     // any humongous chunks so will not point to the tail
2505     // of the humongous chunks list.
2506     new_chunk->set_next(chunks_in_use(HumongousIndex));
2507     set_chunks_in_use(HumongousIndex, new_chunk);
2508 
2509     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2510   }
2511 
2512   // Add to the running sum of capacity
2513   inc_size_metrics(new_chunk->word_size());
2514 
2515   assert(new_chunk->is_empty(), "Not ready for reuse");
2516   Log(gc, metaspace, freelist) log;
2517   if (log.is_trace()) {
2518     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT ") ", sum_count_in_chunks_in_use());
2519     ResourceMark rm;
2520     LogStream ls(log.trace());
2521     new_chunk->print_on(&ls);
2522     chunk_manager()->locked_print_free_chunks(&ls);
2523   }
2524 }
2525 
2526 void SpaceManager::retire_current_chunk() {
2527   if (current_chunk() != NULL) {
2528     size_t remaining_words = current_chunk()->free_word_size();
2529     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
2530       MetaWord* ptr = current_chunk()->allocate(remaining_words);
2531       deallocate(ptr, remaining_words);
2532       inc_used_metrics(remaining_words);
2533     }
2534   }
2535 }
2536 
2537 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
2538   // Get a chunk from the chunk freelist
2539   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
2540 
2541   if (next == NULL) {
2542     next = vs_list()->get_new_chunk(chunk_word_size,
2543                                     medium_chunk_bunch());
2544   }
2545 
2546   Log(gc, metaspace, alloc) log;
2547   if (log.is_debug() && next != NULL &&
2548       SpaceManager::is_humongous(next->word_size())) {
2549     log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());
2550   }
2551 
2552   return next;
2553 }
2554 
2555 /*
2556  * The policy is to allocate up to _small_chunk_limit small chunks
2557  * after which only medium chunks are allocated.  This is done to
2558  * reduce fragmentation.  In some cases, this can result in a lot
2559  * of small chunks being allocated to the point where it's not
2560  * possible to expand.  If this happens, there may be no medium chunks
2561  * available and OOME would be thrown.  Instead of doing that,
2562  * if the allocation request size fits in a small chunk, an attempt
2563  * will be made to allocate a small chunk.
2564  */
2565 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
2566   size_t raw_word_size = get_allocation_word_size(word_size);
2567 
2568   if (raw_word_size + Metachunk::overhead() > small_chunk_size()) {
2569     return NULL;
2570   }
2571 
2572   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2573   MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag);
2574 
2575   Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size());
2576 
2577   MetaWord* mem = NULL;
2578 
2579   if (chunk != NULL) {
2580     // Add chunk to the in-use chunk list and do an allocation from it.
2581     // Add to this manager's list of chunks in use.
2582     add_chunk(chunk, false);
2583     mem = chunk->allocate(raw_word_size);
2584 
2585     inc_used_metrics(raw_word_size);
2586 
2587     // Track metaspace memory usage statistic.
2588     track_metaspace_memory_usage();
2589   }
2590 
2591   return mem;
2592 }
2593 
2594 MetaWord* SpaceManager::allocate(size_t word_size) {
2595   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2596   size_t raw_word_size = get_allocation_word_size(word_size);
2597   BlockFreelist* fl =  block_freelists();
2598   MetaWord* p = NULL;
2599   // Allocation from the dictionary is expensive in the sense that
2600   // the dictionary has to be searched for a size.  Don't allocate
2601   // from the dictionary until it starts to get fat.  Is this
2602   // a reasonable policy?  Maybe an skinny dictionary is fast enough
2603   // for allocations.  Do some profiling.  JJJ
2604   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
2605     p = fl->get_block(raw_word_size);
2606   }
2607   if (p == NULL) {
2608     p = allocate_work(raw_word_size);
2609   }
2610 
2611   return p;
2612 }
2613 
2614 // Returns the address of spaced allocated for "word_size".
2615 // This methods does not know about blocks (Metablocks)
2616 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2617   assert_lock_strong(_lock);
2618 #ifdef ASSERT
2619   if (Metadebug::test_metadata_failure()) {
2620     return NULL;
2621   }
2622 #endif
2623   // Is there space in the current chunk?
2624   MetaWord* result = NULL;
2625 
2626   if (current_chunk() != NULL) {
2627     result = current_chunk()->allocate(word_size);
2628   }
2629 
2630   if (result == NULL) {
2631     result = grow_and_allocate(word_size);
2632   }
2633 
2634   if (result != NULL) {
2635     inc_used_metrics(word_size);
2636     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2637            "Head of the list is being allocated");
2638   }
2639 
2640   return result;
2641 }
2642 
2643 void SpaceManager::verify() {
2644   // If there are blocks in the dictionary, then
2645   // verification of chunks does not work since
2646   // being in the dictionary alters a chunk.
2647   if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
2648     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2649       Metachunk* curr = chunks_in_use(i);
2650       while (curr != NULL) {
2651         curr->verify();
2652         verify_chunk_size(curr);
2653         curr = curr->next();
2654       }
2655     }
2656   }
2657 }
2658 
2659 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2660   assert(is_humongous(chunk->word_size()) ||
2661          chunk->word_size() == medium_chunk_size() ||
2662          chunk->word_size() == small_chunk_size() ||
2663          chunk->word_size() == specialized_chunk_size(),
2664          "Chunk size is wrong");
2665   return;
2666 }
2667 
2668 #ifdef ASSERT
2669 void SpaceManager::verify_allocated_blocks_words() {
2670   // Verification is only guaranteed at a safepoint.
2671   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2672     "Verification can fail if the applications is running");
2673   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2674          "allocation total is not consistent " SIZE_FORMAT
2675          " vs " SIZE_FORMAT,
2676          allocated_blocks_words(), sum_used_in_chunks_in_use());
2677 }
2678 
2679 #endif
2680 
2681 void SpaceManager::dump(outputStream* const out) const {
2682   size_t curr_total = 0;
2683   size_t waste = 0;
2684   uint i = 0;
2685   size_t used = 0;
2686   size_t capacity = 0;
2687 
2688   // Add up statistics for all chunks in this SpaceManager.
2689   for (ChunkIndex index = ZeroIndex;
2690        index < NumberOfInUseLists;
2691        index = next_chunk_index(index)) {
2692     for (Metachunk* curr = chunks_in_use(index);
2693          curr != NULL;
2694          curr = curr->next()) {
2695       out->print("%d) ", i++);
2696       curr->print_on(out);
2697       curr_total += curr->word_size();
2698       used += curr->used_word_size();
2699       capacity += curr->word_size();
2700       waste += curr->free_word_size() + curr->overhead();;
2701     }
2702   }
2703 
2704   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2705     if (block_freelists() != NULL) block_freelists()->print_on(out);
2706   }
2707 
2708   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2709   // Free space isn't wasted.
2710   waste -= free;
2711 
2712   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2713                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2714                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2715 }
2716 
2717 // MetaspaceAux
2718 
2719 
2720 size_t MetaspaceAux::_capacity_words[] = {0, 0};
2721 size_t MetaspaceAux::_used_words[] = {0, 0};
2722 
2723 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2724   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2725   return list == NULL ? 0 : list->free_bytes();
2726 }
2727 
2728 size_t MetaspaceAux::free_bytes() {
2729   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2730 }
2731 
2732 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2733   assert_lock_strong(SpaceManager::expand_lock());
2734   assert(words <= capacity_words(mdtype),
2735          "About to decrement below 0: words " SIZE_FORMAT
2736          " is greater than _capacity_words[%u] " SIZE_FORMAT,
2737          words, mdtype, capacity_words(mdtype));
2738   _capacity_words[mdtype] -= words;
2739 }
2740 
2741 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2742   assert_lock_strong(SpaceManager::expand_lock());
2743   // Needs to be atomic
2744   _capacity_words[mdtype] += words;
2745 }
2746 
2747 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2748   assert(words <= used_words(mdtype),
2749          "About to decrement below 0: words " SIZE_FORMAT
2750          " is greater than _used_words[%u] " SIZE_FORMAT,
2751          words, mdtype, used_words(mdtype));
2752   // For CMS deallocation of the Metaspaces occurs during the
2753   // sweep which is a concurrent phase.  Protection by the expand_lock()
2754   // is not enough since allocation is on a per Metaspace basis
2755   // and protected by the Metaspace lock.
2756   jlong minus_words = (jlong) - (jlong) words;
2757   Atomic::add_ptr(minus_words, &_used_words[mdtype]);
2758 }
2759 
2760 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2761   // _used_words tracks allocations for
2762   // each piece of metadata.  Those allocations are
2763   // generally done concurrently by different application
2764   // threads so must be done atomically.
2765   Atomic::add_ptr(words, &_used_words[mdtype]);
2766 }
2767 
2768 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2769   size_t used = 0;
2770   ClassLoaderDataGraphMetaspaceIterator iter;
2771   while (iter.repeat()) {
2772     Metaspace* msp = iter.get_next();
2773     // Sum allocated_blocks_words for each metaspace
2774     if (msp != NULL) {
2775       used += msp->used_words_slow(mdtype);
2776     }
2777   }
2778   return used * BytesPerWord;
2779 }
2780 
2781 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2782   size_t free = 0;
2783   ClassLoaderDataGraphMetaspaceIterator iter;
2784   while (iter.repeat()) {
2785     Metaspace* msp = iter.get_next();
2786     if (msp != NULL) {
2787       free += msp->free_words_slow(mdtype);
2788     }
2789   }
2790   return free * BytesPerWord;
2791 }
2792 
2793 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2794   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2795     return 0;
2796   }
2797   // Don't count the space in the freelists.  That space will be
2798   // added to the capacity calculation as needed.
2799   size_t capacity = 0;
2800   ClassLoaderDataGraphMetaspaceIterator iter;
2801   while (iter.repeat()) {
2802     Metaspace* msp = iter.get_next();
2803     if (msp != NULL) {
2804       capacity += msp->capacity_words_slow(mdtype);
2805     }
2806   }
2807   return capacity * BytesPerWord;
2808 }
2809 
2810 size_t MetaspaceAux::capacity_bytes_slow() {
2811 #ifdef PRODUCT
2812   // Use capacity_bytes() in PRODUCT instead of this function.
2813   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2814 #endif
2815   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2816   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2817   assert(capacity_bytes() == class_capacity + non_class_capacity,
2818          "bad accounting: capacity_bytes() " SIZE_FORMAT
2819          " class_capacity + non_class_capacity " SIZE_FORMAT
2820          " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2821          capacity_bytes(), class_capacity + non_class_capacity,
2822          class_capacity, non_class_capacity);
2823 
2824   return class_capacity + non_class_capacity;
2825 }
2826 
2827 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2828   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2829   return list == NULL ? 0 : list->reserved_bytes();
2830 }
2831 
2832 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2833   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2834   return list == NULL ? 0 : list->committed_bytes();
2835 }
2836 
2837 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2838 
2839 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2840   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2841   if (chunk_manager == NULL) {
2842     return 0;
2843   }
2844   chunk_manager->slow_verify();
2845   return chunk_manager->free_chunks_total_words();
2846 }
2847 
2848 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2849   return free_chunks_total_words(mdtype) * BytesPerWord;
2850 }
2851 
2852 size_t MetaspaceAux::free_chunks_total_words() {
2853   return free_chunks_total_words(Metaspace::ClassType) +
2854          free_chunks_total_words(Metaspace::NonClassType);
2855 }
2856 
2857 size_t MetaspaceAux::free_chunks_total_bytes() {
2858   return free_chunks_total_words() * BytesPerWord;
2859 }
2860 
2861 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2862   return Metaspace::get_chunk_manager(mdtype) != NULL;
2863 }
2864 
2865 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
2866   if (!has_chunk_free_list(mdtype)) {
2867     return MetaspaceChunkFreeListSummary();
2868   }
2869 
2870   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
2871   return cm->chunk_free_list_summary();
2872 }
2873 
2874 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2875   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
2876                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
2877 }
2878 
2879 void MetaspaceAux::print_on(outputStream* out) {
2880   Metaspace::MetadataType nct = Metaspace::NonClassType;
2881 
2882   out->print_cr(" Metaspace       "
2883                 "used "      SIZE_FORMAT "K, "
2884                 "capacity "  SIZE_FORMAT "K, "
2885                 "committed " SIZE_FORMAT "K, "
2886                 "reserved "  SIZE_FORMAT "K",
2887                 used_bytes()/K,
2888                 capacity_bytes()/K,
2889                 committed_bytes()/K,
2890                 reserved_bytes()/K);
2891 
2892   if (Metaspace::using_class_space()) {
2893     Metaspace::MetadataType ct = Metaspace::ClassType;
2894     out->print_cr("  class space    "
2895                   "used "      SIZE_FORMAT "K, "
2896                   "capacity "  SIZE_FORMAT "K, "
2897                   "committed " SIZE_FORMAT "K, "
2898                   "reserved "  SIZE_FORMAT "K",
2899                   used_bytes(ct)/K,
2900                   capacity_bytes(ct)/K,
2901                   committed_bytes(ct)/K,
2902                   reserved_bytes(ct)/K);
2903   }
2904 }
2905 
2906 // Print information for class space and data space separately.
2907 // This is almost the same as above.
2908 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2909   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2910   size_t capacity_bytes = capacity_bytes_slow(mdtype);
2911   size_t used_bytes = used_bytes_slow(mdtype);
2912   size_t free_bytes = free_bytes_slow(mdtype);
2913   size_t used_and_free = used_bytes + free_bytes +
2914                            free_chunks_capacity_bytes;
2915   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
2916              "K + unused in chunks " SIZE_FORMAT "K  + "
2917              " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2918              "K  capacity in allocated chunks " SIZE_FORMAT "K",
2919              used_bytes / K,
2920              free_bytes / K,
2921              free_chunks_capacity_bytes / K,
2922              used_and_free / K,
2923              capacity_bytes / K);
2924   // Accounting can only be correct if we got the values during a safepoint
2925   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2926 }
2927 
2928 // Print total fragmentation for class metaspaces
2929 void MetaspaceAux::print_class_waste(outputStream* out) {
2930   assert(Metaspace::using_class_space(), "class metaspace not used");
2931   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2932   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2933   ClassLoaderDataGraphMetaspaceIterator iter;
2934   while (iter.repeat()) {
2935     Metaspace* msp = iter.get_next();
2936     if (msp != NULL) {
2937       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2938       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2939       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2940       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2941       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2942       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2943       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2944     }
2945   }
2946   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2947                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2948                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2949                 "large count " SIZE_FORMAT,
2950                 cls_specialized_count, cls_specialized_waste,
2951                 cls_small_count, cls_small_waste,
2952                 cls_medium_count, cls_medium_waste, cls_humongous_count);
2953 }
2954 
2955 // Print total fragmentation for data and class metaspaces separately
2956 void MetaspaceAux::print_waste(outputStream* out) {
2957   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2958   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2959 
2960   ClassLoaderDataGraphMetaspaceIterator iter;
2961   while (iter.repeat()) {
2962     Metaspace* msp = iter.get_next();
2963     if (msp != NULL) {
2964       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2965       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2966       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2967       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2968       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2969       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2970       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2971     }
2972   }
2973   out->print_cr("Total fragmentation waste (words) doesn't count free space");
2974   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2975                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2976                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2977                         "large count " SIZE_FORMAT,
2978              specialized_count, specialized_waste, small_count,
2979              small_waste, medium_count, medium_waste, humongous_count);
2980   if (Metaspace::using_class_space()) {
2981     print_class_waste(out);
2982   }
2983 }
2984 
2985 // Dump global metaspace things from the end of ClassLoaderDataGraph
2986 void MetaspaceAux::dump(outputStream* out) {
2987   out->print_cr("All Metaspace:");
2988   out->print("data space: "); print_on(out, Metaspace::NonClassType);
2989   out->print("class space: "); print_on(out, Metaspace::ClassType);
2990   print_waste(out);
2991 }
2992 
2993 void MetaspaceAux::verify_free_chunks() {
2994   Metaspace::chunk_manager_metadata()->verify();
2995   if (Metaspace::using_class_space()) {
2996     Metaspace::chunk_manager_class()->verify();
2997   }
2998 }
2999 
3000 void MetaspaceAux::verify_capacity() {
3001 #ifdef ASSERT
3002   size_t running_sum_capacity_bytes = capacity_bytes();
3003   // For purposes of the running sum of capacity, verify against capacity
3004   size_t capacity_in_use_bytes = capacity_bytes_slow();
3005   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
3006          "capacity_words() * BytesPerWord " SIZE_FORMAT
3007          " capacity_bytes_slow()" SIZE_FORMAT,
3008          running_sum_capacity_bytes, capacity_in_use_bytes);
3009   for (Metaspace::MetadataType i = Metaspace::ClassType;
3010        i < Metaspace:: MetadataTypeCount;
3011        i = (Metaspace::MetadataType)(i + 1)) {
3012     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
3013     assert(capacity_bytes(i) == capacity_in_use_bytes,
3014            "capacity_bytes(%u) " SIZE_FORMAT
3015            " capacity_bytes_slow(%u)" SIZE_FORMAT,
3016            i, capacity_bytes(i), i, capacity_in_use_bytes);
3017   }
3018 #endif
3019 }
3020 
3021 void MetaspaceAux::verify_used() {
3022 #ifdef ASSERT
3023   size_t running_sum_used_bytes = used_bytes();
3024   // For purposes of the running sum of used, verify against used
3025   size_t used_in_use_bytes = used_bytes_slow();
3026   assert(used_bytes() == used_in_use_bytes,
3027          "used_bytes() " SIZE_FORMAT
3028          " used_bytes_slow()" SIZE_FORMAT,
3029          used_bytes(), used_in_use_bytes);
3030   for (Metaspace::MetadataType i = Metaspace::ClassType;
3031        i < Metaspace:: MetadataTypeCount;
3032        i = (Metaspace::MetadataType)(i + 1)) {
3033     size_t used_in_use_bytes = used_bytes_slow(i);
3034     assert(used_bytes(i) == used_in_use_bytes,
3035            "used_bytes(%u) " SIZE_FORMAT
3036            " used_bytes_slow(%u)" SIZE_FORMAT,
3037            i, used_bytes(i), i, used_in_use_bytes);
3038   }
3039 #endif
3040 }
3041 
3042 void MetaspaceAux::verify_metrics() {
3043   verify_capacity();
3044   verify_used();
3045 }
3046 
3047 
3048 // Metaspace methods
3049 
3050 size_t Metaspace::_first_chunk_word_size = 0;
3051 size_t Metaspace::_first_class_chunk_word_size = 0;
3052 
3053 size_t Metaspace::_commit_alignment = 0;
3054 size_t Metaspace::_reserve_alignment = 0;
3055 
3056 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
3057   initialize(lock, type);
3058 }
3059 
3060 Metaspace::~Metaspace() {
3061   delete _vsm;
3062   if (using_class_space()) {
3063     delete _class_vsm;
3064   }
3065 }
3066 
3067 VirtualSpaceList* Metaspace::_space_list = NULL;
3068 VirtualSpaceList* Metaspace::_class_space_list = NULL;
3069 
3070 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
3071 ChunkManager* Metaspace::_chunk_manager_class = NULL;
3072 
3073 #define VIRTUALSPACEMULTIPLIER 2
3074 
3075 #ifdef _LP64
3076 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
3077 
3078 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
3079   assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
3080   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
3081   // narrow_klass_base is the lower of the metaspace base and the cds base
3082   // (if cds is enabled).  The narrow_klass_shift depends on the distance
3083   // between the lower base and higher address.
3084   address lower_base;
3085   address higher_address;
3086 #if INCLUDE_CDS
3087   if (UseSharedSpaces) {
3088     higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
3089                           (address)(metaspace_base + compressed_class_space_size()));
3090     lower_base = MIN2(metaspace_base, cds_base);
3091   } else
3092 #endif
3093   {
3094     higher_address = metaspace_base + compressed_class_space_size();
3095     lower_base = metaspace_base;
3096 
3097     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
3098     // If compressed class space fits in lower 32G, we don't need a base.
3099     if (higher_address <= (address)klass_encoding_max) {
3100       lower_base = 0; // Effectively lower base is zero.
3101     }
3102   }
3103 
3104   Universe::set_narrow_klass_base(lower_base);
3105 
3106   if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
3107     Universe::set_narrow_klass_shift(0);
3108   } else {
3109     assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
3110     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
3111   }
3112   AOTLoader::set_narrow_klass_shift();
3113 }
3114 
3115 #if INCLUDE_CDS
3116 // Return TRUE if the specified metaspace_base and cds_base are close enough
3117 // to work with compressed klass pointers.
3118 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
3119   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
3120   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3121   address lower_base = MIN2((address)metaspace_base, cds_base);
3122   address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
3123                                 (address)(metaspace_base + compressed_class_space_size()));
3124   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
3125 }
3126 #endif
3127 
3128 // Try to allocate the metaspace at the requested addr.
3129 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
3130   assert(!DumpSharedSpaces, "compress klass space is allocated by MetaspaceShared class.");
3131   assert(using_class_space(), "called improperly");
3132   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3133   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
3134          "Metaspace size is too big");
3135   assert_is_aligned(requested_addr, _reserve_alignment);
3136   assert_is_aligned(cds_base, _reserve_alignment);
3137   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
3138 
3139   // Don't use large pages for the class space.
3140   bool large_pages = false;
3141 
3142 #if !(defined(AARCH64) || defined(AIX))
3143   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
3144                                              _reserve_alignment,
3145                                              large_pages,
3146                                              requested_addr);
3147 #else // AARCH64
3148   ReservedSpace metaspace_rs;
3149 
3150   // Our compressed klass pointers may fit nicely into the lower 32
3151   // bits.
3152   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
3153     metaspace_rs = ReservedSpace(compressed_class_space_size(),
3154                                  _reserve_alignment,
3155                                  large_pages,
3156                                  requested_addr);
3157   }
3158 
3159   if (! metaspace_rs.is_reserved()) {
3160     // Aarch64: Try to align metaspace so that we can decode a compressed
3161     // klass with a single MOVK instruction.  We can do this iff the
3162     // compressed class base is a multiple of 4G.
3163     // Aix: Search for a place where we can find memory. If we need to load
3164     // the base, 4G alignment is helpful, too.
3165     size_t increment = AARCH64_ONLY(4*)G;
3166     for (char *a = align_up(requested_addr, increment);
3167          a < (char*)(1024*G);
3168          a += increment) {
3169       if (a == (char *)(32*G)) {
3170         // Go faster from here on. Zero-based is no longer possible.
3171         increment = 4*G;
3172       }
3173 
3174 #if INCLUDE_CDS
3175       if (UseSharedSpaces
3176           && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
3177         // We failed to find an aligned base that will reach.  Fall
3178         // back to using our requested addr.
3179         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3180                                      _reserve_alignment,
3181                                      large_pages,
3182                                      requested_addr);
3183         break;
3184       }
3185 #endif
3186 
3187       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3188                                    _reserve_alignment,
3189                                    large_pages,
3190                                    a);
3191       if (metaspace_rs.is_reserved())
3192         break;
3193     }
3194   }
3195 
3196 #endif // AARCH64
3197 
3198   if (!metaspace_rs.is_reserved()) {
3199 #if INCLUDE_CDS
3200     if (UseSharedSpaces) {
3201       size_t increment = align_up(1*G, _reserve_alignment);
3202 
3203       // Keep trying to allocate the metaspace, increasing the requested_addr
3204       // by 1GB each time, until we reach an address that will no longer allow
3205       // use of CDS with compressed klass pointers.
3206       char *addr = requested_addr;
3207       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3208              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3209         addr = addr + increment;
3210         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3211                                      _reserve_alignment, large_pages, addr);
3212       }
3213     }
3214 #endif
3215     // If no successful allocation then try to allocate the space anywhere.  If
3216     // that fails then OOM doom.  At this point we cannot try allocating the
3217     // metaspace as if UseCompressedClassPointers is off because too much
3218     // initialization has happened that depends on UseCompressedClassPointers.
3219     // So, UseCompressedClassPointers cannot be turned off at this point.
3220     if (!metaspace_rs.is_reserved()) {
3221       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3222                                    _reserve_alignment, large_pages);
3223       if (!metaspace_rs.is_reserved()) {
3224         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
3225                                               compressed_class_space_size()));
3226       }
3227     }
3228   }
3229 
3230   // If we got here then the metaspace got allocated.
3231   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3232 
3233 #if INCLUDE_CDS
3234   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3235   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3236     FileMapInfo::stop_sharing_and_unmap(
3237         "Could not allocate metaspace at a compatible address");
3238   }
3239 #endif
3240   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3241                                   UseSharedSpaces ? (address)cds_base : 0);
3242 
3243   initialize_class_space(metaspace_rs);
3244 
3245   LogTarget(Trace, gc, metaspace) lt;
3246   if (lt.is_enabled()) {
3247     ResourceMark rm;
3248     LogStream ls(lt);
3249     print_compressed_class_space(&ls, requested_addr);
3250   }
3251 }
3252 
3253 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3254   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3255                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3256   if (_class_space_list != NULL) {
3257     address base = (address)_class_space_list->current_virtual_space()->bottom();
3258     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3259                  compressed_class_space_size(), p2i(base));
3260     if (requested_addr != 0) {
3261       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3262     }
3263     st->cr();
3264   }
3265 }
3266 
3267 // For UseCompressedClassPointers the class space is reserved above the top of
3268 // the Java heap.  The argument passed in is at the base of the compressed space.
3269 void Metaspace::initialize_class_space(ReservedSpace rs) {
3270   // The reserved space size may be bigger because of alignment, esp with UseLargePages
3271   assert(rs.size() >= CompressedClassSpaceSize,
3272          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
3273   assert(using_class_space(), "Must be using class space");
3274   _class_space_list = new VirtualSpaceList(rs);
3275   _chunk_manager_class = new ChunkManager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3276 
3277   if (!_class_space_list->initialization_succeeded()) {
3278     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3279   }
3280 }
3281 
3282 #endif
3283 
3284 void Metaspace::ergo_initialize() {
3285   if (DumpSharedSpaces) {
3286     // Using large pages when dumping the shared archive is currently not implemented.
3287     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3288   }
3289 
3290   size_t page_size = os::vm_page_size();
3291   if (UseLargePages && UseLargePagesInMetaspace) {
3292     page_size = os::large_page_size();
3293   }
3294 
3295   _commit_alignment  = page_size;
3296   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
3297 
3298   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
3299   // override if MaxMetaspaceSize was set on the command line or not.
3300   // This information is needed later to conform to the specification of the
3301   // java.lang.management.MemoryUsage API.
3302   //
3303   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3304   // globals.hpp to the aligned value, but this is not possible, since the
3305   // alignment depends on other flags being parsed.
3306   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3307 
3308   if (MetaspaceSize > MaxMetaspaceSize) {
3309     MetaspaceSize = MaxMetaspaceSize;
3310   }
3311 
3312   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
3313 
3314   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3315 
3316   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3317   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3318 
3319   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3320   set_compressed_class_space_size(CompressedClassSpaceSize);
3321 }
3322 
3323 void Metaspace::global_initialize() {
3324   MetaspaceGC::initialize();
3325 
3326 #if INCLUDE_CDS
3327   if (DumpSharedSpaces) {
3328     MetaspaceShared::initialize_shared_rs();
3329   } else if (UseSharedSpaces) {
3330     // If using shared space, open the file that contains the shared space
3331     // and map in the memory before initializing the rest of metaspace (so
3332     // the addresses don't conflict)
3333     address cds_address = NULL;
3334     FileMapInfo* mapinfo = new FileMapInfo();
3335 
3336     // Open the shared archive file, read and validate the header. If
3337     // initialization fails, shared spaces [UseSharedSpaces] are
3338     // disabled and the file is closed.
3339     // Map in spaces now also
3340     if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3341       size_t cds_total = MetaspaceShared::core_spaces_size();
3342       cds_address = (address)mapinfo->header()->region_addr(0);
3343 #ifdef _LP64
3344       if (using_class_space()) {
3345         char* cds_end = (char*)(cds_address + cds_total);
3346         cds_end = (char *)align_up(cds_end, _reserve_alignment);
3347         // If UseCompressedClassPointers is set then allocate the metaspace area
3348         // above the heap and above the CDS area (if it exists).
3349         allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3350         // map_heap_regions() compares the current narrow oop and klass encodings
3351         // with the archived ones, so it must be done after all encodings are determined.
3352         mapinfo->map_heap_regions();
3353       }
3354 #endif // _LP64
3355     } else {
3356       assert(!mapinfo->is_open() && !UseSharedSpaces,
3357              "archive file not closed or shared spaces not disabled.");
3358     }
3359   }
3360 #endif // INCLUDE_CDS
3361 
3362 #ifdef _LP64
3363   if (!UseSharedSpaces && using_class_space()) {
3364     if (DumpSharedSpaces) {
3365       // Already initialized inside MetaspaceShared::initialize_shared_rs()
3366     } else {
3367       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3368       allocate_metaspace_compressed_klass_ptrs(base, 0);
3369     }
3370   }
3371 #endif // _LP64
3372 
3373   // Initialize these before initializing the VirtualSpaceList
3374   _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3375   _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3376   // Make the first class chunk bigger than a medium chunk so it's not put
3377   // on the medium chunk list.   The next chunk will be small and progress
3378   // from there.  This size calculated by -version.
3379   _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3380                                      (CompressedClassSpaceSize/BytesPerWord)*2);
3381   _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3382   // Arbitrarily set the initial virtual space to a multiple
3383   // of the boot class loader size.
3384   size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3385   word_size = align_up(word_size, Metaspace::reserve_alignment_words());
3386 
3387   // Initialize the list of virtual spaces.
3388   _space_list = new VirtualSpaceList(word_size);
3389   _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3390 
3391   if (!_space_list->initialization_succeeded()) {
3392     vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3393   }
3394 
3395   _tracer = new MetaspaceTracer();
3396 }
3397 
3398 void Metaspace::post_initialize() {
3399   MetaspaceGC::post_initialize();
3400 }
3401 
3402 void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) {
3403   Metachunk* chunk = get_initialization_chunk(type, mdtype);
3404   if (chunk != NULL) {
3405     // Add to this manager's list of chunks in use and current_chunk().
3406     get_space_manager(mdtype)->add_chunk(chunk, true);
3407   }
3408 }
3409 
3410 Metachunk* Metaspace::get_initialization_chunk(MetaspaceType type, MetadataType mdtype) {
3411   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
3412 
3413   // Get a chunk from the chunk freelist
3414   Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3415 
3416   if (chunk == NULL) {
3417     chunk = get_space_list(mdtype)->get_new_chunk(chunk_word_size,
3418                                                   get_space_manager(mdtype)->medium_chunk_bunch());
3419   }
3420 
3421   return chunk;
3422 }
3423 
3424 void Metaspace::verify_global_initialization() {
3425   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
3426   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
3427 
3428   if (using_class_space()) {
3429     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
3430     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
3431   }
3432 }
3433 
3434 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3435   verify_global_initialization();
3436 
3437   // Allocate SpaceManager for metadata objects.
3438   _vsm = new SpaceManager(NonClassType, lock);
3439 
3440   if (using_class_space()) {
3441     // Allocate SpaceManager for classes.
3442     _class_vsm = new SpaceManager(ClassType, lock);
3443   }
3444 
3445   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3446 
3447   // Allocate chunk for metadata objects
3448   initialize_first_chunk(type, NonClassType);
3449 
3450   // Allocate chunk for class metadata objects
3451   if (using_class_space()) {
3452     initialize_first_chunk(type, ClassType);
3453   }
3454 }
3455 
3456 size_t Metaspace::align_word_size_up(size_t word_size) {
3457   size_t byte_size = word_size * wordSize;
3458   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3459 }
3460 
3461 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3462   assert(!_frozen, "sanity");
3463   // Don't use class_vsm() unless UseCompressedClassPointers is true.
3464   if (is_class_space_allocation(mdtype)) {
3465     return  class_vsm()->allocate(word_size);
3466   } else {
3467     return  vsm()->allocate(word_size);
3468   }
3469 }
3470 
3471 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3472   assert(!_frozen, "sanity");
3473   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3474   assert(delta_bytes > 0, "Must be");
3475 
3476   size_t before = 0;
3477   size_t after = 0;
3478   MetaWord* res;
3479   bool incremented;
3480 
3481   // Each thread increments the HWM at most once. Even if the thread fails to increment
3482   // the HWM, an allocation is still attempted. This is because another thread must then
3483   // have incremented the HWM and therefore the allocation might still succeed.
3484   do {
3485     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
3486     res = allocate(word_size, mdtype);
3487   } while (!incremented && res == NULL);
3488 
3489   if (incremented) {
3490     tracer()->report_gc_threshold(before, after,
3491                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3492     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
3493   }
3494 
3495   return res;
3496 }
3497 
3498 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3499   if (mdtype == ClassType) {
3500     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3501   } else {
3502     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3503   }
3504 }
3505 
3506 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3507   assert(!_frozen, "sanity");
3508   if (mdtype == ClassType) {
3509     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3510   } else {
3511     return vsm()->sum_free_in_chunks_in_use();
3512   }
3513 }
3514 
3515 // Space capacity in the Metaspace.  It includes
3516 // space in the list of chunks from which allocations
3517 // have been made. Don't include space in the global freelist and
3518 // in the space available in the dictionary which
3519 // is already counted in some chunk.
3520 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3521   if (mdtype == ClassType) {
3522     return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3523   } else {
3524     return vsm()->sum_capacity_in_chunks_in_use();
3525   }
3526 }
3527 
3528 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3529   return used_words_slow(mdtype) * BytesPerWord;
3530 }
3531 
3532 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3533   return capacity_words_slow(mdtype) * BytesPerWord;
3534 }
3535 
3536 size_t Metaspace::allocated_blocks_bytes() const {
3537   return vsm()->allocated_blocks_bytes() +
3538       (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
3539 }
3540 
3541 size_t Metaspace::allocated_chunks_bytes() const {
3542   return vsm()->allocated_chunks_bytes() +
3543       (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
3544 }
3545 
3546 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3547   assert(!_frozen, "sanity");
3548   assert(!SafepointSynchronize::is_at_safepoint()
3549          || Thread::current()->is_VM_thread(), "should be the VM thread");
3550 
3551   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3552 
3553   if (is_class && using_class_space()) {
3554     class_vsm()->deallocate(ptr, word_size);
3555   } else {
3556     vsm()->deallocate(ptr, word_size);
3557   }
3558 }
3559 
3560 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3561                               MetaspaceObj::Type type, TRAPS) {
3562   assert(!_frozen, "sanity");
3563   if (HAS_PENDING_EXCEPTION) {
3564     assert(false, "Should not allocate with exception pending");
3565     return NULL;  // caller does a CHECK_NULL too
3566   }
3567 
3568   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3569         "ClassLoaderData::the_null_class_loader_data() should have been used.");
3570 
3571   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3572 
3573   // Try to allocate metadata.
3574   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3575 
3576   if (result == NULL) {
3577     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
3578 
3579     // Allocation failed.
3580     if (is_init_completed()) {
3581       // Only start a GC if the bootstrapping has completed.
3582 
3583       // Try to clean out some memory and retry.
3584       result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3585           loader_data, word_size, mdtype);
3586     }
3587   }
3588 
3589   if (result == NULL) {
3590     SpaceManager* sm;
3591     if (is_class_space_allocation(mdtype)) {
3592       sm = loader_data->metaspace_non_null()->class_vsm();
3593     } else {
3594       sm = loader_data->metaspace_non_null()->vsm();
3595     }
3596 
3597     result = sm->get_small_chunk_and_allocate(word_size);
3598 
3599     if (result == NULL) {
3600       report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3601     }
3602   }
3603 
3604   // Zero initialize.
3605   Copy::fill_to_words((HeapWord*)result, word_size, 0);
3606 
3607   return result;
3608 }
3609 
3610 size_t Metaspace::class_chunk_size(size_t word_size) {
3611   assert(using_class_space(), "Has to use class space");
3612   return class_vsm()->calc_chunk_size(word_size);
3613 }
3614 
3615 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3616   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3617 
3618   // If result is still null, we are out of memory.
3619   Log(gc, metaspace, freelist) log;
3620   if (log.is_info()) {
3621     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
3622              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
3623     ResourceMark rm;
3624     if (log.is_debug()) {
3625       if (loader_data->metaspace_or_null() != NULL) {
3626         LogStream ls(log.debug());
3627         loader_data->dump(&ls);
3628       }
3629     }
3630     LogStream ls(log.info());
3631     MetaspaceAux::dump(&ls);
3632     ChunkManager::print_all_chunkmanagers(&ls);
3633   }
3634 
3635   bool out_of_compressed_class_space = false;
3636   if (is_class_space_allocation(mdtype)) {
3637     Metaspace* metaspace = loader_data->metaspace_non_null();
3638     out_of_compressed_class_space =
3639       MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3640       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3641       CompressedClassSpaceSize;
3642   }
3643 
3644   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3645   const char* space_string = out_of_compressed_class_space ?
3646     "Compressed class space" : "Metaspace";
3647 
3648   report_java_out_of_memory(space_string);
3649 
3650   if (JvmtiExport::should_post_resource_exhausted()) {
3651     JvmtiExport::post_resource_exhausted(
3652         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3653         space_string);
3654   }
3655 
3656   if (!is_init_completed()) {
3657     vm_exit_during_initialization("OutOfMemoryError", space_string);
3658   }
3659 
3660   if (out_of_compressed_class_space) {
3661     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3662   } else {
3663     THROW_OOP(Universe::out_of_memory_error_metaspace());
3664   }
3665 }
3666 
3667 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
3668   switch (mdtype) {
3669     case Metaspace::ClassType: return "Class";
3670     case Metaspace::NonClassType: return "Metadata";
3671     default:
3672       assert(false, "Got bad mdtype: %d", (int) mdtype);
3673       return NULL;
3674   }
3675 }
3676 
3677 void Metaspace::purge(MetadataType mdtype) {
3678   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3679 }
3680 
3681 void Metaspace::purge() {
3682   MutexLockerEx cl(SpaceManager::expand_lock(),
3683                    Mutex::_no_safepoint_check_flag);
3684   purge(NonClassType);
3685   if (using_class_space()) {
3686     purge(ClassType);
3687   }
3688 }
3689 
3690 void Metaspace::print_on(outputStream* out) const {
3691   // Print both class virtual space counts and metaspace.
3692   if (Verbose) {
3693     vsm()->print_on(out);
3694     if (using_class_space()) {
3695       class_vsm()->print_on(out);
3696     }
3697   }
3698 }
3699 
3700 bool Metaspace::contains(const void* ptr) {
3701   if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
3702     return true;
3703   }
3704   return contains_non_shared(ptr);
3705 }
3706 
3707 bool Metaspace::contains_non_shared(const void* ptr) {
3708   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
3709      return true;
3710   }
3711 
3712   return get_space_list(NonClassType)->contains(ptr);
3713 }
3714 
3715 void Metaspace::verify() {
3716   vsm()->verify();
3717   if (using_class_space()) {
3718     class_vsm()->verify();
3719   }
3720 }
3721 
3722 void Metaspace::dump(outputStream* const out) const {
3723   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
3724   vsm()->dump(out);
3725   if (using_class_space()) {
3726     out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
3727     class_vsm()->dump(out);
3728   }
3729 }
3730 
3731 /////////////// Unit tests ///////////////
3732 
3733 #ifndef PRODUCT
3734 
3735 class TestMetaspaceAuxTest : AllStatic {
3736  public:
3737   static void test_reserved() {
3738     size_t reserved = MetaspaceAux::reserved_bytes();
3739 
3740     assert(reserved > 0, "assert");
3741 
3742     size_t committed  = MetaspaceAux::committed_bytes();
3743     assert(committed <= reserved, "assert");
3744 
3745     size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3746     assert(reserved_metadata > 0, "assert");
3747     assert(reserved_metadata <= reserved, "assert");
3748 
3749     if (UseCompressedClassPointers) {
3750       size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3751       assert(reserved_class > 0, "assert");
3752       assert(reserved_class < reserved, "assert");
3753     }
3754   }
3755 
3756   static void test_committed() {
3757     size_t committed = MetaspaceAux::committed_bytes();
3758 
3759     assert(committed > 0, "assert");
3760 
3761     size_t reserved  = MetaspaceAux::reserved_bytes();
3762     assert(committed <= reserved, "assert");
3763 
3764     size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3765     assert(committed_metadata > 0, "assert");
3766     assert(committed_metadata <= committed, "assert");
3767 
3768     if (UseCompressedClassPointers) {
3769       size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3770       assert(committed_class > 0, "assert");
3771       assert(committed_class < committed, "assert");
3772     }
3773   }
3774 
3775   static void test_virtual_space_list_large_chunk() {
3776     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3777     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3778     // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be
3779     // vm_allocation_granularity aligned on Windows.
3780     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3781     large_size += (os::vm_page_size()/BytesPerWord);
3782     vs_list->get_new_chunk(large_size, 0);
3783   }
3784 
3785   static void test() {
3786     test_reserved();
3787     test_committed();
3788     test_virtual_space_list_large_chunk();
3789   }
3790 };
3791 
3792 void TestMetaspaceAux_test() {
3793   TestMetaspaceAuxTest::test();
3794 }
3795 
3796 class TestVirtualSpaceNodeTest {
3797   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3798                                           size_t& num_small_chunks,
3799                                           size_t& num_specialized_chunks) {
3800     num_medium_chunks = words_left / MediumChunk;
3801     words_left = words_left % MediumChunk;
3802 
3803     num_small_chunks = words_left / SmallChunk;
3804     words_left = words_left % SmallChunk;
3805     // how many specialized chunks can we get?
3806     num_specialized_chunks = words_left / SpecializedChunk;
3807     assert(words_left % SpecializedChunk == 0, "should be nothing left");
3808   }
3809 
3810  public:
3811   static void test() {
3812     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3813     const size_t vsn_test_size_words = MediumChunk  * 4;
3814     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3815 
3816     // The chunk sizes must be multiples of eachother, or this will fail
3817     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3818     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3819 
3820     { // No committed memory in VSN
3821       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3822       VirtualSpaceNode vsn(vsn_test_size_bytes);
3823       vsn.initialize();
3824       vsn.retire(&cm);
3825       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3826     }
3827 
3828     { // All of VSN is committed, half is used by chunks
3829       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3830       VirtualSpaceNode vsn(vsn_test_size_bytes);
3831       vsn.initialize();
3832       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3833       vsn.get_chunk_vs(MediumChunk);
3834       vsn.get_chunk_vs(MediumChunk);
3835       vsn.retire(&cm);
3836       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3837       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3838     }
3839 
3840     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3841     // This doesn't work for systems with vm_page_size >= 16K.
3842     if (page_chunks < MediumChunk) {
3843       // 4 pages of VSN is committed, some is used by chunks
3844       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3845       VirtualSpaceNode vsn(vsn_test_size_bytes);
3846 
3847       vsn.initialize();
3848       vsn.expand_by(page_chunks, page_chunks);
3849       vsn.get_chunk_vs(SmallChunk);
3850       vsn.get_chunk_vs(SpecializedChunk);
3851       vsn.retire(&cm);
3852 
3853       // committed - used = words left to retire
3854       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3855 
3856       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3857       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3858 
3859       assert(num_medium_chunks == 0, "should not get any medium chunks");
3860       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3861       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3862     }
3863 
3864     { // Half of VSN is committed, a humongous chunk is used
3865       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3866       VirtualSpaceNode vsn(vsn_test_size_bytes);
3867       vsn.initialize();
3868       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3869       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3870       vsn.retire(&cm);
3871 
3872       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3873       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3874       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3875 
3876       assert(num_medium_chunks == 0, "should not get any medium chunks");
3877       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3878       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3879     }
3880 
3881   }
3882 
3883 #define assert_is_available_positive(word_size) \
3884   assert(vsn.is_available(word_size), \
3885          #word_size ": " PTR_FORMAT " bytes were not available in " \
3886          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3887          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
3888 
3889 #define assert_is_available_negative(word_size) \
3890   assert(!vsn.is_available(word_size), \
3891          #word_size ": " PTR_FORMAT " bytes should not be available in " \
3892          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3893          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
3894 
3895   static void test_is_available_positive() {
3896     // Reserve some memory.
3897     VirtualSpaceNode vsn(os::vm_allocation_granularity());
3898     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3899 
3900     // Commit some memory.
3901     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3902     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3903     assert(expanded, "Failed to commit");
3904 
3905     // Check that is_available accepts the committed size.
3906     assert_is_available_positive(commit_word_size);
3907 
3908     // Check that is_available accepts half the committed size.
3909     size_t expand_word_size = commit_word_size / 2;
3910     assert_is_available_positive(expand_word_size);
3911   }
3912 
3913   static void test_is_available_negative() {
3914     // Reserve some memory.
3915     VirtualSpaceNode vsn(os::vm_allocation_granularity());
3916     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3917 
3918     // Commit some memory.
3919     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3920     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3921     assert(expanded, "Failed to commit");
3922 
3923     // Check that is_available doesn't accept a too large size.
3924     size_t two_times_commit_word_size = commit_word_size * 2;
3925     assert_is_available_negative(two_times_commit_word_size);
3926   }
3927 
3928   static void test_is_available_overflow() {
3929     // Reserve some memory.
3930     VirtualSpaceNode vsn(os::vm_allocation_granularity());
3931     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3932 
3933     // Commit some memory.
3934     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3935     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3936     assert(expanded, "Failed to commit");
3937 
3938     // Calculate a size that will overflow the virtual space size.
3939     void* virtual_space_max = (void*)(uintptr_t)-1;
3940     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
3941     size_t overflow_size = bottom_to_max + BytesPerWord;
3942     size_t overflow_word_size = overflow_size / BytesPerWord;
3943 
3944     // Check that is_available can handle the overflow.
3945     assert_is_available_negative(overflow_word_size);
3946   }
3947 
3948   static void test_is_available() {
3949     TestVirtualSpaceNodeTest::test_is_available_positive();
3950     TestVirtualSpaceNodeTest::test_is_available_negative();
3951     TestVirtualSpaceNodeTest::test_is_available_overflow();
3952   }
3953 };
3954 
3955 void TestVirtualSpaceNode_test() {
3956   TestVirtualSpaceNodeTest::test();
3957   TestVirtualSpaceNodeTest::test_is_available();
3958 }
3959 
3960 // The following test is placed here instead of a gtest / unittest file
3961 // because the ChunkManager class is only available in this file.
3962 void ChunkManager_test_list_index() {
3963   ChunkManager manager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3964 
3965   // Test previous bug where a query for a humongous class metachunk,
3966   // incorrectly matched the non-class medium metachunk size.
3967   {
3968     assert(MediumChunk > ClassMediumChunk, "Precondition for test");
3969 
3970     ChunkIndex index = manager.list_index(MediumChunk);
3971 
3972     assert(index == HumongousIndex,
3973            "Requested size is larger than ClassMediumChunk,"
3974            " so should return HumongousIndex. Got index: %d", (int)index);
3975   }
3976 
3977   // Check the specified sizes as well.
3978   {
3979     ChunkIndex index = manager.list_index(ClassSpecializedChunk);
3980     assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
3981   }
3982   {
3983     ChunkIndex index = manager.list_index(ClassSmallChunk);
3984     assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
3985   }
3986   {
3987     ChunkIndex index = manager.list_index(ClassMediumChunk);
3988     assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
3989   }
3990   {
3991     ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
3992     assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index);
3993   }
3994 }
3995 
3996 #endif // !PRODUCT
3997 
3998 #ifdef ASSERT
3999 
4000 // ChunkManagerReturnTest stresses taking/returning chunks from the ChunkManager. It takes and
4001 // returns chunks from/to the ChunkManager while keeping track of the expected ChunkManager
4002 // content.
4003 class ChunkManagerReturnTestImpl {
4004 
4005   VirtualSpaceNode _vsn;
4006   ChunkManager _cm;
4007 
4008   // The expected content of the chunk manager.
4009   unsigned _chunks_in_chunkmanager;
4010   size_t _words_in_chunkmanager;
4011 
4012   // A fixed size pool of chunks. Chunks may be in the chunk manager (free) or not (in use).
4013   static const int num_chunks = 256;
4014   Metachunk* _pool[num_chunks];
4015 
4016   // Helper, return a random position into the chunk pool.
4017   static int get_random_position() {
4018     return os::random() % num_chunks;
4019   }
4020 
4021   // Asserts that ChunkManager counters match expectations.
4022   void assert_counters() {
4023     assert(_vsn.container_count() == num_chunks - _chunks_in_chunkmanager, "vsn counter mismatch.");
4024     assert(_cm.free_chunks_count() == _chunks_in_chunkmanager, "cm counter mismatch.");
4025     assert(_cm.free_chunks_total_words() == _words_in_chunkmanager, "cm counter mismatch.");
4026   }
4027 
4028   // Get a random chunk size. Equal chance to get spec/med/small chunk size or
4029   // a humongous chunk size. The latter itself is random in the range of [med+spec..4*med).
4030   size_t get_random_chunk_size() {
4031     const size_t sizes [] = { SpecializedChunk, SmallChunk, MediumChunk };
4032     const int rand = os::random() % 4;
4033     if (rand < 3) {
4034       return sizes[rand];
4035     } else {
4036       // Note: this affects the max. size of space (see _vsn initialization in ctor).
4037       return align_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk);
4038     }
4039   }
4040 
4041   // Starting at pool index <start>+1, find the next chunk tagged as either free or in use, depending
4042   // on <is_free>. Search wraps. Returns its position, or -1 if no matching chunk was found.
4043   int next_matching_chunk(int start, bool is_free) const {
4044     assert(start >= 0 && start < num_chunks, "invalid parameter");
4045     int pos = start;
4046     do {
4047       if (++pos == num_chunks) {
4048         pos = 0;
4049       }
4050       if (_pool[pos]->is_tagged_free() == is_free) {
4051         return pos;
4052       }
4053     } while (pos != start);
4054     return -1;
4055   }
4056 
4057   // A structure to keep information about a chunk list including which
4058   // chunks are part of this list. This is needed to keep information about a chunk list
4059   // we will to return to the ChunkManager, because the original list will be destroyed.
4060   struct AChunkList {
4061     Metachunk* head;
4062     Metachunk* all[num_chunks];
4063     size_t size;
4064     int num;
4065     ChunkIndex index;
4066   };
4067 
4068   // Assemble, from the in-use chunks (not in the chunk manager) in the pool,
4069   // a random chunk list of max. length <list_size> of chunks with the same
4070   // ChunkIndex (chunk size).
4071   // Returns false if list cannot be assembled. List is returned in the <out>
4072   // structure. Returned list may be smaller than <list_size>.
4073   bool assemble_random_chunklist(AChunkList* out, int list_size) {
4074     // Choose a random in-use chunk from the pool...
4075     const int headpos = next_matching_chunk(get_random_position(), false);
4076     if (headpos == -1) {
4077       return false;
4078     }
4079     Metachunk* const head = _pool[headpos];
4080     out->all[0] = head;
4081     assert(head->is_tagged_free() == false, "Chunk state mismatch");
4082     // ..then go from there, chain it up with up to list_size - 1 number of other
4083     // in-use chunks of the same index.
4084     const ChunkIndex index = _cm.list_index(head->word_size());
4085     int num_added = 1;
4086     size_t size_added = head->word_size();
4087     int pos = headpos;
4088     Metachunk* tail = head;
4089     do {
4090       pos = next_matching_chunk(pos, false);
4091       if (pos != headpos) {
4092         Metachunk* c = _pool[pos];
4093         assert(c->is_tagged_free() == false, "Chunk state mismatch");
4094         if (index == _cm.list_index(c->word_size())) {
4095           tail->set_next(c);
4096           c->set_prev(tail);
4097           tail = c;
4098           out->all[num_added] = c;
4099           num_added ++;
4100           size_added += c->word_size();
4101         }
4102       }
4103     } while (num_added < list_size && pos != headpos);
4104     out->head = head;
4105     out->index = index;
4106     out->size = size_added;
4107     out->num = num_added;
4108     return true;
4109   }
4110 
4111   // Take a single random chunk from the ChunkManager.
4112   bool take_single_random_chunk_from_chunkmanager() {
4113     assert_counters();
4114     _cm.locked_verify();
4115     int pos = next_matching_chunk(get_random_position(), true);
4116     if (pos == -1) {
4117       return false;
4118     }
4119     Metachunk* c = _pool[pos];
4120     assert(c->is_tagged_free(), "Chunk state mismatch");
4121     // Note: instead of using ChunkManager::remove_chunk on this one chunk, we call
4122     // ChunkManager::free_chunks_get() with this chunk's word size. We really want
4123     // to exercise ChunkManager::free_chunks_get() because that one gets called for
4124     // normal chunk allocation.
4125     Metachunk* c2 = _cm.free_chunks_get(c->word_size());
4126     assert(c2 != NULL, "Unexpected.");
4127     assert(!c2->is_tagged_free(), "Chunk state mismatch");
4128     assert(c2->next() == NULL && c2->prev() == NULL, "Chunk should be outside of a list.");
4129     _chunks_in_chunkmanager --;
4130     _words_in_chunkmanager -= c->word_size();
4131     assert_counters();
4132     _cm.locked_verify();
4133     return true;
4134   }
4135 
4136   // Returns a single random chunk to the chunk manager. Returns false if that
4137   // was not possible (all chunks are already in the chunk manager).
4138   bool return_single_random_chunk_to_chunkmanager() {
4139     assert_counters();
4140     _cm.locked_verify();
4141     int pos = next_matching_chunk(get_random_position(), false);
4142     if (pos == -1) {
4143       return false;
4144     }
4145     Metachunk* c = _pool[pos];
4146     assert(c->is_tagged_free() == false, "wrong chunk information");
4147     _cm.return_single_chunk(_cm.list_index(c->word_size()), c);
4148     _chunks_in_chunkmanager ++;
4149     _words_in_chunkmanager += c->word_size();
4150     assert(c->is_tagged_free() == true, "wrong chunk information");
4151     assert_counters();
4152     _cm.locked_verify();
4153     return true;
4154   }
4155 
4156   // Return a random chunk list to the chunk manager. Returns the length of the
4157   // returned list.
4158   int return_random_chunk_list_to_chunkmanager(int list_size) {
4159     assert_counters();
4160     _cm.locked_verify();
4161     AChunkList aChunkList;
4162     if (!assemble_random_chunklist(&aChunkList, list_size)) {
4163       return 0;
4164     }
4165     // Before returning chunks are returned, they should be tagged in use.
4166     for (int i = 0; i < aChunkList.num; i ++) {
4167       assert(!aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4168     }
4169     _cm.return_chunk_list(aChunkList.index, aChunkList.head);
4170     _chunks_in_chunkmanager += aChunkList.num;
4171     _words_in_chunkmanager += aChunkList.size;
4172     // After all chunks are returned, check that they are now tagged free.
4173     for (int i = 0; i < aChunkList.num; i ++) {
4174       assert(aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4175     }
4176     assert_counters();
4177     _cm.locked_verify();
4178     return aChunkList.num;
4179   }
4180 
4181 public:
4182 
4183   ChunkManagerReturnTestImpl()
4184     : _vsn(align_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
4185     , _cm(SpecializedChunk, SmallChunk, MediumChunk)
4186     , _chunks_in_chunkmanager(0)
4187     , _words_in_chunkmanager(0)
4188   {
4189     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4190     // Allocate virtual space and allocate random chunks. Keep these chunks in the _pool. These chunks are
4191     // "in use", because not yet added to any chunk manager.
4192     _vsn.initialize();
4193     _vsn.expand_by(_vsn.reserved_words(), _vsn.reserved_words());
4194     for (int i = 0; i < num_chunks; i ++) {
4195       const size_t size = get_random_chunk_size();
4196       _pool[i] = _vsn.get_chunk_vs(size);
4197       assert(_pool[i] != NULL, "allocation failed");
4198     }
4199     assert_counters();
4200     _cm.locked_verify();
4201   }
4202 
4203   // Test entry point.
4204   // Return some chunks to the chunk manager (return phase). Take some chunks out (take phase). Repeat.
4205   // Chunks are choosen randomly. Number of chunks to return or taken are choosen randomly, but affected
4206   // by the <phase_length_factor> argument: a factor of 0.0 will cause the test to quickly alternate between
4207   // returning and taking, whereas a factor of 1.0 will take/return all chunks from/to the
4208   // chunks manager, thereby emptying or filling it completely.
4209   void do_test(float phase_length_factor) {
4210     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4211     assert_counters();
4212     // Execute n operations, and operation being the move of a single chunk to/from the chunk manager.
4213     const int num_max_ops = num_chunks * 100;
4214     int num_ops = num_max_ops;
4215     const int average_phase_length = (int)(phase_length_factor * num_chunks);
4216     int num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
4217     bool return_phase = true;
4218     while (num_ops > 0) {
4219       int chunks_moved = 0;
4220       if (return_phase) {
4221         // Randomly switch between returning a single chunk or a random length chunk list.
4222         if (os::random() % 2 == 0) {
4223           if (return_single_random_chunk_to_chunkmanager()) {
4224             chunks_moved = 1;
4225           }
4226         } else {
4227           const int list_length = MAX2(1, (os::random() % num_ops_until_switch));
4228           chunks_moved = return_random_chunk_list_to_chunkmanager(list_length);
4229         }
4230       } else {
4231         // Breath out.
4232         if (take_single_random_chunk_from_chunkmanager()) {
4233           chunks_moved = 1;
4234         }
4235       }
4236       num_ops -= chunks_moved;
4237       num_ops_until_switch -= chunks_moved;
4238       if (chunks_moved == 0 || num_ops_until_switch <= 0) {
4239         return_phase = !return_phase;
4240         num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
4241       }
4242     }
4243   }
4244 };
4245 
4246 void* setup_chunkmanager_returntests() {
4247   ChunkManagerReturnTestImpl* p = new ChunkManagerReturnTestImpl();
4248   return p;
4249 }
4250 
4251 void teardown_chunkmanager_returntests(void* p) {
4252   delete (ChunkManagerReturnTestImpl*) p;
4253 }
4254 
4255 void run_chunkmanager_returntests(void* p, float phase_length) {
4256   ChunkManagerReturnTestImpl* test = (ChunkManagerReturnTestImpl*) p;
4257   test->do_test(phase_length);
4258 }
4259 
4260 // The following test is placed here instead of a gtest / unittest file
4261 // because the ChunkManager class is only available in this file.
4262 class SpaceManagerTest : AllStatic {
4263   friend void SpaceManager_test_adjust_initial_chunk_size();
4264 
4265   static void test_adjust_initial_chunk_size(bool is_class) {
4266     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
4267     const size_t normal   = SpaceManager::small_chunk_size(is_class);
4268     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
4269 
4270 #define test_adjust_initial_chunk_size(value, expected, is_class_value)          \
4271     do {                                                                         \
4272       size_t v = value;                                                          \
4273       size_t e = expected;                                                       \
4274       assert(SpaceManager::adjust_initial_chunk_size(v, (is_class_value)) == e,  \
4275              "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, v);               \
4276     } while (0)
4277 
4278     // Smallest (specialized)
4279     test_adjust_initial_chunk_size(1,            smallest, is_class);
4280     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
4281     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
4282 
4283     // Small
4284     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
4285     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
4286     test_adjust_initial_chunk_size(normal,       normal, is_class);
4287 
4288     // Medium
4289     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
4290     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
4291     test_adjust_initial_chunk_size(medium,     medium, is_class);
4292 
4293     // Humongous
4294     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
4295 
4296 #undef test_adjust_initial_chunk_size
4297   }
4298 
4299   static void test_adjust_initial_chunk_size() {
4300     test_adjust_initial_chunk_size(false);
4301     test_adjust_initial_chunk_size(true);
4302   }
4303 };
4304 
4305 void SpaceManager_test_adjust_initial_chunk_size() {
4306   SpaceManagerTest::test_adjust_initial_chunk_size();
4307 }
4308 
4309 #endif // ASSERT