1 /*
   2  * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "gc_interface/collectedHeap.hpp"
  26 #include "memory/allocation.hpp"
  27 #include "memory/binaryTreeDictionary.hpp"
  28 #include "memory/freeList.hpp"
  29 #include "memory/collectorPolicy.hpp"
  30 #include "memory/filemap.hpp"
  32 #include "memory/gcLocker.hpp"
  33 #include "memory/metachunk.hpp"
  34 #include "memory/metaspace.hpp"
  35 #include "memory/metaspaceGCThresholdUpdater.hpp"
  36 #include "memory/metaspaceShared.hpp"
  37 #include "memory/metaspaceTracer.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.hpp"
  40 #include "runtime/atomic.inline.hpp"
  41 #include "runtime/globals.hpp"
  42 #include "runtime/init.hpp"
  43 #include "runtime/java.hpp"
  44 #include "runtime/mutex.hpp"
  45 #include "runtime/orderAccess.inline.hpp"
  46 #include "services/memTracker.hpp"
  47 #include "services/memoryService.hpp"
  48 #include "utilities/copy.hpp"
  49 #include "utilities/debug.hpp"
  50 #include "utilities/macros.hpp"
  51 
  52 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  53 
  54 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  55 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
  56 
  57 // Set this constant to enable slow integrity checking of the free chunk lists
  58 const bool metaspace_slow_verify = false;
  59 
  60 size_t const allocation_from_dictionary_limit = 4 * K;
  61 
  62 MetaWord* last_allocated = 0;
  63 
  64 size_t Metaspace::_compressed_class_space_size;
  65 const MetaspaceTracer* Metaspace::_tracer = NULL;
  66 
  67 // Used in declarations in SpaceManager and ChunkManager
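// There are NumberOfFreeLists (3) fixed-size chunk free lists; humongous
// chunks are kept in a dictionary instead, so a SpaceManager's in-use lists
// number NumberOfInUseLists (4), one per ChunkIndex value.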
  68 enum ChunkIndex {
  69   ZeroIndex = 0,
  70   SpecializedIndex = ZeroIndex,
  71   SmallIndex = SpecializedIndex + 1,
  72   MediumIndex = SmallIndex + 1,
  73   HumongousIndex = MediumIndex + 1,
  74   NumberOfFreeLists = 3,
  75   NumberOfInUseLists = 4
  76 };
  77 
  78 enum ChunkSizes {    // in words.
  79   ClassSpecializedChunk = 128,
  80   SpecializedChunk = 128,
  81   ClassSmallChunk = 256,
  82   SmallChunk = 512,
  83   ClassMediumChunk = 4 * K,
  84   MediumChunk = 8 * K
  85 };
  86 
  87 static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bounds");
  89   return (ChunkIndex) (i+1);
  90 }
  91 
  92 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
  93 uint MetaspaceGC::_shrink_factor = 0;
  94 bool MetaspaceGC::_should_concurrent_collect = false;
  95 
  96 typedef class FreeList<Metachunk> ChunkList;
  97 
  98 // Manages the global free lists of chunks.
  99 class ChunkManager : public CHeapObj<mtInternal> {
 100   friend class TestVirtualSpaceNodeTest;
 101 
  // Free lists of chunks of the three fixed sizes:
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  // Variable-sized humongous chunks are kept in a dictionary:
  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;
 111 
  // Running totals, in words and in number of chunks, of all free chunks
  // managed by this ChunkManager (fixed-size lists plus the humongous dictionary).
  size_t _free_chunks_total;
  size_t _free_chunks_count;
 115 
 116   void dec_free_chunks_total(size_t v) {
 117     assert(_free_chunks_count > 0 &&
 118              _free_chunks_total > 0,
 119              "About to go negative");
 120     Atomic::add_ptr(-1, &_free_chunks_count);
 121     jlong minus_v = (jlong) - (jlong) v;
 122     Atomic::add_ptr(minus_v, &_free_chunks_total);
 123   }
 124 
 125   // Debug support
 126 
 127   size_t sum_free_chunks();
 128   size_t sum_free_chunks_count();
 129 
 130   void locked_verify_free_chunks_total();
 131   void slow_locked_verify_free_chunks_total() {
 132     if (metaspace_slow_verify) {
 133       locked_verify_free_chunks_total();
 134     }
 135   }
 136   void locked_verify_free_chunks_count();
 137   void slow_locked_verify_free_chunks_count() {
 138     if (metaspace_slow_verify) {
 139       locked_verify_free_chunks_count();
 140     }
 141   }
 142   void verify_free_chunks_count();
 143 
 144  public:
 145 
 146   ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
 147       : _free_chunks_total(0), _free_chunks_count(0) {
 148     _free_chunks[SpecializedIndex].set_size(specialized_size);
 149     _free_chunks[SmallIndex].set_size(small_size);
 150     _free_chunks[MediumIndex].set_size(medium_size);
 151   }
 152 
  // Allocate (remove) a chunk of the given word size from the global freelist.
 154   Metachunk* chunk_freelist_allocate(size_t word_size);
 155 
  // Map a size to a list index assuming that there are lists
  // for specialized, small, medium, and humongous chunks.
 158   static ChunkIndex list_index(size_t size);
 159 
 160   // Remove the chunk from its freelist.  It is
 161   // expected to be on one of the _free_chunks[] lists.
 162   void remove_chunk(Metachunk* chunk);
 163 
 164   // Add the simple linked list of chunks to the freelist of chunks
 165   // of type index.
 166   void return_chunks(ChunkIndex index, Metachunk* chunks);
 167 
 168   // Total of the space in the free chunks list
 169   size_t free_chunks_total_words();
 170   size_t free_chunks_total_bytes();
 171 
 172   // Number of chunks in the free chunks list
 173   size_t free_chunks_count();
 174 
 175   void inc_free_chunks_total(size_t v, size_t count = 1) {
 176     Atomic::add_ptr(count, &_free_chunks_count);
 177     Atomic::add_ptr(v, &_free_chunks_total);
 178   }
 179   ChunkTreeDictionary* humongous_dictionary() {
 180     return &_humongous_dictionary;
 181   }
 182 
 183   ChunkList* free_chunks(ChunkIndex index);
 184 
 185   // Returns the list for the given chunk word size.
 186   ChunkList* find_free_chunks_list(size_t word_size);
 187 
 188   // Remove from a list by size.  Selects list based on size of chunk.
 189   Metachunk* free_chunks_get(size_t chunk_word_size);
 190 
 191 #define index_bounds_check(index)                                         \
 192   assert(index == SpecializedIndex ||                                     \
 193          index == SmallIndex ||                                           \
 194          index == MediumIndex ||                                          \
 195          index == HumongousIndex, err_msg("Bad index: %d", (int) index))
 196 
 197   size_t num_free_chunks(ChunkIndex index) const {
 198     index_bounds_check(index);
 199 
 200     if (index == HumongousIndex) {
 201       return _humongous_dictionary.total_free_blocks();
 202     }
 203 
 204     ssize_t count = _free_chunks[index].count();
 205     return count == -1 ? 0 : (size_t) count;
 206   }
 207 
 208   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 209     index_bounds_check(index);
 210 
 211     size_t word_size = 0;
 212     if (index == HumongousIndex) {
 213       word_size = _humongous_dictionary.total_size();
 214     } else {
 215       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 216       word_size = size_per_chunk_in_words * num_free_chunks(index);
 217     }
 218 
 219     return word_size * BytesPerWord;
 220   }
 221 
 222   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 223     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 224                                          num_free_chunks(SmallIndex),
 225                                          num_free_chunks(MediumIndex),
 226                                          num_free_chunks(HumongousIndex),
 227                                          size_free_chunks_in_bytes(SpecializedIndex),
 228                                          size_free_chunks_in_bytes(SmallIndex),
 229                                          size_free_chunks_in_bytes(MediumIndex),
 230                                          size_free_chunks_in_bytes(HumongousIndex));
 231   }
 232 
 233   // Debug support
 234   void verify();
 235   void slow_verify() {
 236     if (metaspace_slow_verify) {
 237       verify();
 238     }
 239   }
 240   void locked_verify();
 241   void slow_locked_verify() {
 242     if (metaspace_slow_verify) {
 243       locked_verify();
 244     }
 245   }
 246   void verify_free_chunks_total();
 247 
 248   void locked_print_free_chunks(outputStream* st);
 249   void locked_print_sum_free_chunks(outputStream* st);
 250 
 251   void print_on(outputStream* st) const;
 252 };
 253 
 254 // Used to manage the free list of Metablocks (a block corresponds
 255 // to the allocation of a quantum of metadata).
 256 class BlockFreelist VALUE_OBJ_CLASS_SPEC {
 257   BlockTreeDictionary* _dictionary;
 258 
 259   // Only allocate and split from freelist if the size of the allocation
 260   // is at least 1/4th the size of the available block.
 261   const static int WasteMultiplier = 4;
 262 
 263   // Accessors
 264   BlockTreeDictionary* dictionary() const { return _dictionary; }
 265 
 266  public:
 267   BlockFreelist();
 268   ~BlockFreelist();
 269 
 270   // Get and return a block to the free list
 271   MetaWord* get_block(size_t word_size);
 272   void return_block(MetaWord* p, size_t word_size);
 273 
  size_t total_size() {
    if (dictionary() == NULL) {
      return 0;
    } else {
      return dictionary()->total_size();
    }
  }
 281 
 282   void print_on(outputStream* st) const;
 283 };
 284 
 285 // A VirtualSpaceList node.
 286 class VirtualSpaceNode : public CHeapObj<mtClass> {
 287   friend class VirtualSpaceList;
 288 
 289   // Link to next VirtualSpaceNode
 290   VirtualSpaceNode* _next;
 291 
  // The entire memory region reserved for this node's VirtualSpace
 293   MemRegion _reserved;
 294   ReservedSpace _rs;
 295   VirtualSpace _virtual_space;
  // Address of the next available space in the virtual space (allocation top)
  MetaWord* _top;
 297   // count of chunks contained in this VirtualSpace
 298   uintx _container_count;
 299 
 300   // Convenience functions to access the _virtual_space
 301   char* low()  const { return virtual_space()->low(); }
 302   char* high() const { return virtual_space()->high(); }
 303 
 304   // The first Metachunk will be allocated at the bottom of the
 305   // VirtualSpace
 306   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 307 
 308   // Committed but unused space in the virtual space
 309   size_t free_words_in_vs() const;
 310  public:
 311 
 312   VirtualSpaceNode(size_t byte_size);
 313   VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
 314   ~VirtualSpaceNode();
 315 
 316   // Convenience functions for logical bottom and end
 317   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 318   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 319 
 320   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 321 
 322   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 323   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 324 
 325   bool is_pre_committed() const { return _virtual_space.special(); }
 326 
  // Accessors
 329   VirtualSpaceNode* next() { return _next; }
 330   void set_next(VirtualSpaceNode* v) { _next = v; }
 331 
 332   void set_reserved(MemRegion const v) { _reserved = v; }
 333   void set_top(MetaWord* v) { _top = v; }
 334 
 335   // Accessors
 336   MemRegion* reserved() { return &_reserved; }
 337   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 338 
 339   // Returns true if "word_size" is available in the VirtualSpace
 340   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 341 
 342   MetaWord* top() const { return _top; }
 343   void inc_top(size_t word_size) { _top += word_size; }
 344 
 345   uintx container_count() { return _container_count; }
 346   void inc_container_count();
 347   void dec_container_count();
 348 #ifdef ASSERT
 349   uintx container_count_slow();
 350   void verify_container_count();
 351 #endif
 352 
 353   // used and capacity in this single entry in the list
 354   size_t used_words_in_vs() const;
 355   size_t capacity_words_in_vs() const;
 356 
 357   bool initialize();
 358 
 359   // get space from the virtual space
 360   Metachunk* take_from_committed(size_t chunk_word_size);
 361 
 362   // Allocate a chunk from the virtual space and return it.
 363   Metachunk* get_chunk_vs(size_t chunk_word_size);
 364 
  // Expands the committed space in the virtual space.  Delegates
  // to VirtualSpace.
 367   bool expand_by(size_t min_words, size_t preferred_words);
 368 
 369   // In preparation for deleting this node, remove all the chunks
 370   // in the node from any freelist.
 371   void purge(ChunkManager* chunk_manager);
 372 
 373   // If an allocation doesn't fit in the current node a new node is created.
 374   // Allocate chunks out of the remaining committed space in this node
 375   // to avoid wasting that memory.
 376   // This always adds up because all the chunk sizes are multiples of
 377   // the smallest chunk size.
 378   void retire(ChunkManager* chunk_manager);
 379 
 380 #ifdef ASSERT
 381   // Debug support
 382   void mangle();
 383 #endif
 384 
 385   void print_on(outputStream* st) const;
 386 };
 387 
 388 #define assert_is_ptr_aligned(ptr, alignment) \
 389   assert(is_ptr_aligned(ptr, alignment),      \
 390     err_msg(PTR_FORMAT " is not aligned to "  \
 391       SIZE_FORMAT, ptr, alignment))
 392 
 393 #define assert_is_size_aligned(size, alignment) \
 394   assert(is_size_aligned(size, alignment),      \
 395     err_msg(SIZE_FORMAT " is not aligned to "   \
 396        SIZE_FORMAT, size, alignment))
 397 
 398 
 399 // Decide if large pages should be committed when the memory is reserved.
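// Large pages are committed up front only when the OS cannot commit
// large-page memory on demand, and only if MetaspaceGC would permit an
// expansion of this size anyway.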
 400 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 401   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 402     size_t words = bytes / BytesPerWord;
 403     bool is_class = false; // We never reserve large pages for the class space.
 404     if (MetaspaceGC::can_expand(words, is_class) &&
 405         MetaspaceGC::allowed_expansion() >= words) {
 406       return true;
 407     }
 408   }
 409 
 410   return false;
 411 }
 412 
// "bytes" is the size to reserve for the associated VirtualSpace.
 414 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
 415   assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
 416 
 417 #if INCLUDE_CDS
  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // at a configurable address (SharedBaseAddress), generally at the top of
  // the Java heap, so other memory addresses don't conflict.
 421   if (DumpSharedSpaces) {
 422     bool large_pages = false; // No large pages when dumping the CDS archive.
 423     char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
 424 
 425     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
 426     if (_rs.is_reserved()) {
 427       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
 428     } else {
 429       // Get a mmap region anywhere if the SharedBaseAddress fails.
 430       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 431     }
 432     MetaspaceShared::set_shared_rs(&_rs);
 433   } else
 434 #endif
 435   {
 436     bool large_pages = should_commit_large_pages_when_reserving(bytes);
 437 
 438     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 439   }
 440 
 441   if (_rs.is_reserved()) {
 442     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 443     assert(_rs.size() != 0, "Catch if we get a 0 size");
 444     assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
 445     assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
 446 
 447     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 448   }
 449 }
 450 
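// Remove all the chunks in this node from the ChunkManager's free lists,
// in preparation for deleting the node.  Every chunk in the node is expected
// to already be tagged free.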
 451 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 452   Metachunk* chunk = first_chunk();
 453   Metachunk* invalid_chunk = (Metachunk*) top();
 454   while (chunk < invalid_chunk ) {
 455     assert(chunk->is_tagged_free(), "Should be tagged free");
 456     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 457     chunk_manager->remove_chunk(chunk);
 458     assert(chunk->next() == NULL &&
 459            chunk->prev() == NULL,
 460            "Was not removed from its list");
 461     chunk = (Metachunk*) next;
 462   }
 463 }
 464 
 465 #ifdef ASSERT
 466 uintx VirtualSpaceNode::container_count_slow() {
 467   uintx count = 0;
 468   Metachunk* chunk = first_chunk();
 469   Metachunk* invalid_chunk = (Metachunk*) top();
 470   while (chunk < invalid_chunk ) {
 471     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 472     // Don't count the chunks on the free lists.  Those are
 473     // still part of the VirtualSpaceNode but not currently
 474     // counted.
 475     if (!chunk->is_tagged_free()) {
 476       count++;
 477     }
 478     chunk = (Metachunk*) next;
 479   }
 480   return count;
 481 }
 482 #endif
 483 
 484 // List of VirtualSpaces for metadata allocation.
 485 class VirtualSpaceList : public CHeapObj<mtClass> {
 486   friend class VirtualSpaceNode;
 487 
 488   enum VirtualSpaceSizes {
 489     VirtualSpaceSize = 256 * K
 490   };
 491 
 492   // Head of the list
 493   VirtualSpaceNode* _virtual_space_list;
 494   // virtual space currently being used for allocations
 495   VirtualSpaceNode* _current_virtual_space;
 496 
 497   // Is this VirtualSpaceList used for the compressed class space
 498   bool _is_class;
 499 
 500   // Sum of reserved and committed memory in the virtual spaces
 501   size_t _reserved_words;
 502   size_t _committed_words;
 503 
 504   // Number of virtual spaces
 505   size_t _virtual_space_count;
 506 
 507   ~VirtualSpaceList();
 508 
 509   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
 510 
 511   void set_virtual_space_list(VirtualSpaceNode* v) {
 512     _virtual_space_list = v;
 513   }
 514   void set_current_virtual_space(VirtualSpaceNode* v) {
 515     _current_virtual_space = v;
 516   }
 517 
 518   void link_vs(VirtualSpaceNode* new_entry);
 519 
 520   // Get another virtual space and add it to the list.  This
 521   // is typically prompted by a failed attempt to allocate a chunk
 522   // and is typically followed by the allocation of a chunk.
 523   bool create_new_virtual_space(size_t vs_word_size);
 524 
 525   // Chunk up the unused committed space in the current
 526   // virtual space and add the chunks to the free list.
 527   void retire_current_virtual_space();
 528 
 529  public:
 530   VirtualSpaceList(size_t word_size);
 531   VirtualSpaceList(ReservedSpace rs);
 532 
 533   size_t free_bytes();
 534 
 535   Metachunk* get_new_chunk(size_t word_size,
 536                            size_t grow_chunks_by_words,
 537                            size_t medium_chunk_bunch);
 538 
 539   bool expand_node_by(VirtualSpaceNode* node,
 540                       size_t min_words,
 541                       size_t preferred_words);
 542 
 543   bool expand_by(size_t min_words,
 544                  size_t preferred_words);
 545 
 546   VirtualSpaceNode* current_virtual_space() {
 547     return _current_virtual_space;
 548   }
 549 
 550   bool is_class() const { return _is_class; }
 551 
 552   bool initialization_succeeded() { return _virtual_space_list != NULL; }
 553 
 554   size_t reserved_words()  { return _reserved_words; }
 555   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
 556   size_t committed_words() { return _committed_words; }
 557   size_t committed_bytes() { return committed_words() * BytesPerWord; }
 558 
 559   void inc_reserved_words(size_t v);
 560   void dec_reserved_words(size_t v);
 561   void inc_committed_words(size_t v);
 562   void dec_committed_words(size_t v);
 563   void inc_virtual_space_count();
 564   void dec_virtual_space_count();
 565 
 566   bool contains(const void* ptr);
 567 
  // Unlink empty VirtualSpaceNodes and free them.
 569   void purge(ChunkManager* chunk_manager);
 570 
 571   void print_on(outputStream* st) const;
 572 
 573   class VirtualSpaceListIterator : public StackObj {
 574     VirtualSpaceNode* _virtual_spaces;
 575    public:
 576     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
 577       _virtual_spaces(virtual_spaces) {}
 578 
 579     bool repeat() {
 580       return _virtual_spaces != NULL;
 581     }
 582 
 583     VirtualSpaceNode* get_next() {
 584       VirtualSpaceNode* result = _virtual_spaces;
 585       if (_virtual_spaces != NULL) {
 586         _virtual_spaces = _virtual_spaces->next();
 587       }
 588       return result;
 589     }
 590   };
 591 };
 592 
 593 class Metadebug : AllStatic {
 594   // Debugging support for Metaspaces
 595   static int _allocation_fail_alot_count;
 596 
 597  public:
 598 
 599   static void init_allocation_fail_alot_count();
 600 #ifdef ASSERT
 601   static bool test_metadata_failure();
 602 #endif
 603 };
 604 
 605 int Metadebug::_allocation_fail_alot_count = 0;
 606 
 607 //  SpaceManager - used by Metaspace to handle allocations
 608 class SpaceManager : public CHeapObj<mtClass> {
 609   friend class Metaspace;
 610   friend class Metadebug;
 611 
 612  private:
 613 
 614   // protects allocations
 615   Mutex* const _lock;
 616 
 617   // Type of metadata allocated.
 618   Metaspace::MetadataType _mdtype;
 619 
 620   // List of chunks in use by this SpaceManager.  Allocations
 621   // are done from the current chunk.  The list is used for deallocating
 622   // chunks when the SpaceManager is freed.
 623   Metachunk* _chunks_in_use[NumberOfInUseLists];
 624   Metachunk* _current_chunk;
 625 
 626   // Number of small chunks to allocate to a manager
 627   // If class space manager, small chunks are unlimited
 628   static uint const _small_chunk_limit;
 629 
  // Sum of all words allocated as blocks (i.e., actually used)
  // in this SpaceManager's chunks
  size_t _allocated_blocks_words;

  // Total size, in words, and count of all chunks allocated to this SpaceManager
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;
 636 
 637   // Free lists of blocks are per SpaceManager since they
 638   // are assumed to be in chunks in use by the SpaceManager
 639   // and all chunks in use by a SpaceManager are freed when
 640   // the class loader using the SpaceManager is collected.
 641   BlockFreelist _block_freelists;
 642 
 643   // protects virtualspace and chunk expansions
 644   static const char*  _expand_lock_name;
 645   static const int    _expand_lock_rank;
 646   static Mutex* const _expand_lock;
 647 
 648  private:
 649   // Accessors
 650   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
 651   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
 652     _chunks_in_use[index] = v;
 653   }
 654 
 655   BlockFreelist* block_freelists() const {
 656     return (BlockFreelist*) &_block_freelists;
 657   }
 658 
 659   Metaspace::MetadataType mdtype() { return _mdtype; }
 660 
 661   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
 662   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
 663 
 664   Metachunk* current_chunk() const { return _current_chunk; }
 665   void set_current_chunk(Metachunk* v) {
 666     _current_chunk = v;
 667   }
 668 
 669   Metachunk* find_current_chunk(size_t word_size);
 670 
 671   // Add chunk to the list of chunks in use
 672   void add_chunk(Metachunk* v, bool make_current);
 673   void retire_current_chunk();
 674 
 675   Mutex* lock() const { return _lock; }
 676 
 677   const char* chunk_size_name(ChunkIndex index) const;
 678 
 679  protected:
 680   void initialize();
 681 
 682  public:
 683   SpaceManager(Metaspace::MetadataType mdtype,
 684                Mutex* lock);
 685   ~SpaceManager();
 686 
 687   enum ChunkMultiples {
 688     MediumChunkMultiple = 4
 689   };
 690 
 691   bool is_class() { return _mdtype == Metaspace::ClassType; }
 692 
 693   // Accessors
  size_t specialized_chunk_size() { return (size_t)(is_class() ? ClassSpecializedChunk : SpecializedChunk); }
  size_t small_chunk_size()       { return (size_t)(is_class() ? ClassSmallChunk : SmallChunk); }
  size_t medium_chunk_size()      { return (size_t)(is_class() ? ClassMediumChunk : MediumChunk); }
 697   size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }
 698 
 699   size_t smallest_chunk_size()  { return specialized_chunk_size(); }
 700 
 701   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
 702   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
 703   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
 704   size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
 705   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
 706 
 707   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
 708 
 709   static Mutex* expand_lock() { return _expand_lock; }
 710 
  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks
  // by the given size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Remove this SpaceManager's portion from the running sums. That is,
  // the global running sums for Metachunks and Metablocks are
  // decremented for all the Metachunks in use by this SpaceManager.
 721   void dec_total_from_size_metrics();
 722 
 723   // Set the sizes for the initial chunks.
 724   void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
 725                                size_t* chunk_word_size,
 726                                size_t* class_chunk_word_size);
 727 
 728   size_t sum_capacity_in_chunks_in_use() const;
 729   size_t sum_used_in_chunks_in_use() const;
 730   size_t sum_free_in_chunks_in_use() const;
 731   size_t sum_waste_in_chunks_in_use() const;
 732   size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
 733 
 734   size_t sum_count_in_chunks_in_use();
 735   size_t sum_count_in_chunks_in_use(ChunkIndex i);
 736 
 737   Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);
 738 
 739   // Block allocation and deallocation.
 740   // Allocates a block from the current chunk
 741   MetaWord* allocate(size_t word_size);
 742 
 743   // Helper for allocations
 744   MetaWord* allocate_work(size_t word_size);
 745 
 746   // Returns a block to the per manager freelist
 747   void deallocate(MetaWord* p, size_t word_size);
 748 
  // Based on the allocation size and a minimum chunk size, compute
  // the chunk size to request when expanding space for an allocation.
 751   size_t calc_chunk_size(size_t allocation_word_size);
 752 
 753   // Called when an allocation from the current chunk fails.
 754   // Gets a new chunk (may require getting a new virtual space),
 755   // and allocates from that chunk.
 756   MetaWord* grow_and_allocate(size_t word_size);
 757 
 758   // Notify memory usage to MemoryService.
 759   void track_metaspace_memory_usage();
 760 
 761   // debugging support.
 762 
 763   void dump(outputStream* const out) const;
 764   void print_on(outputStream* st) const;
 765   void locked_print_chunks_in_use_on(outputStream* st) const;
 766 
 767   void verify();
 768   void verify_chunk_size(Metachunk* chunk);
 769   NOT_PRODUCT(void mangle_freed_chunks();)
 770 #ifdef ASSERT
 771   void verify_allocated_blocks_words();
 772 #endif
 773 
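  // Round a requested word size up to the raw size actually consumed from a
  // chunk: at least the size of a Metablock and aligned to the Metachunk
  // object alignment.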
 774   size_t get_raw_word_size(size_t word_size) {
 775     size_t byte_size = word_size * BytesPerWord;
 776 
 777     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
 778     raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());
 779 
 780     size_t raw_word_size = raw_bytes_size / BytesPerWord;
 781     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
 782 
 783     return raw_word_size;
 784   }
 785 };
 786 
 787 uint const SpaceManager::_small_chunk_limit = 4;
 788 
 789 const char* SpaceManager::_expand_lock_name =
 790   "SpaceManager chunk allocation lock";
 791 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
 792 Mutex* const SpaceManager::_expand_lock =
 793   new Mutex(SpaceManager::_expand_lock_rank,
 794             SpaceManager::_expand_lock_name,
 795             Mutex::_allow_vm_block_flag,
 796             Monitor::_safepoint_check_never);
 797 
 798 void VirtualSpaceNode::inc_container_count() {
 799   assert_lock_strong(SpaceManager::expand_lock());
 800   _container_count++;
 801   DEBUG_ONLY(verify_container_count();)
 802 }
 803 
 804 void VirtualSpaceNode::dec_container_count() {
 805   assert_lock_strong(SpaceManager::expand_lock());
 806   _container_count--;
 807 }
 808 
 809 #ifdef ASSERT
 810 void VirtualSpaceNode::verify_container_count() {
 811   assert(_container_count == container_count_slow(),
 812     err_msg("Inconsistency in container_count _container_count " UINTX_FORMAT
 813             " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow()));
 814 }
 815 #endif
 816 
 817 // BlockFreelist methods
 818 
 819 BlockFreelist::BlockFreelist() : _dictionary(NULL) {}
 820 
 821 BlockFreelist::~BlockFreelist() {
 822   if (_dictionary != NULL) {
 823     if (Verbose && TraceMetadataChunkAllocation) {
 824       _dictionary->print_free_lists(gclog_or_tty);
 825     }
 826     delete _dictionary;
 827   }
 828 }
 829 
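// Return a block to the freelist: construct a Metablock header in place and
// hand the block to the dictionary, creating the dictionary lazily on first use.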
 830 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
 831   Metablock* free_chunk = ::new (p) Metablock(word_size);
 832   if (dictionary() == NULL) {
 833    _dictionary = new BlockTreeDictionary();
 834   }
 835   dictionary()->return_chunk(free_chunk);
 836 }
 837 
 838 MetaWord* BlockFreelist::get_block(size_t word_size) {
 839   if (dictionary() == NULL) {
 840     return NULL;
 841   }
 842 
 843   if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
 844     // Dark matter.  Too small for dictionary.
 845     return NULL;
 846   }
 847 
 848   Metablock* free_block =
 849     dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
 850   if (free_block == NULL) {
 851     return NULL;
 852   }
 853 
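  // If the block found is more than WasteMultiplier times larger than the
  // request, using it would waste too much space; put it back and give up.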
 854   const size_t block_size = free_block->size();
 855   if (block_size > WasteMultiplier * word_size) {
 856     return_block((MetaWord*)free_block, block_size);
 857     return NULL;
 858   }
 859 
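  // Use the block for this allocation; if the unused tail is large enough to
  // be tracked by the dictionary, split it off and return it to the freelist.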
 860   MetaWord* new_block = (MetaWord*)free_block;
 861   assert(block_size >= word_size, "Incorrect size of block from freelist");
 862   const size_t unused = block_size - word_size;
 863   if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
 864     return_block(new_block + word_size, unused);
 865   }
 866 
 867   return new_block;
 868 }
 869 
 870 void BlockFreelist::print_on(outputStream* st) const {
 871   if (dictionary() == NULL) {
 872     return;
 873   }
 874   dictionary()->print_free_lists(st);
 875 }
 876 
 877 // VirtualSpaceNode methods
 878 
 879 VirtualSpaceNode::~VirtualSpaceNode() {
 880   _rs.release();
 881 #ifdef ASSERT
 882   size_t word_size = sizeof(*this) / BytesPerWord;
 883   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
 884 #endif
 885 }
 886 
 887 size_t VirtualSpaceNode::used_words_in_vs() const {
 888   return pointer_delta(top(), bottom(), sizeof(MetaWord));
 889 }
 890 
 891 // Space committed in the VirtualSpace
 892 size_t VirtualSpaceNode::capacity_words_in_vs() const {
 893   return pointer_delta(end(), bottom(), sizeof(MetaWord));
 894 }
 895 
 896 size_t VirtualSpaceNode::free_words_in_vs() const {
 897   return pointer_delta(end(), top(), sizeof(MetaWord));
 898 }
 899 
 900 // Allocates the chunk from the virtual space only.
 901 // This interface is also used internally for debugging.  Not all
 902 // chunks removed here are necessarily used for allocation.
 903 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
 904   // Bottom of the new chunk
 905   MetaWord* chunk_limit = top();
 906   assert(chunk_limit != NULL, "Not safe to call this method");
 907 
 908   // The virtual spaces are always expanded by the
 909   // commit granularity to enforce the following condition.
 910   // Without this the is_available check will not work correctly.
 911   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
 912       "The committed memory doesn't match the expanded memory.");
 913 
 914   if (!is_available(chunk_word_size)) {
 915     if (TraceMetadataChunkAllocation) {
      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
 917       // Dump some information about the virtual space that is nearly full
 918       print_on(gclog_or_tty);
 919     }
 920     return NULL;
 921   }
 922 
 923   // Take the space  (bump top on the current virtual space).
 924   inc_top(chunk_word_size);
 925 
 926   // Initialize the chunk
 927   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
 928   return result;
 929 }
 930 
 931 
 932 // Expand the virtual space (commit more of the reserved space)
 933 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
 934   size_t min_bytes = min_words * BytesPerWord;
 935   size_t preferred_bytes = preferred_words * BytesPerWord;
 936 
 937   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
 938 
 939   if (uncommitted < min_bytes) {
 940     return false;
 941   }
 942 
 943   size_t commit = MIN2(preferred_bytes, uncommitted);
 944   bool result = virtual_space()->expand_by(commit, false);
 945 
 946   assert(result, "Failed to commit memory");
 947 
 948   return result;
 949 }
 950 
 951 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
 952   assert_lock_strong(SpaceManager::expand_lock());
 953   Metachunk* result = take_from_committed(chunk_word_size);
 954   if (result != NULL) {
 955     inc_container_count();
 956   }
 957   return result;
 958 }
 959 
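// Initialize the underlying VirtualSpace over the reserved space (registering
// any pre-committed memory) and set up the node's bookkeeping (top and
// reserved region).  Returns false if the ReservedSpace was never reserved.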
 960 bool VirtualSpaceNode::initialize() {
 961 
 962   if (!_rs.is_reserved()) {
 963     return false;
 964   }
 965 
  // These restrictions are necessary to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned, only the middle alignment of the VirtualSpace is used.
 969   assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
 970   assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
 971 
 972   // ReservedSpaces marked as special will have the entire memory
 973   // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
 975   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
 976 
 977   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
 978                                             Metaspace::commit_alignment());
 979   if (result) {
 980     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
 981         "Checking that the pre-committed memory was registered by the VirtualSpace");
 982 
 983     set_top((MetaWord*)virtual_space()->low());
 984     set_reserved(MemRegion((HeapWord*)_rs.base(),
 985                  (HeapWord*)(_rs.base() + _rs.size())));
 986 
 987     assert(reserved()->start() == (HeapWord*) _rs.base(),
 988       err_msg("Reserved start was not set properly " PTR_FORMAT
 989         " != " PTR_FORMAT, reserved()->start(), _rs.base()));
 990     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
 991       err_msg("Reserved size was not set properly " SIZE_FORMAT
 992         " != " SIZE_FORMAT, reserved()->word_size(),
 993         _rs.size() / BytesPerWord));
 994   }
 995 
 996   return result;
 997 }
 998 
 999 void VirtualSpaceNode::print_on(outputStream* st) const {
1000   size_t used = used_words_in_vs();
1001   size_t capacity = capacity_words_in_vs();
1002   VirtualSpace* vs = virtual_space();
1003   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
1004            "[" PTR_FORMAT ", " PTR_FORMAT ", "
1005            PTR_FORMAT ", " PTR_FORMAT ")",
1006            vs, capacity / K,
1007            capacity == 0 ? 0 : used * 100 / capacity,
1008            bottom(), top(), end(),
1009            vs->high_boundary());
1010 }
1011 
1012 #ifdef ASSERT
1013 void VirtualSpaceNode::mangle() {
1014   size_t word_size = capacity_words_in_vs();
1015   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1016 }
1017 #endif // ASSERT
1018 
1019 // VirtualSpaceList methods
1020 // Space allocated from the VirtualSpace
1021 
1022 VirtualSpaceList::~VirtualSpaceList() {
1023   VirtualSpaceListIterator iter(virtual_space_list());
1024   while (iter.repeat()) {
1025     VirtualSpaceNode* vsl = iter.get_next();
1026     delete vsl;
1027   }
1028 }
1029 
1030 void VirtualSpaceList::inc_reserved_words(size_t v) {
1031   assert_lock_strong(SpaceManager::expand_lock());
1032   _reserved_words = _reserved_words + v;
1033 }
1034 void VirtualSpaceList::dec_reserved_words(size_t v) {
1035   assert_lock_strong(SpaceManager::expand_lock());
1036   _reserved_words = _reserved_words - v;
1037 }
1038 
1039 #define assert_committed_below_limit()                             \
1040   assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,      \
1041       err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
1042               " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1043           MetaspaceAux::committed_bytes(), MaxMetaspaceSize));
1044 
1045 void VirtualSpaceList::inc_committed_words(size_t v) {
1046   assert_lock_strong(SpaceManager::expand_lock());
1047   _committed_words = _committed_words + v;
1048 
1049   assert_committed_below_limit();
1050 }
1051 void VirtualSpaceList::dec_committed_words(size_t v) {
1052   assert_lock_strong(SpaceManager::expand_lock());
1053   _committed_words = _committed_words - v;
1054 
1055   assert_committed_below_limit();
1056 }
1057 
1058 void VirtualSpaceList::inc_virtual_space_count() {
1059   assert_lock_strong(SpaceManager::expand_lock());
1060   _virtual_space_count++;
1061 }
1062 void VirtualSpaceList::dec_virtual_space_count() {
1063   assert_lock_strong(SpaceManager::expand_lock());
1064   _virtual_space_count--;
1065 }
1066 
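// Remove the chunk from whichever free list (or the humongous dictionary) it
// is on and update the free-chunk totals.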
1067 void ChunkManager::remove_chunk(Metachunk* chunk) {
1068   size_t word_size = chunk->word_size();
1069   ChunkIndex index = list_index(word_size);
1070   if (index != HumongousIndex) {
1071     free_chunks(index)->remove_chunk(chunk);
1072   } else {
1073     humongous_dictionary()->remove_chunk(chunk);
1074   }
1075 
1076   // Chunk is being removed from the chunks free list.
1077   dec_free_chunks_total(chunk->word_size());
1078 }
1079 
1080 // Walk the list of VirtualSpaceNodes and delete
1081 // nodes with a 0 container_count.  Remove Metachunks in
1082 // the node from their respective freelists.
1083 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1084   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
1085   assert_lock_strong(SpaceManager::expand_lock());
1086   // Don't use a VirtualSpaceListIterator because this
1087   // list is being changed and a straightforward use of an iterator is not safe.
1088   VirtualSpaceNode* purged_vsl = NULL;
1089   VirtualSpaceNode* prev_vsl = virtual_space_list();
1090   VirtualSpaceNode* next_vsl = prev_vsl;
1091   while (next_vsl != NULL) {
1092     VirtualSpaceNode* vsl = next_vsl;
1093     next_vsl = vsl->next();
1094     // Don't free the current virtual space since it will likely
1095     // be needed soon.
1096     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1097       // Unlink it from the list
1098       if (prev_vsl == vsl) {
1099         // This is the case of the current node being the first node.
1100         assert(vsl == virtual_space_list(), "Expected to be the first node");
1101         set_virtual_space_list(vsl->next());
1102       } else {
1103         prev_vsl->set_next(vsl->next());
1104       }
1105 
1106       vsl->purge(chunk_manager);
1107       dec_reserved_words(vsl->reserved_words());
1108       dec_committed_words(vsl->committed_words());
1109       dec_virtual_space_count();
1110       purged_vsl = vsl;
1111       delete vsl;
1112     } else {
1113       prev_vsl = vsl;
1114     }
1115   }
1116 #ifdef ASSERT
1117   if (purged_vsl != NULL) {
1118     // List should be stable enough to use an iterator here.
1119     VirtualSpaceListIterator iter(virtual_space_list());
1120     while (iter.repeat()) {
1121       VirtualSpaceNode* vsl = iter.get_next();
1122       assert(vsl != purged_vsl, "Purge of vsl failed");
1123     }
1124   }
1125 #endif
1126 }
1127 
1128 
1129 // This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and are not deleted except at
// class unloading time during a safepoint.
1132 bool VirtualSpaceList::contains(const void* ptr) {
1133   // List should be stable enough to use an iterator here because removing virtual
1134   // space nodes is only allowed at a safepoint.
1135   VirtualSpaceListIterator iter(virtual_space_list());
1136   while (iter.repeat()) {
1137     VirtualSpaceNode* vsn = iter.get_next();
1138     if (vsn->contains(ptr)) {
1139       return true;
1140     }
1141   }
1142   return false;
1143 }
1144 
1145 void VirtualSpaceList::retire_current_virtual_space() {
1146   assert_lock_strong(SpaceManager::expand_lock());
1147 
1148   VirtualSpaceNode* vsn = current_virtual_space();
1149 
1150   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
1151                                   Metaspace::chunk_manager_metadata();
1152 
1153   vsn->retire(cm);
1154 }
1155 
1156 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
1157   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
1158     ChunkIndex index = (ChunkIndex)i;
1159     size_t chunk_size = chunk_manager->free_chunks(index)->size();
1160 
1161     while (free_words_in_vs() >= chunk_size) {
1162       DEBUG_ONLY(verify_container_count();)
1163       Metachunk* chunk = get_chunk_vs(chunk_size);
1164       assert(chunk != NULL, "allocation should have been successful");
1165 
1166       chunk_manager->return_chunks(index, chunk);
1167       chunk_manager->inc_free_chunks_total(chunk_size);
1168       DEBUG_ONLY(verify_container_count();)
1169     }
1170   }
1171   assert(free_words_in_vs() == 0, "should be empty now");
1172 }
1173 
1174 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
1175                                    _is_class(false),
1176                                    _virtual_space_list(NULL),
1177                                    _current_virtual_space(NULL),
1178                                    _reserved_words(0),
1179                                    _committed_words(0),
1180                                    _virtual_space_count(0) {
1181   MutexLockerEx cl(SpaceManager::expand_lock(),
1182                    Mutex::_no_safepoint_check_flag);
1183   create_new_virtual_space(word_size);
1184 }
1185 
1186 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
1187                                    _is_class(true),
1188                                    _virtual_space_list(NULL),
1189                                    _current_virtual_space(NULL),
1190                                    _reserved_words(0),
1191                                    _committed_words(0),
1192                                    _virtual_space_count(0) {
1193   MutexLockerEx cl(SpaceManager::expand_lock(),
1194                    Mutex::_no_safepoint_check_flag);
1195   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
1196   bool succeeded = class_entry->initialize();
1197   if (succeeded) {
1198     link_vs(class_entry);
1199   }
1200 }
1201 
1202 size_t VirtualSpaceList::free_bytes() {
1203   return virtual_space_list()->free_words_in_vs() * BytesPerWord;
1204 }
1205 
1206 // Allocate another meta virtual space and add it to the list.
1207 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
1208   assert_lock_strong(SpaceManager::expand_lock());
1209 
1210   if (is_class()) {
1211     assert(false, "We currently don't support more than one VirtualSpace for"
1212                   " the compressed class space. The initialization of the"
1213                   " CCS uses another code path and should not hit this path.");
1214     return false;
1215   }
1216 
1217   if (vs_word_size == 0) {
1218     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
1219     return false;
1220   }
1221 
1222   // Reserve the space
1223   size_t vs_byte_size = vs_word_size * BytesPerWord;
1224   assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());
1225 
1226   // Allocate the meta virtual space and initialize it.
1227   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
1228   if (!new_entry->initialize()) {
1229     delete new_entry;
1230     return false;
1231   } else {
1232     assert(new_entry->reserved_words() == vs_word_size,
1233         "Reserved memory size differs from requested memory size");
1234     // ensure lock-free iteration sees fully initialized node
1235     OrderAccess::storestore();
1236     link_vs(new_entry);
1237     return true;
1238   }
1239 }
1240 
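// Append the new node to the list, make it the current node for allocations,
// and update the reserved/committed word counts and the node count.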
1241 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1242   if (virtual_space_list() == NULL) {
1243       set_virtual_space_list(new_entry);
1244   } else {
1245     current_virtual_space()->set_next(new_entry);
1246   }
1247   set_current_virtual_space(new_entry);
1248   inc_reserved_words(new_entry->reserved_words());
1249   inc_committed_words(new_entry->committed_words());
1250   inc_virtual_space_count();
1251 #ifdef ASSERT
1252   new_entry->mangle();
1253 #endif
1254   if (TraceMetavirtualspaceAllocation && Verbose) {
1255     VirtualSpaceNode* vsl = current_virtual_space();
1256     vsl->print_on(gclog_or_tty);
1257   }
1258 }
1259 
1260 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1261                                       size_t min_words,
1262                                       size_t preferred_words) {
1263   size_t before = node->committed_words();
1264 
1265   bool result = node->expand_by(min_words, preferred_words);
1266 
1267   size_t after = node->committed_words();
1268 
1269   // after and before can be the same if the memory was pre-committed.
1270   assert(after >= before, "Inconsistency");
1271   inc_committed_words(after - before);
1272 
1273   return result;
1274 }
1275 
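// Commit at least min_words (and preferably preferred_words) of memory for the
// list: first try to expand the current node; if that fails, retire it and
// create a new node, subject to the MetaspaceGC expansion limits.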
1276 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
1277   assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
1278   assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
1279   assert(min_words <= preferred_words, "Invalid arguments");
1280 
1281   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
1282     return  false;
1283   }
1284 
1285   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
1286   if (allowed_expansion_words < min_words) {
1287     return false;
1288   }
1289 
1290   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
1291 
  // Commit more memory from the current virtual space.
1293   bool vs_expanded = expand_node_by(current_virtual_space(),
1294                                     min_words,
1295                                     max_expansion_words);
1296   if (vs_expanded) {
1297     return true;
1298   }
1299   retire_current_virtual_space();
1300 
1301   // Get another virtual space.
1302   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
1303   grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
1304 
1305   if (create_new_virtual_space(grow_vs_words)) {
1306     if (current_virtual_space()->is_pre_committed()) {
1307       // The memory was pre-committed, so we are done here.
1308       assert(min_words <= current_virtual_space()->committed_words(),
          "The new VirtualSpace was pre-committed, so it "
          "should be large enough to fit the alloc request.");
1311       return true;
1312     }
1313 
1314     return expand_node_by(current_virtual_space(),
1315                           min_words,
1316                           max_expansion_words);
1317   }
1318 
1319   return false;
1320 }
1321 
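// Allocate a chunk of grow_chunks_by_words from the current virtual space,
// expanding the list (committing more memory or adding a new node) if the
// current space cannot satisfy the request.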
1322 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
1323                                            size_t grow_chunks_by_words,
1324                                            size_t medium_chunk_bunch) {
1325 
1326   // Allocate a chunk out of the current virtual space.
1327   Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1328 
1329   if (next != NULL) {
1330     return next;
1331   }
1332 
1333   // The expand amount is currently only determined by the requested sizes
1334   // and not how much committed memory is left in the current virtual space.
1335 
1336   size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
1337   size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
1338   if (min_word_size >= preferred_word_size) {
1339     // Can happen when humongous chunks are allocated.
1340     preferred_word_size = min_word_size;
1341   }
1342 
1343   bool expanded = expand_by(min_word_size, preferred_word_size);
1344   if (expanded) {
1345     next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1346     assert(next != NULL, "The allocation was expected to succeed after the expansion");
1347   }
1348 
  return next;
1350 }
1351 
1352 void VirtualSpaceList::print_on(outputStream* st) const {
1353   if (TraceMetadataChunkAllocation && Verbose) {
1354     VirtualSpaceListIterator iter(virtual_space_list());
1355     while (iter.repeat()) {
1356       VirtualSpaceNode* node = iter.get_next();
1357       node->print_on(st);
1358     }
1359   }
1360 }
1361 
1362 // MetaspaceGC methods
1363 
1364 // VM_CollectForMetadataAllocation is the vm operation used to GC.
1365 // Within the VM operation after the GC the attempt to allocate the metadata
// allocation, the HWM is increased so that another virtual space will be
1367 // allocation, the HWM is increased so that another virtualspace will be
1368 // allocated for the metadata.  With perm gen the increase in the perm
1369 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
1370 // metaspace policy uses those as the small and large steps for the HWM.
1371 //
1372 // After the GC the compute_new_size() for MetaspaceGC is called to
1373 // resize the capacity of the metaspaces.  The current implementation
1374 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GCs.  New flags can be implemented
1376 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
1377 // free space is desirable in the metaspace capacity to decide how much
1378 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
1379 // free space is desirable in the metaspace capacity before decreasing
1380 // the HWM.
1381 
1382 // Calculate the amount to increase the high water mark (HWM).
1383 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1384 // another expansion is not requested too soon.  If that is not
1385 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
1386 // If that is still not enough, expand by the size of the allocation
1387 // plus some.
1388 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
1389   size_t min_delta = MinMetaspaceExpansion;
1390   size_t max_delta = MaxMetaspaceExpansion;
1391   size_t delta = align_size_up(bytes, Metaspace::commit_alignment());
1392 
1393   if (delta <= min_delta) {
1394     delta = min_delta;
1395   } else if (delta <= max_delta) {
1396     // Don't want to hit the high water mark on the next
1397     // allocation so make the delta greater than just enough
1398     // for this allocation.
1399     delta = max_delta;
1400   } else {
1401     // This allocation is large but the next ones are probably not
1402     // so increase by the minimum.
1403     delta = delta + min_delta;
1404   }
1405 
1406   assert_is_size_aligned(delta, Metaspace::commit_alignment());
1407 
1408   return delta;
1409 }
1410 
1411 size_t MetaspaceGC::capacity_until_GC() {
1412   size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
1413   assert(value >= MetaspaceSize, "Not initialized properly?");
1414   return value;
1415 }
1416 
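// Try to raise the high-water mark by v bytes using a compare-and-swap.
// Returns false if another thread changed _capacity_until_GC concurrently;
// on success, optionally reports the new and old values.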
1417 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
1418   assert_is_size_aligned(v, Metaspace::commit_alignment());
1419 
1420   size_t capacity_until_GC = (size_t) _capacity_until_GC;
1421   size_t new_value = capacity_until_GC + v;
1422 
1423   if (new_value < capacity_until_GC) {
1424     // The addition wrapped around, set new_value to aligned max value.
1425     new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
1426   }
1427 
1428   intptr_t expected = (intptr_t) capacity_until_GC;
1429   intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);
1430 
1431   if (expected != actual) {
1432     return false;
1433   }
1434 
1435   if (new_cap_until_GC != NULL) {
1436     *new_cap_until_GC = new_value;
1437   }
1438   if (old_cap_until_GC != NULL) {
1439     *old_cap_until_GC = capacity_until_GC;
1440   }
1441   return true;
1442 }
1443 
1444 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
1445   assert_is_size_aligned(v, Metaspace::commit_alignment());
1446 
1447   return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
1448 }
1449 
1450 void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
1452   // we can't do a GC during initialization.
1453   _capacity_until_GC = MaxMetaspaceSize;
1454 }
1455 
1456 void MetaspaceGC::post_initialize() {
1457   // Reset the high-water mark once the VM initialization is done.
1458   _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
1459 }
1460 
1461 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
1462   // Check if the compressed class space is full.
1463   if (is_class && Metaspace::using_class_space()) {
1464     size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
1465     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
1466       return false;
1467     }
1468   }
1469 
1470   // Check if the user has imposed a limit on the metaspace memory.
1471   size_t committed_bytes = MetaspaceAux::committed_bytes();
1472   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
1473     return false;
1474   }
1475 
1476   return true;
1477 }
1478 
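// Returns how many words may still be committed before reaching either the
// GC threshold (capacity_until_GC) or MaxMetaspaceSize.  Illustrative
// example with assumed values: MaxMetaspaceSize = 256M, committed_bytes()
// = 100M and capacity_until_GC() = 120M give
// MIN2(120M - 100M, 256M - 100M) = 20M, returned as a word count.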
1479 size_t MetaspaceGC::allowed_expansion() {
1480   size_t committed_bytes = MetaspaceAux::committed_bytes();
1481   size_t capacity_until_gc = capacity_until_GC();
1482 
1483   assert(capacity_until_gc >= committed_bytes,
1484         err_msg("capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
1485                 capacity_until_gc, committed_bytes));
1486 
1487   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
1488   size_t left_until_GC = capacity_until_gc - committed_bytes;
1489   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
1490 
1491   return left_to_commit / BytesPerWord;
1492 }
1493 
1494 void MetaspaceGC::compute_new_size() {
1495   assert(_shrink_factor <= 100, "invalid shrink factor");
1496   uint current_shrink_factor = _shrink_factor;
1497   _shrink_factor = 0;
1498 
1499   // Using committed_bytes() for used_after_gc is an overestimation, since the
1500   // chunk free lists are included in committed_bytes() and the memory in an
1501   // un-fragmented chunk free list is available for future allocations.
1502   // However, if the chunk free lists become fragmented, then the memory may
1503   // not be available for future allocations and the memory is therefore "in use".
1504   // Including the chunk free lists in the definition of "in use" is therefore
1505   // necessary. Not including the chunk free lists can cause capacity_until_GC to
1506   // shrink below committed_bytes() and this has caused serious bugs in the past.
1507   const size_t used_after_gc = MetaspaceAux::committed_bytes();
1508   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1509 
1510   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1511   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1512 
1513   const double min_tmp = used_after_gc / maximum_used_percentage;
1514   size_t minimum_desired_capacity =
1515     (size_t)MIN2(min_tmp, double(max_uintx));
1516   // Don't shrink less than the initial generation size
1517   minimum_desired_capacity = MAX2(minimum_desired_capacity,
1518                                   MetaspaceSize);
1519 
1520   if (PrintGCDetails && Verbose) {
1521     gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
1522     gclog_or_tty->print_cr("  "
1523                   "  minimum_free_percentage: %6.2f"
1524                   "  maximum_used_percentage: %6.2f",
1525                   minimum_free_percentage,
1526                   maximum_used_percentage);
1527     gclog_or_tty->print_cr("  "
1528                   "   used_after_gc       : %6.1fKB",
1529                   used_after_gc / (double) K);
1530   }
1531 
1532 
1533   size_t shrink_bytes = 0;
1534   if (capacity_until_GC < minimum_desired_capacity) {
1535     // If the current HWM is below the minimum desired capacity,
1536     // then raise the HWM.
1537     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1538     expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
1539     // Don't expand unless it's significant
1540     if (expand_bytes >= MinMetaspaceExpansion) {
1541       size_t new_capacity_until_GC = 0;
1542       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1543       assert(succeeded, "Should always successfully increment HWM when at safepoint");
1544 
1545       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1546                                                new_capacity_until_GC,
1547                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
1548       if (PrintGCDetails && Verbose) {
1549         gclog_or_tty->print_cr("    expanding:"
1550                       "  minimum_desired_capacity: %6.1fKB"
1551                       "  expand_bytes: %6.1fKB"
1552                       "  MinMetaspaceExpansion: %6.1fKB"
1553                       "  new metaspace HWM:  %6.1fKB",
1554                       minimum_desired_capacity / (double) K,
1555                       expand_bytes / (double) K,
1556                       MinMetaspaceExpansion / (double) K,
1557                       new_capacity_until_GC / (double) K);
1558       }
1559     }
1560     return;
1561   }
1562 
1563   // No expansion, now see if we want to shrink
1564   // We would never want to shrink more than this
1565   assert(capacity_until_GC >= minimum_desired_capacity,
1566          err_msg(SIZE_FORMAT " >= " SIZE_FORMAT,
1567                  capacity_until_GC, minimum_desired_capacity));
1568   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1569 
1570   // Should shrinking be considered?
1571   if (MaxMetaspaceFreeRatio < 100) {
1572     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1573     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1574     const double max_tmp = used_after_gc / minimum_used_percentage;
1575     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1576     maximum_desired_capacity = MAX2(maximum_desired_capacity,
1577                                     MetaspaceSize);
1578     if (PrintGCDetails && Verbose) {
1579       gclog_or_tty->print_cr("  "
1580                              "  maximum_free_percentage: %6.2f"
1581                              "  minimum_used_percentage: %6.2f",
1582                              maximum_free_percentage,
1583                              minimum_used_percentage);
1584       gclog_or_tty->print_cr("  "
1585                              "  minimum_desired_capacity: %6.1fKB"
1586                              "  maximum_desired_capacity: %6.1fKB",
1587                              minimum_desired_capacity / (double) K,
1588                              maximum_desired_capacity / (double) K);
1589     }
1590 
1591     assert(minimum_desired_capacity <= maximum_desired_capacity,
1592            "sanity check");
1593 
1594     if (capacity_until_GC > maximum_desired_capacity) {
1595       // Capacity too large, compute shrinking size
1596       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1597       // We don't want to shrink all the way back to initSize if people call
1598       // System.gc(), because some programs do that between "phases" and then
1599       // we'd just have to grow the heap up again for the next phase.  So we
1600       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1601       // on the third call, and 100% by the fourth call.  But if we recompute
1602       // size without shrinking, it goes back to 0%.
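      // For example (numbers assumed for illustration only): if each full GC
      // finds 100M of shrinkable capacity and usage stays flat, the HWM drops
      // by 0M after the first GC, 10M after the second, 40M after the third
      // and the full remaining excess from the fourth GC onwards.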
1603       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1604 
1605       shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1606 
1607       assert(shrink_bytes <= max_shrink_bytes,
1608         err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1609           shrink_bytes, max_shrink_bytes));
1610       if (current_shrink_factor == 0) {
1611         _shrink_factor = 10;
1612       } else {
1613         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1614       }
1615       if (PrintGCDetails && Verbose) {
1616         gclog_or_tty->print_cr("  "
1617                       "  shrinking:"
1618                       "  initSize: %.1fK"
1619                       "  maximum_desired_capacity: %.1fK",
1620                       MetaspaceSize / (double) K,
1621                       maximum_desired_capacity / (double) K);
1622         gclog_or_tty->print_cr("  "
1623                       "  shrink_bytes: %.1fK"
1624                       "  current_shrink_factor: %d"
1625                       "  new shrink factor: %d"
1626                       "  MinMetaspaceExpansion: %.1fK",
1627                       shrink_bytes / (double) K,
1628                       current_shrink_factor,
1629                       _shrink_factor,
1630                       MinMetaspaceExpansion / (double) K);
1631       }
1632     }
1633   }
1634 
1635   // Don't shrink unless it's significant
1636   if (shrink_bytes >= MinMetaspaceExpansion &&
1637       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1638     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1639     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1640                                              new_capacity_until_GC,
1641                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
1642   }
1643 }
1644 
1645 // Metadebug methods
1646 
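// Pick a pseudo-random countdown in roughly [1, MetadataAllocationFailALotInterval].
// test_metadata_failure() decrements the countdown on each allocation and
// injects a deliberate failure once it reaches zero, then re-arms it.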
1647 void Metadebug::init_allocation_fail_alot_count() {
1648   if (MetadataAllocationFailALot) {
1649     _allocation_fail_alot_count =
1650       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1651   }
1652 }
1653 
1654 #ifdef ASSERT
1655 bool Metadebug::test_metadata_failure() {
1656   if (MetadataAllocationFailALot &&
1657       Threads::is_vm_complete()) {
1658     if (_allocation_fail_alot_count > 0) {
1659       _allocation_fail_alot_count--;
1660     } else {
1661       if (TraceMetadataChunkAllocation && Verbose) {
1662         gclog_or_tty->print_cr("Metadata allocation failing for "
1663                                "MetadataAllocationFailALot");
1664       }
1665       init_allocation_fail_alot_count();
1666       return true;
1667     }
1668   }
1669   return false;
1670 }
1671 #endif
1672 
1673 // ChunkManager methods
1674 
1675 size_t ChunkManager::free_chunks_total_words() {
1676   return _free_chunks_total;
1677 }
1678 
1679 size_t ChunkManager::free_chunks_total_bytes() {
1680   return free_chunks_total_words() * BytesPerWord;
1681 }
1682 
1683 size_t ChunkManager::free_chunks_count() {
1684 #ifdef ASSERT
1685   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1686     MutexLockerEx cl(SpaceManager::expand_lock(),
1687                      Mutex::_no_safepoint_check_flag);
1688     // This lock is only needed in debug because the verification
1689     // of the _free_chunks_totals walks the list of free chunks
1690     slow_locked_verify_free_chunks_count();
1691   }
1692 #endif
1693   return _free_chunks_count;
1694 }
1695 
1696 void ChunkManager::locked_verify_free_chunks_total() {
1697   assert_lock_strong(SpaceManager::expand_lock());
1698   assert(sum_free_chunks() == _free_chunks_total,
1699     err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
1700            " same as sum " SIZE_FORMAT, _free_chunks_total,
1701            sum_free_chunks()));
1702 }
1703 
1704 void ChunkManager::verify_free_chunks_total() {
1705   MutexLockerEx cl(SpaceManager::expand_lock(),
1706                      Mutex::_no_safepoint_check_flag);
1707   locked_verify_free_chunks_total();
1708 }
1709 
1710 void ChunkManager::locked_verify_free_chunks_count() {
1711   assert_lock_strong(SpaceManager::expand_lock());
1712   assert(sum_free_chunks_count() == _free_chunks_count,
1713     err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
1714            " same as sum " SIZE_FORMAT, _free_chunks_count,
1715            sum_free_chunks_count()));
1716 }
1717 
1718 void ChunkManager::verify_free_chunks_count() {
1719 #ifdef ASSERT
1720   MutexLockerEx cl(SpaceManager::expand_lock(),
1721                      Mutex::_no_safepoint_check_flag);
1722   locked_verify_free_chunks_count();
1723 #endif
1724 }
1725 
1726 void ChunkManager::verify() {
1727   MutexLockerEx cl(SpaceManager::expand_lock(),
1728                      Mutex::_no_safepoint_check_flag);
1729   locked_verify();
1730 }
1731 
1732 void ChunkManager::locked_verify() {
1733   locked_verify_free_chunks_count();
1734   locked_verify_free_chunks_total();
1735 }
1736 
1737 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1738   assert_lock_strong(SpaceManager::expand_lock());
1739   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1740                 _free_chunks_total, _free_chunks_count);
1741 }
1742 
1743 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1744   assert_lock_strong(SpaceManager::expand_lock());
1745   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1746                 sum_free_chunks(), sum_free_chunks_count());
1747 }
1748 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1749   return &_free_chunks[index];
1750 }
1751 
1752 // These methods that sum the free chunk lists are used in printing
1753 // methods that are used in product builds.
1754 size_t ChunkManager::sum_free_chunks() {
1755   assert_lock_strong(SpaceManager::expand_lock());
1756   size_t result = 0;
1757   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1758     ChunkList* list = free_chunks(i);
1759 
1760     if (list == NULL) {
1761       continue;
1762     }
1763 
1764     result = result + list->count() * list->size();
1765   }
1766   result = result + humongous_dictionary()->total_size();
1767   return result;
1768 }
1769 
1770 size_t ChunkManager::sum_free_chunks_count() {
1771   assert_lock_strong(SpaceManager::expand_lock());
1772   size_t count = 0;
1773   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1774     ChunkList* list = free_chunks(i);
1775     if (list == NULL) {
1776       continue;
1777     }
1778     count = count + list->count();
1779   }
1780   count = count + humongous_dictionary()->total_free_blocks();
1781   return count;
1782 }
1783 
1784 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1785   ChunkIndex index = list_index(word_size);
1786   assert(index < HumongousIndex, "No humongous list");
1787   return free_chunks(index);
1788 }
1789 
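// Remove and return a free chunk that can hold word_size words.
// Specialized/small/medium requests are served from the exact-size free
// list for their index; humongous requests search the dictionary for a
// chunk of at least word_size words.  Returns NULL if nothing suitable
// is free.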
1790 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1791   assert_lock_strong(SpaceManager::expand_lock());
1792 
1793   slow_locked_verify();
1794 
1795   Metachunk* chunk = NULL;
1796   if (list_index(word_size) != HumongousIndex) {
1797     ChunkList* free_list = find_free_chunks_list(word_size);
1798     assert(free_list != NULL, "Sanity check");
1799 
1800     chunk = free_list->head();
1801 
1802     if (chunk == NULL) {
1803       return NULL;
1804     }
1805 
1806     // Remove the chunk as the head of the list.
1807     free_list->remove_chunk(chunk);
1808 
1809     if (TraceMetadataChunkAllocation && Verbose) {
1810       gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1811                              PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1812                              free_list, chunk, chunk->word_size());
1813     }
1814   } else {
1815     chunk = humongous_dictionary()->get_chunk(
1816       word_size,
1817       FreeBlockDictionary<Metachunk>::atLeast);
1818 
1819     if (chunk == NULL) {
1820       return NULL;
1821     }
1822 
1823     if (TraceMetadataHumongousAllocation) {
1824       size_t waste = chunk->word_size() - word_size;
1825       gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1826                              SIZE_FORMAT " for requested size " SIZE_FORMAT
1827                              " waste " SIZE_FORMAT,
1828                              chunk->word_size(), word_size, waste);
1829     }
1830   }
1831 
1832   // Chunk is being removed from the chunks free list.
1833   dec_free_chunks_total(chunk->word_size());
1834 
1835   // Remove it from the links to this freelist
1836   chunk->set_next(NULL);
1837   chunk->set_prev(NULL);
1838 #ifdef ASSERT
1839   // Chunk is no longer on any freelist. Setting to false makes container_count_slow()
1840   // work.
1841   chunk->set_is_tagged_free(false);
1842 #endif
1843   chunk->container()->inc_container_count();
1844 
1845   slow_locked_verify();
1846   return chunk;
1847 }
1848 
1849 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1850   assert_lock_strong(SpaceManager::expand_lock());
1851   slow_locked_verify();
1852 
1853   // Take from the beginning of the list
1854   Metachunk* chunk = free_chunks_get(word_size);
1855   if (chunk == NULL) {
1856     return NULL;
1857   }
1858 
1859   assert((word_size <= chunk->word_size()) ||
1860          (list_index(chunk->word_size()) == HumongousIndex),
1861          "Non-humongous variable sized chunk");
1862   if (TraceMetadataChunkAllocation) {
1863     size_t list_count;
1864     if (list_index(word_size) < HumongousIndex) {
1865       ChunkList* list = find_free_chunks_list(word_size);
1866       list_count = list->count();
1867     } else {
1868       list_count = humongous_dictionary()->total_count();
1869     }
1870     gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1871                         PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1872                         this, chunk, chunk->word_size(), list_count);
1873     locked_print_free_chunks(gclog_or_tty);
1874   }
1875 
1876   return chunk;
1877 }
1878 
1879 void ChunkManager::print_on(outputStream* out) const {
1880   if (PrintFLSStatistics != 0) {
1881     const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
1882   }
1883 }
1884 
1885 // SpaceManager methods
1886 
1887 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1888                                            size_t* chunk_word_size,
1889                                            size_t* class_chunk_word_size) {
1890   switch (type) {
1891   case Metaspace::BootMetaspaceType:
1892     *chunk_word_size = Metaspace::first_chunk_word_size();
1893     *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1894     break;
1895   case Metaspace::ROMetaspaceType:
1896     *chunk_word_size = SharedReadOnlySize / wordSize;
1897     *class_chunk_word_size = ClassSpecializedChunk;
1898     break;
1899   case Metaspace::ReadWriteMetaspaceType:
1900     *chunk_word_size = SharedReadWriteSize / wordSize;
1901     *class_chunk_word_size = ClassSpecializedChunk;
1902     break;
1903   case Metaspace::AnonymousMetaspaceType:
1904   case Metaspace::ReflectionMetaspaceType:
1905     *chunk_word_size = SpecializedChunk;
1906     *class_chunk_word_size = ClassSpecializedChunk;
1907     break;
1908   default:
1909     *chunk_word_size = SmallChunk;
1910     *class_chunk_word_size = ClassSmallChunk;
1911     break;
1912   }
1913   assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1914     err_msg("Initial chunk sizes bad: data  " SIZE_FORMAT
1915             " class " SIZE_FORMAT,
1916             *chunk_word_size, *class_chunk_word_size));
1917 }
1918 
1919 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1920   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1921   size_t free = 0;
1922   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1923     Metachunk* chunk = chunks_in_use(i);
1924     while (chunk != NULL) {
1925       free += chunk->free_word_size();
1926       chunk = chunk->next();
1927     }
1928   }
1929   return free;
1930 }
1931 
1932 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1933   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1934   size_t result = 0;
1935   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1936    result += sum_waste_in_chunks_in_use(i);
1937   }
1938 
1939   return result;
1940 }
1941 
1942 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1943   size_t result = 0;
1944   Metachunk* chunk = chunks_in_use(index);
1945   // Count the free space in all the chunks but not the
1946   // current chunk from which allocations are still being done.
1947   while (chunk != NULL) {
1948     if (chunk != current_chunk()) {
1949       result += chunk->free_word_size();
1950     }
1951     chunk = chunk->next();
1952   }
1953   return result;
1954 }
1955 
1956 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1957   // For CMS use "allocated_chunks_words()", which does not need the
1958   // Metaspace lock.  For the other collectors sum over the
1959   // lists.  Use both methods as a check that "allocated_chunks_words()"
1960   // is correct.  That is, summing over the lists is too expensive
1961   // to use in the product, so allocated_chunks_words() should be used
1962   // there, but the list walk allows checking that allocated_chunks_words()
1963   // returns the same value as sum_capacity_in_chunks_in_use(), which is
1964   // the definitive answer.
1965   if (UseConcMarkSweepGC) {
1966     return allocated_chunks_words();
1967   } else {
1968     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1969     size_t sum = 0;
1970     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1971       Metachunk* chunk = chunks_in_use(i);
1972       while (chunk != NULL) {
1973         sum += chunk->word_size();
1974         chunk = chunk->next();
1975       }
1976     }
1977     return sum;
1978   }
1979 }
1980 
1981 size_t SpaceManager::sum_count_in_chunks_in_use() {
1982   size_t count = 0;
1983   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1984     count = count + sum_count_in_chunks_in_use(i);
1985   }
1986 
1987   return count;
1988 }
1989 
1990 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
1991   size_t count = 0;
1992   Metachunk* chunk = chunks_in_use(i);
1993   while (chunk != NULL) {
1994     count++;
1995     chunk = chunk->next();
1996   }
1997   return count;
1998 }
1999 
2000 
2001 size_t SpaceManager::sum_used_in_chunks_in_use() const {
2002   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2003   size_t used = 0;
2004   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2005     Metachunk* chunk = chunks_in_use(i);
2006     while (chunk != NULL) {
2007       used += chunk->used_word_size();
2008       chunk = chunk->next();
2009     }
2010   }
2011   return used;
2012 }
2013 
2014 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
2015 
2016   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2017     Metachunk* chunk = chunks_in_use(i);
2018     st->print("SpaceManager: %s " PTR_FORMAT,
2019                  chunk_size_name(i), chunk);
2020     if (chunk != NULL) {
2021       st->print_cr(" free " SIZE_FORMAT,
2022                    chunk->free_word_size());
2023     } else {
2024       st->cr();
2025     }
2026   }
2027 
2028   chunk_manager()->locked_print_free_chunks(st);
2029   chunk_manager()->locked_print_sum_free_chunks(st);
2030 }
2031 
2032 size_t SpaceManager::calc_chunk_size(size_t word_size) {
2033 
2034   // Decide between a small chunk and a medium chunk.  Up to
2035   // _small_chunk_limit small chunks can be allocated but
2036   // once a medium chunk has been allocated, no more small
2037   // chunks will be allocated.
2038   size_t chunk_word_size;
2039   if (chunks_in_use(MediumIndex) == NULL &&
2040       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2041     chunk_word_size = (size_t) small_chunk_size();
2042     if (word_size + Metachunk::overhead() > small_chunk_size()) {
2043       chunk_word_size = medium_chunk_size();
2044     }
2045   } else {
2046     chunk_word_size = medium_chunk_size();
2047   }
2048 
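  // In summary: requests are satisfied with small chunks until either a
  // medium chunk already exists, _small_chunk_limit small chunks have been
  // handed out, or the request plus overhead does not fit in a small chunk;
  // after that, medium chunks are handed out instead.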
2049   // Might still need a humongous chunk.  Enforce
2050   // humongous allocation sizes to be aligned up to
2051   // the smallest chunk size.
2052   size_t if_humongous_sized_chunk =
2053     align_size_up(word_size + Metachunk::overhead(),
2054                   smallest_chunk_size());
2055   chunk_word_size =
2056     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2057 
2058   assert(!SpaceManager::is_humongous(word_size) ||
2059          chunk_word_size == if_humongous_sized_chunk,
2060          err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
2061                  " chunk_word_size " SIZE_FORMAT,
2062                  word_size, chunk_word_size));
2063   if (TraceMetadataHumongousAllocation &&
2064       SpaceManager::is_humongous(word_size)) {
2065     gclog_or_tty->print_cr("Metadata humongous allocation:");
2066     gclog_or_tty->print_cr("  word_size " PTR_FORMAT, word_size);
2067     gclog_or_tty->print_cr("  chunk_word_size " PTR_FORMAT,
2068                            chunk_word_size);
2069     gclog_or_tty->print_cr("    chunk overhead " PTR_FORMAT,
2070                            Metachunk::overhead());
2071   }
2072   return chunk_word_size;
2073 }
2074 
2075 void SpaceManager::track_metaspace_memory_usage() {
2076   if (is_init_completed()) {
2077     if (is_class()) {
2078       MemoryService::track_compressed_class_memory_usage();
2079     }
2080     MemoryService::track_metaspace_memory_usage();
2081   }
2082 }
2083 
2084 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2085   assert(vs_list()->current_virtual_space() != NULL,
2086          "Should have been set");
2087   assert(current_chunk() == NULL ||
2088          current_chunk()->allocate(word_size) == NULL,
2089          "Don't need to expand");
2090   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2091 
2092   if (TraceMetadataChunkAllocation && Verbose) {
2093     size_t words_left = 0;
2094     size_t words_used = 0;
2095     if (current_chunk() != NULL) {
2096       words_left = current_chunk()->free_word_size();
2097       words_used = current_chunk()->used_word_size();
2098     }
2099     gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
2100                            " words " SIZE_FORMAT " words used " SIZE_FORMAT
2101                            " words left",
2102                             word_size, words_used, words_left);
2103   }
2104 
2105   // Get another chunk out of the virtual space
2106   size_t grow_chunks_by_words = calc_chunk_size(word_size);
2107   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2108 
2109   MetaWord* mem = NULL;
2110 
2111   // If a chunk was available, add it to the in-use chunk list
2112   // and do an allocation from it.
2113   if (next != NULL) {
2114     // Add to this manager's list of chunks in use.
2115     add_chunk(next, false);
2116     mem = next->allocate(word_size);
2117   }
2118 
2119   // Track metaspace memory usage statistic.
2120   track_metaspace_memory_usage();
2121 
2122   return mem;
2123 }
2124 
2125 void SpaceManager::print_on(outputStream* st) const {
2126 
2127   for (ChunkIndex i = ZeroIndex;
2128        i < NumberOfInUseLists ;
2129        i = next_chunk_index(i) ) {
2130     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
2131                  chunks_in_use(i),
2132                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2133   }
2134   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2135                " Humongous " SIZE_FORMAT,
2136                sum_waste_in_chunks_in_use(SmallIndex),
2137                sum_waste_in_chunks_in_use(MediumIndex),
2138                sum_waste_in_chunks_in_use(HumongousIndex));
2139   // block free lists
2140   if (block_freelists() != NULL) {
2141     st->print_cr("total in block free lists " SIZE_FORMAT,
2142       block_freelists()->total_size());
2143   }
2144 }
2145 
2146 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2147                            Mutex* lock) :
2148   _mdtype(mdtype),
2149   _allocated_blocks_words(0),
2150   _allocated_chunks_words(0),
2151   _allocated_chunks_count(0),
2152   _lock(lock)
2153 {
2154   initialize();
2155 }
2156 
2157 void SpaceManager::inc_size_metrics(size_t words) {
2158   assert_lock_strong(SpaceManager::expand_lock());
2159   // Total words in allocated Metachunks and the count of allocated Metachunks
2160   // for this SpaceManager
2161   _allocated_chunks_words = _allocated_chunks_words + words;
2162   _allocated_chunks_count++;
2163   // Global total of capacity in allocated Metachunks
2164   MetaspaceAux::inc_capacity(mdtype(), words);
2165   // Global total of allocated Metablocks.
2166   // used_words_slow() includes the overhead in each
2167   // Metachunk so include it in the used when the
2168   // Metachunk is first added (so only added once per
2169   // Metachunk).
2170   MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2171 }
2172 
2173 void SpaceManager::inc_used_metrics(size_t words) {
2174   // Add to the per SpaceManager total
2175   Atomic::add_ptr(words, &_allocated_blocks_words);
2176   // Add to the global total
2177   MetaspaceAux::inc_used(mdtype(), words);
2178 }
2179 
2180 void SpaceManager::dec_total_from_size_metrics() {
2181   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2182   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2183   // Also deduct the overhead per Metachunk
2184   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2185 }
2186 
2187 void SpaceManager::initialize() {
2188   Metadebug::init_allocation_fail_alot_count();
2189   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2190     _chunks_in_use[i] = NULL;
2191   }
2192   _current_chunk = NULL;
2193   if (TraceMetadataChunkAllocation && Verbose) {
2194     gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
2195   }
2196 }
2197 
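// Return a NULL-terminated list of chunks, all belonging to the size class
// 'index', to the matching free list.  Each chunk has its container count
// decremented and (in debug builds) is tagged as free before being linked
// at the head of the free list.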
2198 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
2199   if (chunks == NULL) {
2200     return;
2201   }
2202   ChunkList* list = free_chunks(index);
2203   assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
2204   assert_lock_strong(SpaceManager::expand_lock());
2205   Metachunk* cur = chunks;
2206 
2207   // This returns chunks one at a time.  If a new
2208   // class List can be created that is a base class
2209   // of FreeList then something like FreeList::prepend()
2210   // can be used in place of this loop
2211   while (cur != NULL) {
2212     assert(cur->container() != NULL, "Container should have been set");
2213     cur->container()->dec_container_count();
2214     // Capture the next link before it is changed
2215     // by the call to return_chunk_at_head();
2216     Metachunk* next = cur->next();
2217     DEBUG_ONLY(cur->set_is_tagged_free(true);)
2218     list->return_chunk_at_head(cur);
2219     cur = next;
2220   }
2221 }
2222 
2223 SpaceManager::~SpaceManager() {
2224   // This call takes this->_lock, which can't be done while holding expand_lock()
2225   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2226     err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2227             " allocated_chunks_words() " SIZE_FORMAT,
2228             sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
2229 
2230   MutexLockerEx fcl(SpaceManager::expand_lock(),
2231                     Mutex::_no_safepoint_check_flag);
2232 
2233   chunk_manager()->slow_locked_verify();
2234 
2235   dec_total_from_size_metrics();
2236 
2237   if (TraceMetadataChunkAllocation && Verbose) {
2238     gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
2239     locked_print_chunks_in_use_on(gclog_or_tty);
2240   }
2241 
2242   // Do not mangle freed Metachunks.  The chunk size inside Metachunks
2243   // is used during the freeing of VirtualSpaceNodes.
2244 
2245   // Have to update before the chunks_in_use lists are emptied
2246   // below.
2247   chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2248                                          sum_count_in_chunks_in_use());
2249 
2250   // Add all the chunks in use by this space manager
2251   // to the global list of free chunks.
2252 
2253   // Follow each list of chunks-in-use and add them to the
2254   // free lists.  Each list is NULL terminated.
2255 
2256   for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2257     if (TraceMetadataChunkAllocation && Verbose) {
2258       gclog_or_tty->print_cr("returned %d %s chunks to freelist",
2259                              sum_count_in_chunks_in_use(i),
2260                              chunk_size_name(i));
2261     }
2262     Metachunk* chunks = chunks_in_use(i);
2263     chunk_manager()->return_chunks(i, chunks);
2264     set_chunks_in_use(i, NULL);
2265     if (TraceMetadataChunkAllocation && Verbose) {
2266       gclog_or_tty->print_cr("updated freelist count %d %s",
2267                              chunk_manager()->free_chunks(i)->count(),
2268                              chunk_size_name(i));
2269     }
2270     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2271   }
2272 
2273   // The medium chunk case may be optimized by passing the head and
2274   // tail of the medium chunk list to add_at_head().  The tail is often
2275   // the current chunk but there are probably exceptions.
2276 
2277   // Humongous chunks
2278   if (TraceMetadataChunkAllocation && Verbose) {
2279     gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
2280                             sum_count_in_chunks_in_use(HumongousIndex),
2281                             chunk_size_name(HumongousIndex));
2282     gclog_or_tty->print("Humongous chunk dictionary: ");
2283   }
2284   // Humongous chunks are never the current chunk.
2285   Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2286 
2287   while (humongous_chunks != NULL) {
2288 #ifdef ASSERT
2289     humongous_chunks->set_is_tagged_free(true);
2290 #endif
2291     if (TraceMetadataChunkAllocation && Verbose) {
2292       gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2293                           humongous_chunks,
2294                           humongous_chunks->word_size());
2295     }
2296     assert(humongous_chunks->word_size() == (size_t)
2297            align_size_up(humongous_chunks->word_size(),
2298                              smallest_chunk_size()),
2299            err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2300                    " granularity " SIZE_FORMAT,
2301                    humongous_chunks->word_size(), smallest_chunk_size()));
2302     Metachunk* next_humongous_chunks = humongous_chunks->next();
2303     humongous_chunks->container()->dec_container_count();
2304     chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2305     humongous_chunks = next_humongous_chunks;
2306   }
2307   if (TraceMetadataChunkAllocation && Verbose) {
2308     gclog_or_tty->cr();
2309     gclog_or_tty->print_cr("updated dictionary count %d %s",
2310                      chunk_manager()->humongous_dictionary()->total_count(),
2311                      chunk_size_name(HumongousIndex));
2312   }
2313   chunk_manager()->slow_locked_verify();
2314 }
2315 
2316 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2317   switch (index) {
2318     case SpecializedIndex:
2319       return "Specialized";
2320     case SmallIndex:
2321       return "Small";
2322     case MediumIndex:
2323       return "Medium";
2324     case HumongousIndex:
2325       return "Humongous";
2326     default:
2327       return NULL;
2328   }
2329 }
2330 
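// Map an exact chunk word size to the free list index that holds chunks of
// that size.  Only the fixed specialized/small/medium sizes have dedicated
// lists; any other size is treated as humongous and lives in the dictionary.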
2331 ChunkIndex ChunkManager::list_index(size_t size) {
2332   switch (size) {
2333     case SpecializedChunk:
2334       assert(SpecializedChunk == ClassSpecializedChunk,
2335              "Need branch for ClassSpecializedChunk");
2336       return SpecializedIndex;
2337     case SmallChunk:
2338     case ClassSmallChunk:
2339       return SmallIndex;
2340     case MediumChunk:
2341     case ClassMediumChunk:
2342       return MediumIndex;
2343     default:
2344       assert(size > MediumChunk || size > ClassMediumChunk,
2345              "Not a humongous chunk");
2346       return HumongousIndex;
2347   }
2348 }
2349 
2350 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2351   assert_lock_strong(_lock);
2352   size_t raw_word_size = get_raw_word_size(word_size);
2353   size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size();
2354   assert(raw_word_size >= min_size,
2355          err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
2356   block_freelists()->return_block(p, raw_word_size);
2357 }
2358 
2359 // Adds a chunk to the list of chunks in use.
2360 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2361 
2362   assert(new_chunk != NULL, "Should not be NULL");
2363   assert(new_chunk->next() == NULL, "Should not be on a list");
2364 
2365   new_chunk->reset_empty();
2366 
2367   // Find the correct list and set the current
2368   // chunk for that list.
2369   ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2370 
2371   if (index != HumongousIndex) {
2372     retire_current_chunk();
2373     set_current_chunk(new_chunk);
2374     new_chunk->set_next(chunks_in_use(index));
2375     set_chunks_in_use(index, new_chunk);
2376   } else {
2377     // For null class loader data and DumpSharedSpaces, the first chunk isn't
2378     // small, so small will be null.  Link this first chunk as the current
2379     // chunk.
2380     if (make_current) {
2381       // Set as the current chunk but otherwise treat as a humongous chunk.
2382       set_current_chunk(new_chunk);
2383     }
2384     // Link at head.  The _current_chunk only points to a humongous chunk for
2385     // the null class loader metaspace (class and data virtual space managers);
2386     // it is not updated for any other humongous chunks and so will not point
2387     // to the tail of the humongous chunks list.
2388     new_chunk->set_next(chunks_in_use(HumongousIndex));
2389     set_chunks_in_use(HumongousIndex, new_chunk);
2390 
2391     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2392   }
2393 
2394   // Add to the running sum of capacity
2395   inc_size_metrics(new_chunk->word_size());
2396 
2397   assert(new_chunk->is_empty(), "Not ready for reuse");
2398   if (TraceMetadataChunkAllocation && Verbose) {
2399     gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
2400                         sum_count_in_chunks_in_use());
2401     new_chunk->print_on(gclog_or_tty);
2402     chunk_manager()->locked_print_free_chunks(gclog_or_tty);
2403   }
2404 }
2405 
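// When the current chunk is replaced, any tail of free space large enough to
// form a valid free block is returned to the block free lists so it can
// satisfy later small allocations; smaller remainders are simply left unused
// in the chunk.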
2406 void SpaceManager::retire_current_chunk() {
2407   if (current_chunk() != NULL) {
2408     size_t remaining_words = current_chunk()->free_word_size();
2409     if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
2410       block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2411       inc_used_metrics(remaining_words);
2412     }
2413   }
2414 }
2415 
2416 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2417                                        size_t grow_chunks_by_words) {
2418   // Get a chunk from the chunk freelist
2419   Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2420 
2421   if (next == NULL) {
2422     next = vs_list()->get_new_chunk(word_size,
2423                                     grow_chunks_by_words,
2424                                     medium_chunk_bunch());
2425   }
2426 
2427   if (TraceMetadataHumongousAllocation && next != NULL &&
2428       SpaceManager::is_humongous(next->word_size())) {
2429     gclog_or_tty->print_cr("  new humongous chunk word size "
2430                            PTR_FORMAT, next->word_size());
2431   }
2432 
2433   return next;
2434 }
2435 
2436 MetaWord* SpaceManager::allocate(size_t word_size) {
2437   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2438 
2439   size_t raw_word_size = get_raw_word_size(word_size);
2440   BlockFreelist* fl =  block_freelists();
2441   MetaWord* p = NULL;
2442   // Allocation from the dictionary is expensive in the sense that
2443   // the dictionary has to be searched for a size.  Don't allocate
2444   // from the dictionary until it starts to get fat.  Is this
2445   // a reasonable policy?  Maybe a skinny dictionary is fast enough
2446   // for allocations.  Do some profiling.  JJJ
2447   if (fl->total_size() > allocation_from_dictionary_limit) {
2448     p = fl->get_block(raw_word_size);
2449   }
2450   if (p == NULL) {
2451     p = allocate_work(raw_word_size);
2452   }
2453 
2454   return p;
2455 }
2456 
2457 // Returns the address of the space allocated for "word_size".
2458 // This method does not know about blocks (Metablocks).
2459 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2460   assert_lock_strong(_lock);
2461 #ifdef ASSERT
2462   if (Metadebug::test_metadata_failure()) {
2463     return NULL;
2464   }
2465 #endif
2466   // Is there space in the current chunk?
2467   MetaWord* result = NULL;
2468 
2469   // For DumpSharedSpaces, only allocate out of the current chunk which is
2470   // never null because we gave it the size we wanted.   Caller reports out
2471   // of memory if this returns null.
2472   if (DumpSharedSpaces) {
2473     assert(current_chunk() != NULL, "should never happen");
2474     inc_used_metrics(word_size);
2475     return current_chunk()->allocate(word_size); // caller handles null result
2476   }
2477 
2478   if (current_chunk() != NULL) {
2479     result = current_chunk()->allocate(word_size);
2480   }
2481 
2482   if (result == NULL) {
2483     result = grow_and_allocate(word_size);
2484   }
2485 
2486   if (result != NULL) {
2487     inc_used_metrics(word_size);
2488     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2489            "Head of the list is being allocated");
2490   }
2491 
2492   return result;
2493 }
2494 
2495 void SpaceManager::verify() {
2496   // If there are blocks in the dictionary, then
2497   // verification of chunks does not work since
2498   // being in the dictionary alters a chunk.
2499   if (block_freelists()->total_size() == 0) {
2500     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2501       Metachunk* curr = chunks_in_use(i);
2502       while (curr != NULL) {
2503         curr->verify();
2504         verify_chunk_size(curr);
2505         curr = curr->next();
2506       }
2507     }
2508   }
2509 }
2510 
2511 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2512   assert(is_humongous(chunk->word_size()) ||
2513          chunk->word_size() == medium_chunk_size() ||
2514          chunk->word_size() == small_chunk_size() ||
2515          chunk->word_size() == specialized_chunk_size(),
2516          "Chunk size is wrong");
2517   return;
2518 }
2519 
2520 #ifdef ASSERT
2521 void SpaceManager::verify_allocated_blocks_words() {
2522   // Verification is only guaranteed at a safepoint.
2523   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2524     "Verification can fail if the application is running");
2525   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2526     err_msg("allocation total is not consistent " SIZE_FORMAT
2527             " vs " SIZE_FORMAT,
2528             allocated_blocks_words(), sum_used_in_chunks_in_use()));
2529 }
2530 
2531 #endif
2532 
2533 void SpaceManager::dump(outputStream* const out) const {
2534   size_t curr_total = 0;
2535   size_t waste = 0;
2536   uint i = 0;
2537   size_t used = 0;
2538   size_t capacity = 0;
2539 
2540   // Add up statistics for all chunks in this SpaceManager.
2541   for (ChunkIndex index = ZeroIndex;
2542        index < NumberOfInUseLists;
2543        index = next_chunk_index(index)) {
2544     for (Metachunk* curr = chunks_in_use(index);
2545          curr != NULL;
2546          curr = curr->next()) {
2547       out->print("%d) ", i++);
2548       curr->print_on(out);
2549       curr_total += curr->word_size();
2550       used += curr->used_word_size();
2551       capacity += curr->word_size();
2552       waste += curr->free_word_size() + curr->overhead();
2553     }
2554   }
2555 
2556   if (TraceMetadataChunkAllocation && Verbose) {
2557     block_freelists()->print_on(out);
2558   }
2559 
2560   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2561   // Free space isn't wasted.
2562   waste -= free;
2563 
2564   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2565                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2566                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2567 }
2568 
2569 #ifndef PRODUCT
2570 void SpaceManager::mangle_freed_chunks() {
2571   for (ChunkIndex index = ZeroIndex;
2572        index < NumberOfInUseLists;
2573        index = next_chunk_index(index)) {
2574     for (Metachunk* curr = chunks_in_use(index);
2575          curr != NULL;
2576          curr = curr->next()) {
2577       curr->mangle();
2578     }
2579   }
2580 }
2581 #endif // PRODUCT
2582 
2583 // MetaspaceAux
2584 
2585 
2586 size_t MetaspaceAux::_capacity_words[] = {0, 0};
2587 size_t MetaspaceAux::_used_words[] = {0, 0};
2588 
2589 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2590   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2591   return list == NULL ? 0 : list->free_bytes();
2592 }
2593 
2594 size_t MetaspaceAux::free_bytes() {
2595   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2596 }
2597 
2598 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2599   assert_lock_strong(SpaceManager::expand_lock());
2600   assert(words <= capacity_words(mdtype),
2601     err_msg("About to decrement below 0: words " SIZE_FORMAT
2602             " is greater than _capacity_words[%u] " SIZE_FORMAT,
2603             words, mdtype, capacity_words(mdtype)));
2604   _capacity_words[mdtype] -= words;
2605 }
2606 
2607 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2608   assert_lock_strong(SpaceManager::expand_lock());
2609   // Needs to be atomic
2610   _capacity_words[mdtype] += words;
2611 }
2612 
2613 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2614   assert(words <= used_words(mdtype),
2615     err_msg("About to decrement below 0: words " SIZE_FORMAT
2616             " is greater than _used_words[%u] " SIZE_FORMAT,
2617             words, mdtype, used_words(mdtype)));
2618   // For CMS deallocation of the Metaspaces occurs during the
2619   // sweep which is a concurrent phase.  Protection by the expand_lock()
2620   // is not enough since allocation is on a per Metaspace basis
2621   // and protected by the Metaspace lock.
2622   jlong minus_words = (jlong) - (jlong) words;
2623   Atomic::add_ptr(minus_words, &_used_words[mdtype]);
2624 }
2625 
2626 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2627   // _used_words tracks allocations for
2628   // each piece of metadata.  Those allocations are
2629   // generally done concurrently by different application
2630   // threads so must be done atomically.
2631   Atomic::add_ptr(words, &_used_words[mdtype]);
2632 }
2633 
2634 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2635   size_t used = 0;
2636   ClassLoaderDataGraphMetaspaceIterator iter;
2637   while (iter.repeat()) {
2638     Metaspace* msp = iter.get_next();
2639     // Sum allocated_blocks_words for each metaspace
2640     if (msp != NULL) {
2641       used += msp->used_words_slow(mdtype);
2642     }
2643   }
2644   return used * BytesPerWord;
2645 }
2646 
2647 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2648   size_t free = 0;
2649   ClassLoaderDataGraphMetaspaceIterator iter;
2650   while (iter.repeat()) {
2651     Metaspace* msp = iter.get_next();
2652     if (msp != NULL) {
2653       free += msp->free_words_slow(mdtype);
2654     }
2655   }
2656   return free * BytesPerWord;
2657 }
2658 
2659 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2660   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2661     return 0;
2662   }
2663   // Don't count the space in the freelists.  That space will be
2664   // added to the capacity calculation as needed.
2665   size_t capacity = 0;
2666   ClassLoaderDataGraphMetaspaceIterator iter;
2667   while (iter.repeat()) {
2668     Metaspace* msp = iter.get_next();
2669     if (msp != NULL) {
2670       capacity += msp->capacity_words_slow(mdtype);
2671     }
2672   }
2673   return capacity * BytesPerWord;
2674 }
2675 
2676 size_t MetaspaceAux::capacity_bytes_slow() {
2677 #ifdef PRODUCT
2678   // Use capacity_bytes() in PRODUCT instead of this function.
2679   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2680 #endif
2681   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2682   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2683   assert(capacity_bytes() == class_capacity + non_class_capacity,
2684       err_msg("bad accounting: capacity_bytes() " SIZE_FORMAT
2685         " class_capacity + non_class_capacity " SIZE_FORMAT
2686         " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2687         capacity_bytes(), class_capacity + non_class_capacity,
2688         class_capacity, non_class_capacity));
2689 
2690   return class_capacity + non_class_capacity;
2691 }
2692 
2693 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2694   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2695   return list == NULL ? 0 : list->reserved_bytes();
2696 }
2697 
2698 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2699   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2700   return list == NULL ? 0 : list->committed_bytes();
2701 }
2702 
2703 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2704 
2705 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2706   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2707   if (chunk_manager == NULL) {
2708     return 0;
2709   }
2710   chunk_manager->slow_verify();
2711   return chunk_manager->free_chunks_total_words();
2712 }
2713 
2714 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2715   return free_chunks_total_words(mdtype) * BytesPerWord;
2716 }
2717 
2718 size_t MetaspaceAux::free_chunks_total_words() {
2719   return free_chunks_total_words(Metaspace::ClassType) +
2720          free_chunks_total_words(Metaspace::NonClassType);
2721 }
2722 
2723 size_t MetaspaceAux::free_chunks_total_bytes() {
2724   return free_chunks_total_words() * BytesPerWord;
2725 }
2726 
2727 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2728   return Metaspace::get_chunk_manager(mdtype) != NULL;
2729 }
2730 
2731 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
2732   if (!has_chunk_free_list(mdtype)) {
2733     return MetaspaceChunkFreeListSummary();
2734   }
2735 
2736   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
2737   return cm->chunk_free_list_summary();
2738 }
2739 
2740 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2741   gclog_or_tty->print(", [Metaspace:");
2742   if (PrintGCDetails && Verbose) {
2743     gclog_or_tty->print(" "  SIZE_FORMAT
2744                         "->" SIZE_FORMAT
2745                         "("  SIZE_FORMAT ")",
2746                         prev_metadata_used,
2747                         used_bytes(),
2748                         reserved_bytes());
2749   } else {
2750     gclog_or_tty->print(" "  SIZE_FORMAT "K"
2751                         "->" SIZE_FORMAT "K"
2752                         "("  SIZE_FORMAT "K)",
2753                         prev_metadata_used/K,
2754                         used_bytes()/K,
2755                         reserved_bytes()/K);
2756   }
2757 
2758   gclog_or_tty->print("]");
2759 }
2760 
2761 // This is printed when PrintGCDetails
2762 void MetaspaceAux::print_on(outputStream* out) {
2763   Metaspace::MetadataType nct = Metaspace::NonClassType;
2764 
2765   out->print_cr(" Metaspace       "
2766                 "used "      SIZE_FORMAT "K, "
2767                 "capacity "  SIZE_FORMAT "K, "
2768                 "committed " SIZE_FORMAT "K, "
2769                 "reserved "  SIZE_FORMAT "K",
2770                 used_bytes()/K,
2771                 capacity_bytes()/K,
2772                 committed_bytes()/K,
2773                 reserved_bytes()/K);
2774 
2775   if (Metaspace::using_class_space()) {
2776     Metaspace::MetadataType ct = Metaspace::ClassType;
2777     out->print_cr("  class space    "
2778                   "used "      SIZE_FORMAT "K, "
2779                   "capacity "  SIZE_FORMAT "K, "
2780                   "committed " SIZE_FORMAT "K, "
2781                   "reserved "  SIZE_FORMAT "K",
2782                   used_bytes(ct)/K,
2783                   capacity_bytes(ct)/K,
2784                   committed_bytes(ct)/K,
2785                   reserved_bytes(ct)/K);
2786   }
2787 }
2788 
2789 // Print information for class space and data space separately.
2790 // This is almost the same as above.
2791 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2792   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2793   size_t capacity_bytes = capacity_bytes_slow(mdtype);
2794   size_t used_bytes = used_bytes_slow(mdtype);
2795   size_t free_bytes = free_bytes_slow(mdtype);
2796   size_t used_and_free = used_bytes + free_bytes +
2797                            free_chunks_capacity_bytes;
2798   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
2799              "K + unused in chunks " SIZE_FORMAT "K  + "
2800              " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2801              "K  capacity in allocated chunks " SIZE_FORMAT "K",
2802              used_bytes / K,
2803              free_bytes / K,
2804              free_chunks_capacity_bytes / K,
2805              used_and_free / K,
2806              capacity_bytes / K);
2807   // Accounting can only be correct if we got the values during a safepoint
2808   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2809 }
2810 
2811 // Print total fragmentation for class metaspaces
2812 void MetaspaceAux::print_class_waste(outputStream* out) {
2813   assert(Metaspace::using_class_space(), "class metaspace not used");
2814   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2815   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2816   ClassLoaderDataGraphMetaspaceIterator iter;
2817   while (iter.repeat()) {
2818     Metaspace* msp = iter.get_next();
2819     if (msp != NULL) {
2820       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2821       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2822       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2823       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2824       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2825       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2826       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2827     }
2828   }
2829   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2830                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2831                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2832                 "large count " SIZE_FORMAT,
2833                 cls_specialized_count, cls_specialized_waste,
2834                 cls_small_count, cls_small_waste,
2835                 cls_medium_count, cls_medium_waste, cls_humongous_count);
2836 }
2837 
2838 // Print total fragmentation for data and class metaspaces separately
2839 void MetaspaceAux::print_waste(outputStream* out) {
2840   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2841   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2842 
2843   ClassLoaderDataGraphMetaspaceIterator iter;
2844   while (iter.repeat()) {
2845     Metaspace* msp = iter.get_next();
2846     if (msp != NULL) {
2847       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2848       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2849       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2850       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2851       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2852       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2853       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2854     }
2855   }
2856   out->print_cr("Total fragmentation waste (words) doesn't count free space");
2857   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2858                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2859                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2860                         "large count " SIZE_FORMAT,
2861              specialized_count, specialized_waste, small_count,
2862              small_waste, medium_count, medium_waste, humongous_count);
2863   if (Metaspace::using_class_space()) {
2864     print_class_waste(out);
2865   }
2866 }
2867 
2868 // Dump global metaspace things from the end of ClassLoaderDataGraph
2869 void MetaspaceAux::dump(outputStream* out) {
2870   out->print_cr("All Metaspace:");
2871   out->print("data space: "); print_on(out, Metaspace::NonClassType);
2872   out->print("class space: "); print_on(out, Metaspace::ClassType);
2873   print_waste(out);
2874 }
2875 
2876 void MetaspaceAux::verify_free_chunks() {
2877   Metaspace::chunk_manager_metadata()->verify();
2878   if (Metaspace::using_class_space()) {
2879     Metaspace::chunk_manager_class()->verify();
2880   }
2881 }
2882 
2883 void MetaspaceAux::verify_capacity() {
2884 #ifdef ASSERT
2885   size_t running_sum_capacity_bytes = capacity_bytes();
2886   // Check the running sum of capacity against the value computed by iteration.
2887   size_t capacity_in_use_bytes = capacity_bytes_slow();
2888   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2889     err_msg("capacity_bytes() " SIZE_FORMAT
2890             " capacity_bytes_slow() " SIZE_FORMAT,
2891             running_sum_capacity_bytes, capacity_in_use_bytes));
2892   for (Metaspace::MetadataType i = Metaspace::ClassType;
2893        i < Metaspace::MetadataTypeCount;
2894        i = (Metaspace::MetadataType)(i + 1)) {
2895     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2896     assert(capacity_bytes(i) == capacity_in_use_bytes,
2897       err_msg("capacity_bytes(%u) " SIZE_FORMAT
2898               " capacity_bytes_slow(%u) " SIZE_FORMAT,
2899               i, capacity_bytes(i), i, capacity_in_use_bytes));
2900   }
2901 #endif
2902 }
2903 
2904 void MetaspaceAux::verify_used() {
2905 #ifdef ASSERT
2906   size_t running_sum_used_bytes = used_bytes();
2907   // Check the running sum of used bytes against the value computed by iteration.
2908   size_t used_in_use_bytes = used_bytes_slow();
2909   assert(running_sum_used_bytes == used_in_use_bytes,
2910     err_msg("used_bytes() " SIZE_FORMAT
2911             " used_bytes_slow() " SIZE_FORMAT,
2912             running_sum_used_bytes, used_in_use_bytes));
2913   for (Metaspace::MetadataType i = Metaspace::ClassType;
2914        i < Metaspace::MetadataTypeCount;
2915        i = (Metaspace::MetadataType)(i + 1)) {
2916     size_t used_in_use_bytes = used_bytes_slow(i);
2917     assert(used_bytes(i) == used_in_use_bytes,
2918       err_msg("used_bytes(%u) " SIZE_FORMAT
2919               " used_bytes_slow(%u) " SIZE_FORMAT,
2920               i, used_bytes(i), i, used_in_use_bytes));
2921   }
2922 #endif
2923 }
2924 
2925 void MetaspaceAux::verify_metrics() {
2926   verify_capacity();
2927   verify_used();
2928 }
2929 
2930 
2931 // Metaspace methods
2932 
2933 size_t Metaspace::_first_chunk_word_size = 0;
2934 size_t Metaspace::_first_class_chunk_word_size = 0;
2935 
2936 size_t Metaspace::_commit_alignment = 0;
2937 size_t Metaspace::_reserve_alignment = 0;
2938 
2939 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2940   initialize(lock, type);
2941 }
2942 
2943 Metaspace::~Metaspace() {
2944   delete _vsm;
2945   if (using_class_space()) {
2946     delete _class_vsm;
2947   }
2948 }
2949 
2950 VirtualSpaceList* Metaspace::_space_list = NULL;
2951 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2952 
2953 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
2954 ChunkManager* Metaspace::_chunk_manager_class = NULL;
2955 
2956 #define VIRTUALSPACEMULTIPLIER 2
2957 
2958 #ifdef _LP64
2959 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
2960 
2961 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
2962   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
2963   // narrow_klass_base is the lower of the metaspace base and the cds base
2964   // (if cds is enabled).  The narrow_klass_shift depends on the distance
2965   // between the lower base and higher address.
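       //
       // Illustration only (not the authoritative decode path): once a base B and
       // shift S are chosen here, a compressed klass value v decodes roughly as
       //   Klass* k = (Klass*)(B + ((uintptr_t)v << S));
       // so a zero shift is only workable while the whole class space (plus the
       // CDS archive, if mapped) lies within 4G of the chosen base.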
2966   address lower_base;
2967   address higher_address;
2968 #if INCLUDE_CDS
2969   if (UseSharedSpaces) {
2970     higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2971                           (address)(metaspace_base + compressed_class_space_size()));
2972     lower_base = MIN2(metaspace_base, cds_base);
2973   } else
2974 #endif
2975   {
2976     higher_address = metaspace_base + compressed_class_space_size();
2977     lower_base = metaspace_base;
2978 
2979     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
2980     // If compressed class space fits in lower 32G, we don't need a base.
2981     if (higher_address <= (address)klass_encoding_max) {
2982       lower_base = 0; // Effectively lower base is zero.
2983     }
2984   }
2985 
2986   Universe::set_narrow_klass_base(lower_base);
2987 
2988   if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
2989     Universe::set_narrow_klass_shift(0);
2990   } else {
2991     assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
2992     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
2993   }
2994 }
2995 
2996 #if INCLUDE_CDS
2997 // Return TRUE if the specified metaspace_base and cds_base are close enough
2998 // to work with compressed klass pointers.
2999 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
3000   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
3001   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3002   address lower_base = MIN2((address)metaspace_base, cds_base);
3003   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
3004                                 (address)(metaspace_base + compressed_class_space_size()));
3005   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
3006 }
3007 #endif
3008 
3009 // Try to allocate the metaspace at the requested addr.
3010 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
3011   assert(using_class_space(), "called improperly");
3012   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3013   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
3014          "Metaspace size is too big");
3015   assert_is_ptr_aligned(requested_addr, _reserve_alignment);
3016   assert_is_ptr_aligned(cds_base, _reserve_alignment);
3017   assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
3018 
3019   // Don't use large pages for the class space.
3020   bool large_pages = false;
3021 
3022 #ifndef AARCH64
3023   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
3024                                              _reserve_alignment,
3025                                              large_pages,
3026                                              requested_addr);
3027 #else // AARCH64
3028   ReservedSpace metaspace_rs;
3029 
3030   // Our compressed klass pointers may fit nicely into the lower 32
3031   // bits.
3032   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
3033     metaspace_rs = ReservedSpace(compressed_class_space_size(),
3034                                  _reserve_alignment,
3035                                  large_pages,
3036                                  requested_addr);
3037   }
3038 
3039   if (!metaspace_rs.is_reserved()) {
3040     // Try to align metaspace so that we can decode a compressed klass
3041     // with a single MOVK instruction.  We can do this iff the
3042     // compressed class base is a multiple of 4G.
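         // Illustration only (an assumption about the encoding; no code is
         // generated here): such a base is a multiple of 4G below 1024G, so only
         // bits [32..39] are set, e.g. 32*G == 0x8_0000_0000, which fits in the
         // 16-bit immediate of a single move-wide instruction with LSL #32.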
3043     for (char *a = (char*)align_ptr_up(requested_addr, 4*G);
3044          a < (char*)(1024*G);
3045          a += 4*G) {
3046 
3047 #if INCLUDE_CDS
3048       if (UseSharedSpaces
3049           && !can_use_cds_with_metaspace_addr(a, cds_base)) {
3050         // We failed to find an aligned base that will reach.  Fall
3051         // back to using our requested addr.
3052         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3053                                      _reserve_alignment,
3054                                      large_pages,
3055                                      requested_addr);
3056         break;
3057       }
3058 #endif
3059 
3060       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3061                                    _reserve_alignment,
3062                                    large_pages,
3063                                    a);
3064       if (metaspace_rs.is_reserved())
3065         break;
3066     }
3067   }
3068 
3069 #endif // AARCH64
3070 
3071   if (!metaspace_rs.is_reserved()) {
3072 #if INCLUDE_CDS
3073     if (UseSharedSpaces) {
3074       size_t increment = align_size_up(1*G, _reserve_alignment);
3075 
3076       // Keep trying to allocate the metaspace, increasing the requested_addr
3077       // by 1GB each time, until we reach an address that will no longer allow
3078       // use of CDS with compressed klass pointers.
3079       char *addr = requested_addr;
3080       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3081              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3082         addr = addr + increment;
3083         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3084                                      _reserve_alignment, large_pages, addr);
3085       }
3086     }
3087 #endif
3088     // If no successful allocation then try to allocate the space anywhere.  If
3089     // that fails then OOM doom.  At this point we cannot try allocating the
3090     // metaspace as if UseCompressedClassPointers is off because too much
3091     // initialization has happened that depends on UseCompressedClassPointers.
3092     // So, UseCompressedClassPointers cannot be turned off at this point.
3093     if (!metaspace_rs.is_reserved()) {
3094       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3095                                    _reserve_alignment, large_pages);
3096       if (!metaspace_rs.is_reserved()) {
3097         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
3098                                               compressed_class_space_size()));
3099       }
3100     }
3101   }
3102 
3103   // If we got here then the metaspace got allocated.
3104   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3105 
3106 #if INCLUDE_CDS
3107   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3108   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3109     FileMapInfo::stop_sharing_and_unmap(
3110         "Could not allocate metaspace at a compatible address");
3111   }
3112 #endif
3113   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3114                                   UseSharedSpaces ? (address)cds_base : 0);
3115 
3116   initialize_class_space(metaspace_rs);
3117 
3118   if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
3119     gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
3120                             Universe::narrow_klass_base(), Universe::narrow_klass_shift());
3121     gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
3122                            compressed_class_space_size(), metaspace_rs.base(), requested_addr);
3123   }
3124 }
3125 
3126 // For UseCompressedClassPointers the class space is reserved above the top of
3127 // the Java heap.  The argument passed in is at the base of the compressed space.
3128 void Metaspace::initialize_class_space(ReservedSpace rs) {
3129   // The reserved space size may be bigger because of alignment, esp with UseLargePages
3130   assert(rs.size() >= CompressedClassSpaceSize,
3131          err_msg(SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize));
3132   assert(using_class_space(), "Must be using class space");
3133   _class_space_list = new VirtualSpaceList(rs);
3134   _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3135 
3136   if (!_class_space_list->initialization_succeeded()) {
3137     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3138   }
3139 }
3140 
3141 #endif
3142 
3143 void Metaspace::ergo_initialize() {
3144   if (DumpSharedSpaces) {
3145     // Using large pages when dumping the shared archive is currently not implemented.
3146     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3147   }
3148 
3149   size_t page_size = os::vm_page_size();
3150   if (UseLargePages && UseLargePagesInMetaspace) {
3151     page_size = os::large_page_size();
3152   }
3153 
3154   _commit_alignment  = page_size;
3155   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
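       // Worked example with assumed platform values (illustration only): with
       // 4K pages and a 64K allocation granularity, _commit_alignment becomes 4K
       // and _reserve_alignment becomes 64K; with 2M large pages enabled for
       // Metaspace, both become 2M.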
3156 
3157   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
3158   // override if MaxMetaspaceSize was set on the command line or not.
3159   // This information is needed later to conform to the specification of the
3160   // java.lang.management.MemoryUsage API.
3161   //
3162   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3163   // globals.hpp to the aligned value, but this is not possible, since the
3164   // alignment depends on other flags being parsed.
3165   MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3166 
3167   if (MetaspaceSize > MaxMetaspaceSize) {
3168     MetaspaceSize = MaxMetaspaceSize;
3169   }
3170 
3171   MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
3172 
3173   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3174 
3175   if (MetaspaceSize < 256*K) {
3176     vm_exit_during_initialization("Too small initial Metaspace size");
3177   }
3178 
3179   MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3180   MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3181 
3182   CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3183   set_compressed_class_space_size(CompressedClassSpaceSize);
3184 }
3185 
3186 void Metaspace::global_initialize() {
3187   MetaspaceGC::initialize();
3188 
3189   // Initialize the alignment for shared spaces.
3190   int max_alignment = os::vm_allocation_granularity();
3191   size_t cds_total = 0;
3192 
3193   MetaspaceShared::set_max_alignment(max_alignment);
3194 
3195   if (DumpSharedSpaces) {
3196 #if INCLUDE_CDS
3197     MetaspaceShared::estimate_regions_size();
3198 
3199     SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
3200     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
3201     SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
3202     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
3203 
3204     // make sure SharedReadOnlySize and SharedReadWriteSize are not less than
3205     // the minimum values.
3206     if (SharedReadOnlySize < MetaspaceShared::min_ro_size) {
3207       report_out_of_shared_space(SharedReadOnly);
3208     }
3209 
3210     if (SharedReadWriteSize < MetaspaceShared::min_rw_size) {
3211       report_out_of_shared_space(SharedReadWrite);
3212     }
3213 
3214     // the min_misc_data_size and min_misc_code_size estimates are based on
3215     // MetaspaceShared::generate_vtable_methods().
3216     // The minimum size only accounts for the vtable methods. Any size less than the
3217     // minimum required size would cause vm crash when allocating the vtable methods.
3218     uintx min_misc_data_size = align_size_up(
3219       MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size * sizeof(void*), max_alignment);
3220 
3221     if (SharedMiscDataSize < min_misc_data_size) {
3222       report_out_of_shared_space(SharedMiscData);
3223     }
3224 
3225     uintx min_misc_code_size = align_size_up(
3226       (MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size) *
3227         (sizeof(void*) + MetaspaceShared::vtbl_method_size) + MetaspaceShared::vtbl_common_code_size,
3228           max_alignment);
3229 
3230     if (SharedMiscCodeSize < min_misc_code_size) {
3231       report_out_of_shared_space(SharedMiscCode);
3232     }
3233 
3234     // Initialize with the sum of the shared space sizes.  The read-only
3235     // and read write metaspace chunks will be allocated out of this and the
3236     // remainder is the misc code and data chunks.
3237     cds_total = FileMapInfo::shared_spaces_size();
3238     cds_total = align_size_up(cds_total, _reserve_alignment);
3239     _space_list = new VirtualSpaceList(cds_total/wordSize);
3240     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3241 
3242     if (!_space_list->initialization_succeeded()) {
3243       vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3244     }
3245 
3246 #ifdef _LP64
3247     if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3248       vm_exit_during_initialization("Unable to dump shared archive.",
3249           err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3250                   SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3251                   "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(),
3252                   cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3253     }
3254 
3255     // Set the compressed klass pointer base so that decoding of these pointers works
3256     // properly when creating the shared archive.
3257     assert(UseCompressedOops && UseCompressedClassPointers,
3258       "UseCompressedOops and UseCompressedClassPointers must be set");
3259     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3260     if (TraceMetavirtualspaceAllocation && Verbose) {
3261       gclog_or_tty->print_cr("Setting narrow_klass_base to address: " PTR_FORMAT,
3262                              _space_list->current_virtual_space()->bottom());
3263     }
3264 
3265     Universe::set_narrow_klass_shift(0);
3266 #endif // _LP64
3267 #endif // INCLUDE_CDS
3268   } else {
3269 #if INCLUDE_CDS
3270     // If using shared space, open the file that contains the shared space
3271     // and map in the memory before initializing the rest of metaspace (so
3272     // the addresses don't conflict)
3273     address cds_address = NULL;
3274     if (UseSharedSpaces) {
3275       FileMapInfo* mapinfo = new FileMapInfo();
3276 
3277       // Open the shared archive file, read and validate the header. If
3278       // initialization fails, shared spaces [UseSharedSpaces] are
3279       // disabled and the file is closed.
3280       // Map in spaces now also
3281       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3282         cds_total = FileMapInfo::shared_spaces_size();
3283         cds_address = (address)mapinfo->region_base(0);
3284       } else {
3285         assert(!mapinfo->is_open() && !UseSharedSpaces,
3286                "archive file not closed or shared spaces not disabled.");
3287       }
3288     }
3289 #endif // INCLUDE_CDS
3290 #ifdef _LP64
3291     // If UseCompressedClassPointers is set then allocate the metaspace area
3292     // above the heap and above the CDS area (if it exists).
3293     if (using_class_space()) {
3294       if (UseSharedSpaces) {
3295 #if INCLUDE_CDS
3296         char* cds_end = (char*)(cds_address + cds_total);
3297         cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3298         allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3299 #endif
3300       } else {
3301         char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3302         allocate_metaspace_compressed_klass_ptrs(base, 0);
3303       }
3304     }
3305 #endif // _LP64
3306 
3307     // Initialize these before initializing the VirtualSpaceList
3308     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3309     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3310     // Make the first class chunk bigger than a medium chunk so it's not put
3311     // on the medium chunk list.  The next chunk will be small and progress
3312     // from there.  This size was determined empirically by running -version.
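         // For illustration, assuming default flag values: MediumChunk is 8K
         // words, so MediumChunk * 6 is 48K words, while the default 1G
         // CompressedClassSpaceSize gives (1G / BytesPerWord) * 2 = 256M words.
         // MIN2 below therefore picks 48K words, comfortably above
         // ClassMediumChunk, so the first class chunk stays off the medium chunk
         // list as intended.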
3313     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3314                                        (CompressedClassSpaceSize/BytesPerWord)*2);
3315     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3316     // Arbitrarily set the initial virtual space to a multiple
3317     // of the boot class loader size.
3318     size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3319     word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3320 
3321     // Initialize the list of virtual spaces.
3322     _space_list = new VirtualSpaceList(word_size);
3323     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3324 
3325     if (!_space_list->initialization_succeeded()) {
3326       vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3327     }
3328   }
3329 
3330   _tracer = new MetaspaceTracer();
3331 }
3332 
3333 void Metaspace::post_initialize() {
3334   MetaspaceGC::post_initialize();
3335 }
3336 
3337 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
3338                                                size_t chunk_word_size,
3339                                                size_t chunk_bunch) {
3340   // Get a chunk from the chunk freelist
3341   Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3342   if (chunk != NULL) {
3343     return chunk;
3344   }
3345 
3346   return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
3347 }
3348 
3349 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3350 
3351   assert(space_list() != NULL,
3352     "Metadata VirtualSpaceList has not been initialized");
3353   assert(chunk_manager_metadata() != NULL,
3354     "Metadata ChunkManager has not been initialized");
3355 
3356   _vsm = new SpaceManager(NonClassType, lock);
3357   if (_vsm == NULL) {
3358     return;
3359   }
3360   size_t word_size;
3361   size_t class_word_size;
3362   vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
3363 
3364   if (using_class_space()) {
3365     assert(class_space_list() != NULL,
3366       "Class VirtualSpaceList has not been initialized");
3367     assert(chunk_manager_class() != NULL,
3368       "Class ChunkManager has not been initialized");
3369 
3370     // Allocate SpaceManager for classes.
3371     _class_vsm = new SpaceManager(ClassType, lock);
3372     if (_class_vsm == NULL) {
3373       return;
3374     }
3375   }
3376 
3377   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3378 
3379   // Allocate chunk for metadata objects
3380   Metachunk* new_chunk = get_initialization_chunk(NonClassType,
3381                                                   word_size,
3382                                                   vsm()->medium_chunk_bunch());
3383   // For dumping shared archive, report error if allocation has failed.
3384   if (DumpSharedSpaces && new_chunk == NULL) {
3385     report_insufficient_metaspace(MetaspaceAux::committed_bytes() + word_size * BytesPerWord);
3386   }
3387   assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
3388   if (new_chunk != NULL) {
3389     // Add to this manager's list of chunks in use and current_chunk().
3390     vsm()->add_chunk(new_chunk, true);
3391   }
3392 
3393   // Allocate chunk for class metadata objects
3394   if (using_class_space()) {
3395     Metachunk* class_chunk = get_initialization_chunk(ClassType,
3396                                                       class_word_size,
3397                                                       class_vsm()->medium_chunk_bunch());
3398     if (class_chunk != NULL) {
3399       class_vsm()->add_chunk(class_chunk, true);
3400     } else {
3401       // For dumping shared archive, report error if allocation has failed.
3402       if (DumpSharedSpaces) {
3403         report_insufficient_metaspace(MetaspaceAux::committed_bytes() + class_word_size * BytesPerWord);
3404       }
3405     }
3406   }
3407 
3408   _alloc_record_head = NULL;
3409   _alloc_record_tail = NULL;
3410 }
3411 
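     // Rounds a word size up so that the corresponding byte size is aligned to
     // the platform allocation granularity. Illustration only, with assumed
     // example values: with 8-byte words and a 64K granularity, 100 words
     // (800 bytes) rounds up to 8192 words (64K).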
3412 size_t Metaspace::align_word_size_up(size_t word_size) {
3413   size_t byte_size = word_size * wordSize;
3414   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3415 }
3416 
3417 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3418   // DumpSharedSpaces doesn't use class metadata area (yet)
3419   // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3420   if (is_class_space_allocation(mdtype)) {
3421     return class_vsm()->allocate(word_size);
3422   } else {
3423     return vsm()->allocate(word_size);
3424   }
3425 }
3426 
3427 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3428   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3429   assert(delta_bytes > 0, "Must be");
3430 
3431   size_t before = 0;
3432   size_t after = 0;
3433   MetaWord* res;
3434   bool incremented;
3435 
3436   // Each thread increments the HWM at most once. Even if the thread fails to increment
3437   // the HWM, an allocation is still attempted. This is because another thread must then
3438   // have incremented the HWM and therefore the allocation might still succeed.
3439   do {
3440     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
3441     res = allocate(word_size, mdtype);
3442   } while (!incremented && res == NULL);
3443 
3444   if (incremented) {
3445     tracer()->report_gc_threshold(before, after,
3446                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3447     if (PrintGCDetails && Verbose) {
3448       gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
3449           " to " SIZE_FORMAT, before, after);
3450     }
3451   }
3452 
3453   return res;
3454 }
3455 
3456 // Space allocated in the Metaspace.  This may
3457 // be across several metadata virtual spaces.
3458 char* Metaspace::bottom() const {
3459   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3460   return (char*)vsm()->current_chunk()->bottom();
3461 }
3462 
3463 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3464   if (mdtype == ClassType) {
3465     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3466   } else {
3467     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3468   }
3469 }
3470 
3471 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3472   if (mdtype == ClassType) {
3473     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3474   } else {
3475     return vsm()->sum_free_in_chunks_in_use();
3476   }
3477 }
3478 
3479 // Space capacity in the Metaspace.  It includes
3480 // space in the list of chunks from which allocations
3481 // have been made. Don't include space in the global freelist and
3482 // in the space available in the dictionary which
3483 // is already counted in some chunk.
3484 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3485   if (mdtype == ClassType) {
3486     return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3487   } else {
3488     return vsm()->sum_capacity_in_chunks_in_use();
3489   }
3490 }
3491 
3492 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3493   return used_words_slow(mdtype) * BytesPerWord;
3494 }
3495 
3496 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3497   return capacity_words_slow(mdtype) * BytesPerWord;
3498 }
3499 
3500 size_t Metaspace::allocated_blocks_bytes() const {
3501   return vsm()->allocated_blocks_bytes() +
3502       (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
3503 }
3504 
3505 size_t Metaspace::allocated_chunks_bytes() const {
3506   return vsm()->allocated_chunks_bytes() +
3507       (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
3508 }
3509 
3510 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3511   assert(!SafepointSynchronize::is_at_safepoint()
3512          || Thread::current()->is_VM_thread(), "should be the VM thread");
3513 
3514   if (DumpSharedSpaces && PrintSharedSpaces) {
3515     record_deallocation(ptr, vsm()->get_raw_word_size(word_size));
3516   }
3517 
3518   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3519 
3520   if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
3521     // Dark matter.  Too small for dictionary.
3522 #ifdef ASSERT
3523     Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3524 #endif
3525     return;
3526   }
3527   if (is_class && using_class_space()) {
3528     class_vsm()->deallocate(ptr, word_size);
3529   } else {
3530     vsm()->deallocate(ptr, word_size);
3531   }
3532 }
3533 
3534 
3535 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3536                               bool read_only, MetaspaceObj::Type type, TRAPS) {
3537   if (HAS_PENDING_EXCEPTION) {
3538     assert(false, "Should not allocate with exception pending");
3539     return NULL;  // caller does a CHECK_NULL too
3540   }
3541 
3542   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3543         "ClassLoaderData::the_null_class_loader_data() should have been used.");
3544 
3545   // Allocate in metaspaces without taking out a lock, because it deadlocks
3546   // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
3547   // to revisit this for application class data sharing.
3548   if (DumpSharedSpaces) {
3549     assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3550     Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3551     MetaWord* result = space->allocate(word_size, NonClassType);
3552     if (result == NULL) {
3553       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3554     }
3555     if (PrintSharedSpaces) {
3556       space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3557     }
3558 
3559     // Zero initialize.
3560     Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3561 
3562     return result;
3563   }
3564 
3565   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3566 
3567   // Try to allocate metadata.
3568   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3569 
3570   if (result == NULL) {
3571     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
3572 
3573     // Allocation failed.
3574     if (is_init_completed()) {
3575       // Only start a GC if the bootstrapping has completed.
3576 
3577       // Try to clean out some memory and retry.
3578       result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3579           loader_data, word_size, mdtype);
3580     }
3581   }
3582 
3583   if (result == NULL) {
3584     report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3585   }
3586 
3587   // Zero initialize.
3588   Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3589 
3590   return result;
3591 }
3592 
3593 size_t Metaspace::class_chunk_size(size_t word_size) {
3594   assert(using_class_space(), "Has to use class space");
3595   return class_vsm()->calc_chunk_size(word_size);
3596 }
3597 
3598 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3599   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3600 
3601   // If result is still null, we are out of memory.
3602   if (Verbose && TraceMetadataChunkAllocation) {
3603     gclog_or_tty->print_cr("Metaspace allocation failed for size "
3604         SIZE_FORMAT, word_size);
3605     if (loader_data->metaspace_or_null() != NULL) {
3606       loader_data->dump(gclog_or_tty);
3607     }
3608     MetaspaceAux::dump(gclog_or_tty);
3609   }
3610 
3611   bool out_of_compressed_class_space = false;
3612   if (is_class_space_allocation(mdtype)) {
3613     Metaspace* metaspace = loader_data->metaspace_non_null();
3614     out_of_compressed_class_space =
3615       MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3616       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3617       CompressedClassSpaceSize;
3618   }
3619 
3620   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3621   const char* space_string = out_of_compressed_class_space ?
3622     "Compressed class space" : "Metaspace";
3623 
3624   report_java_out_of_memory(space_string);
3625 
3626   if (JvmtiExport::should_post_resource_exhausted()) {
3627     JvmtiExport::post_resource_exhausted(
3628         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3629         space_string);
3630   }
3631 
3632   if (!is_init_completed()) {
3633     vm_exit_during_initialization("OutOfMemoryError", space_string);
3634   }
3635 
3636   if (out_of_compressed_class_space) {
3637     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3638   } else {
3639     THROW_OOP(Universe::out_of_memory_error_metaspace());
3640   }
3641 }
3642 
3643 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
3644   switch (mdtype) {
3645     case Metaspace::ClassType: return "Class";
3646     case Metaspace::NonClassType: return "Metadata";
3647     default:
3648       assert(false, err_msg("Got bad mdtype: %d", (int) mdtype));
3649       return NULL;
3650   }
3651 }
3652 
3653 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3654   assert(DumpSharedSpaces, "sanity");
3655 
3656   int byte_size = (int)word_size * HeapWordSize;
3657   AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
3658 
3659   if (_alloc_record_head == NULL) {
3660     _alloc_record_head = _alloc_record_tail = rec;
3661   } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
3662     _alloc_record_tail->_next = rec;
3663     _alloc_record_tail = rec;
3664   } else {
3665     // slow linear search, but this doesn't happen that often, and only when dumping
3666     for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
3667       if (old->_ptr == ptr) {
3668         assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
3669         int remain_bytes = old->_byte_size - byte_size;
3670         assert(remain_bytes >= 0, "sanity");
3671         old->_type = type;
3672 
3673         if (remain_bytes == 0) {
3674           delete rec;
3675         } else {
3676           address remain_ptr = address(ptr) + byte_size;
3677           rec->_ptr = remain_ptr;
3678           rec->_byte_size = remain_bytes;
3679           rec->_type = MetaspaceObj::DeallocatedType;
3680           rec->_next = old->_next;
3681           old->_byte_size = byte_size;
3682           old->_next = rec;
3683         }
3684         return;
3685       }
3686     }
3687     assert(0, "reallocating a freed pointer that was not recorded");
3688   }
3689 }
3690 
3691 void Metaspace::record_deallocation(void* ptr, size_t word_size) {
3692   assert(DumpSharedSpaces, "sanity");
3693 
3694   for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3695     if (rec->_ptr == ptr) {
3696       assert(rec->_byte_size == (int)word_size * HeapWordSize, "sanity");
3697       rec->_type = MetaspaceObj::DeallocatedType;
3698       return;
3699     }
3700   }
3701 
3702   assert(0, "deallocating a pointer that was not recorded");
3703 }
3704 
3705 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3706   assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3707 
3708   address last_addr = (address)bottom();
3709 
3710   for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3711     address ptr = rec->_ptr;
3712     if (last_addr < ptr) {
3713       closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3714     }
3715     closure->doit(ptr, rec->_type, rec->_byte_size);
3716     last_addr = ptr + rec->_byte_size;
3717   }
3718 
3719   address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3720   if (last_addr < top) {
3721     closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3722   }
3723 }
3724 
3725 void Metaspace::purge(MetadataType mdtype) {
3726   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3727 }
3728 
3729 void Metaspace::purge() {
3730   MutexLockerEx cl(SpaceManager::expand_lock(),
3731                    Mutex::_no_safepoint_check_flag);
3732   purge(NonClassType);
3733   if (using_class_space()) {
3734     purge(ClassType);
3735   }
3736 }
3737 
3738 void Metaspace::print_on(outputStream* out) const {
3739   // Print both class virtual space counts and metaspace.
3740   if (Verbose) {
3741     vsm()->print_on(out);
3742     if (using_class_space()) {
3743       class_vsm()->print_on(out);
3744     }
3745   }
3746 }
3747 
3748 bool Metaspace::contains(const void* ptr) {
3749   if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
3750     return true;
3751   }
3752 
3753   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
3754      return true;
3755   }
3756 
3757   return get_space_list(NonClassType)->contains(ptr);
3758 }
3759 
3760 void Metaspace::verify() {
3761   vsm()->verify();
3762   if (using_class_space()) {
3763     class_vsm()->verify();
3764   }
3765 }
3766 
3767 void Metaspace::dump(outputStream* const out) const {
3768   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
3769   vsm()->dump(out);
3770   if (using_class_space()) {
3771     out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
3772     class_vsm()->dump(out);
3773   }
3774 }
3775 
3776 /////////////// Unit tests ///////////////
3777 
3778 #ifndef PRODUCT
3779 
3780 class TestMetaspaceAuxTest : AllStatic {
3781  public:
3782   static void test_reserved() {
3783     size_t reserved = MetaspaceAux::reserved_bytes();
3784 
3785     assert(reserved > 0, "assert");
3786 
3787     size_t committed  = MetaspaceAux::committed_bytes();
3788     assert(committed <= reserved, "assert");
3789 
3790     size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3791     assert(reserved_metadata > 0, "assert");
3792     assert(reserved_metadata <= reserved, "assert");
3793 
3794     if (UseCompressedClassPointers) {
3795       size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3796       assert(reserved_class > 0, "assert");
3797       assert(reserved_class < reserved, "assert");
3798     }
3799   }
3800 
3801   static void test_committed() {
3802     size_t committed = MetaspaceAux::committed_bytes();
3803 
3804     assert(committed > 0, "assert");
3805 
3806     size_t reserved  = MetaspaceAux::reserved_bytes();
3807     assert(committed <= reserved, "assert");
3808 
3809     size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3810     assert(committed_metadata > 0, "assert");
3811     assert(committed_metadata <= committed, "assert");
3812 
3813     if (UseCompressedClassPointers) {
3814       size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3815       assert(committed_class > 0, "assert");
3816       assert(committed_class < committed, "assert");
3817     }
3818   }
3819 
3820   static void test_virtual_space_list_large_chunk() {
3821     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3822     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3823     // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be
3824     // vm_allocation_granularity aligned on Windows.
3825     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3826     large_size += (os::vm_page_size()/BytesPerWord);
3827     vs_list->get_new_chunk(large_size, large_size, 0);
3828   }
3829 
3830   static void test() {
3831     test_reserved();
3832     test_committed();
3833     test_virtual_space_list_large_chunk();
3834   }
3835 };
3836 
3837 void TestMetaspaceAux_test() {
3838   TestMetaspaceAuxTest::test();
3839 }
3840 
3841 class TestVirtualSpaceNodeTest {
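       // Splits a leftover word count greedily into medium, small and
       // specialized chunks. Worked example (using the data-space chunk sizes,
       // as an illustration): words_left = 9600 with MediumChunk = 8192,
       // SmallChunk = 512 and SpecializedChunk = 128 yields 1 medium, 2 small
       // and 3 specialized chunks, with nothing left over.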
3842   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3843                                           size_t& num_small_chunks,
3844                                           size_t& num_specialized_chunks) {
3845     num_medium_chunks = words_left / MediumChunk;
3846     words_left = words_left % MediumChunk;
3847 
3848     num_small_chunks = words_left / SmallChunk;
3849     words_left = words_left % SmallChunk;
3850     // how many specialized chunks can we get?
3851     num_specialized_chunks = words_left / SpecializedChunk;
3852     assert(words_left % SpecializedChunk == 0, "should be nothing left");
3853   }
3854 
3855  public:
3856   static void test() {
3857     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3858     const size_t vsn_test_size_words = MediumChunk  * 4;
3859     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3860 
3861     // The chunk sizes must be multiples of each other, or this will fail
3862     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3863     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3864 
3865     { // No committed memory in VSN
3866       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3867       VirtualSpaceNode vsn(vsn_test_size_bytes);
3868       vsn.initialize();
3869       vsn.retire(&cm);
3870       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3871     }
3872 
3873     { // All of VSN is committed, half is used by chunks
3874       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3875       VirtualSpaceNode vsn(vsn_test_size_bytes);
3876       vsn.initialize();
3877       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3878       vsn.get_chunk_vs(MediumChunk);
3879       vsn.get_chunk_vs(MediumChunk);
3880       vsn.retire(&cm);
3881       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3882       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3883     }
3884 
3885     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3886     // This doesn't work for systems with vm_page_size >= 16K.
3887     if (page_chunks < MediumChunk) {
3888       // 4 pages of VSN is committed, some is used by chunks
3889       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3890       VirtualSpaceNode vsn(vsn_test_size_bytes);
3891 
3892       vsn.initialize();
3893       vsn.expand_by(page_chunks, page_chunks);
3894       vsn.get_chunk_vs(SmallChunk);
3895       vsn.get_chunk_vs(SpecializedChunk);
3896       vsn.retire(&cm);
3897 
3898       // committed - used = words left to retire
3899       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3900 
3901       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3902       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3903 
3904       assert(num_medium_chunks == 0, "should not get any medium chunks");
3905       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3906       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3907     }
3908 
3909     { // Half of VSN is committed, a humongous chunk is used
3910       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3911       VirtualSpaceNode vsn(vsn_test_size_bytes);
3912       vsn.initialize();
3913       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3914       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3915       vsn.retire(&cm);
3916 
3917       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3918       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3919       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3920 
3921       assert(num_medium_chunks == 0, "should not get any medium chunks");
3922       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3923       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3924     }
3925 
3926   }
3927 
3928 #define assert_is_available_positive(word_size) \
3929   assert(vsn.is_available(word_size), \
3930     err_msg(#word_size ": " PTR_FORMAT " bytes were not available in " \
3931             "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3932             (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));
3933 
3934 #define assert_is_available_negative(word_size) \
3935   assert(!vsn.is_available(word_size), \
3936     err_msg(#word_size ": " PTR_FORMAT " bytes should not be available in " \
3937             "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3938             (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));
3939 
3940   static void test_is_available_positive() {
3941     // Reserve some memory.
3942     VirtualSpaceNode vsn(os::vm_allocation_granularity());
3943     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3944 
3945     // Commit some memory.
3946     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3947     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3948     assert(expanded, "Failed to commit");
3949 
3950     // Check that is_available accepts the committed size.
3951     assert_is_available_positive(commit_word_size);
3952 
3953     // Check that is_available accepts half the committed size.
3954     size_t expand_word_size = commit_word_size / 2;
3955     assert_is_available_positive(expand_word_size);
3956   }
3957 
3958   static void test_is_available_negative() {
3959     // Reserve some memory.
3960     VirtualSpaceNode vsn(os::vm_allocation_granularity());
3961     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3962 
3963     // Commit some memory.
3964     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3965     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3966     assert(expanded, "Failed to commit");
3967 
3968     // Check that is_available doesn't accept a too large size.
3969     size_t two_times_commit_word_size = commit_word_size * 2;
3970     assert_is_available_negative(two_times_commit_word_size);
3971   }
3972 
3973   static void test_is_available_overflow() {
3974     // Reserve some memory.
3975     VirtualSpaceNode vsn(os::vm_allocation_granularity());
3976     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3977 
3978     // Commit some memory.
3979     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3980     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3981     assert(expanded, "Failed to commit");
3982 
3983     // Calculate a size that will overflow the virtual space size.
3984     void* virtual_space_max = (void*)(uintptr_t)-1;
3985     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
3986     size_t overflow_size = bottom_to_max + BytesPerWord;
3987     size_t overflow_word_size = overflow_size / BytesPerWord;
3988 
3989     // Check that is_available can handle the overflow.
3990     assert_is_available_negative(overflow_word_size);
3991   }
3992 
3993   static void test_is_available() {
3994     TestVirtualSpaceNodeTest::test_is_available_positive();
3995     TestVirtualSpaceNodeTest::test_is_available_negative();
3996     TestVirtualSpaceNodeTest::test_is_available_overflow();
3997   }
3998 };
3999 
4000 void TestVirtualSpaceNode_test() {
4001   TestVirtualSpaceNodeTest::test();
4002   TestVirtualSpaceNodeTest::test_is_available();
4003 }
4004 #endif