1 /*
   2  * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "gc_interface/collectedHeap.hpp"
  26 #include "memory/allocation.hpp"
  27 #include "memory/binaryTreeDictionary.hpp"
  28 #include "memory/freeList.hpp"
  29 #include "memory/collectorPolicy.hpp"
  30 #include "memory/filemap.hpp"
  32 #include "memory/gcLocker.hpp"
  33 #include "memory/metachunk.hpp"
  34 #include "memory/metaspace.hpp"
  35 #include "memory/metaspaceShared.hpp"
  36 #include "memory/resourceArea.hpp"
  37 #include "memory/universe.hpp"
  38 #include "runtime/atomic.inline.hpp"
  39 #include "runtime/globals.hpp"
  40 #include "runtime/init.hpp"
  41 #include "runtime/java.hpp"
  42 #include "runtime/mutex.hpp"
  43 #include "runtime/orderAccess.hpp"
  44 #include "services/memTracker.hpp"
  45 #include "services/memoryService.hpp"
  46 #include "utilities/copy.hpp"
  47 #include "utilities/debug.hpp"
  48 
  49 typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
  50 typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
  51 
  52 // Set this constant to enable slow integrity checking of the free chunk lists
  53 const bool metaspace_slow_verify = false;
  54 
  55 size_t const allocation_from_dictionary_limit = 4 * K;
  56 
  57 MetaWord* last_allocated = 0;
  58 
  59 size_t Metaspace::_compressed_class_space_size;
  60 
  61 // Used in declarations in SpaceManager and ChunkManager
  62 enum ChunkIndex {
  63   ZeroIndex = 0,
  64   SpecializedIndex = ZeroIndex,
  65   SmallIndex = SpecializedIndex + 1,
  66   MediumIndex = SmallIndex + 1,
  67   HumongousIndex = MediumIndex + 1,
  68   NumberOfFreeLists = 3,
  69   NumberOfInUseLists = 4
  70 };
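     // SpecializedIndex, SmallIndex and MediumIndex also index the fixed-size
     // free lists in ChunkManager::_free_chunks[]; humongous chunks are kept in
     // a dictionary instead, so HumongousIndex only appears in the in-use chunk
     // lists of a SpaceManager.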
  71 
  72 enum ChunkSizes {    // in words.
  73   ClassSpecializedChunk = 128,
  74   SpecializedChunk = 128,
  75   ClassSmallChunk = 256,
  76   SmallChunk = 512,
  77   ClassMediumChunk = 4 * K,
  78   MediumChunk = 8 * K
  79 };
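     // The sizes above are in words; for example, with 8-byte words (a 64-bit
     // VM) a SpecializedChunk is 1K bytes, a SmallChunk 4K bytes and a
     // MediumChunk 64K bytes.  The Class* variants are used for the (smaller)
     // compressed class space.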
  80 
  81 static ChunkIndex next_chunk_index(ChunkIndex i) {
  82   assert(i < NumberOfInUseLists, "Out of bounds");
  83   return (ChunkIndex) (i+1);
  84 }
  85 
  86 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
  87 uint MetaspaceGC::_shrink_factor = 0;
  88 bool MetaspaceGC::_should_concurrent_collect = false;
  89 
  90 typedef class FreeList<Metachunk> ChunkList;
  91 
  92 // Manages the global free lists of chunks.
  93 class ChunkManager : public CHeapObj<mtInternal> {
  94   friend class TestVirtualSpaceNodeTest;
  95 
  96   // Free list of chunks of different sizes.
  97   //   SpecializedChunk
  98   //   SmallChunk
  99   //   MediumChunk
 100   //   HumongousChunk
 101   ChunkList _free_chunks[NumberOfFreeLists];
 102 
 103   //   HumongousChunk
 104   ChunkTreeDictionary _humongous_dictionary;
 105 
 106   // Total words and number of chunks on this ChunkManager's free lists.
 107   size_t _free_chunks_total;
 108   size_t _free_chunks_count;
 109 
 110   void dec_free_chunks_total(size_t v) {
 111     assert(_free_chunks_count > 0 &&
 112              _free_chunks_total > 0,
 113              "About to go negative");
 114     Atomic::add_ptr(-1, &_free_chunks_count);
 115     jlong minus_v = (jlong) - (jlong) v;
 116     Atomic::add_ptr(minus_v, &_free_chunks_total);
 117   }
 118 
 119   // Debug support
 120 
 121   size_t sum_free_chunks();
 122   size_t sum_free_chunks_count();
 123 
 124   void locked_verify_free_chunks_total();
 125   void slow_locked_verify_free_chunks_total() {
 126     if (metaspace_slow_verify) {
 127       locked_verify_free_chunks_total();
 128     }
 129   }
 130   void locked_verify_free_chunks_count();
 131   void slow_locked_verify_free_chunks_count() {
 132     if (metaspace_slow_verify) {
 133       locked_verify_free_chunks_count();
 134     }
 135   }
 136   void verify_free_chunks_count();
 137 
 138  public:
 139 
 140   ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
 141       : _free_chunks_total(0), _free_chunks_count(0) {
 142     _free_chunks[SpecializedIndex].set_size(specialized_size);
 143     _free_chunks[SmallIndex].set_size(small_size);
 144     _free_chunks[MediumIndex].set_size(medium_size);
 145   }
 146 
 147   // Allocate (remove) a chunk from the global freelist.
 148   Metachunk* chunk_freelist_allocate(size_t word_size);
 149 
 150   // Map a size to a list index assuming that there are lists
 151   // for special, small, medium, and humongous chunks.
 152   static ChunkIndex list_index(size_t size);
 153 
 154   // Remove the chunk from its freelist.  It is
 155   // expected to be on one of the _free_chunks[] lists.
 156   void remove_chunk(Metachunk* chunk);
 157 
 158   // Add the simple linked list of chunks to the freelist of chunks
 159   // of type index.
 160   void return_chunks(ChunkIndex index, Metachunk* chunks);
 161 
 162   // Total of the space in the free chunks list
 163   size_t free_chunks_total_words();
 164   size_t free_chunks_total_bytes();
 165 
 166   // Number of chunks in the free chunks list
 167   size_t free_chunks_count();
 168 
 169   void inc_free_chunks_total(size_t v, size_t count = 1) {
 170     Atomic::add_ptr(count, &_free_chunks_count);
 171     Atomic::add_ptr(v, &_free_chunks_total);
 172   }
 173   ChunkTreeDictionary* humongous_dictionary() {
 174     return &_humongous_dictionary;
 175   }
 176 
 177   ChunkList* free_chunks(ChunkIndex index);
 178 
 179   // Returns the list for the given chunk word size.
 180   ChunkList* find_free_chunks_list(size_t word_size);
 181 
 182   // Remove from a list by size.  Selects list based on size of chunk.
 183   Metachunk* free_chunks_get(size_t chunk_word_size);
 184 
 185   // Debug support
 186   void verify();
 187   void slow_verify() {
 188     if (metaspace_slow_verify) {
 189       verify();
 190     }
 191   }
 192   void locked_verify();
 193   void slow_locked_verify() {
 194     if (metaspace_slow_verify) {
 195       locked_verify();
 196     }
 197   }
 198   void verify_free_chunks_total();
 199 
 200   void locked_print_free_chunks(outputStream* st);
 201   void locked_print_sum_free_chunks(outputStream* st);
 202 
 203   void print_on(outputStream* st) const;
 204 };
 205 
 206 // Used to manage the free list of Metablocks (a block corresponds
 207 // to the allocation of a quantum of metadata).
 208 class BlockFreelist VALUE_OBJ_CLASS_SPEC {
 209   BlockTreeDictionary* _dictionary;
 210 
 211   // Only allocate and split from freelist if the size of the allocation
 212   // is at least 1/4th the size of the available block.
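       // For example, a 10-word request may be cut from a free block of up to
       // 40 words; a larger block is returned to the dictionary untouched.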
 213   const static int WasteMultiplier = 4;
 214 
 215   // Accessors
 216   BlockTreeDictionary* dictionary() const { return _dictionary; }
 217 
 218  public:
 219   BlockFreelist();
 220   ~BlockFreelist();
 221 
 222   // Get and return a block to the free list
 223   MetaWord* get_block(size_t word_size);
 224   void return_block(MetaWord* p, size_t word_size);
 225 
 226   size_t total_size() {
 227     if (dictionary() == NULL) {
 228       return 0;
 229     } else {
 230       return dictionary()->total_size();
 231     }
 232   }
 233 
 234   void print_on(outputStream* st) const;
 235 };
 236 
 237 // A VirtualSpaceList node.
 238 class VirtualSpaceNode : public CHeapObj<mtClass> {
 239   friend class VirtualSpaceList;
 240 
 241   // Link to next VirtualSpaceNode
 242   VirtualSpaceNode* _next;
 243 
 244   // total in the VirtualSpace
 245   MemRegion _reserved;
 246   ReservedSpace _rs;
 247   VirtualSpace _virtual_space;
 248   MetaWord* _top;
 249   // count of chunks contained in this VirtualSpace
 250   uintx _container_count;
 251 
 252   // Convenience functions to access the _virtual_space
 253   char* low()  const { return virtual_space()->low(); }
 254   char* high() const { return virtual_space()->high(); }
 255 
 256   // The first Metachunk will be allocated at the bottom of the
 257   // VirtualSpace
 258   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 259 
 260   // Committed but unused space in the virtual space
 261   size_t free_words_in_vs() const;
 262  public:
 263 
 264   VirtualSpaceNode(size_t byte_size);
 265   VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
 266   ~VirtualSpaceNode();
 267 
 268   // Convenience functions for logical bottom and end
 269   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 270   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 271 
 272   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 273   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 274 
 275   bool is_pre_committed() const { return _virtual_space.special(); }
 276 
 277   // address of next available space in _virtual_space;
 278   // Accessors
 279   VirtualSpaceNode* next() { return _next; }
 280   void set_next(VirtualSpaceNode* v) { _next = v; }
 281 
 282   void set_reserved(MemRegion const v) { _reserved = v; }
 283   void set_top(MetaWord* v) { _top = v; }
 284 
 285   // Accessors
 286   MemRegion* reserved() { return &_reserved; }
 287   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 288 
 289   // Returns true if "word_size" is available in the VirtualSpace
 290   bool is_available(size_t word_size) { return _top + word_size <= end(); }
 291 
 292   MetaWord* top() const { return _top; }
 293   void inc_top(size_t word_size) { _top += word_size; }
 294 
 295   uintx container_count() { return _container_count; }
 296   void inc_container_count();
 297   void dec_container_count();
 298 #ifdef ASSERT
 299   uint container_count_slow();
 300   void verify_container_count();
 301 #endif
 302 
 303   // used and capacity in this single entry in the list
 304   size_t used_words_in_vs() const;
 305   size_t capacity_words_in_vs() const;
 306 
 307   bool initialize();
 308 
 309   // get space from the virtual space
 310   Metachunk* take_from_committed(size_t chunk_word_size);
 311 
 312   // Allocate a chunk from the virtual space and return it.
 313   Metachunk* get_chunk_vs(size_t chunk_word_size);
 314 
 315   // Expands/shrinks the committed space in a virtual space.  Delegates
 316   // to Virtualspace
 317   bool expand_by(size_t min_words, size_t preferred_words);
 318 
 319   // In preparation for deleting this node, remove all the chunks
 320   // in the node from any freelist.
 321   void purge(ChunkManager* chunk_manager);
 322 
 323   // If an allocation doesn't fit in the current node a new node is created.
 324   // Allocate chunks out of the remaining committed space in this node
 325   // to avoid wasting that memory.
 326   // This always adds up because all the chunk sizes are multiples of
 327   // the smallest chunk size.
 328   void retire(ChunkManager* chunk_manager);
 329 
 330 #ifdef ASSERT
 331   // Debug support
 332   void mangle();
 333 #endif
 334 
 335   void print_on(outputStream* st) const;
 336 };
 337 
 338 #define assert_is_ptr_aligned(ptr, alignment) \
 339   assert(is_ptr_aligned(ptr, alignment),      \
 340     err_msg(PTR_FORMAT " is not aligned to "  \
 341       SIZE_FORMAT, ptr, alignment))
 342 
 343 #define assert_is_size_aligned(size, alignment) \
 344   assert(is_size_aligned(size, alignment),      \
 345     err_msg(SIZE_FORMAT " is not aligned to "   \
 346        SIZE_FORMAT, size, alignment))
 347 
 348 
 349 // Decide if large pages should be committed when the memory is reserved.
 350 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 351   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 352     size_t words = bytes / BytesPerWord;
 353     bool is_class = false; // We never reserve large pages for the class space.
 354     if (MetaspaceGC::can_expand(words, is_class) &&
 355         MetaspaceGC::allowed_expansion() >= words) {
 356       return true;
 357     }
 358   }
 359 
 360   return false;
 361 }
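     // In other words: when the OS cannot commit large pages piecemeal, the
     // whole reservation has to be committed up front, which is only done if
     // that much metaspace expansion would be allowed anyway.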
 362 
 363 // bytes is the size, in bytes, of the associated virtual space.
 364 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
 365   assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
 366 
 367   // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
 368   // a configurable address, generally at the top of the Java heap so other
 369   // memory addresses don't conflict.
 370   if (DumpSharedSpaces) {
 371     bool large_pages = false; // No large pages when dumping the CDS archive.
 372     char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
 373 
 374     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
 375     if (_rs.is_reserved()) {
 376       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
 377     } else {
 378       // Get a mmap region anywhere if the SharedBaseAddress fails.
 379       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 380     }
 381     MetaspaceShared::set_shared_rs(&_rs);
 382   } else {
 383     bool large_pages = should_commit_large_pages_when_reserving(bytes);
 384 
 385     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 386   }
 387 
 388   if (_rs.is_reserved()) {
 389     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 390     assert(_rs.size() != 0, "Catch if we get a 0 size");
 391     assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
 392     assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
 393 
 394     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 395   }
 396 }
 397 
 398 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 399   Metachunk* chunk = first_chunk();
 400   Metachunk* invalid_chunk = (Metachunk*) top();
 401   while (chunk < invalid_chunk ) {
 402     assert(chunk->is_tagged_free(), "Should be tagged free");
 403     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 404     chunk_manager->remove_chunk(chunk);
 405     assert(chunk->next() == NULL &&
 406            chunk->prev() == NULL,
 407            "Was not removed from its list");
 408     chunk = (Metachunk*) next;
 409   }
 410 }
 411 
 412 #ifdef ASSERT
 413 uint VirtualSpaceNode::container_count_slow() {
 414   uint count = 0;
 415   Metachunk* chunk = first_chunk();
 416   Metachunk* invalid_chunk = (Metachunk*) top();
 417   while (chunk < invalid_chunk ) {
 418     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 419     // Don't count the chunks on the free lists.  Those are
 420     // still part of the VirtualSpaceNode but not currently
 421     // counted.
 422     if (!chunk->is_tagged_free()) {
 423       count++;
 424     }
 425     chunk = (Metachunk*) next;
 426   }
 427   return count;
 428 }
 429 #endif
 430 
 431 // List of VirtualSpaces for metadata allocation.
 432 class VirtualSpaceList : public CHeapObj<mtClass> {
 433   friend class VirtualSpaceNode;
 434 
 435   enum VirtualSpaceSizes {
 436     VirtualSpaceSize = 256 * K
 437   };
 438 
 439   // Head of the list
 440   VirtualSpaceNode* _virtual_space_list;
 441   // virtual space currently being used for allocations
 442   VirtualSpaceNode* _current_virtual_space;
 443 
 444   // Is this VirtualSpaceList used for the compressed class space
 445   bool _is_class;
 446 
 447   // Sum of reserved and committed memory in the virtual spaces
 448   size_t _reserved_words;
 449   size_t _committed_words;
 450 
 451   // Number of virtual spaces
 452   size_t _virtual_space_count;
 453 
 454   ~VirtualSpaceList();
 455 
 456   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
 457 
 458   void set_virtual_space_list(VirtualSpaceNode* v) {
 459     _virtual_space_list = v;
 460   }
 461   void set_current_virtual_space(VirtualSpaceNode* v) {
 462     _current_virtual_space = v;
 463   }
 464 
 465   void link_vs(VirtualSpaceNode* new_entry);
 466 
 467   // Get another virtual space and add it to the list.  This
 468   // is typically prompted by a failed attempt to allocate a chunk
 469   // and is typically followed by the allocation of a chunk.
 470   bool create_new_virtual_space(size_t vs_word_size);
 471 
 472   // Chunk up the unused committed space in the current
 473   // virtual space and add the chunks to the free list.
 474   void retire_current_virtual_space();
 475 
 476  public:
 477   VirtualSpaceList(size_t word_size);
 478   VirtualSpaceList(ReservedSpace rs);
 479 
 480   size_t free_bytes();
 481 
 482   Metachunk* get_new_chunk(size_t word_size,
 483                            size_t grow_chunks_by_words,
 484                            size_t medium_chunk_bunch);
 485 
 486   bool expand_node_by(VirtualSpaceNode* node,
 487                       size_t min_words,
 488                       size_t preferred_words);
 489 
 490   bool expand_by(size_t min_words,
 491                  size_t preferred_words);
 492 
 493   VirtualSpaceNode* current_virtual_space() {
 494     return _current_virtual_space;
 495   }
 496 
 497   bool is_class() const { return _is_class; }
 498 
 499   bool initialization_succeeded() { return _virtual_space_list != NULL; }
 500 
 501   size_t reserved_words()  { return _reserved_words; }
 502   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
 503   size_t committed_words() { return _committed_words; }
 504   size_t committed_bytes() { return committed_words() * BytesPerWord; }
 505 
 506   void inc_reserved_words(size_t v);
 507   void dec_reserved_words(size_t v);
 508   void inc_committed_words(size_t v);
 509   void dec_committed_words(size_t v);
 510   void inc_virtual_space_count();
 511   void dec_virtual_space_count();
 512 
 513   // Unlink empty VirtualSpaceNodes and free them.
 514   void purge(ChunkManager* chunk_manager);
 515 
 516   bool contains(const void *ptr);
 517 
 518   void print_on(outputStream* st) const;
 519 
 520   class VirtualSpaceListIterator : public StackObj {
 521     VirtualSpaceNode* _virtual_spaces;
 522    public:
 523     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
 524       _virtual_spaces(virtual_spaces) {}
 525 
 526     bool repeat() {
 527       return _virtual_spaces != NULL;
 528     }
 529 
 530     VirtualSpaceNode* get_next() {
 531       VirtualSpaceNode* result = _virtual_spaces;
 532       if (_virtual_spaces != NULL) {
 533         _virtual_spaces = _virtual_spaces->next();
 534       }
 535       return result;
 536     }
 537   };
 538 };
 539 
 540 class Metadebug : AllStatic {
 541   // Debugging support for Metaspaces
 542   static int _allocation_fail_alot_count;
 543 
 544  public:
 545 
 546   static void init_allocation_fail_alot_count();
 547 #ifdef ASSERT
 548   static bool test_metadata_failure();
 549 #endif
 550 };
 551 
 552 int Metadebug::_allocation_fail_alot_count = 0;
 553 
 554 //  SpaceManager - used by Metaspace to handle allocations
 555 class SpaceManager : public CHeapObj<mtClass> {
 556   friend class Metaspace;
 557   friend class Metadebug;
 558 
 559  private:
 560 
 561   // Protects allocations and contains().
 562   Mutex* const _lock;
 563 
 564   // Type of metadata allocated.
 565   Metaspace::MetadataType _mdtype;
 566 
 567   // List of chunks in use by this SpaceManager.  Allocations
 568   // are done from the current chunk.  The list is used for deallocating
 569   // chunks when the SpaceManager is freed.
 570   Metachunk* _chunks_in_use[NumberOfInUseLists];
 571   Metachunk* _current_chunk;
 572 
 573   // Number of small chunks to allocate to a manager
 574   // If class space manager, small chunks are unlimited
 575   static uint const _small_chunk_limit;
 576 
 577   // Sum of all space in allocated chunks
 578   size_t _allocated_blocks_words;
 579 
 580   // Sum of all allocated chunks
 581   size_t _allocated_chunks_words;
 582   size_t _allocated_chunks_count;
 583 
 584   // Free lists of blocks are per SpaceManager since they
 585   // are assumed to be in chunks in use by the SpaceManager
 586   // and all chunks in use by a SpaceManager are freed when
 587   // the class loader using the SpaceManager is collected.
 588   BlockFreelist _block_freelists;
 589 
 590   // protects virtualspace and chunk expansions
 591   static const char*  _expand_lock_name;
 592   static const int    _expand_lock_rank;
 593   static Mutex* const _expand_lock;
 594 
 595  private:
 596   // Accessors
 597   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
 598   void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }
 599 
 600   BlockFreelist* block_freelists() const {
 601     return (BlockFreelist*) &_block_freelists;
 602   }
 603 
 604   Metaspace::MetadataType mdtype() { return _mdtype; }
 605 
 606   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
 607   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
 608 
 609   Metachunk* current_chunk() const { return _current_chunk; }
 610   void set_current_chunk(Metachunk* v) {
 611     _current_chunk = v;
 612   }
 613 
 614   Metachunk* find_current_chunk(size_t word_size);
 615 
 616   // Add chunk to the list of chunks in use
 617   void add_chunk(Metachunk* v, bool make_current);
 618   void retire_current_chunk();
 619 
 620   Mutex* lock() const { return _lock; }
 621 
 622   const char* chunk_size_name(ChunkIndex index) const;
 623 
 624  protected:
 625   void initialize();
 626 
 627  public:
 628   SpaceManager(Metaspace::MetadataType mdtype,
 629                Mutex* lock);
 630   ~SpaceManager();
 631 
 632   enum ChunkMultiples {
 633     MediumChunkMultiple = 4
 634   };
 635 
 636   bool is_class() { return _mdtype == Metaspace::ClassType; }
 637 
 638   // Accessors
 639   size_t specialized_chunk_size() { return (size_t)(is_class() ? ClassSpecializedChunk : SpecializedChunk); }
 640   size_t small_chunk_size()       { return (size_t)(is_class() ? ClassSmallChunk : SmallChunk); }
 641   size_t medium_chunk_size()      { return (size_t)(is_class() ? ClassMediumChunk : MediumChunk); }
 642   size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }
 643 
 644   size_t smallest_chunk_size()  { return specialized_chunk_size(); }
 645 
 646   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
 647   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
 648   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
 649   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
 650 
 651   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
 652 
 653   static Mutex* expand_lock() { return _expand_lock; }
 654 
 655   // Increment the per Metaspace and global running sums for Metachunks
 656   // by the given size.  This is used when a Metachunk is added to
 657   // the in-use list.
 658   void inc_size_metrics(size_t words);
 659   // Increment the per Metaspace and global running sums for Metablocks by the
 660   // given size.  This is used when a Metablock is allocated.
 661   void inc_used_metrics(size_t words);
 662   // Delete this SpaceManager's portion of the running sums. That is,
 663   // the global running sums for the Metachunks and Metablocks are
 664   // decremented for all the Metachunks in-use by this SpaceManager.
 665   void dec_total_from_size_metrics();
 666 
 667   // Set the sizes for the initial chunks.
 668   void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
 669                                size_t* chunk_word_size,
 670                                size_t* class_chunk_word_size);
 671 
 672   size_t sum_capacity_in_chunks_in_use() const;
 673   size_t sum_used_in_chunks_in_use() const;
 674   size_t sum_free_in_chunks_in_use() const;
 675   size_t sum_waste_in_chunks_in_use() const;
 676   size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
 677 
 678   size_t sum_count_in_chunks_in_use();
 679   size_t sum_count_in_chunks_in_use(ChunkIndex i);
 680 
 681   Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);
 682 
 683   // Block allocation and deallocation.
 684   // Allocates a block from the current chunk
 685   MetaWord* allocate(size_t word_size);
 686 
 687   // Helper for allocations
 688   MetaWord* allocate_work(size_t word_size);
 689 
 690   // Returns a block to the per manager freelist
 691   void deallocate(MetaWord* p, size_t word_size);
 692 
 693   // Based on the allocation size and a minimum chunk size,
 694   // return the chunk size to use (for expanding space for chunk allocation).
 695   size_t calc_chunk_size(size_t allocation_word_size);
 696 
 697   // Called when an allocation from the current chunk fails.
 698   // Gets a new chunk (may require getting a new virtual space),
 699   // and allocates from that chunk.
 700   MetaWord* grow_and_allocate(size_t word_size);
 701 
 702   // Notify memory usage to MemoryService.
 703   void track_metaspace_memory_usage();
 704 
 705   // debugging support.
 706 
 707   void dump(outputStream* const out) const;
 708   void print_on(outputStream* st) const;
 709   void locked_print_chunks_in_use_on(outputStream* st) const;
 710 
 711   void verify();
 712   void verify_chunk_size(Metachunk* chunk);
 713   NOT_PRODUCT(void mangle_freed_chunks();)
 714 #ifdef ASSERT
 715   void verify_allocated_blocks_words();
 716 #endif
 717 
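       // Returns the number of words an allocation of word_size actually
       // consumes: at least sizeof(Metablock), so that a deallocated block can
       // later be put on the block freelist, rounded up to
       // Metachunk::object_alignment().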
 718   size_t get_raw_word_size(size_t word_size) {
 719     size_t byte_size = word_size * BytesPerWord;
 720 
 721     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
 722     raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());
 723 
 724     size_t raw_word_size = raw_bytes_size / BytesPerWord;
 725     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
 726 
 727     return raw_word_size;
 728   }
 729 };
 730 
 731 uint const SpaceManager::_small_chunk_limit = 4;
 732 
 733 const char* SpaceManager::_expand_lock_name =
 734   "SpaceManager chunk allocation lock";
 735 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
 736 Mutex* const SpaceManager::_expand_lock =
 737   new Mutex(SpaceManager::_expand_lock_rank,
 738             SpaceManager::_expand_lock_name,
 739             Mutex::_allow_vm_block_flag);
 740 
 741 void VirtualSpaceNode::inc_container_count() {
 742   assert_lock_strong(SpaceManager::expand_lock());
 743   _container_count++;
 744   assert(_container_count == container_count_slow(),
 745          err_msg("Inconsistency in container_count: _container_count " SIZE_FORMAT
 746                  " container_count_slow() " SIZE_FORMAT,
 747                  _container_count, container_count_slow()));
 748 }
 749 
 750 void VirtualSpaceNode::dec_container_count() {
 751   assert_lock_strong(SpaceManager::expand_lock());
 752   _container_count--;
 753 }
 754 
 755 #ifdef ASSERT
 756 void VirtualSpaceNode::verify_container_count() {
 757   assert(_container_count == container_count_slow(),
 758     err_msg("Inconsistency in container_count: _container_count " SIZE_FORMAT
 759             " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
 760 }
 761 #endif
 762 
 763 // BlockFreelist methods
 764 
 765 BlockFreelist::BlockFreelist() : _dictionary(NULL) {}
 766 
 767 BlockFreelist::~BlockFreelist() {
 768   if (_dictionary != NULL) {
 769     if (Verbose && TraceMetadataChunkAllocation) {
 770       _dictionary->print_free_lists(gclog_or_tty);
 771     }
 772     delete _dictionary;
 773   }
 774 }
 775 
 776 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
 777   Metablock* free_chunk = ::new (p) Metablock(word_size);
 778   if (dictionary() == NULL) {
 779    _dictionary = new BlockTreeDictionary();
 780   }
 781   dictionary()->return_chunk(free_chunk);
 782 }
 783 
 784 MetaWord* BlockFreelist::get_block(size_t word_size) {
 785   if (dictionary() == NULL) {
 786     return NULL;
 787   }
 788 
 789   if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
 790     // Dark matter.  Too small for dictionary.
 791     return NULL;
 792   }
 793 
 794   Metablock* free_block =
 795     dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
 796   if (free_block == NULL) {
 797     return NULL;
 798   }
 799 
 800   const size_t block_size = free_block->size();
 801   if (block_size > WasteMultiplier * word_size) {
 802     return_block((MetaWord*)free_block, block_size);
 803     return NULL;
 804   }
 805 
 806   MetaWord* new_block = (MetaWord*)free_block;
 807   assert(block_size >= word_size, "Incorrect size of block from freelist");
 808   const size_t unused = block_size - word_size;
 809   if (unused >= TreeChunk<Metablock, FreeList>::min_size()) {
 810     return_block(new_block + word_size, unused);
 811   }
 812 
 813   return new_block;
 814 }
 815 
 816 void BlockFreelist::print_on(outputStream* st) const {
 817   if (dictionary() == NULL) {
 818     return;
 819   }
 820   dictionary()->print_free_lists(st);
 821 }
 822 
 823 // VirtualSpaceNode methods
 824 
 825 VirtualSpaceNode::~VirtualSpaceNode() {
 826   _rs.release();
 827 #ifdef ASSERT
 828   size_t word_size = sizeof(*this) / BytesPerWord;
 829   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
 830 #endif
 831 }
 832 
 833 size_t VirtualSpaceNode::used_words_in_vs() const {
 834   return pointer_delta(top(), bottom(), sizeof(MetaWord));
 835 }
 836 
 837 // Space committed in the VirtualSpace
 838 size_t VirtualSpaceNode::capacity_words_in_vs() const {
 839   return pointer_delta(end(), bottom(), sizeof(MetaWord));
 840 }
 841 
 842 size_t VirtualSpaceNode::free_words_in_vs() const {
 843   return pointer_delta(end(), top(), sizeof(MetaWord));
 844 }
 845 
 846 // Allocates the chunk from the virtual space only.
 847 // This interface is also used internally for debugging.  Not all
 848 // chunks removed here are necessarily used for allocation.
 849 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
 850   // Bottom of the new chunk
 851   MetaWord* chunk_limit = top();
 852   assert(chunk_limit != NULL, "Not safe to call this method");
 853 
 854   // The virtual spaces are always expanded by the
 855   // commit granularity to enforce the following condition.
 856   // Without this the is_available check will not work correctly.
 857   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
 858       "The committed memory doesn't match the expanded memory.");
 859 
 860   if (!is_available(chunk_word_size)) {
 861     if (TraceMetadataChunkAllocation) {
 862       gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
 863       // Dump some information about the virtual space that is nearly full
 864       print_on(gclog_or_tty);
 865     }
 866     return NULL;
 867   }
 868 
 869   // Take the space  (bump top on the current virtual space).
 870   inc_top(chunk_word_size);
 871 
 872   // Initialize the chunk
 873   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
 874   return result;
 875 }
 876 
 877 
 878 // Expand the virtual space (commit more of the reserved space)
 879 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
 880   size_t min_bytes = min_words * BytesPerWord;
 881   size_t preferred_bytes = preferred_words * BytesPerWord;
 882 
 883   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
 884 
 885   if (uncommitted < min_bytes) {
 886     return false;
 887   }
 888 
 889   size_t commit = MIN2(preferred_bytes, uncommitted);
 890   bool result = virtual_space()->expand_by(commit, false);
 891 
 892   assert(result, "Failed to commit memory");
 893 
 894   return result;
 895 }
 896 
 897 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
 898   assert_lock_strong(SpaceManager::expand_lock());
 899   Metachunk* result = take_from_committed(chunk_word_size);
 900   if (result != NULL) {
 901     inc_container_count();
 902   }
 903   return result;
 904 }
 905 
 906 bool VirtualSpaceNode::initialize() {
 907 
 908   if (!_rs.is_reserved()) {
 909     return false;
 910   }
 911 
 912   // These are necessary restrictions to make sure that the virtual space always
 913   // grows in steps of Metaspace::commit_alignment(). If both base and size are
 914   // aligned only the middle alignment of the VirtualSpace is used.
 915   assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
 916   assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
 917 
 918   // ReservedSpaces marked as special will have the entire memory
 919   // pre-committed. Setting a committed size will make sure that
 920   // committed_size and actual_committed_size agree.
 921   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
 922 
 923   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
 924                                             Metaspace::commit_alignment());
 925   if (result) {
 926     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
 927         "Checking that the pre-committed memory was registered by the VirtualSpace");
 928 
 929     set_top((MetaWord*)virtual_space()->low());
 930     set_reserved(MemRegion((HeapWord*)_rs.base(),
 931                  (HeapWord*)(_rs.base() + _rs.size())));
 932 
 933     assert(reserved()->start() == (HeapWord*) _rs.base(),
 934       err_msg("Reserved start was not set properly " PTR_FORMAT
 935         " != " PTR_FORMAT, reserved()->start(), _rs.base()));
 936     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
 937       err_msg("Reserved size was not set properly " SIZE_FORMAT
 938         " != " SIZE_FORMAT, reserved()->word_size(),
 939         _rs.size() / BytesPerWord));
 940   }
 941 
 942   return result;
 943 }
 944 
 945 void VirtualSpaceNode::print_on(outputStream* st) const {
 946   size_t used = used_words_in_vs();
 947   size_t capacity = capacity_words_in_vs();
 948   VirtualSpace* vs = virtual_space();
 949   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
 950            "[" PTR_FORMAT ", " PTR_FORMAT ", "
 951            PTR_FORMAT ", " PTR_FORMAT ")",
 952            vs, capacity / K,
 953            capacity == 0 ? 0 : used * 100 / capacity,
 954            bottom(), top(), end(),
 955            vs->high_boundary());
 956 }
 957 
 958 #ifdef ASSERT
 959 void VirtualSpaceNode::mangle() {
 960   size_t word_size = capacity_words_in_vs();
 961   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
 962 }
 963 #endif // ASSERT
 964 
 965 // VirtualSpaceList methods
 966 // Space allocated from the VirtualSpace
 967 
 968 VirtualSpaceList::~VirtualSpaceList() {
 969   VirtualSpaceListIterator iter(virtual_space_list());
 970   while (iter.repeat()) {
 971     VirtualSpaceNode* vsl = iter.get_next();
 972     delete vsl;
 973   }
 974 }
 975 
 976 void VirtualSpaceList::inc_reserved_words(size_t v) {
 977   assert_lock_strong(SpaceManager::expand_lock());
 978   _reserved_words = _reserved_words + v;
 979 }
 980 void VirtualSpaceList::dec_reserved_words(size_t v) {
 981   assert_lock_strong(SpaceManager::expand_lock());
 982   _reserved_words = _reserved_words - v;
 983 }
 984 
 985 #define assert_committed_below_limit()                             \
 986   assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,      \
 987       err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
 988               " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
 989           MetaspaceAux::committed_bytes(), MaxMetaspaceSize));
 990 
 991 void VirtualSpaceList::inc_committed_words(size_t v) {
 992   assert_lock_strong(SpaceManager::expand_lock());
 993   _committed_words = _committed_words + v;
 994 
 995   assert_committed_below_limit();
 996 }
 997 void VirtualSpaceList::dec_committed_words(size_t v) {
 998   assert_lock_strong(SpaceManager::expand_lock());
 999   _committed_words = _committed_words - v;
1000 
1001   assert_committed_below_limit();
1002 }
1003 
1004 void VirtualSpaceList::inc_virtual_space_count() {
1005   assert_lock_strong(SpaceManager::expand_lock());
1006   _virtual_space_count++;
1007 }
1008 void VirtualSpaceList::dec_virtual_space_count() {
1009   assert_lock_strong(SpaceManager::expand_lock());
1010   _virtual_space_count--;
1011 }
1012 
1013 void ChunkManager::remove_chunk(Metachunk* chunk) {
1014   size_t word_size = chunk->word_size();
1015   ChunkIndex index = list_index(word_size);
1016   if (index != HumongousIndex) {
1017     free_chunks(index)->remove_chunk(chunk);
1018   } else {
1019     humongous_dictionary()->remove_chunk(chunk);
1020   }
1021 
1022   // Chunk is being removed from the chunks free list.
1023   dec_free_chunks_total(chunk->word_size());
1024 }
1025 
1026 // Walk the list of VirtualSpaceNodes and delete
1027 // nodes with a 0 container_count.  Remove Metachunks in
1028 // the node from their respective freelists.
1029 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1030   assert_lock_strong(SpaceManager::expand_lock());
1031   // Don't use a VirtualSpaceListIterator because this
1032   // list is being changed and a straightforward use of an iterator is not safe.
1033   VirtualSpaceNode* purged_vsl = NULL;
1034   VirtualSpaceNode* prev_vsl = virtual_space_list();
1035   VirtualSpaceNode* next_vsl = prev_vsl;
1036   while (next_vsl != NULL) {
1037     VirtualSpaceNode* vsl = next_vsl;
1038     next_vsl = vsl->next();
1039     // Don't free the current virtual space since it will likely
1040     // be needed soon.
1041     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1042       // Unlink it from the list
1043       if (prev_vsl == vsl) {
1044         // This is the case of the current node being the first node.
1045         assert(vsl == virtual_space_list(), "Expected to be the first node");
1046         set_virtual_space_list(vsl->next());
1047       } else {
1048         prev_vsl->set_next(vsl->next());
1049       }
1050 
1051       vsl->purge(chunk_manager);
1052       dec_reserved_words(vsl->reserved_words());
1053       dec_committed_words(vsl->committed_words());
1054       dec_virtual_space_count();
1055       purged_vsl = vsl;
1056       delete vsl;
1057     } else {
1058       prev_vsl = vsl;
1059     }
1060   }
1061 #ifdef ASSERT
1062   if (purged_vsl != NULL) {
1063     // List should be stable enough to use an iterator here.
1064     VirtualSpaceListIterator iter(virtual_space_list());
1065     while (iter.repeat()) {
1066       VirtualSpaceNode* vsl = iter.get_next();
1067       assert(vsl != purged_vsl, "Purge of vsl failed");
1068     }
1069   }
1070 #endif
1071 }
1072 
1073 void VirtualSpaceList::retire_current_virtual_space() {
1074   assert_lock_strong(SpaceManager::expand_lock());
1075 
1076   VirtualSpaceNode* vsn = current_virtual_space();
1077 
1078   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
1079                                   Metaspace::chunk_manager_metadata();
1080 
1081   vsn->retire(cm);
1082 }
1083 
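     // Carve the remaining committed space of this node into free chunks,
     // working down from MediumIndex to SpecializedIndex, and return them to
     // the chunk manager.  Every chunk size is a multiple of the smallest
     // chunk size, so no committed words are left behind (asserted below).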
1084 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
1085   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
1086     ChunkIndex index = (ChunkIndex)i;
1087     size_t chunk_size = chunk_manager->free_chunks(index)->size();
1088 
1089     while (free_words_in_vs() >= chunk_size) {
1090       DEBUG_ONLY(verify_container_count();)
1091       Metachunk* chunk = get_chunk_vs(chunk_size);
1092       assert(chunk != NULL, "allocation should have been successful");
1093 
1094       chunk_manager->return_chunks(index, chunk);
1095       chunk_manager->inc_free_chunks_total(chunk_size);
1096       DEBUG_ONLY(verify_container_count();)
1097     }
1098   }
1099   assert(free_words_in_vs() == 0, "should be empty now");
1100 }
1101 
1102 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
1103                                    _is_class(false),
1104                                    _virtual_space_list(NULL),
1105                                    _current_virtual_space(NULL),
1106                                    _reserved_words(0),
1107                                    _committed_words(0),
1108                                    _virtual_space_count(0) {
1109   MutexLockerEx cl(SpaceManager::expand_lock(),
1110                    Mutex::_no_safepoint_check_flag);
1111   create_new_virtual_space(word_size);
1112 }
1113 
1114 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
1115                                    _is_class(true),
1116                                    _virtual_space_list(NULL),
1117                                    _current_virtual_space(NULL),
1118                                    _reserved_words(0),
1119                                    _committed_words(0),
1120                                    _virtual_space_count(0) {
1121   MutexLockerEx cl(SpaceManager::expand_lock(),
1122                    Mutex::_no_safepoint_check_flag);
1123   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
1124   bool succeeded = class_entry->initialize();
1125   if (succeeded) {
1126     link_vs(class_entry);
1127   }
1128 }
1129 
1130 size_t VirtualSpaceList::free_bytes() {
1131   return virtual_space_list()->free_words_in_vs() * BytesPerWord;
1132 }
1133 
1134 // Allocate another meta virtual space and add it to the list.
1135 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
1136   assert_lock_strong(SpaceManager::expand_lock());
1137 
1138   if (is_class()) {
1139     assert(false, "We currently don't support more than one VirtualSpace for"
1140                   " the compressed class space. The initialization of the"
1141                   " CCS uses another code path and should not hit this path.");
1142     return false;
1143   }
1144 
1145   if (vs_word_size == 0) {
1146     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
1147     return false;
1148   }
1149 
1150   // Reserve the space
1151   size_t vs_byte_size = vs_word_size * BytesPerWord;
1152   assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());
1153 
1154   // Allocate the meta virtual space and initialize it.
1155   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
1156   if (!new_entry->initialize()) {
1157     delete new_entry;
1158     return false;
1159   } else {
1160     assert(new_entry->reserved_words() == vs_word_size,
1161         "Reserved memory size differs from requested memory size");
1162     // ensure lock-free iteration sees fully initialized node
1163     OrderAccess::storestore();
1164     link_vs(new_entry);
1165     return true;
1166   }
1167 }
1168 
1169 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1170   if (virtual_space_list() == NULL) {
1171       set_virtual_space_list(new_entry);
1172   } else {
1173     current_virtual_space()->set_next(new_entry);
1174   }
1175   set_current_virtual_space(new_entry);
1176   inc_reserved_words(new_entry->reserved_words());
1177   inc_committed_words(new_entry->committed_words());
1178   inc_virtual_space_count();
1179 #ifdef ASSERT
1180   new_entry->mangle();
1181 #endif
1182   if (TraceMetavirtualspaceAllocation && Verbose) {
1183     VirtualSpaceNode* vsl = current_virtual_space();
1184     vsl->print_on(gclog_or_tty);
1185   }
1186 }
1187 
1188 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1189                                       size_t min_words,
1190                                       size_t preferred_words) {
1191   size_t before = node->committed_words();
1192 
1193   bool result = node->expand_by(min_words, preferred_words);
1194 
1195   size_t after = node->committed_words();
1196 
1197   // after and before can be the same if the memory was pre-committed.
1198   assert(after >= before, "Inconsistency");
1199   inc_committed_words(after - before);
1200 
1201   return result;
1202 }
1203 
1204 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
1205   assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
1206   assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
1207   assert(min_words <= preferred_words, "Invalid arguments");
1208 
1209   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
1210     return  false;
1211   }
1212 
1213   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
1214   if (allowed_expansion_words < min_words) {
1215     return false;
1216   }
1217 
1218   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
1219 
1220   // Commit more memory from the current virtual space.
1221   bool vs_expanded = expand_node_by(current_virtual_space(),
1222                                     min_words,
1223                                     max_expansion_words);
1224   if (vs_expanded) {
1225     return true;
1226   }
1227   retire_current_virtual_space();
1228 
1229   // Get another virtual space.
1230   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
1231   grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
1232 
1233   if (create_new_virtual_space(grow_vs_words)) {
1234     if (current_virtual_space()->is_pre_committed()) {
1235       // The memory was pre-committed, so we are done here.
1236       assert(min_words <= current_virtual_space()->committed_words(),
1237           "The new VirtualSpace was pre-committed, so it "
1238           "should be large enough to fit the alloc request.");
1239       return true;
1240     }
1241 
1242     return expand_node_by(current_virtual_space(),
1243                           min_words,
1244                           max_expansion_words);
1245   }
1246 
1247   return false;
1248 }
1249 
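     // First try to cut the chunk from the current virtual space; if that
     // fails, expand the list (committing at least enough for this chunk and
     // preferably a bunch of medium chunks) and retry the allocation.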
1250 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
1251                                            size_t grow_chunks_by_words,
1252                                            size_t medium_chunk_bunch) {
1253 
1254   // Allocate a chunk out of the current virtual space.
1255   Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1256 
1257   if (next != NULL) {
1258     return next;
1259   }
1260 
1261   // The expand amount is currently only determined by the requested sizes
1262   // and not how much committed memory is left in the current virtual space.
1263 
1264   size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
1265   size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
1266   if (min_word_size >= preferred_word_size) {
1267     // Can happen when humongous chunks are allocated.
1268     preferred_word_size = min_word_size;
1269   }
1270 
1271   bool expanded = expand_by(min_word_size, preferred_word_size);
1272   if (expanded) {
1273     next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1274     assert(next != NULL, "The allocation was expected to succeed after the expansion");
1275   }
1276 
1277   return next;
1278 }
1279 
1280 void VirtualSpaceList::print_on(outputStream* st) const {
1281   if (TraceMetadataChunkAllocation && Verbose) {
1282     VirtualSpaceListIterator iter(virtual_space_list());
1283     while (iter.repeat()) {
1284       VirtualSpaceNode* node = iter.get_next();
1285       node->print_on(st);
1286     }
1287   }
1288 }
1289 
1290 bool VirtualSpaceList::contains(const void *ptr) {
1291   VirtualSpaceNode* list = virtual_space_list();
1292   VirtualSpaceListIterator iter(list);
1293   while (iter.repeat()) {
1294     VirtualSpaceNode* node = iter.get_next();
1295     if (node->reserved()->contains(ptr)) {
1296       return true;
1297     }
1298   }
1299   return false;
1300 }
1301 
1302 
1303 // MetaspaceGC methods
1304 
1305 // VM_CollectForMetadataAllocation is the vm operation used to GC.
1306 // Within the VM operation after the GC the attempt to allocate the metadata
1307 // should succeed.  If the GC did not free enough space for the metaspace
1308 // allocation, the HWM is increased so that another virtualspace will be
1309 // allocated for the metadata.  With perm gen the increase in the perm
1310 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
1311 // metaspace policy uses those as the small and large steps for the HWM.
1312 //
1313 // After the GC the compute_new_size() for MetaspaceGC is called to
1314 // resize the capacity of the metaspaces.  The current implementation
1315 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
1316 // to resize the Java heap by some GC's.  New flags can be implemented
1317 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
1318 // free space is desirable in the metaspace capacity to decide how much
1319 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
1320 // free space is desirable in the metaspace capacity before decreasing
1321 // the HWM.
1322 
1323 // Calculate the amount to increase the high water mark (HWM).
1324 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1325 // another expansion is not requested too soon.  If that is not
1326 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
1327 // If that is still not enough, expand by the size of the allocation
1328 // plus some.
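     // For example: a request whose aligned size is at most MinMetaspaceExpansion
     // raises the HWM by MinMetaspaceExpansion; one between MinMetaspaceExpansion
     // and MaxMetaspaceExpansion raises it by MaxMetaspaceExpansion; anything
     // larger raises it by the aligned request size plus MinMetaspaceExpansion.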
1329 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
1330   size_t min_delta = MinMetaspaceExpansion;
1331   size_t max_delta = MaxMetaspaceExpansion;
1332   size_t delta = align_size_up(bytes, Metaspace::commit_alignment());
1333 
1334   if (delta <= min_delta) {
1335     delta = min_delta;
1336   } else if (delta <= max_delta) {
1337     // Don't want to hit the high water mark on the next
1338     // allocation so make the delta greater than just enough
1339     // for this allocation.
1340     delta = max_delta;
1341   } else {
1342     // This allocation is large but the next ones are probably not
1343     // so increase by the minimum.
1344     delta = delta + min_delta;
1345   }
1346 
1347   assert_is_size_aligned(delta, Metaspace::commit_alignment());
1348 
1349   return delta;
1350 }
1351 
1352 size_t MetaspaceGC::capacity_until_GC() {
1353   size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
1354   assert(value >= MetaspaceSize, "Not initialized properly?");
1355   return value;
1356 }
1357 
1358 size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
1359   assert_is_size_aligned(v, Metaspace::commit_alignment());
1360 
1361   return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
1362 }
1363 
1364 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
1365   assert_is_size_aligned(v, Metaspace::commit_alignment());
1366 
1367   return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
1368 }
1369 
1370 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
1371   // Check if the compressed class space is full.
1372   if (is_class && Metaspace::using_class_space()) {
1373     size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
1374     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
1375       return false;
1376     }
1377   }
1378 
1379   // Check if the user has imposed a limit on the metaspace memory.
1380   size_t committed_bytes = MetaspaceAux::committed_bytes();
1381   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
1382     return false;
1383   }
1384 
1385   return true;
1386 }
1387 
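     // Returns how many words may still be committed without triggering a GC:
     // the headroom below capacity_until_GC(), clamped by MaxMetaspaceSize.
     // During VM initialization, or while the GC_locker blocks collections,
     // only the MaxMetaspaceSize limit applies.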
1388 size_t MetaspaceGC::allowed_expansion() {
1389   size_t committed_bytes = MetaspaceAux::committed_bytes();
1390 
1391   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
1392 
1393   // Always grant expansion if we are initializing the JVM,
1394   // or if the GC_locker is preventing GCs.
1395   if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) {
1396     return left_until_max / BytesPerWord;
1397   }
1398 
1399   size_t capacity_until_gc = capacity_until_GC();
1400 
1401   if (capacity_until_gc <= committed_bytes) {
1402     return 0;
1403   }
1404 
1405   size_t left_until_GC = capacity_until_gc - committed_bytes;
1406   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
1407 
1408   return left_to_commit / BytesPerWord;
1409 }
1410 
1411 void MetaspaceGC::compute_new_size() {
1412   assert(_shrink_factor <= 100, "invalid shrink factor");
1413   uint current_shrink_factor = _shrink_factor;
1414   _shrink_factor = 0;
1415 
1416   const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
1417   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1418 
1419   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1420   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1421 
1422   const double min_tmp = used_after_gc / maximum_used_percentage;
1423   size_t minimum_desired_capacity =
1424     (size_t)MIN2(min_tmp, double(max_uintx));
1425   // Don't shrink below the initial metaspace size (MetaspaceSize)
1426   minimum_desired_capacity = MAX2(minimum_desired_capacity,
1427                                   MetaspaceSize);
1428 
1429   if (PrintGCDetails && Verbose) {
1430     gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
1431     gclog_or_tty->print_cr("  "
1432                   "  minimum_free_percentage: %6.2f"
1433                   "  maximum_used_percentage: %6.2f",
1434                   minimum_free_percentage,
1435                   maximum_used_percentage);
1436     gclog_or_tty->print_cr("  "
1437                   "   used_after_gc       : %6.1fKB",
1438                   used_after_gc / (double) K);
1439   }
1440 
1441 
1442   size_t shrink_bytes = 0;
1443   if (capacity_until_GC < minimum_desired_capacity) {
1444     // If the minimum desired capacity is above the current metaspace HWM,
1445     // increase the HWM.
1446     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1447     expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
1448     // Don't expand unless it's significant
1449     if (expand_bytes >= MinMetaspaceExpansion) {
1450       MetaspaceGC::inc_capacity_until_GC(expand_bytes);
1451     }
1452     if (PrintGCDetails && Verbose) {
1453       size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC();
1454       gclog_or_tty->print_cr("    expanding:"
1455                     "  minimum_desired_capacity: %6.1fKB"
1456                     "  expand_bytes: %6.1fKB"
1457                     "  MinMetaspaceExpansion: %6.1fKB"
1458                     "  new metaspace HWM:  %6.1fKB",
1459                     minimum_desired_capacity / (double) K,
1460                     expand_bytes / (double) K,
1461                     MinMetaspaceExpansion / (double) K,
1462                     new_capacity_until_GC / (double) K);
1463     }
1464     return;
1465   }
1466 
1467   // No expansion, now see if we want to shrink
1468   // We would never want to shrink more than this
1469   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1470   assert(capacity_until_GC >= minimum_desired_capacity,
1471          err_msg("max_shrink_bytes " SIZE_FORMAT, max_shrink_bytes));
1472 
1473   // Should shrinking be considered?
1474   if (MaxMetaspaceFreeRatio < 100) {
1475     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1476     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1477     const double max_tmp = used_after_gc / minimum_used_percentage;
1478     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1479     maximum_desired_capacity = MAX2(maximum_desired_capacity,
1480                                     MetaspaceSize);
1481     if (PrintGCDetails && Verbose) {
1482       gclog_or_tty->print_cr("  "
1483                              "  maximum_free_percentage: %6.2f"
1484                              "  minimum_used_percentage: %6.2f",
1485                              maximum_free_percentage,
1486                              minimum_used_percentage);
1487       gclog_or_tty->print_cr("  "
1488                              "  minimum_desired_capacity: %6.1fKB"
1489                              "  maximum_desired_capacity: %6.1fKB",
1490                              minimum_desired_capacity / (double) K,
1491                              maximum_desired_capacity / (double) K);
1492     }
1493 
1494     assert(minimum_desired_capacity <= maximum_desired_capacity,
1495            "sanity check");
1496 
1497     if (capacity_until_GC > maximum_desired_capacity) {
1498       // Capacity too large, compute shrinking size
1499       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1500       // We don't want to shrink all the way back to initSize if people call
1501       // System.gc(), because some programs do that between "phases" and then
1502       // we'd just have to grow the metaspace up again for the next phase.  So
1503       // we damp the shrinking: 0% on the first call, 10% on the second call,
1504       // 40% on the third call, and 100% by the fourth call.  But if we
1505       // recompute size without shrinking, it goes back to 0%.
1506       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1507 
1508       shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1509 
1510       assert(shrink_bytes <= max_shrink_bytes,
1511         err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1512           shrink_bytes, max_shrink_bytes));
1513       if (current_shrink_factor == 0) {
1514         _shrink_factor = 10;
1515       } else {
1516         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1517       }
1518       if (PrintGCDetails && Verbose) {
1519         gclog_or_tty->print_cr("  "
1520                       "  shrinking:"
1521                       "  initSize: %.1fK"
1522                       "  maximum_desired_capacity: %.1fK",
1523                       MetaspaceSize / (double) K,
1524                       maximum_desired_capacity / (double) K);
1525         gclog_or_tty->print_cr("  "
1526                       "  shrink_bytes: %.1fK"
1527                       "  current_shrink_factor: %d"
1528                       "  new shrink factor: %d"
1529                       "  MinMetaspaceExpansion: %.1fK",
1530                       shrink_bytes / (double) K,
1531                       current_shrink_factor,
1532                       _shrink_factor,
1533                       MinMetaspaceExpansion / (double) K);
1534       }
1535     }
1536   }
1537 
1538   // Don't shrink unless it's significant
1539   if (shrink_bytes >= MinMetaspaceExpansion &&
1540       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1541     MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1542   }
1543 }
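
// A rough worked example of the shrink damping above, assuming (purely
// hypothetically) that capacity_until_GC exceeds maximum_desired_capacity
// by 100M on four consecutive calls:
//   1st call: 100M *   0 / 100 = 0    (_shrink_factor becomes 10)
//   2nd call: 100M *  10 / 100 = 10M  (_shrink_factor becomes 40)
//   3rd call: 100M *  40 / 100 = 40M  (_shrink_factor becomes 100)
//   4th call: 100M * 100 / 100 = 100M
// Each amount is aligned down to the commit alignment and only applied if it
// is at least MinMetaspaceExpansion and leaves at least MetaspaceSize.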
1544 
1545 // Metadebug methods
1546 
1547 void Metadebug::init_allocation_fail_alot_count() {
1548   if (MetadataAllocationFailALot) {
1549     _allocation_fail_alot_count =
1550       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1551   }
1552 }
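
// The expression above picks a pseudo-random count in roughly the range
// [1, MetadataAllocationFailALotInterval]: os::random() / (max_jint + 1.0)
// is approximately uniform in [0, 1), so after scaling and adding one it
// determines how many test_metadata_failure() calls pass before the next
// induced allocation failure.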
1553 
1554 #ifdef ASSERT
1555 bool Metadebug::test_metadata_failure() {
1556   if (MetadataAllocationFailALot &&
1557       Threads::is_vm_complete()) {
1558     if (_allocation_fail_alot_count > 0) {
1559       _allocation_fail_alot_count--;
1560     } else {
1561       if (TraceMetadataChunkAllocation && Verbose) {
1562         gclog_or_tty->print_cr("Metadata allocation failing for "
1563                                "MetadataAllocationFailALot");
1564       }
1565       init_allocation_fail_alot_count();
1566       return true;
1567     }
1568   }
1569   return false;
1570 }
1571 #endif
1572 
1573 // ChunkManager methods
1574 
1575 size_t ChunkManager::free_chunks_total_words() {
1576   return _free_chunks_total;
1577 }
1578 
1579 size_t ChunkManager::free_chunks_total_bytes() {
1580   return free_chunks_total_words() * BytesPerWord;
1581 }
1582 
1583 size_t ChunkManager::free_chunks_count() {
1584 #ifdef ASSERT
1585   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1586     MutexLockerEx cl(SpaceManager::expand_lock(),
1587                      Mutex::_no_safepoint_check_flag);
1588     // This lock is only needed in debug because the verification
1589     // of _free_chunks_count walks the lists of free chunks.
1590     slow_locked_verify_free_chunks_count();
1591   }
1592 #endif
1593   return _free_chunks_count;
1594 }
1595 
1596 void ChunkManager::locked_verify_free_chunks_total() {
1597   assert_lock_strong(SpaceManager::expand_lock());
1598   assert(sum_free_chunks() == _free_chunks_total,
1599     err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
1600            " same as sum " SIZE_FORMAT, _free_chunks_total,
1601            sum_free_chunks()));
1602 }
1603 
1604 void ChunkManager::verify_free_chunks_total() {
1605   MutexLockerEx cl(SpaceManager::expand_lock(),
1606                      Mutex::_no_safepoint_check_flag);
1607   locked_verify_free_chunks_total();
1608 }
1609 
1610 void ChunkManager::locked_verify_free_chunks_count() {
1611   assert_lock_strong(SpaceManager::expand_lock());
1612   assert(sum_free_chunks_count() == _free_chunks_count,
1613     err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
1614            " same as sum " SIZE_FORMAT, _free_chunks_count,
1615            sum_free_chunks_count()));
1616 }
1617 
1618 void ChunkManager::verify_free_chunks_count() {
1619 #ifdef ASSERT
1620   MutexLockerEx cl(SpaceManager::expand_lock(),
1621                      Mutex::_no_safepoint_check_flag);
1622   locked_verify_free_chunks_count();
1623 #endif
1624 }
1625 
1626 void ChunkManager::verify() {
1627   MutexLockerEx cl(SpaceManager::expand_lock(),
1628                      Mutex::_no_safepoint_check_flag);
1629   locked_verify();
1630 }
1631 
1632 void ChunkManager::locked_verify() {
1633   locked_verify_free_chunks_count();
1634   locked_verify_free_chunks_total();
1635 }
1636 
1637 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1638   assert_lock_strong(SpaceManager::expand_lock());
1639   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1640                 _free_chunks_total, _free_chunks_count);
1641 }
1642 
1643 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1644   assert_lock_strong(SpaceManager::expand_lock());
1645   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1646                 sum_free_chunks(), sum_free_chunks_count());
1647 }
1648 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1649   return &_free_chunks[index];
1650 }
1651 
1652 // These methods that sum the free chunk lists are used in printing
1653 // methods that are used in product builds.
1654 size_t ChunkManager::sum_free_chunks() {
1655   assert_lock_strong(SpaceManager::expand_lock());
1656   size_t result = 0;
1657   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1658     ChunkList* list = free_chunks(i);
1659 
1660     if (list == NULL) {
1661       continue;
1662     }
1663 
1664     result = result + list->count() * list->size();
1665   }
1666   result = result + humongous_dictionary()->total_size();
1667   return result;
1668 }
1669 
1670 size_t ChunkManager::sum_free_chunks_count() {
1671   assert_lock_strong(SpaceManager::expand_lock());
1672   size_t count = 0;
1673   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1674     ChunkList* list = free_chunks(i);
1675     if (list == NULL) {
1676       continue;
1677     }
1678     count = count + list->count();
1679   }
1680   count = count + humongous_dictionary()->total_free_blocks();
1681   return count;
1682 }
1683 
1684 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1685   ChunkIndex index = list_index(word_size);
1686   assert(index < HumongousIndex, "No humongous list");
1687   return free_chunks(index);
1688 }
1689 
1690 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1691   assert_lock_strong(SpaceManager::expand_lock());
1692 
1693   slow_locked_verify();
1694 
1695   Metachunk* chunk = NULL;
1696   if (list_index(word_size) != HumongousIndex) {
1697     ChunkList* free_list = find_free_chunks_list(word_size);
1698     assert(free_list != NULL, "Sanity check");
1699 
1700     chunk = free_list->head();
1701 
1702     if (chunk == NULL) {
1703       return NULL;
1704     }
1705 
1706     // Remove the chunk as the head of the list.
1707     free_list->remove_chunk(chunk);
1708 
1709     if (TraceMetadataChunkAllocation && Verbose) {
1710       gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1711                              PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1712                              free_list, chunk, chunk->word_size());
1713     }
1714   } else {
1715     chunk = humongous_dictionary()->get_chunk(
1716       word_size,
1717       FreeBlockDictionary<Metachunk>::atLeast);
1718 
1719     if (chunk == NULL) {
1720       return NULL;
1721     }
1722 
1723     if (TraceMetadataHumongousAllocation) {
1724       size_t waste = chunk->word_size() - word_size;
1725       gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1726                              SIZE_FORMAT " for requested size " SIZE_FORMAT
1727                              " waste " SIZE_FORMAT,
1728                              chunk->word_size(), word_size, waste);
1729     }
1730   }
1731 
1732   // Chunk is being removed from the chunks free list.
1733   dec_free_chunks_total(chunk->word_size());
1734 
1735   // Remove it from the links to this freelist
1736   chunk->set_next(NULL);
1737   chunk->set_prev(NULL);
1738 #ifdef ASSERT
1739   // Chunk is no longer on any freelist. Setting to false makes
1740   // container_count_slow() work.
1741   chunk->set_is_tagged_free(false);
1742 #endif
1743   chunk->container()->inc_container_count();
1744 
1745   slow_locked_verify();
1746   return chunk;
1747 }
1748 
1749 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1750   assert_lock_strong(SpaceManager::expand_lock());
1751   slow_locked_verify();
1752 
1753   // Take from the beginning of the list
1754   Metachunk* chunk = free_chunks_get(word_size);
1755   if (chunk == NULL) {
1756     return NULL;
1757   }
1758 
1759   assert((word_size <= chunk->word_size()) ||
1760          (list_index(chunk->word_size()) == HumongousIndex),
1761          "Non-humongous variable sized chunk");
1762   if (TraceMetadataChunkAllocation) {
1763     size_t list_count;
1764     if (list_index(word_size) < HumongousIndex) {
1765       ChunkList* list = find_free_chunks_list(word_size);
1766       list_count = list->count();
1767     } else {
1768       list_count = humongous_dictionary()->total_count();
1769     }
1770     gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1771                         PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1772                         this, chunk, chunk->word_size(), list_count);
1773     locked_print_free_chunks(gclog_or_tty);
1774   }
1775 
1776   return chunk;
1777 }
1778 
1779 void ChunkManager::print_on(outputStream* out) const {
1780   if (PrintFLSStatistics != 0) {
1781     const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
1782   }
1783 }
1784 
1785 // SpaceManager methods
1786 
1787 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1788                                            size_t* chunk_word_size,
1789                                            size_t* class_chunk_word_size) {
1790   switch (type) {
1791   case Metaspace::BootMetaspaceType:
1792     *chunk_word_size = Metaspace::first_chunk_word_size();
1793     *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1794     break;
1795   case Metaspace::ROMetaspaceType:
1796     *chunk_word_size = SharedReadOnlySize / wordSize;
1797     *class_chunk_word_size = ClassSpecializedChunk;
1798     break;
1799   case Metaspace::ReadWriteMetaspaceType:
1800     *chunk_word_size = SharedReadWriteSize / wordSize;
1801     *class_chunk_word_size = ClassSpecializedChunk;
1802     break;
1803   case Metaspace::AnonymousMetaspaceType:
1804   case Metaspace::ReflectionMetaspaceType:
1805     *chunk_word_size = SpecializedChunk;
1806     *class_chunk_word_size = ClassSpecializedChunk;
1807     break;
1808   default:
1809     *chunk_word_size = SmallChunk;
1810     *class_chunk_word_size = ClassSmallChunk;
1811     break;
1812   }
1813   assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1814     err_msg("Initial chunks sizes bad: data  " SIZE_FORMAT
1815             " class " SIZE_FORMAT,
1816             *chunk_word_size, *class_chunk_word_size));
1817 }
1818 
1819 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1820   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1821   size_t free = 0;
1822   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1823     Metachunk* chunk = chunks_in_use(i);
1824     while (chunk != NULL) {
1825       free += chunk->free_word_size();
1826       chunk = chunk->next();
1827     }
1828   }
1829   return free;
1830 }
1831 
1832 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1833   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1834   size_t result = 0;
1835   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1836    result += sum_waste_in_chunks_in_use(i);
1837   }
1838 
1839   return result;
1840 }
1841 
1842 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1843   size_t result = 0;
1844   Metachunk* chunk = chunks_in_use(index);
1845   // Count the free space in all the chunks but not the
1846   // current chunk from which allocations are still being done.
1847   while (chunk != NULL) {
1848     if (chunk != current_chunk()) {
1849       result += chunk->free_word_size();
1850     }
1851     chunk = chunk->next();
1852   }
1853   return result;
1854 }
1855 
1856 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1857   // For CMS use "allocated_chunks_words()", which does not need the
1858   // Metaspace lock.  For the other collectors sum over the
1859   // chunks-in-use lists.  Summing the lists is too expensive to use
1860   // in the product, so "allocated_chunks_words()" should be used there,
1861   // but keeping both paths allows checking that
1862   // "allocated_chunks_words()" returns the same value as
1863   // sum_capacity_in_chunks_in_use(), which is the definitive
1864   // answer.
1865   if (UseConcMarkSweepGC) {
1866     return allocated_chunks_words();
1867   } else {
1868     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1869     size_t sum = 0;
1870     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1871       Metachunk* chunk = chunks_in_use(i);
1872       while (chunk != NULL) {
1873         sum += chunk->word_size();
1874         chunk = chunk->next();
1875       }
1876     }
1877     return sum;
1878   }
1879 }
1880 
1881 size_t SpaceManager::sum_count_in_chunks_in_use() {
1882   size_t count = 0;
1883   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1884     count = count + sum_count_in_chunks_in_use(i);
1885   }
1886 
1887   return count;
1888 }
1889 
1890 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
1891   size_t count = 0;
1892   Metachunk* chunk = chunks_in_use(i);
1893   while (chunk != NULL) {
1894     count++;
1895     chunk = chunk->next();
1896   }
1897   return count;
1898 }
1899 
1900 
1901 size_t SpaceManager::sum_used_in_chunks_in_use() const {
1902   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1903   size_t used = 0;
1904   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1905     Metachunk* chunk = chunks_in_use(i);
1906     while (chunk != NULL) {
1907       used += chunk->used_word_size();
1908       chunk = chunk->next();
1909     }
1910   }
1911   return used;
1912 }
1913 
1914 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
1915 
1916   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1917     Metachunk* chunk = chunks_in_use(i);
1918     st->print("SpaceManager: %s " PTR_FORMAT,
1919                  chunk_size_name(i), chunk);
1920     if (chunk != NULL) {
1921       st->print_cr(" free " SIZE_FORMAT,
1922                    chunk->free_word_size());
1923     } else {
1924       st->print_cr("");
1925     }
1926   }
1927 
1928   chunk_manager()->locked_print_free_chunks(st);
1929   chunk_manager()->locked_print_sum_free_chunks(st);
1930 }
1931 
1932 size_t SpaceManager::calc_chunk_size(size_t word_size) {
1933 
1934   // Decide between a small chunk and a medium chunk.  Up to
1935   // _small_chunk_limit small chunks can be allocated but
1936   // once a medium chunk has been allocated, no more small
1937   // chunks will be allocated.
1938   size_t chunk_word_size;
1939   if (chunks_in_use(MediumIndex) == NULL &&
1940       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
1941     chunk_word_size = (size_t) small_chunk_size();
1942     if (word_size + Metachunk::overhead() > small_chunk_size()) {
1943       chunk_word_size = medium_chunk_size();
1944     }
1945   } else {
1946     chunk_word_size = medium_chunk_size();
1947   }
1948 
1949   // Might still need a humongous chunk.  Enforce
1950   // humongous allocation sizes to be aligned up to
1951   // the smallest chunk size.
1952   size_t if_humongous_sized_chunk =
1953     align_size_up(word_size + Metachunk::overhead(),
1954                   smallest_chunk_size());
1955   chunk_word_size =
1956     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
1957 
1958   assert(!SpaceManager::is_humongous(word_size) ||
1959          chunk_word_size == if_humongous_sized_chunk,
1960          err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
1961                  " chunk_word_size " SIZE_FORMAT,
1962                  word_size, chunk_word_size));
1963   if (TraceMetadataHumongousAllocation &&
1964       SpaceManager::is_humongous(word_size)) {
1965     gclog_or_tty->print_cr("Metadata humongous allocation:");
1966     gclog_or_tty->print_cr("  word_size " PTR_FORMAT, word_size);
1967     gclog_or_tty->print_cr("  chunk_word_size " PTR_FORMAT,
1968                            chunk_word_size);
1969     gclog_or_tty->print_cr("    chunk overhead " PTR_FORMAT,
1970                            Metachunk::overhead());
1971   }
1972   return chunk_word_size;
1973 }
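
// A hedged sketch of the sizing policy above: while no medium chunk has been
// allocated and fewer than _small_chunk_limit small chunks are in use, a
// request that fits in small_chunk_size() (including Metachunk::overhead())
// is satisfied with a small chunk; otherwise a medium chunk is chosen.
// Requests that are humongous are instead rounded up, overhead included, to
// a multiple of smallest_chunk_size(), and that rounded size becomes the
// chunk size.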
1974 
1975 void SpaceManager::track_metaspace_memory_usage() {
1976   if (is_init_completed()) {
1977     if (is_class()) {
1978       MemoryService::track_compressed_class_memory_usage();
1979     }
1980     MemoryService::track_metaspace_memory_usage();
1981   }
1982 }
1983 
1984 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
1985   assert(vs_list()->current_virtual_space() != NULL,
1986          "Should have been set");
1987   assert(current_chunk() == NULL ||
1988          current_chunk()->allocate(word_size) == NULL,
1989          "Don't need to expand");
1990   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
1991 
1992   if (TraceMetadataChunkAllocation && Verbose) {
1993     size_t words_left = 0;
1994     size_t words_used = 0;
1995     if (current_chunk() != NULL) {
1996       words_left = current_chunk()->free_word_size();
1997       words_used = current_chunk()->used_word_size();
1998     }
1999     gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
2000                            " words " SIZE_FORMAT " words used " SIZE_FORMAT
2001                            " words left",
2002                             word_size, words_used, words_left);
2003   }
2004 
2005   // Get another chunk out of the virtual space
2006   size_t grow_chunks_by_words = calc_chunk_size(word_size);
2007   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2008 
2009   MetaWord* mem = NULL;
2010 
2011   // If a chunk was available, add it to the in-use chunk list
2012   // and do an allocation from it.
2013   if (next != NULL) {
2014     // Add to this manager's list of chunks in use.
2015     add_chunk(next, false);
2016     mem = next->allocate(word_size);
2017   }
2018 
2019   // Track metaspace memory usage statistics.
2020   track_metaspace_memory_usage();
2021 
2022   return mem;
2023 }
2024 
2025 void SpaceManager::print_on(outputStream* st) const {
2026 
2027   for (ChunkIndex i = ZeroIndex;
2028        i < NumberOfInUseLists ;
2029        i = next_chunk_index(i) ) {
2030     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
2031                  chunks_in_use(i),
2032                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2033   }
2034   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2035                " Humongous " SIZE_FORMAT,
2036                sum_waste_in_chunks_in_use(SmallIndex),
2037                sum_waste_in_chunks_in_use(MediumIndex),
2038                sum_waste_in_chunks_in_use(HumongousIndex));
2039   // block free lists
2040   if (block_freelists() != NULL) {
2041     st->print_cr("total in block free lists " SIZE_FORMAT,
2042       block_freelists()->total_size());
2043   }
2044 }
2045 
2046 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2047                            Mutex* lock) :
2048   _mdtype(mdtype),
2049   _allocated_blocks_words(0),
2050   _allocated_chunks_words(0),
2051   _allocated_chunks_count(0),
2052   _lock(lock)
2053 {
2054   initialize();
2055 }
2056 
2057 void SpaceManager::inc_size_metrics(size_t words) {
2058   assert_lock_strong(SpaceManager::expand_lock());
2059   // Running totals of allocated Metachunk words and Metachunk count
2060   // for this SpaceManager.
2061   _allocated_chunks_words = _allocated_chunks_words + words;
2062   _allocated_chunks_count++;
2063   // Global total of capacity in allocated Metachunks
2064   MetaspaceAux::inc_capacity(mdtype(), words);
2065   // Global total of allocated Metablocks.
2066   // used_words_slow() includes the overhead in each
2067   // Metachunk so include it in the used when the
2068   // Metachunk is first added (so only added once per
2069   // Metachunk).
2070   MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2071 }
2072 
2073 void SpaceManager::inc_used_metrics(size_t words) {
2074   // Add to the per SpaceManager total
2075   Atomic::add_ptr(words, &_allocated_blocks_words);
2076   // Add to the global total
2077   MetaspaceAux::inc_used(mdtype(), words);
2078 }
2079 
2080 void SpaceManager::dec_total_from_size_metrics() {
2081   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2082   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2083   // Also deduct the overhead per Metachunk
2084   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2085 }
2086 
2087 void SpaceManager::initialize() {
2088   Metadebug::init_allocation_fail_alot_count();
2089   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2090     _chunks_in_use[i] = NULL;
2091   }
2092   _current_chunk = NULL;
2093   if (TraceMetadataChunkAllocation && Verbose) {
2094     gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
2095   }
2096 }
2097 
2098 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
2099   if (chunks == NULL) {
2100     return;
2101   }
2102   ChunkList* list = free_chunks(index);
2103   assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
2104   assert_lock_strong(SpaceManager::expand_lock());
2105   Metachunk* cur = chunks;
2106 
2107   // This returns chunks one at a time.  If a new
2108   // class List can be created that is a base class
2109   // of FreeList, then something like FreeList::prepend()
2110   // could be used in place of this loop.
2111   while (cur != NULL) {
2112     assert(cur->container() != NULL, "Container should have been set");
2113     cur->container()->dec_container_count();
2114     // Capture the next link before it is changed
2115     // by the call to return_chunk_at_head();
2116     Metachunk* next = cur->next();
2117     DEBUG_ONLY(cur->set_is_tagged_free(true);)
2118     list->return_chunk_at_head(cur);
2119     cur = next;
2120   }
2121 }
2122 
2123 SpaceManager::~SpaceManager() {
2124   // The assert below takes this->_lock, which can't be done while holding expand_lock().
2125   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2126     err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2127             " allocated_chunks_words() " SIZE_FORMAT,
2128             sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
2129 
2130   MutexLockerEx fcl(SpaceManager::expand_lock(),
2131                     Mutex::_no_safepoint_check_flag);
2132 
2133   chunk_manager()->slow_locked_verify();
2134 
2135   dec_total_from_size_metrics();
2136 
2137   if (TraceMetadataChunkAllocation && Verbose) {
2138     gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
2139     locked_print_chunks_in_use_on(gclog_or_tty);
2140   }
2141 
2142   // Do not mangle freed Metachunks.  The chunk size inside Metachunks
2143   // is still needed during the freeing of VirtualSpaceNodes.
2144 
2145   // Have to update before the chunks_in_use lists are emptied
2146   // below.
2147   chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2148                                          sum_count_in_chunks_in_use());
2149 
2150   // Add all the chunks in use by this space manager
2151   // to the global list of free chunks.
2152 
2153   // Follow each list of chunks-in-use and add them to the
2154   // free lists.  Each list is NULL terminated.
2155 
2156   for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2157     if (TraceMetadataChunkAllocation && Verbose) {
2158       gclog_or_tty->print_cr("returned %d %s chunks to freelist",
2159                              sum_count_in_chunks_in_use(i),
2160                              chunk_size_name(i));
2161     }
2162     Metachunk* chunks = chunks_in_use(i);
2163     chunk_manager()->return_chunks(i, chunks);
2164     set_chunks_in_use(i, NULL);
2165     if (TraceMetadataChunkAllocation && Verbose) {
2166       gclog_or_tty->print_cr("updated freelist count %d %s",
2167                              chunk_manager()->free_chunks(i)->count(),
2168                              chunk_size_name(i));
2169     }
2170     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2171   }
2172 
2173   // The medium chunk case may be optimized by passing the head and
2174   // tail of the medium chunk list to add_at_head().  The tail is often
2175   // the current chunk but there are probably exceptions.
2176 
2177   // Humongous chunks
2178   if (TraceMetadataChunkAllocation && Verbose) {
2179     gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
2180                             sum_count_in_chunks_in_use(HumongousIndex),
2181                             chunk_size_name(HumongousIndex));
2182     gclog_or_tty->print("Humongous chunk dictionary: ");
2183   }
2184   // Humongous chunks are never the current chunk.
2185   Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2186 
2187   while (humongous_chunks != NULL) {
2188 #ifdef ASSERT
2189     humongous_chunks->set_is_tagged_free(true);
2190 #endif
2191     if (TraceMetadataChunkAllocation && Verbose) {
2192       gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2193                           humongous_chunks,
2194                           humongous_chunks->word_size());
2195     }
2196     assert(humongous_chunks->word_size() == (size_t)
2197            align_size_up(humongous_chunks->word_size(),
2198                              smallest_chunk_size()),
2199            err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2200                    " granularity " SIZE_FORMAT,
2201                    humongous_chunks->word_size(), smallest_chunk_size()));
2202     Metachunk* next_humongous_chunks = humongous_chunks->next();
2203     humongous_chunks->container()->dec_container_count();
2204     chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2205     humongous_chunks = next_humongous_chunks;
2206   }
2207   if (TraceMetadataChunkAllocation && Verbose) {
2208     gclog_or_tty->print_cr("");
2209     gclog_or_tty->print_cr("updated dictionary count %d %s",
2210                      chunk_manager()->humongous_dictionary()->total_count(),
2211                      chunk_size_name(HumongousIndex));
2212   }
2213   chunk_manager()->slow_locked_verify();
2214 }
2215 
2216 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2217   switch (index) {
2218     case SpecializedIndex:
2219       return "Specialized";
2220     case SmallIndex:
2221       return "Small";
2222     case MediumIndex:
2223       return "Medium";
2224     case HumongousIndex:
2225       return "Humongous";
2226     default:
2227       return NULL;
2228   }
2229 }
2230 
2231 ChunkIndex ChunkManager::list_index(size_t size) {
2232   switch (size) {
2233     case SpecializedChunk:
2234       assert(SpecializedChunk == ClassSpecializedChunk,
2235              "Need branch for ClassSpecializedChunk");
2236       return SpecializedIndex;
2237     case SmallChunk:
2238     case ClassSmallChunk:
2239       return SmallIndex;
2240     case MediumChunk:
2241     case ClassMediumChunk:
2242       return MediumIndex;
2243     default:
2244       assert(size > MediumChunk || size > ClassMediumChunk,
2245              "Not a humongous chunk");
2246       return HumongousIndex;
2247   }
2248 }
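
// In other words (a summary of the switch above): exact matches of the
// specialized, small and medium chunk sizes (class or non-class variants)
// map to their fixed-size free lists; any other size is treated as
// humongous and is managed by the humongous dictionary rather than a
// fixed-size list.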
2249 
2250 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2251   assert_lock_strong(_lock);
2252   size_t raw_word_size = get_raw_word_size(word_size);
2253   size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
2254   assert(raw_word_size >= min_size,
2255          err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
2256   block_freelists()->return_block(p, raw_word_size);
2257 }
2258 
2259 // Adds a chunk to the list of chunks in use.
2260 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2261 
2262   assert(new_chunk != NULL, "Should not be NULL");
2263   assert(new_chunk->next() == NULL, "Should not be on a list");
2264 
2265   new_chunk->reset_empty();
2266 
2267   // Find the correct list and set the current
2268   // chunk for that list.
2269   ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2270 
2271   if (index != HumongousIndex) {
2272     retire_current_chunk();
2273     set_current_chunk(new_chunk);
2274     new_chunk->set_next(chunks_in_use(index));
2275     set_chunks_in_use(index, new_chunk);
2276   } else {
2277     // For null class loader data and DumpSharedSpaces, the first chunk isn't
2278     // small, so the small chunk list will be null.  Link this first chunk as
2279     // the current chunk.
2280     if (make_current) {
2281       // Set as the current chunk but otherwise treat as a humongous chunk.
2282       set_current_chunk(new_chunk);
2283     }
2284     // Link at head.  The _current_chunk only points to a humongous chunk for
2285     // the null class loader metaspace (class and data virtual space managers);
2286     // it never follows later humongous chunks, so it will not point to the
2287     // tail of the humongous chunks list.
2288     new_chunk->set_next(chunks_in_use(HumongousIndex));
2289     set_chunks_in_use(HumongousIndex, new_chunk);
2290 
2291     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2292   }
2293 
2294   // Add to the running sum of capacity
2295   inc_size_metrics(new_chunk->word_size());
2296 
2297   assert(new_chunk->is_empty(), "Not ready for reuse");
2298   if (TraceMetadataChunkAllocation && Verbose) {
2299     gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
2300                         sum_count_in_chunks_in_use());
2301     new_chunk->print_on(gclog_or_tty);
2302     chunk_manager()->locked_print_free_chunks(gclog_or_tty);
2303   }
2304 }
2305 
2306 void SpaceManager::retire_current_chunk() {
2307   if (current_chunk() != NULL) {
2308     size_t remaining_words = current_chunk()->free_word_size();
2309     if (remaining_words >= TreeChunk<Metablock, FreeList>::min_size()) {
2310       block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2311       inc_used_metrics(remaining_words);
2312     }
2313   }
2314 }
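
// Note on retirement: the unused tail of the current chunk is handed to the
// block freelist so it can satisfy later deallocate()/allocate() requests;
// tails smaller than TreeChunk<Metablock, FreeList>::min_size() are simply
// left behind as unusable "dark matter" in the chunk.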
2315 
2316 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2317                                        size_t grow_chunks_by_words) {
2318   // Get a chunk from the chunk freelist
2319   Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2320 
2321   if (next == NULL) {
2322     next = vs_list()->get_new_chunk(word_size,
2323                                     grow_chunks_by_words,
2324                                     medium_chunk_bunch());
2325   }
2326 
2327   if (TraceMetadataHumongousAllocation && next != NULL &&
2328       SpaceManager::is_humongous(next->word_size())) {
2329     gclog_or_tty->print_cr("  new humongous chunk word size "
2330                            PTR_FORMAT, next->word_size());
2331   }
2332 
2333   return next;
2334 }
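
// Descriptive note: a new chunk is first sought on the global free lists
// managed by the ChunkManager; only if none is available there is one carved
// out of the virtual space list, with medium_chunk_bunch() passed along as a
// hint for how much additional space to make available.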
2335 
2336 MetaWord* SpaceManager::allocate(size_t word_size) {
2337   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2338 
2339   size_t raw_word_size = get_raw_word_size(word_size);
2340   BlockFreelist* fl =  block_freelists();
2341   MetaWord* p = NULL;
2342   // Allocation from the dictionary is expensive in the sense that
2343   // the dictionary has to be searched for a size.  Don't allocate
2344   // from the dictionary until it starts to get fat.  Is this
2345   // a reasonable policy?  Maybe a skinny dictionary is fast enough
2346   // for allocations.  Do some profiling.  JJJ
2347   if (fl->total_size() > allocation_from_dictionary_limit) {
2348     p = fl->get_block(raw_word_size);
2349   }
2350   if (p == NULL) {
2351     p = allocate_work(raw_word_size);
2352   }
2353 
2354   return p;
2355 }
2356 
2357 // Returns the address of the space allocated for "word_size".
2358 // This method does not know about blocks (Metablocks).
2359 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2360   assert_lock_strong(_lock);
2361 #ifdef ASSERT
2362   if (Metadebug::test_metadata_failure()) {
2363     return NULL;
2364   }
2365 #endif
2366   // Is there space in the current chunk?
2367   MetaWord* result = NULL;
2368 
2369   // For DumpSharedSpaces, only allocate out of the current chunk which is
2370   // never null because we gave it the size we wanted.   Caller reports out
2371   // of memory if this returns null.
2372   if (DumpSharedSpaces) {
2373     assert(current_chunk() != NULL, "should never happen");
2374     inc_used_metrics(word_size);
2375     return current_chunk()->allocate(word_size); // caller handles null result
2376   }
2377 
2378   if (current_chunk() != NULL) {
2379     result = current_chunk()->allocate(word_size);
2380   }
2381 
2382   if (result == NULL) {
2383     result = grow_and_allocate(word_size);
2384   }
2385 
2386   if (result != NULL) {
2387     inc_used_metrics(word_size);
2388     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2389            "Head of the list is being allocated");
2390   }
2391 
2392   return result;
2393 }
2394 
2395 void SpaceManager::verify() {
2396   // If there are blocks in the dictionary, then
2397   // verification of chunks does not work since
2398   // being in the dictionary alters a chunk.
2399   if (block_freelists()->total_size() == 0) {
2400     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2401       Metachunk* curr = chunks_in_use(i);
2402       while (curr != NULL) {
2403         curr->verify();
2404         verify_chunk_size(curr);
2405         curr = curr->next();
2406       }
2407     }
2408   }
2409 }
2410 
2411 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2412   assert(is_humongous(chunk->word_size()) ||
2413          chunk->word_size() == medium_chunk_size() ||
2414          chunk->word_size() == small_chunk_size() ||
2415          chunk->word_size() == specialized_chunk_size(),
2416          "Chunk size is wrong");
2417   return;
2418 }
2419 
2420 #ifdef ASSERT
2421 void SpaceManager::verify_allocated_blocks_words() {
2422   // Verification is only guaranteed at a safepoint.
2423   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2424     "Verification can fail if the application is running");
2425   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2426     err_msg("allocation total is not consistent " SIZE_FORMAT
2427             " vs " SIZE_FORMAT,
2428             allocated_blocks_words(), sum_used_in_chunks_in_use()));
2429 }
2430 
2431 #endif
2432 
2433 void SpaceManager::dump(outputStream* const out) const {
2434   size_t curr_total = 0;
2435   size_t waste = 0;
2436   uint i = 0;
2437   size_t used = 0;
2438   size_t capacity = 0;
2439 
2440   // Add up statistics for all chunks in this SpaceManager.
2441   for (ChunkIndex index = ZeroIndex;
2442        index < NumberOfInUseLists;
2443        index = next_chunk_index(index)) {
2444     for (Metachunk* curr = chunks_in_use(index);
2445          curr != NULL;
2446          curr = curr->next()) {
2447       out->print("%d) ", i++);
2448       curr->print_on(out);
2449       curr_total += curr->word_size();
2450       used += curr->used_word_size();
2451       capacity += curr->word_size();
2452       waste += curr->free_word_size() + curr->overhead();
2453     }
2454   }
2455 
2456   if (TraceMetadataChunkAllocation && Verbose) {
2457     block_freelists()->print_on(out);
2458   }
2459 
2460   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2461   // Free space isn't wasted.
2462   waste -= free;
2463 
2464   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2465                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2466                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2467 }
2468 
2469 #ifndef PRODUCT
2470 void SpaceManager::mangle_freed_chunks() {
2471   for (ChunkIndex index = ZeroIndex;
2472        index < NumberOfInUseLists;
2473        index = next_chunk_index(index)) {
2474     for (Metachunk* curr = chunks_in_use(index);
2475          curr != NULL;
2476          curr = curr->next()) {
2477       curr->mangle();
2478     }
2479   }
2480 }
2481 #endif // PRODUCT
2482 
2483 // MetaspaceAux
2484 
2485 
2486 size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
2487 size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
2488 
2489 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2490   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2491   return list == NULL ? 0 : list->free_bytes();
2492 }
2493 
2494 size_t MetaspaceAux::free_bytes() {
2495   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2496 }
2497 
2498 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2499   assert_lock_strong(SpaceManager::expand_lock());
2500   assert(words <= allocated_capacity_words(mdtype),
2501     err_msg("About to decrement below 0: words " SIZE_FORMAT
2502             " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
2503             words, mdtype, allocated_capacity_words(mdtype)));
2504   _allocated_capacity_words[mdtype] -= words;
2505 }
2506 
2507 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2508   assert_lock_strong(SpaceManager::expand_lock());
2509   // Needs to be atomic
2510   _allocated_capacity_words[mdtype] += words;
2511 }
2512 
2513 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2514   assert(words <= allocated_used_words(mdtype),
2515     err_msg("About to decrement below 0: words " SIZE_FORMAT
2516             " is greater than _allocated_used_words[%u] " SIZE_FORMAT,
2517             words, mdtype, allocated_used_words(mdtype)));
2518   // For CMS deallocation of the Metaspaces occurs during the
2519   // sweep which is a concurrent phase.  Protection by the expand_lock()
2520   // is not enough since allocation is on a per Metaspace basis
2521   // and protected by the Metaspace lock.
2522   jlong minus_words = (jlong) - (jlong) words;
2523   Atomic::add_ptr(minus_words, &_allocated_used_words[mdtype]);
2524 }
2525 
2526 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2527   // _allocated_used_words tracks allocations for
2528   // each piece of metadata.  Those allocations are
2529   // generally done concurrently by different application
2530   // threads so must be done atomically.
2531   Atomic::add_ptr(words, &_allocated_used_words[mdtype]);
2532 }
2533 
2534 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2535   size_t used = 0;
2536   ClassLoaderDataGraphMetaspaceIterator iter;
2537   while (iter.repeat()) {
2538     Metaspace* msp = iter.get_next();
2539     // Sum allocated_blocks_words for each metaspace
2540     if (msp != NULL) {
2541       used += msp->used_words_slow(mdtype);
2542     }
2543   }
2544   return used * BytesPerWord;
2545 }
2546 
2547 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2548   size_t free = 0;
2549   ClassLoaderDataGraphMetaspaceIterator iter;
2550   while (iter.repeat()) {
2551     Metaspace* msp = iter.get_next();
2552     if (msp != NULL) {
2553       free += msp->free_words_slow(mdtype);
2554     }
2555   }
2556   return free * BytesPerWord;
2557 }
2558 
2559 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2560   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2561     return 0;
2562   }
2563   // Don't count the space in the freelists.  That space will be
2564   // added to the capacity calculation as needed.
2565   size_t capacity = 0;
2566   ClassLoaderDataGraphMetaspaceIterator iter;
2567   while (iter.repeat()) {
2568     Metaspace* msp = iter.get_next();
2569     if (msp != NULL) {
2570       capacity += msp->capacity_words_slow(mdtype);
2571     }
2572   }
2573   return capacity * BytesPerWord;
2574 }
2575 
2576 size_t MetaspaceAux::capacity_bytes_slow() {
2577 #ifdef PRODUCT
2578   // Use allocated_capacity_bytes() in PRODUCT instead of this function.
2579   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2580 #endif
2581   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2582   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2583   assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
2584       err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
2585         " class_capacity + non_class_capacity " SIZE_FORMAT
2586         " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2587         allocated_capacity_bytes(), class_capacity + non_class_capacity,
2588         class_capacity, non_class_capacity));
2589 
2590   return class_capacity + non_class_capacity;
2591 }
2592 
2593 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2594   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2595   return list == NULL ? 0 : list->reserved_bytes();
2596 }
2597 
2598 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2599   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2600   return list == NULL ? 0 : list->committed_bytes();
2601 }
2602 
2603 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2604 
2605 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2606   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2607   if (chunk_manager == NULL) {
2608     return 0;
2609   }
2610   chunk_manager->slow_verify();
2611   return chunk_manager->free_chunks_total_words();
2612 }
2613 
2614 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2615   return free_chunks_total_words(mdtype) * BytesPerWord;
2616 }
2617 
2618 size_t MetaspaceAux::free_chunks_total_words() {
2619   return free_chunks_total_words(Metaspace::ClassType) +
2620          free_chunks_total_words(Metaspace::NonClassType);
2621 }
2622 
2623 size_t MetaspaceAux::free_chunks_total_bytes() {
2624   return free_chunks_total_words() * BytesPerWord;
2625 }
2626 
2627 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2628   gclog_or_tty->print(", [Metaspace:");
2629   if (PrintGCDetails && Verbose) {
2630     gclog_or_tty->print(" "  SIZE_FORMAT
2631                         "->" SIZE_FORMAT
2632                         "("  SIZE_FORMAT ")",
2633                         prev_metadata_used,
2634                         allocated_used_bytes(),
2635                         reserved_bytes());
2636   } else {
2637     gclog_or_tty->print(" "  SIZE_FORMAT "K"
2638                         "->" SIZE_FORMAT "K"
2639                         "("  SIZE_FORMAT "K)",
2640                         prev_metadata_used/K,
2641                         allocated_used_bytes()/K,
2642                         reserved_bytes()/K);
2643   }
2644 
2645   gclog_or_tty->print("]");
2646 }
2647 
2648 // This is printed when PrintGCDetails
2649 void MetaspaceAux::print_on(outputStream* out) {
2650   Metaspace::MetadataType nct = Metaspace::NonClassType;
2651 
2652   out->print_cr(" Metaspace       "
2653                 "used "      SIZE_FORMAT "K, "
2654                 "capacity "  SIZE_FORMAT "K, "
2655                 "committed " SIZE_FORMAT "K, "
2656                 "reserved "  SIZE_FORMAT "K",
2657                 allocated_used_bytes()/K,
2658                 allocated_capacity_bytes()/K,
2659                 committed_bytes()/K,
2660                 reserved_bytes()/K);
2661 
2662   if (Metaspace::using_class_space()) {
2663     Metaspace::MetadataType ct = Metaspace::ClassType;
2664     out->print_cr("  class space    "
2665                   "used "      SIZE_FORMAT "K, "
2666                   "capacity "  SIZE_FORMAT "K, "
2667                   "committed " SIZE_FORMAT "K, "
2668                   "reserved "  SIZE_FORMAT "K",
2669                   allocated_used_bytes(ct)/K,
2670                   allocated_capacity_bytes(ct)/K,
2671                   committed_bytes(ct)/K,
2672                   reserved_bytes(ct)/K);
2673   }
2674 }
2675 
2676 // Print information for class space and data space separately.
2677 // This is almost the same as above.
2678 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2679   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2680   size_t capacity_bytes = capacity_bytes_slow(mdtype);
2681   size_t used_bytes = used_bytes_slow(mdtype);
2682   size_t free_bytes = free_bytes_slow(mdtype);
2683   size_t used_and_free = used_bytes + free_bytes +
2684                            free_chunks_capacity_bytes;
2685   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
2686              "K + unused in chunks " SIZE_FORMAT "K  + "
2687              " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2688              "K  capacity in allocated chunks " SIZE_FORMAT "K",
2689              used_bytes / K,
2690              free_bytes / K,
2691              free_chunks_capacity_bytes / K,
2692              used_and_free / K,
2693              capacity_bytes / K);
2694   // Accounting can only be correct if we got the values during a safepoint
2695   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2696 }
2697 
2698 // Print total fragmentation for class metaspaces
2699 void MetaspaceAux::print_class_waste(outputStream* out) {
2700   assert(Metaspace::using_class_space(), "class metaspace not used");
2701   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2702   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2703   ClassLoaderDataGraphMetaspaceIterator iter;
2704   while (iter.repeat()) {
2705     Metaspace* msp = iter.get_next();
2706     if (msp != NULL) {
2707       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2708       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2709       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2710       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2711       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2712       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2713       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2714     }
2715   }
2716   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2717                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2718                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2719                 "large count " SIZE_FORMAT,
2720                 cls_specialized_count, cls_specialized_waste,
2721                 cls_small_count, cls_small_waste,
2722                 cls_medium_count, cls_medium_waste, cls_humongous_count);
2723 }
2724 
2725 // Print total fragmentation for data and class metaspaces separately
2726 void MetaspaceAux::print_waste(outputStream* out) {
2727   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2728   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2729 
2730   ClassLoaderDataGraphMetaspaceIterator iter;
2731   while (iter.repeat()) {
2732     Metaspace* msp = iter.get_next();
2733     if (msp != NULL) {
2734       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2735       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2736       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2737       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2738       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2739       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2740       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2741     }
2742   }
2743   out->print_cr("Total fragmentation waste (words) doesn't count free space");
2744   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2745                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2746                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2747                         "large count " SIZE_FORMAT,
2748              specialized_count, specialized_waste, small_count,
2749              small_waste, medium_count, medium_waste, humongous_count);
2750   if (Metaspace::using_class_space()) {
2751     print_class_waste(out);
2752   }
2753 }
2754 
2755 // Dump global metaspace things from the end of ClassLoaderDataGraph
2756 void MetaspaceAux::dump(outputStream* out) {
2757   out->print_cr("All Metaspace:");
2758   out->print("data space: "); print_on(out, Metaspace::NonClassType);
2759   out->print("class space: "); print_on(out, Metaspace::ClassType);
2760   print_waste(out);
2761 }
2762 
2763 void MetaspaceAux::verify_free_chunks() {
2764   Metaspace::chunk_manager_metadata()->verify();
2765   if (Metaspace::using_class_space()) {
2766     Metaspace::chunk_manager_class()->verify();
2767   }
2768 }
2769 
2770 void MetaspaceAux::verify_capacity() {
2771 #ifdef ASSERT
2772   size_t running_sum_capacity_bytes = allocated_capacity_bytes();
2773   // For purposes of the running sum of capacity, verify against capacity
2774   size_t capacity_in_use_bytes = capacity_bytes_slow();
2775   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2776     err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
2777             " capacity_bytes_slow()" SIZE_FORMAT,
2778             running_sum_capacity_bytes, capacity_in_use_bytes));
2779   for (Metaspace::MetadataType i = Metaspace::ClassType;
2780        i < Metaspace::MetadataTypeCount;
2781        i = (Metaspace::MetadataType)(i + 1)) {
2782     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2783     assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
2784       err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
2785               " capacity_bytes_slow(%u)" SIZE_FORMAT,
2786               i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
2787   }
2788 #endif
2789 }
2790 
2791 void MetaspaceAux::verify_used() {
2792 #ifdef ASSERT
2793   size_t running_sum_used_bytes = allocated_used_bytes();
2794   // For purposes of the running sum of used, verify against used
2795   size_t used_in_use_bytes = used_bytes_slow();
2796   assert(allocated_used_bytes() == used_in_use_bytes,
2797     err_msg("allocated_used_bytes() " SIZE_FORMAT
2798             " used_bytes_slow()" SIZE_FORMAT,
2799             allocated_used_bytes(), used_in_use_bytes));
2800   for (Metaspace::MetadataType i = Metaspace::ClassType;
2801        i < Metaspace::MetadataTypeCount;
2802        i = (Metaspace::MetadataType)(i + 1)) {
2803     size_t used_in_use_bytes = used_bytes_slow(i);
2804     assert(allocated_used_bytes(i) == used_in_use_bytes,
2805       err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
2806               " used_bytes_slow(%u)" SIZE_FORMAT,
2807               i, allocated_used_bytes(i), i, used_in_use_bytes));
2808   }
2809 #endif
2810 }
2811 
2812 void MetaspaceAux::verify_metrics() {
2813   verify_capacity();
2814   verify_used();
2815 }
2816 
2817 
2818 // Metaspace methods
2819 
2820 size_t Metaspace::_first_chunk_word_size = 0;
2821 size_t Metaspace::_first_class_chunk_word_size = 0;
2822 
2823 size_t Metaspace::_commit_alignment = 0;
2824 size_t Metaspace::_reserve_alignment = 0;
2825 
2826 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2827   initialize(lock, type);
2828 }
2829 
2830 Metaspace::~Metaspace() {
2831   delete _vsm;
2832   if (using_class_space()) {
2833     delete _class_vsm;
2834   }
2835 }
2836 
2837 VirtualSpaceList* Metaspace::_space_list = NULL;
2838 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2839 
2840 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
2841 ChunkManager* Metaspace::_chunk_manager_class = NULL;
2842 
2843 #define VIRTUALSPACEMULTIPLIER 2
2844 
2845 #ifdef _LP64
2846 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
2847 
2848 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
2849   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
2850   // narrow_klass_base is the lower of the metaspace base and the cds base
2851   // (if cds is enabled).  The narrow_klass_shift depends on the distance
2852   // between the lower base and higher address.
2853   address lower_base;
2854   address higher_address;
2855   if (UseSharedSpaces) {
2856     higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2857                           (address)(metaspace_base + compressed_class_space_size()));
2858     lower_base = MIN2(metaspace_base, cds_base);
2859   } else {
2860     higher_address = metaspace_base + compressed_class_space_size();
2861     lower_base = metaspace_base;
2862 
2863     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
2864     // If compressed class space fits in lower 32G, we don't need a base.
2865     if (higher_address <= (address)klass_encoding_max) {
2866       lower_base = 0; // effectively lower base is zero.
2867     }
2868   }
2869 
2870   Universe::set_narrow_klass_base(lower_base);
2871 
2872   if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
2873     Universe::set_narrow_klass_shift(0);
2874   } else {
2875     assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
2876     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
2877   }
2878 }
2879 
2880 // Return TRUE if the specified metaspace_base and cds_base are close enough
2881 // to work with compressed klass pointers.
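// Because CDS cannot be used with a non-zero narrow klass shift (see
// set_narrow_klass_base_and_shift() above), "close enough" means that the
// range spanning both the CDS archive and the compressed class space fits
// within UnscaledClassSpaceMax (4G).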
2882 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
2883   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
2884   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2885   address lower_base = MIN2((address)metaspace_base, cds_base);
2886   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2887                                 (address)(metaspace_base + compressed_class_space_size()));
2888   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
2889 }
2890 
2891 // Try to allocate the metaspace at the requested addr.
2892 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
2893   assert(using_class_space(), "called improperly");
2894   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2895   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
2896          "Metaspace size is too big");
2897   assert_is_ptr_aligned(requested_addr, _reserve_alignment);
2898   assert_is_ptr_aligned(cds_base, _reserve_alignment);
2899   assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
2900 
2901   // Don't use large pages for the class space.
2902   bool large_pages = false;
2903 
2904   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
2905                                              _reserve_alignment,
2906                                              large_pages,
2907                                              requested_addr, 0);
2908   if (!metaspace_rs.is_reserved()) {
2909     if (UseSharedSpaces) {
2910       size_t increment = align_size_up(1*G, _reserve_alignment);
2911 
2912       // Keep trying to allocate the metaspace, increasing the requested_addr
2913       // by 1GB each time, until we reach an address that will no longer allow
2914       // use of CDS with compressed klass pointers.
2915       char *addr = requested_addr;
2916       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
2917              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
2918         addr = addr + increment;
2919         metaspace_rs = ReservedSpace(compressed_class_space_size(),
2920                                      _reserve_alignment, large_pages, addr, 0);
2921       }
2922     }
2923 
2924     // If no successful allocation then try to allocate the space anywhere.  If
2925     // that fails, exit with an out-of-memory error.  At this point we cannot
2926     // try allocating the metaspace as if UseCompressedClassPointers were off,
2927     // because too much initialization that depends on UseCompressedClassPointers
2928     // has already happened.  So UseCompressedClassPointers cannot be turned off here.
2929     if (!metaspace_rs.is_reserved()) {
2930       metaspace_rs = ReservedSpace(compressed_class_space_size(),
2931                                    _reserve_alignment, large_pages);
2932       if (!metaspace_rs.is_reserved()) {
2933         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
2934                                               compressed_class_space_size()));
2935       }
2936     }
2937   }
2938 
2939   // If we got here then the metaspace got allocated.
2940   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
2941 
2942   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
2943   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
2944     FileMapInfo::stop_sharing_and_unmap(
2945         "Could not allocate metaspace at a compatible address");
2946   }
2947 
2948   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
2949                                   UseSharedSpaces ? (address)cds_base : 0);
2950 
2951   initialize_class_space(metaspace_rs);
2952 
2953   if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
2954     gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
2955                             Universe::narrow_klass_base(), Universe::narrow_klass_shift());
2956     gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
2957                            compressed_class_space_size(), metaspace_rs.base(), requested_addr);
2958   }
2959 }
2960 
2961 // For UseCompressedClassPointers the class space is reserved above the top of
2962 // the Java heap.  The argument passed in is at the base of the compressed space.
2963 void Metaspace::initialize_class_space(ReservedSpace rs) {
2964   // The reserved space size may be bigger because of alignment, especially with UseLargePages.
2965   assert(rs.size() >= CompressedClassSpaceSize,
2966          err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
2967   assert(using_class_space(), "Must be using class space");
2968   _class_space_list = new VirtualSpaceList(rs);
2969   _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
2970 
2971   if (!_class_space_list->initialization_succeeded()) {
2972     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
2973   }
2974 }
2975 
2976 #endif
2977 
2978 void Metaspace::ergo_initialize() {
2979   if (DumpSharedSpaces) {
2980     // Using large pages when dumping the shared archive is currently not implemented.
2981     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
2982   }
2983 
2984   size_t page_size = os::vm_page_size();
2985   if (UseLargePages && UseLargePagesInMetaspace) {
2986     page_size = os::large_page_size();
2987   }
2988 
2989   _commit_alignment  = page_size;
2990   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
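  // For illustration: with 4K pages and (on Windows) a 64K allocation
  // granularity, metaspace is committed in 4K steps and reserved in 64K
  // steps; if UseLargePagesInMetaspace selects 2M large pages, both
  // alignments become 2M.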
2991 
2992   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since that would
2993   // clobber the record of whether MaxMetaspaceSize was set on the command line.
2994   // This information is needed later to conform to the specification of the
2995   // java.lang.management.MemoryUsage API.
2996   //
2997   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
2998   // globals.hpp to the aligned value, but this is not possible, since the
2999   // alignment depends on other flags being parsed.
3000   MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3001 
3002   if (MetaspaceSize > MaxMetaspaceSize) {
3003     MetaspaceSize = MaxMetaspaceSize;
3004   }
3005 
3006   MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
3007 
3008   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3009 
3010   if (MetaspaceSize < 256*K) {
3011     vm_exit_during_initialization("Too small initial Metaspace size");
3012   }
3013 
3014   MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3015   MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3016 
3017   CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3018   set_compressed_class_space_size(CompressedClassSpaceSize);
3019 }
3020 
3021 void Metaspace::global_initialize() {
3022   // Initialize the alignment for shared spaces.
3023   int max_alignment = os::vm_page_size();
3024   size_t cds_total = 0;
3025 
3026   MetaspaceShared::set_max_alignment(max_alignment);
3027 
3028   if (DumpSharedSpaces) {
3029     SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
3030     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
3031     SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
3032     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
3033 
3034     // Initialize with the sum of the shared space sizes.  The read-only
3035     // and read-write metaspace chunks will be allocated out of this, and the
3036     // remainder is the misc code and data chunks.
3037     cds_total = FileMapInfo::shared_spaces_size();
3038     cds_total = align_size_up(cds_total, _reserve_alignment);
3039     _space_list = new VirtualSpaceList(cds_total/wordSize);
3040     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3041 
3042     if (!_space_list->initialization_succeeded()) {
3043       vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3044     }
3045 
3046 #ifdef _LP64
3047     if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3048       vm_exit_during_initialization("Unable to dump shared archive.",
3049           err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3050                   SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3051                   "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(),
3052                   cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3053     }
3054 
3055     // Set the compressed klass pointer base so that decoding of these pointers works
3056     // properly when creating the shared archive.
3057     assert(UseCompressedOops && UseCompressedClassPointers,
3058       "UseCompressedOops and UseCompressedClassPointers must be set");
3059     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3060     if (TraceMetavirtualspaceAllocation && Verbose) {
3061       gclog_or_tty->print_cr("Setting narrow_klass_base to Address: " PTR_FORMAT,
3062                              _space_list->current_virtual_space()->bottom());
3063     }
3064 
3065     Universe::set_narrow_klass_shift(0);
3066 #endif
3067 
3068   } else {
3069     // If using shared space, open the file that contains the shared space
3070     // and map in the memory before initializing the rest of metaspace (so
3071     // the addresses don't conflict)
3072     address cds_address = NULL;
3073     if (UseSharedSpaces) {
3074       FileMapInfo* mapinfo = new FileMapInfo();
3075       memset(mapinfo, 0, sizeof(FileMapInfo));
3076 
3077       // Open the shared archive file, read and validate the header. If
3078       // initialization fails, shared spaces [UseSharedSpaces] are
3079       // disabled and the file is closed.
3080       // Map in spaces now also
3081       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3082         FileMapInfo::set_current_info(mapinfo);
3083         cds_total = FileMapInfo::shared_spaces_size();
3084         cds_address = (address)mapinfo->region_base(0);
3085       } else {
3086         assert(!mapinfo->is_open() && !UseSharedSpaces,
3087                "archive file not closed or shared spaces not disabled.");
3088       }
3089     }
3090 
3091 #ifdef _LP64
3092     // If UseCompressedClassPointers is set then allocate the metaspace area
3093     // above the heap and above the CDS area (if it exists).
3094     if (using_class_space()) {
3095       if (UseSharedSpaces) {
3096         char* cds_end = (char*)(cds_address + cds_total);
3097         cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3098         allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3099       } else {
3100         char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3101         allocate_metaspace_compressed_klass_ptrs(base, 0);
3102       }
3103     }
3104 #endif
3105 
3106     // Initialize these before initializing the VirtualSpaceList
3107     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3108     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3109     // Make the first class chunk bigger than a medium chunk so it's not put
3110     // on the medium chunk list.  The next chunk will be small and progress
3111     // from there.  This size was determined by running -version.
3112     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3113                                        (CompressedClassSpaceSize/BytesPerWord)*2);
3114     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
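    // For example, with the default 1G CompressedClassSpaceSize on a 64-bit
    // VM, the MIN2 above resolves to MediumChunk * 6 (48K words), which is
    // larger than ClassMediumChunk (4K words) and is therefore treated as a
    // humongous chunk.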
3115     // Arbitrarily set the initial virtual space to a multiple
3116     // of the boot class loader size.
3117     size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3118     word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3119 
3120     // Initialize the list of virtual spaces.
3121     _space_list = new VirtualSpaceList(word_size);
3122     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3123 
3124     if (!_space_list->initialization_succeeded()) {
3125       vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3126     }
3127   }
3128 
3129   MetaspaceGC::initialize();
3130 }
3131 
3132 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
3133                                                size_t chunk_word_size,
3134                                                size_t chunk_bunch) {
3135   // Get a chunk from the chunk freelist
3136   Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3137   if (chunk != NULL) {
3138     return chunk;
3139   }
3140 
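  // The free list has no chunk of the requested size; carve a new chunk out
  // of the virtual space list instead.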
3141   return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
3142 }
3143 
3144 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3145 
3146   assert(space_list() != NULL,
3147     "Metadata VirtualSpaceList has not been initialized");
3148   assert(chunk_manager_metadata() != NULL,
3149     "Metadata ChunkManager has not been initialized");
3150 
3151   _vsm = new SpaceManager(NonClassType, lock);
3152   if (_vsm == NULL) {
3153     return;
3154   }
3155   size_t word_size;
3156   size_t class_word_size;
3157   vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
3158 
3159   if (using_class_space()) {
3160     assert(class_space_list() != NULL,
3161       "Class VirtualSpaceList has not been initialized");
3162     assert(chunk_manager_class() != NULL,
3163       "Class ChunkManager has not been initialized");
3164 
3165     // Allocate SpaceManager for classes.
3166     _class_vsm = new SpaceManager(ClassType, lock);
3167     if (_class_vsm == NULL) {
3168       return;
3169     }
3170   }
3171 
3172   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3173 
3174   // Allocate chunk for metadata objects
3175   Metachunk* new_chunk = get_initialization_chunk(NonClassType,
3176                                                   word_size,
3177                                                   vsm()->medium_chunk_bunch());
3178   assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
3179   if (new_chunk != NULL) {
3180     // Add to this manager's list of chunks in use and current_chunk().
3181     vsm()->add_chunk(new_chunk, true);
3182   }
3183 
3184   // Allocate chunk for class metadata objects
3185   if (using_class_space()) {
3186     Metachunk* class_chunk = get_initialization_chunk(ClassType,
3187                                                       class_word_size,
3188                                                       class_vsm()->medium_chunk_bunch());
3189     if (class_chunk != NULL) {
3190       class_vsm()->add_chunk(class_chunk, true);
3191     }
3192   }
3193 
3194   _alloc_record_head = NULL;
3195   _alloc_record_tail = NULL;
3196 }
3197 
3198 size_t Metaspace::align_word_size_up(size_t word_size) {
3199   size_t byte_size = word_size * wordSize;
3200   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3201 }
3202 
3203 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3204   // DumpSharedSpaces doesn't use class metadata area (yet)
3205   // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3206   if (is_class_space_allocation(mdtype)) {
3207     return class_vsm()->allocate(word_size);
3208   } else {
3209     return vsm()->allocate(word_size);
3210   }
3211 }
3212 
3213 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3214   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3215   assert(delta_bytes > 0, "Must be");
3216 
3217   size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes);
3218   size_t before_inc = after_inc - delta_bytes;
3219 
3220   if (PrintGCDetails && Verbose) {
3221     gclog_or_tty->print_cr("Increase capacity until GC from " SIZE_FORMAT
3222         " to " SIZE_FORMAT, before_inc, after_inc);
3223   }
3224 
3225   return allocate(word_size, mdtype);
3226 }
3227 
3228 // Space allocated in the Metaspace.  This may
3229 // be across several metadata virtual spaces.
3230 char* Metaspace::bottom() const {
3231   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3232   return (char*)vsm()->current_chunk()->bottom();
3233 }
3234 
3235 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3236   if (mdtype == ClassType) {
3237     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3238   } else {
3239     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3240   }
3241 }
3242 
3243 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3244   if (mdtype == ClassType) {
3245     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3246   } else {
3247     return vsm()->sum_free_in_chunks_in_use();
3248   }
3249 }
3250 
3251 // Space capacity in the Metaspace.  It includes
3252 // space in the list of chunks from which allocations
3253 // have been made.  It does not include chunks on the global free lists,
3254 // nor the space available in the block dictionary, since that space is
3255 // already counted in some chunk.
3256 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3257   if (mdtype == ClassType) {
3258     return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3259   } else {
3260     return vsm()->sum_capacity_in_chunks_in_use();
3261   }
3262 }
3263 
3264 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3265   return used_words_slow(mdtype) * BytesPerWord;
3266 }
3267 
3268 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3269   return capacity_words_slow(mdtype) * BytesPerWord;
3270 }
3271 
3272 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3273   if (SafepointSynchronize::is_at_safepoint()) {
3274     assert(Thread::current()->is_VM_thread(), "should be the VM thread");
3275     // Don't take Heap_lock
3276     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3277     if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
3278       // Dark matter.  Too small for dictionary.
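      // Such blocks are simply abandoned; they are reclaimed only when the
      // whole metaspace of this class loader is freed.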
3279 #ifdef ASSERT
3280       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3281 #endif
3282       return;
3283     }
3284     if (is_class && using_class_space()) {
3285       class_vsm()->deallocate(ptr, word_size);
3286     } else {
3287       vsm()->deallocate(ptr, word_size);
3288     }
3289   } else {
3290     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3291 
3292     if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
3293       // Dark matter.  Too small for dictionary.
3294 #ifdef ASSERT
3295       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3296 #endif
3297       return;
3298     }
3299     if (is_class && using_class_space()) {
3300       class_vsm()->deallocate(ptr, word_size);
3301     } else {
3302       vsm()->deallocate(ptr, word_size);
3303     }
3304   }
3305 }
3306 
3307 
3308 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3309                               bool read_only, MetaspaceObj::Type type, TRAPS) {
3310   if (HAS_PENDING_EXCEPTION) {
3311     assert(false, "Should not allocate with exception pending");
3312     return NULL;  // caller does a CHECK_NULL too
3313   }
3314 
3315   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3316         "ClassLoaderData::the_null_class_loader_data() should have been used.");
3317 
3318   // Allocate in metaspaces without taking out a lock, because it deadlocks
3319   // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
3320   // to revisit this for application class data sharing.
3321   if (DumpSharedSpaces) {
3322     assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3323     Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3324     MetaWord* result = space->allocate(word_size, NonClassType);
3325     if (result == NULL) {
3326       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3327     }
3328 
3329     space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3330 
3331     // Zero initialize.
3332     Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3333 
3334     return result;
3335   }
3336 
3337   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3338 
3339   // Try to allocate metadata.
3340   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3341 
3342   if (result == NULL) {
3343     // Allocation failed.
3344     if (is_init_completed()) {
3345       // Only start a GC if the bootstrapping has completed.
3346 
3347       // Try to clean out some memory and retry.
3348       result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3349           loader_data, word_size, mdtype);
3350     }
3351   }
3352 
3353   if (result == NULL) {
3354     report_metadata_oome(loader_data, word_size, mdtype, CHECK_NULL);
3355   }
3356 
3357   // Zero initialize.
3358   Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3359 
3360   return result;
3361 }
3362 
3363 size_t Metaspace::class_chunk_size(size_t word_size) {
3364   assert(using_class_space(), "Has to use class space");
3365   return class_vsm()->calc_chunk_size(word_size);
3366 }
3367 
3368 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) {
3369   // If result is still null, we are out of memory.
3370   if (Verbose && TraceMetadataChunkAllocation) {
3371     gclog_or_tty->print_cr("Metaspace allocation failed for size "
3372         SIZE_FORMAT, word_size);
3373     if (loader_data->metaspace_or_null() != NULL) {
3374       loader_data->dump(gclog_or_tty);
3375     }
3376     MetaspaceAux::dump(gclog_or_tty);
3377   }
3378 
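  // Distinguish exhaustion of the fixed-size compressed class space from
  // general Metaspace exhaustion, so the error message points at the right
  // limit (CompressedClassSpaceSize vs. MaxMetaspaceSize).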
3379   bool out_of_compressed_class_space = false;
3380   if (is_class_space_allocation(mdtype)) {
3381     Metaspace* metaspace = loader_data->metaspace_non_null();
3382     out_of_compressed_class_space =
3383       MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3384       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3385       CompressedClassSpaceSize;
3386   }
3387 
3388   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3389   const char* space_string = out_of_compressed_class_space ?
3390     "Compressed class space" : "Metaspace";
3391 
3392   report_java_out_of_memory(space_string);
3393 
3394   if (JvmtiExport::should_post_resource_exhausted()) {
3395     JvmtiExport::post_resource_exhausted(
3396         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3397         space_string);
3398   }
3399 
3400   if (!is_init_completed()) {
3401     vm_exit_during_initialization("OutOfMemoryError", space_string);
3402   }
3403 
3404   if (out_of_compressed_class_space) {
3405     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3406   } else {
3407     THROW_OOP(Universe::out_of_memory_error_metaspace());
3408   }
3409 }
3410 
3411 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3412   assert(DumpSharedSpaces, "sanity");
3413 
3414   AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
3415   if (_alloc_record_head == NULL) {
3416     _alloc_record_head = _alloc_record_tail = rec;
3417   } else {
3418     _alloc_record_tail->_next = rec;
3419     _alloc_record_tail = rec;
3420   }
3421 }
3422 
3423 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3424   assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3425 
3426   address last_addr = (address)bottom();
3427 
3428   for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3429     address ptr = rec->_ptr;
3430     if (last_addr < ptr) {
3431       closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3432     }
3433     closure->doit(ptr, rec->_type, rec->_byte_size);
3434     last_addr = ptr + rec->_byte_size;
3435   }
3436 
3437   address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3438   if (last_addr < top) {
3439     closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3440   }
3441 }
3442 
3443 void Metaspace::purge(MetadataType mdtype) {
3444   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3445 }
3446 
3447 void Metaspace::purge() {
3448   MutexLockerEx cl(SpaceManager::expand_lock(),
3449                    Mutex::_no_safepoint_check_flag);
3450   purge(NonClassType);
3451   if (using_class_space()) {
3452     purge(ClassType);
3453   }
3454 }
3455 
3456 void Metaspace::print_on(outputStream* out) const {
3457   // Print both class virtual space counts and metaspace.
3458   if (Verbose) {
3459     vsm()->print_on(out);
3460     if (using_class_space()) {
3461       class_vsm()->print_on(out);
3462     }
3463   }
3464 }
3465 
3466 bool Metaspace::contains(const void * ptr) {
3467   if (MetaspaceShared::is_in_shared_space(ptr)) {
3468     return true;
3469   }
3470   // This is checked while unlocked.  As long as the virtualspaces are added
3471   // at the end, the pointer will be in one of them.  The virtual spaces
3472   // aren't deleted presently.  When they are, some sort of locking might
3473   // be needed.  Note that taking a lock here can cause lock-order inversion
3474   // problems with the caller, MetaspaceObj::is_metadata().
3475   return space_list()->contains(ptr) ||
3476          (using_class_space() && class_space_list()->contains(ptr));
3477 }
3478 
3479 void Metaspace::verify() {
3480   vsm()->verify();
3481   if (using_class_space()) {
3482     class_vsm()->verify();
3483   }
3484 }
3485 
3486 void Metaspace::dump(outputStream* const out) const {
3487   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
3488   vsm()->dump(out);
3489   if (using_class_space()) {
3490     out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
3491     class_vsm()->dump(out);
3492   }
3493 }
3494 
3495 /////////////// Unit tests ///////////////
3496 
3497 #ifndef PRODUCT
3498 
3499 class TestMetaspaceAuxTest : AllStatic {
3500  public:
3501   static void test_reserved() {
3502     size_t reserved = MetaspaceAux::reserved_bytes();
3503 
3504     assert(reserved > 0, "assert");
3505 
3506     size_t committed  = MetaspaceAux::committed_bytes();
3507     assert(committed <= reserved, "assert");
3508 
3509     size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3510     assert(reserved_metadata > 0, "assert");
3511     assert(reserved_metadata <= reserved, "assert");
3512 
3513     if (UseCompressedClassPointers) {
3514       size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3515       assert(reserved_class > 0, "assert");
3516       assert(reserved_class < reserved, "assert");
3517     }
3518   }
3519 
3520   static void test_committed() {
3521     size_t committed = MetaspaceAux::committed_bytes();
3522 
3523     assert(committed > 0, "assert");
3524 
3525     size_t reserved  = MetaspaceAux::reserved_bytes();
3526     assert(committed <= reserved, "assert");
3527 
3528     size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3529     assert(committed_metadata > 0, "assert");
3530     assert(committed_metadata <= committed, "assert");
3531 
3532     if (UseCompressedClassPointers) {
3533       size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3534       assert(committed_class > 0, "assert");
3535       assert(committed_class < committed, "assert");
3536     }
3537   }
3538 
3539   static void test_virtual_space_list_large_chunk() {
3540     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3541     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3542     // Use a size larger than VirtualSpaceSize (256k) and add one page to make
3543     // it _not_ be vm_allocation_granularity aligned on Windows.
3544     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3545     large_size += (os::vm_page_size()/BytesPerWord);
3546     vs_list->get_new_chunk(large_size, large_size, 0);
3547   }
3548 
3549   static void test() {
3550     test_reserved();
3551     test_committed();
3552     test_virtual_space_list_large_chunk();
3553   }
3554 };
3555 
3556 void TestMetaspaceAux_test() {
3557   TestMetaspaceAuxTest::test();
3558 }
3559 
3560 class TestVirtualSpaceNodeTest {
3561   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3562                                           size_t& num_small_chunks,
3563                                           size_t& num_specialized_chunks) {
3564     num_medium_chunks = words_left / MediumChunk;
3565     words_left = words_left % MediumChunk;
3566 
3567     num_small_chunks = words_left / SmallChunk;
3568     words_left = words_left % SmallChunk;
3569     // how many specialized chunks can we get?
3570     num_specialized_chunks = words_left / SpecializedChunk;
3571     assert(words_left % SpecializedChunk == 0, "should be nothing left");
3572   }
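  // Worked example, assuming the chunk sizes defined above (in words):
  // with MediumChunk == 8K, SmallChunk == 512 and SpecializedChunk == 128,
  // words_left == 8K + 512 + 256 splits into 1 medium, 1 small and
  // 2 specialized chunks with nothing left over.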
3573 
3574  public:
3575   static void test() {
3576     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3577     const size_t vsn_test_size_words = MediumChunk  * 4;
3578     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3579 
3580     // The chunk sizes must be multiples of each other, or this will fail
3581     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3582     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3583 
3584     { // No committed memory in VSN
3585       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3586       VirtualSpaceNode vsn(vsn_test_size_bytes);
3587       vsn.initialize();
3588       vsn.retire(&cm);
3589       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3590     }
3591 
3592     { // All of VSN is committed, half is used by chunks
3593       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3594       VirtualSpaceNode vsn(vsn_test_size_bytes);
3595       vsn.initialize();
3596       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3597       vsn.get_chunk_vs(MediumChunk);
3598       vsn.get_chunk_vs(MediumChunk);
3599       vsn.retire(&cm);
3600       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3601       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3602     }
3603 
3604     { // 4 pages of the VSN are committed, some are used by chunks
3605       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3606       VirtualSpaceNode vsn(vsn_test_size_bytes);
3607       const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3608       assert(page_chunks < MediumChunk, "Test expects medium chunks to be at least 4*page_size");
3609       vsn.initialize();
3610       vsn.expand_by(page_chunks, page_chunks);
3611       vsn.get_chunk_vs(SmallChunk);
3612       vsn.get_chunk_vs(SpecializedChunk);
3613       vsn.retire(&cm);
3614 
3615       // committed - used = words left to retire
3616       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3617 
3618       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3619       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3620 
3621       assert(num_medium_chunks == 0, "should not get any medium chunks");
3622       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3623       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3624     }
3625 
3626     { // Half of VSN is committed, a humongous chunk is used
3627       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3628       VirtualSpaceNode vsn(vsn_test_size_bytes);
3629       vsn.initialize();
3630       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3631       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3632       vsn.retire(&cm);
3633 
3634       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3635       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3636       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3637 
3638       assert(num_medium_chunks == 0, "should not get any medium chunks");
3639       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3640       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3641     }
3642 
3643   }
3644 };
3645 
3646 void TestVirtualSpaceNode_test() {
3647   TestVirtualSpaceNodeTest::test();
3648 }
3649 
3650 #endif