/*
 * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"

typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;

// Set this constant to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;

// Parameters for stress mode testing
size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_class_metaspace_size;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K,
  HumongousChunkGranularity = 8
};
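
// For orientation (not used by the code): with 8-byte HotSpot words on a
// 64-bit VM, the sizes above are 1K bytes for a specialized chunk, 2K/4K
// bytes for a class/non-class small chunk, and 32K/64K bytes for a
// class/non-class medium chunk.  Requests larger than a medium chunk are
// treated as humongous.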

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bounds");
  return (ChunkIndex) (i+1);
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
class ChunkManager : public CHeapObj<mtInternal> {

  // Free lists of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  // Humongous chunks are kept in a dictionary rather than a linked list.
  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Running totals for all free chunks (in the lists and the humongous
  // dictionary) managed by this ChunkManager.
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
             _free_chunks_total > 0,
             "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // Allocate (remove) a chunk from the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  static ChunkIndex list_index(size_t size);

  // Remove the chunk from its freelist.  It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks lists
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks lists
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Remove from a list by size.  Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;
};
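
// Size-to-list mapping sketch (the authoritative logic is in
// ChunkManager::list_index()): each ChunkList holds chunks of exactly one
// size, the one passed to set_size() in the constructor above.  For the
// non-class ChunkManager that is 128, 512, and 8K words; any other size is
// handled by the humongous dictionary.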

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  BlockTreeDictionary* _dictionary;

  // Only allocate and split from freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get and return a block to the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() {
    if (dictionary() == NULL) {
      return 0;
    } else {
      return dictionary()->total_size();
    }
  }

  void print_on(outputStream* st) const;
};
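
// Worked example of the WasteMultiplier policy in get_block() below: a
// request for 100 words may be satisfied from a free block of up to 400
// words; anything bigger is put back rather than split, bounding the waste
// per allocation.  An unused tail that is itself large enough for the
// dictionary is returned as a new free block.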

// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // The whole region reserved for this VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return _top + word_size <= end(); }

  // Address of the next available space in _virtual_space.
  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uint container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;
  size_t free_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to VirtualSpace
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

#define assert_is_ptr_aligned(ptr, alignment) \
  assert(is_ptr_aligned(ptr, alignment),      \
    err_msg(PTR_FORMAT " is not aligned to "  \
      SIZE_FORMAT, ptr, alignment))

#define assert_is_size_aligned(size, alignment) \
  assert(is_size_aligned(size, alignment),      \
    err_msg(SIZE_FORMAT " is not aligned to "   \
       SIZE_FORMAT, size, alignment))


// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}
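
// Rationale, for reference: when the OS cannot commit large-page memory
// piecemeal (!os::can_commit_large_page_memory()), using large pages means
// committing the entire reservation up front.  The function above therefore
// answers true only if the GC policy would already permit expanding by the
// full requested amount.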

// bytes is the size of the associated virtual space.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // at a configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get a mmap region anywhere if the SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else {
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uint VirtualSpaceNode::container_count_slow() {
  uint count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  bool contains(const void *ptr);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};
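
// Typical iteration over the list, as used by the destructor and by
// contains() below:
//
//   VirtualSpaceListIterator iter(virtual_space_list());
//   while (iter.repeat()) {
//     VirtualSpaceNode* node = iter.get_next();
//     // ... use node ...
//   }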

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;

// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // Protects allocations and contains().
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Number of small chunks to allocate to a manager.
  // If this is a class space manager, small chunks are unlimited.
  static uint const _small_chunk_limit;

  // Sum of all space allocated as blocks (i.e. the used words)
  size_t _allocated_blocks_words;

  // Sum and count of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // Protects virtual space and chunk expansions.
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }

  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  bool is_class() { return _mdtype == Metaspace::ClassType; }

  // Accessors
  size_t specialized_chunk_size() { return SpecializedChunk; }
  size_t small_chunk_size()  { return is_class() ? (size_t) ClassSmallChunk : (size_t) SmallChunk; }
  size_t medium_chunk_size() { return is_class() ? (size_t) ClassMediumChunk : (size_t) MediumChunk; }
  size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks
  // by the given size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager. That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in-use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // calculate the chunk size to request (for expanding the
  // space available for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Report memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // Debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
  NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  size_t get_raw_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
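
  // Worked example for get_raw_word_size(), assuming a 64-bit VM (the exact
  // values of sizeof(Metablock) and Metachunk::object_alignment() are
  // platform dependent): a 1-word request becomes 8 bytes, is padded up to
  // sizeof(Metablock) so that a freed block can always be put back on the
  // block freelist, and is then rounded up to the chunk's object alignment
  // before being converted back to words.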
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count: _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT,
                 _container_count, container_count_slow()));
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
    err_msg("Inconsistency in container_count: _container_count " SIZE_FORMAT
            " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(NULL) {}

BlockFreelist::~BlockFreelist() {
  if (_dictionary != NULL) {
    if (Verbose && TraceMetadataChunkAllocation) {
      _dictionary->print_free_lists(gclog_or_tty);
    }
    delete _dictionary;
  }
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  Metablock* free_chunk = ::new (p) Metablock(word_size);
  if (dictionary() == NULL) {
    _dictionary = new BlockTreeDictionary();
  }
  dictionary()->return_chunk(free_chunk);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  if (dictionary() == NULL) {
    return NULL;
  }

  if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
    // Dark matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= TreeChunk<Metablock, FreeList>::min_size()) {
    return_block(new_block + word_size, unused);
  }

  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  if (dictionary() == NULL) {
    return;
  }
  dictionary()->print_free_lists(st);
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
      "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    if (TraceMetadataChunkAllocation) {
      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(gclog_or_tty);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}
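
// Layout note: chunks are carved off a node simply by bumping _top, so the
// committed part of a node is a dense sequence of variably sized Metachunks:
// [bottom(), top()) has been handed out and [top(), end()) is still
// available.  This is what allows purge() and container_count_slow() to walk
// every chunk by striding from one chunk header to the next.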


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment().  If both base and size are
  // aligned, only the middle alignment of the VirtualSpace is used.
  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed.  Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                            Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
        "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                 (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
      err_msg("Reserved start was not set properly " PTR_FORMAT
        " != " PTR_FORMAT, reserved()->start(), _rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
      err_msg("Reserved size was not set properly " SIZE_FORMAT
        " != " SIZE_FORMAT, reserved()->word_size(),
        _rs.size() / BytesPerWord));
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
           "[" PTR_FORMAT ", " PTR_FORMAT ", "
           PTR_FORMAT ", " PTR_FORMAT ")",
           vs, capacity / K,
           capacity == 0 ? 0 : used * 100 / capacity,
           bottom(), top(), end(),
           vs->high_boundary());
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                             \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,      \
      err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
              " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
          MetaspaceAux::committed_bytes(), MaxMetaspaceSize));

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->word_size());
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
        "Reserved memory size differs from requested memory size");
    // Ensure lock-free iteration sees a fully initialized node.
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (TraceMetavirtualspaceAllocation && Verbose) {
    VirtualSpaceNode* vsl = current_virtual_space();
    vsl->print_on(gclog_or_tty);
  }
}

bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
          "The new VirtualSpace was pre-committed, so it"
          " should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}
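
// Expansion policy sketch: first try to commit between min_words and
// MIN2(preferred_words, MetaspaceGC::allowed_expansion()) in the current
// node; only if that fails is a new node reserved, sized at least
// VirtualSpaceSize (256K words) and rounded up to the reserve alignment.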

Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  if (TraceMetadataChunkAllocation && Verbose) {
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* node = iter.get_next();
      node->print_on(st);
    }
  }
}

bool VirtualSpaceList::contains(const void *ptr) {
  VirtualSpaceNode* list = virtual_space_list();
  VirtualSpaceListIterator iter(list);
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    if (node->reserved()->contains(ptr)) {
      return true;
    }
  }
  return false;
}


// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtual space will be
// allocated for the metadata.  With perm gen, the increase in the perm
// gen size was bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion;
// the metaspace policy uses those flags as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_size_aligned(delta, Metaspace::commit_alignment());

  return delta;
}
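
// Worked example, assuming the default MinMetaspaceExpansion (256K) and
// MaxMetaspaceExpansion (4M) values: a 1K request yields a 256K delta, a
// 1M request yields a 4M delta, and an 8M request (already larger than the
// maximum step) yields 8M + 256K.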

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceAux::committed_bytes();

  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;

  // Always grant expansion if we are initializing the JVM,
  // or if the GC_locker is preventing GCs.
  if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) {
    return left_until_max / BytesPerWord;
  }

  size_t capacity_until_gc = capacity_until_GC();

  if (capacity_until_gc <= committed_bytes) {
    return 0;
  }

  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);

  return left_to_commit / BytesPerWord;
}
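
// Example: with 60M committed, a 100M high water mark (capacity_until_GC)
// and MaxMetaspaceSize at 256M, expansion is capped at
// MIN2(100M - 60M, 256M - 60M) = 40M worth of words.  Once the committed
// size reaches the HWM this returns 0, and a GC (which may raise the HWM)
// is needed before further expansion.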

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                  "  minimum_free_percentage: %6.2f"
                  "  maximum_used_percentage: %6.2f",
                  minimum_free_percentage,
                  maximum_used_percentage);
    gclog_or_tty->print_cr("  "
                  "   used_after_gc       : %6.1fKB",
                  used_after_gc / (double) K);
  }


  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
    size_t new_capacity_until_GC = capacity_until_GC;
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      new_capacity_until_GC = MetaspaceGC::inc_capacity_until_GC(expand_bytes);
    }
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("    expanding:"
                    "  minimum_desired_capacity: %6.1fKB"
                    "  expand_bytes: %6.1fKB"
                    "  MinMetaspaceExpansion: %6.1fKB"
                    "  new metaspace HWM:  %6.1fKB",
                    minimum_desired_capacity / (double) K,
                    expand_bytes / (double) K,
                    MinMetaspaceExpansion / (double) K,
                    new_capacity_until_GC / (double) K);
    }
    return;
  }
1423 
1424   // No expansion, now see if we want to shrink
1425   // We would never want to shrink more than this
1426   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1427   assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
1428     max_shrink_bytes));
1429 
1430   // Should shrinking be considered?
1431   if (MaxMetaspaceFreeRatio < 100) {
1432     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1433     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1434     const double max_tmp = used_after_gc / minimum_used_percentage;
1435     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1436     maximum_desired_capacity = MAX2(maximum_desired_capacity,
1437                                     MetaspaceSize);
1438     if (PrintGCDetails && Verbose) {
1439       gclog_or_tty->print_cr("  "
1440                              "  maximum_free_percentage: %6.2f"
1441                              "  minimum_used_percentage: %6.2f",
1442                              maximum_free_percentage,
1443                              minimum_used_percentage);
1444       gclog_or_tty->print_cr("  "
1445                              "  minimum_desired_capacity: %6.1fKB"
1446                              "  maximum_desired_capacity: %6.1fKB",
1447                              minimum_desired_capacity / (double) K,
1448                              maximum_desired_capacity / (double) K);
1449     }
1450 
1451     assert(minimum_desired_capacity <= maximum_desired_capacity,
1452            "sanity check");
1453 
1454     if (capacity_until_GC > maximum_desired_capacity) {
1455       // Capacity too large, compute shrinking size
1456       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1457       // We don't want shrink all the way back to initSize if people call
1458       // System.gc(), because some programs do that between "phases" and then
1459       // we'd just have to grow the heap up again for the next phase.  So we
1460       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1461       // on the third call, and 100% by the fourth call.  But if we recompute
1462       // size without shrinking, it goes back to 0%.
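      // Illustrative arithmetic (before alignment and the minimum-expansion
      // gate below): with an excess of 1000 bytes over
      // maximum_desired_capacity, the first call shrinks by 0 (factor 0),
      // the second by 100 (factor 10), the third by 400 of the remaining
      // excess (factor 40), and the fourth and later calls release the
      // entire excess (factor capped at 100).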
1463       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1464 
1465       shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1466 
1467       assert(shrink_bytes <= max_shrink_bytes,
1468         err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1469           shrink_bytes, max_shrink_bytes));
1470       if (current_shrink_factor == 0) {
1471         _shrink_factor = 10;
1472       } else {
1473         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1474       }
1475       if (PrintGCDetails && Verbose) {
1476         gclog_or_tty->print_cr("  "
1477                       "  shrinking:"
1478                       "  initSize: %.1fK"
1479                       "  maximum_desired_capacity: %.1fK",
1480                       MetaspaceSize / (double) K,
1481                       maximum_desired_capacity / (double) K);
1482         gclog_or_tty->print_cr("  "
1483                       "  shrink_bytes: %.1fK"
1484                       "  current_shrink_factor: %d"
1485                       "  new shrink factor: %d"
1486                       "  MinMetaspaceExpansion: %.1fK",
1487                       shrink_bytes / (double) K,
1488                       current_shrink_factor,
1489                       _shrink_factor,
1490                       MinMetaspaceExpansion / (double) K);
1491       }
1492     }
1493   }
1494 
1495   // Don't shrink unless it's significant
1496   if (shrink_bytes >= MinMetaspaceExpansion &&
1497       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1498     MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1499   }
1500 }
1501 
1502 // Metadebug methods
1503 
1504 void Metadebug::init_allocation_fail_alot_count() {
1505   if (MetadataAllocationFailALot) {
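    // os::random() yields a non-negative pseudo-random value below max_jint,
    // so the expression below is an approximately uniform draw from
    // [1, MetadataAllocationFailALotInterval].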
1506     _allocation_fail_alot_count =
1507       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1508   }
1509 }
1510 
1511 #ifdef ASSERT
1512 bool Metadebug::test_metadata_failure() {
1513   if (MetadataAllocationFailALot &&
1514       Threads::is_vm_complete()) {
1515     if (_allocation_fail_alot_count > 0) {
1516       _allocation_fail_alot_count--;
1517     } else {
1518       if (TraceMetadataChunkAllocation && Verbose) {
1519         gclog_or_tty->print_cr("Metadata allocation failing for "
1520                                "MetadataAllocationFailALot");
1521       }
1522       init_allocation_fail_alot_count();
1523       return true;
1524     }
1525   }
1526   return false;
1527 }
1528 #endif
1529 
1530 // ChunkManager methods
1531 
1532 size_t ChunkManager::free_chunks_total_words() {
1533   return _free_chunks_total;
1534 }
1535 
1536 size_t ChunkManager::free_chunks_total_bytes() {
1537   return free_chunks_total_words() * BytesPerWord;
1538 }
1539 
1540 size_t ChunkManager::free_chunks_count() {
1541 #ifdef ASSERT
1542   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1543     MutexLockerEx cl(SpaceManager::expand_lock(),
1544                      Mutex::_no_safepoint_check_flag);
1545     // This lock is only needed in debug because the verification
1546     // of the _free_chunks_totals walks the list of free chunks
1547     slow_locked_verify_free_chunks_count();
1548   }
1549 #endif
1550   return _free_chunks_count;
1551 }
1552 
1553 void ChunkManager::locked_verify_free_chunks_total() {
1554   assert_lock_strong(SpaceManager::expand_lock());
1555   assert(sum_free_chunks() == _free_chunks_total,
1556     err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
1557            " same as sum " SIZE_FORMAT, _free_chunks_total,
1558            sum_free_chunks()));
1559 }
1560 
1561 void ChunkManager::verify_free_chunks_total() {
1562   MutexLockerEx cl(SpaceManager::expand_lock(),
1563                      Mutex::_no_safepoint_check_flag);
1564   locked_verify_free_chunks_total();
1565 }
1566 
1567 void ChunkManager::locked_verify_free_chunks_count() {
1568   assert_lock_strong(SpaceManager::expand_lock());
1569   assert(sum_free_chunks_count() == _free_chunks_count,
1570     err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
1571            " same as sum " SIZE_FORMAT, _free_chunks_count,
1572            sum_free_chunks_count()));
1573 }
1574 
1575 void ChunkManager::verify_free_chunks_count() {
1576 #ifdef ASSERT
1577   MutexLockerEx cl(SpaceManager::expand_lock(),
1578                      Mutex::_no_safepoint_check_flag);
1579   locked_verify_free_chunks_count();
1580 #endif
1581 }
1582 
1583 void ChunkManager::verify() {
1584   MutexLockerEx cl(SpaceManager::expand_lock(),
1585                      Mutex::_no_safepoint_check_flag);
1586   locked_verify();
1587 }
1588 
1589 void ChunkManager::locked_verify() {
1590   locked_verify_free_chunks_count();
1591   locked_verify_free_chunks_total();
1592 }
1593 
1594 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1595   assert_lock_strong(SpaceManager::expand_lock());
1596   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1597                 _free_chunks_total, _free_chunks_count);
1598 }
1599 
1600 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1601   assert_lock_strong(SpaceManager::expand_lock());
1602   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1603                 sum_free_chunks(), sum_free_chunks_count());
}

ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1606   return &_free_chunks[index];
1607 }
1608 
// These methods, which sum the free chunk lists, are used by printing
// methods that run in product builds.
1611 size_t ChunkManager::sum_free_chunks() {
1612   assert_lock_strong(SpaceManager::expand_lock());
1613   size_t result = 0;
1614   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1615     ChunkList* list = free_chunks(i);
1616 
1617     if (list == NULL) {
1618       continue;
1619     }
1620 
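    // All chunks on a given free list have the same size, so the list's
    // total footprint is simply count * size.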
1621     result = result + list->count() * list->size();
1622   }
1623   result = result + humongous_dictionary()->total_size();
1624   return result;
1625 }
1626 
1627 size_t ChunkManager::sum_free_chunks_count() {
1628   assert_lock_strong(SpaceManager::expand_lock());
1629   size_t count = 0;
1630   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1631     ChunkList* list = free_chunks(i);
1632     if (list == NULL) {
1633       continue;
1634     }
1635     count = count + list->count();
1636   }
1637   count = count + humongous_dictionary()->total_free_blocks();
1638   return count;
1639 }
1640 
1641 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1642   ChunkIndex index = list_index(word_size);
1643   assert(index < HumongousIndex, "No humongous list");
1644   return free_chunks(index);
1645 }
1646 
1647 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1648   assert_lock_strong(SpaceManager::expand_lock());
1649 
1650   slow_locked_verify();
1651 
1652   Metachunk* chunk = NULL;
1653   if (list_index(word_size) != HumongousIndex) {
1654     ChunkList* free_list = find_free_chunks_list(word_size);
1655     assert(free_list != NULL, "Sanity check");
1656 
1657     chunk = free_list->head();
1658 
1659     if (chunk == NULL) {
1660       return NULL;
1661     }
1662 
1663     // Remove the chunk as the head of the list.
1664     free_list->remove_chunk(chunk);
1665 
1666     if (TraceMetadataChunkAllocation && Verbose) {
1667       gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1668                              PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1669                              free_list, chunk, chunk->word_size());
1670     }
1671   } else {
1672     chunk = humongous_dictionary()->get_chunk(
1673       word_size,
1674       FreeBlockDictionary<Metachunk>::atLeast);
1675 
1676     if (chunk == NULL) {
1677       return NULL;
1678     }
1679 
1680     if (TraceMetadataHumongousAllocation) {
1681       size_t waste = chunk->word_size() - word_size;
1682       gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1683                              SIZE_FORMAT " for requested size " SIZE_FORMAT
1684                              " waste " SIZE_FORMAT,
1685                              chunk->word_size(), word_size, waste);
1686     }
1687   }
1688 
1689   // Chunk is being removed from the chunks free list.
1690   dec_free_chunks_total(chunk->word_size());
1691 
1692   // Remove it from the links to this freelist
1693   chunk->set_next(NULL);
1694   chunk->set_prev(NULL);
1695 #ifdef ASSERT
  // Chunk is no longer on any freelist. Setting this to false makes
  // container_count_slow() work.
1698   chunk->set_is_tagged_free(false);
1699 #endif
1700   chunk->container()->inc_container_count();
1701 
1702   slow_locked_verify();
1703   return chunk;
1704 }
1705 
1706 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1707   assert_lock_strong(SpaceManager::expand_lock());
1708   slow_locked_verify();
1709 
1710   // Take from the beginning of the list
1711   Metachunk* chunk = free_chunks_get(word_size);
1712   if (chunk == NULL) {
1713     return NULL;
1714   }
1715 
  assert((word_size <= chunk->word_size()) ||
         (list_index(chunk->word_size()) == HumongousIndex),
         "Non-humongous variable sized chunk");
1719   if (TraceMetadataChunkAllocation) {
1720     size_t list_count;
1721     if (list_index(word_size) < HumongousIndex) {
1722       ChunkList* list = find_free_chunks_list(word_size);
1723       list_count = list->count();
1724     } else {
1725       list_count = humongous_dictionary()->total_count();
1726     }
1727     gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1728                         PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1729                         this, chunk, chunk->word_size(), list_count);
1730     locked_print_free_chunks(gclog_or_tty);
1731   }
1732 
1733   return chunk;
1734 }
1735 
1736 void ChunkManager::print_on(outputStream* out) const {
1737   if (PrintFLSStatistics != 0) {
1738     const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
1739   }
1740 }
1741 
1742 // SpaceManager methods
1743 
1744 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1745                                            size_t* chunk_word_size,
1746                                            size_t* class_chunk_word_size) {
1747   switch (type) {
1748   case Metaspace::BootMetaspaceType:
1749     *chunk_word_size = Metaspace::first_chunk_word_size();
1750     *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1751     break;
1752   case Metaspace::ROMetaspaceType:
1753     *chunk_word_size = SharedReadOnlySize / wordSize;
1754     *class_chunk_word_size = ClassSpecializedChunk;
1755     break;
1756   case Metaspace::ReadWriteMetaspaceType:
1757     *chunk_word_size = SharedReadWriteSize / wordSize;
1758     *class_chunk_word_size = ClassSpecializedChunk;
1759     break;
1760   case Metaspace::AnonymousMetaspaceType:
1761   case Metaspace::ReflectionMetaspaceType:
1762     *chunk_word_size = SpecializedChunk;
1763     *class_chunk_word_size = ClassSpecializedChunk;
1764     break;
1765   default:
1766     *chunk_word_size = SmallChunk;
1767     *class_chunk_word_size = ClassSmallChunk;
1768     break;
1769   }
1770   assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
    err_msg("Initial chunk sizes bad: data  " SIZE_FORMAT
1772             " class " SIZE_FORMAT,
1773             *chunk_word_size, *class_chunk_word_size));
1774 }
1775 
1776 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1777   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1778   size_t free = 0;
1779   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1780     Metachunk* chunk = chunks_in_use(i);
1781     while (chunk != NULL) {
1782       free += chunk->free_word_size();
1783       chunk = chunk->next();
1784     }
1785   }
1786   return free;
1787 }
1788 
1789 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1790   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1791   size_t result = 0;
1792   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1793    result += sum_waste_in_chunks_in_use(i);
1794   }
1795 
1796   return result;
1797 }
1798 
1799 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1800   size_t result = 0;
1801   Metachunk* chunk = chunks_in_use(index);
  // Count the free space in all the chunks but not the
  // current chunk from which allocations are still being done.
1804   while (chunk != NULL) {
1805     if (chunk != current_chunk()) {
1806       result += chunk->free_word_size();
1807     }
1808     chunk = chunk->next();
1809   }
1810   return result;
1811 }
1812 
1813 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
  // For CMS use "allocated_chunks_words()" which does not need the
  // Metaspace lock.  For the other collectors sum over the
  // chunks-in-use lists.  sum_capacity_in_chunks_in_use() is too
  // expensive to use in the product, so allocated_chunks_words()
  // should be used instead, but summing here allows checking that
  // allocated_chunks_words() returns the same value as this
  // definitive answer.
1822   if (UseConcMarkSweepGC) {
1823     return allocated_chunks_words();
1824   } else {
1825     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1826     size_t sum = 0;
1827     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1828       Metachunk* chunk = chunks_in_use(i);
1829       while (chunk != NULL) {
1830         sum += chunk->word_size();
1831         chunk = chunk->next();
1832       }
1833     }
    return sum;
1835   }
1836 }
1837 
1838 size_t SpaceManager::sum_count_in_chunks_in_use() {
1839   size_t count = 0;
1840   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1841     count = count + sum_count_in_chunks_in_use(i);
1842   }
1843 
1844   return count;
1845 }
1846 
1847 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
1848   size_t count = 0;
1849   Metachunk* chunk = chunks_in_use(i);
1850   while (chunk != NULL) {
1851     count++;
1852     chunk = chunk->next();
1853   }
1854   return count;
1855 }
1856 
1857 
1858 size_t SpaceManager::sum_used_in_chunks_in_use() const {
1859   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1860   size_t used = 0;
1861   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1862     Metachunk* chunk = chunks_in_use(i);
1863     while (chunk != NULL) {
1864       used += chunk->used_word_size();
1865       chunk = chunk->next();
1866     }
1867   }
1868   return used;
1869 }
1870 
1871 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
1872 
1873   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1874     Metachunk* chunk = chunks_in_use(i);
1875     st->print("SpaceManager: %s " PTR_FORMAT,
1876                  chunk_size_name(i), chunk);
1877     if (chunk != NULL) {
1878       st->print_cr(" free " SIZE_FORMAT,
1879                    chunk->free_word_size());
1880     } else {
1881       st->print_cr("");
1882     }
1883   }
1884 
1885   chunk_manager()->locked_print_free_chunks(st);
1886   chunk_manager()->locked_print_sum_free_chunks(st);
1887 }
1888 
1889 size_t SpaceManager::calc_chunk_size(size_t word_size) {
1890 
1891   // Decide between a small chunk and a medium chunk.  Up to
1892   // _small_chunk_limit small chunks can be allocated but
1893   // once a medium chunk has been allocated, no more small
1894   // chunks will be allocated.
1895   size_t chunk_word_size;
1896   if (chunks_in_use(MediumIndex) == NULL &&
1897       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
1898     chunk_word_size = (size_t) small_chunk_size();
1899     if (word_size + Metachunk::overhead() > small_chunk_size()) {
1900       chunk_word_size = medium_chunk_size();
1901     }
1902   } else {
1903     chunk_word_size = medium_chunk_size();
1904   }
1905 
1906   // Might still need a humongous chunk.  Enforce an
1907   // eight word granularity to facilitate reuse (some
1908   // wastage but better chance of reuse).
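  // For example, a request whose word_size plus overhead comes to 1001
  // words is rounded up to 1008 words here, the next multiple of the
  // eight word granularity.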
1909   size_t if_humongous_sized_chunk =
1910     align_size_up(word_size + Metachunk::overhead(),
1911                   HumongousChunkGranularity);
1912   chunk_word_size =
1913     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
1914 
1915   assert(!SpaceManager::is_humongous(word_size) ||
1916          chunk_word_size == if_humongous_sized_chunk,
1917          err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
1918                  " chunk_word_size " SIZE_FORMAT,
1919                  word_size, chunk_word_size));
1920   if (TraceMetadataHumongousAllocation &&
1921       SpaceManager::is_humongous(word_size)) {
1922     gclog_or_tty->print_cr("Metadata humongous allocation:");
    gclog_or_tty->print_cr("  word_size " SIZE_FORMAT, word_size);
    gclog_or_tty->print_cr("  chunk_word_size " SIZE_FORMAT,
                           chunk_word_size);
    gclog_or_tty->print_cr("    chunk overhead " SIZE_FORMAT,
                           Metachunk::overhead());
1928   }
1929   return chunk_word_size;
1930 }
1931 
1932 void SpaceManager::track_metaspace_memory_usage() {
1933   if (is_init_completed()) {
1934     if (is_class()) {
1935       MemoryService::track_compressed_class_memory_usage();
1936     }
1937     MemoryService::track_metaspace_memory_usage();
1938   }
1939 }
1940 
1941 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
1942   assert(vs_list()->current_virtual_space() != NULL,
1943          "Should have been set");
1944   assert(current_chunk() == NULL ||
1945          current_chunk()->allocate(word_size) == NULL,
1946          "Don't need to expand");
1947   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
1948 
1949   if (TraceMetadataChunkAllocation && Verbose) {
1950     size_t words_left = 0;
1951     size_t words_used = 0;
1952     if (current_chunk() != NULL) {
1953       words_left = current_chunk()->free_word_size();
1954       words_used = current_chunk()->used_word_size();
1955     }
1956     gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
1957                            " words " SIZE_FORMAT " words used " SIZE_FORMAT
1958                            " words left",
1959                             word_size, words_used, words_left);
1960   }
1961 
1962   // Get another chunk out of the virtual space
1963   size_t grow_chunks_by_words = calc_chunk_size(word_size);
1964   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
1965 
1966   MetaWord* mem = NULL;
1967 
1968   // If a chunk was available, add it to the in-use chunk list
1969   // and do an allocation from it.
1970   if (next != NULL) {
1971     // Add to this manager's list of chunks in use.
1972     add_chunk(next, false);
1973     mem = next->allocate(word_size);
1974   }
1975 
1976   // Track metaspace memory usage statistic.
1977   track_metaspace_memory_usage();
1978 
1979   return mem;
1980 }
1981 
1982 void SpaceManager::print_on(outputStream* st) const {
1983 
1984   for (ChunkIndex i = ZeroIndex;
1985        i < NumberOfInUseLists ;
1986        i = next_chunk_index(i) ) {
1987     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
1988                  chunks_in_use(i),
1989                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
1990   }
1991   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
1992                " Humongous " SIZE_FORMAT,
1993                sum_waste_in_chunks_in_use(SmallIndex),
1994                sum_waste_in_chunks_in_use(MediumIndex),
1995                sum_waste_in_chunks_in_use(HumongousIndex));
1996   // block free lists
1997   if (block_freelists() != NULL) {
1998     st->print_cr("total in block free lists " SIZE_FORMAT,
1999       block_freelists()->total_size());
2000   }
2001 }
2002 
2003 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2004                            Mutex* lock) :
2005   _mdtype(mdtype),
2006   _allocated_blocks_words(0),
2007   _allocated_chunks_words(0),
2008   _allocated_chunks_count(0),
2009   _lock(lock)
2010 {
2011   initialize();
2012 }
2013 
2014 void SpaceManager::inc_size_metrics(size_t words) {
2015   assert_lock_strong(SpaceManager::expand_lock());
  // Per-SpaceManager running totals of the words in allocated Metachunks
  // and of the number of allocated Metachunks
2018   _allocated_chunks_words = _allocated_chunks_words + words;
2019   _allocated_chunks_count++;
2020   // Global total of capacity in allocated Metachunks
2021   MetaspaceAux::inc_capacity(mdtype(), words);
2022   // Global total of allocated Metablocks.
2023   // used_words_slow() includes the overhead in each
2024   // Metachunk so include it in the used when the
2025   // Metachunk is first added (so only added once per
2026   // Metachunk).
2027   MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2028 }
2029 
2030 void SpaceManager::inc_used_metrics(size_t words) {
2031   // Add to the per SpaceManager total
2032   Atomic::add_ptr(words, &_allocated_blocks_words);
2033   // Add to the global total
2034   MetaspaceAux::inc_used(mdtype(), words);
2035 }
2036 
2037 void SpaceManager::dec_total_from_size_metrics() {
2038   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2039   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2040   // Also deduct the overhead per Metachunk
2041   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2042 }
2043 
2044 void SpaceManager::initialize() {
2045   Metadebug::init_allocation_fail_alot_count();
2046   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2047     _chunks_in_use[i] = NULL;
2048   }
2049   _current_chunk = NULL;
2050   if (TraceMetadataChunkAllocation && Verbose) {
2051     gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
2052   }
2053 }
2054 
2055 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
2056   if (chunks == NULL) {
2057     return;
2058   }
2059   ChunkList* list = free_chunks(index);
2060   assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
2061   assert_lock_strong(SpaceManager::expand_lock());
2062   Metachunk* cur = chunks;
2063 
  // This returns chunks one at a time.  If a new class List could be
  // created as a base class of FreeList, then something like
  // FreeList::prepend() could be used in place of this loop
2068   while (cur != NULL) {
2069     assert(cur->container() != NULL, "Container should have been set");
2070     cur->container()->dec_container_count();
2071     // Capture the next link before it is changed
2072     // by the call to return_chunk_at_head();
2073     Metachunk* next = cur->next();
2074     DEBUG_ONLY(cur->set_is_tagged_free(true);)
2075     list->return_chunk_at_head(cur);
2076     cur = next;
2077   }
2078 }
2079 
2080 SpaceManager::~SpaceManager() {
  // This call takes this->_lock, which can't be done while holding the expand_lock()
2082   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2083     err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2084             " allocated_chunks_words() " SIZE_FORMAT,
2085             sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
2086 
2087   MutexLockerEx fcl(SpaceManager::expand_lock(),
2088                     Mutex::_no_safepoint_check_flag);
2089 
2090   chunk_manager()->slow_locked_verify();
2091 
2092   dec_total_from_size_metrics();
2093 
2094   if (TraceMetadataChunkAllocation && Verbose) {
2095     gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
2096     locked_print_chunks_in_use_on(gclog_or_tty);
2097   }
2098 
  // Do not mangle freed Metachunks.  The chunk size inside Metachunks
  // is still read during the freeing of VirtualSpaceNodes.
2101 
2102   // Have to update before the chunks_in_use lists are emptied
2103   // below.
2104   chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2105                                          sum_count_in_chunks_in_use());
2106 
2107   // Add all the chunks in use by this space manager
2108   // to the global list of free chunks.
2109 
2110   // Follow each list of chunks-in-use and add them to the
2111   // free lists.  Each list is NULL terminated.
2112 
2113   for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2114     if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s chunks to freelist",
                             sum_count_in_chunks_in_use(i),
                             chunk_size_name(i));
2118     }
2119     Metachunk* chunks = chunks_in_use(i);
2120     chunk_manager()->return_chunks(i, chunks);
2121     set_chunks_in_use(i, NULL);
2122     if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print_cr("updated freelist count " SSIZE_FORMAT " %s",
                             chunk_manager()->free_chunks(i)->count(),
                             chunk_size_name(i));
2126     }
2127     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2128   }
2129 
2130   // The medium chunk case may be optimized by passing the head and
2131   // tail of the medium chunk list to add_at_head().  The tail is often
2132   // the current chunk but there are probably exceptions.
2133 
2134   // Humongous chunks
2135   if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s humongous chunks to dictionary",
                            sum_count_in_chunks_in_use(HumongousIndex),
                            chunk_size_name(HumongousIndex));
2139     gclog_or_tty->print("Humongous chunk dictionary: ");
2140   }
2141   // Humongous chunks are never the current chunk.
2142   Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2143 
2144   while (humongous_chunks != NULL) {
2145 #ifdef ASSERT
2146     humongous_chunks->set_is_tagged_free(true);
2147 #endif
2148     if (TraceMetadataChunkAllocation && Verbose) {
2149       gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2150                           humongous_chunks,
2151                           humongous_chunks->word_size());
2152     }
2153     assert(humongous_chunks->word_size() == (size_t)
2154            align_size_up(humongous_chunks->word_size(),
2155                              HumongousChunkGranularity),
2156            err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2157                    " granularity %d",
2158                    humongous_chunks->word_size(), HumongousChunkGranularity));
2159     Metachunk* next_humongous_chunks = humongous_chunks->next();
2160     humongous_chunks->container()->dec_container_count();
2161     chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2162     humongous_chunks = next_humongous_chunks;
2163   }
2164   if (TraceMetadataChunkAllocation && Verbose) {
2165     gclog_or_tty->print_cr("");
    gclog_or_tty->print_cr("updated dictionary count " SIZE_FORMAT " %s",
                     chunk_manager()->humongous_dictionary()->total_count(),
                     chunk_size_name(HumongousIndex));
2169   }
2170   chunk_manager()->slow_locked_verify();
2171 }
2172 
2173 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2174   switch (index) {
2175     case SpecializedIndex:
2176       return "Specialized";
2177     case SmallIndex:
2178       return "Small";
2179     case MediumIndex:
2180       return "Medium";
2181     case HumongousIndex:
2182       return "Humongous";
2183     default:
2184       return NULL;
2185   }
2186 }
2187 
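// Map a chunk size to its free list index.  Only the exact specialized,
// small and medium sizes have dedicated free lists; every other size is
// treated as humongous, and free humongous chunks are kept in a dictionary
// keyed by size rather than on a fixed-size list.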
2188 ChunkIndex ChunkManager::list_index(size_t size) {
2189   switch (size) {
2190     case SpecializedChunk:
2191       assert(SpecializedChunk == ClassSpecializedChunk,
2192              "Need branch for ClassSpecializedChunk");
2193       return SpecializedIndex;
2194     case SmallChunk:
2195     case ClassSmallChunk:
2196       return SmallIndex;
2197     case MediumChunk:
2198     case ClassMediumChunk:
2199       return MediumIndex;
2200     default:
2201       assert(size > MediumChunk || size > ClassMediumChunk,
2202              "Not a humongous chunk");
2203       return HumongousIndex;
2204   }
2205 }
2206 
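// Deallocated blocks are not returned to their chunk; they go to the
// per-SpaceManager block freelists, from which later allocations of a
// suitable size may be satisfied.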
2207 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2208   assert_lock_strong(_lock);
2209   size_t raw_word_size = get_raw_word_size(word_size);
2210   size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
2211   assert(raw_word_size >= min_size,
2212          err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
2213   block_freelists()->return_block(p, raw_word_size);
2214 }
2215 
2216 // Adds a chunk to the list of chunks in use.
2217 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2218 
2219   assert(new_chunk != NULL, "Should not be NULL");
2220   assert(new_chunk->next() == NULL, "Should not be on a list");
2221 
2222   new_chunk->reset_empty();
2223 
  // Find the correct list and set the current
2225   // chunk for that list.
2226   ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2227 
2228   if (index != HumongousIndex) {
2229     retire_current_chunk();
2230     set_current_chunk(new_chunk);
2231     new_chunk->set_next(chunks_in_use(index));
2232     set_chunks_in_use(index, new_chunk);
2233   } else {
2234     // For null class loader data and DumpSharedSpaces, the first chunk isn't
2235     // small, so small will be null.  Link this first chunk as the current
2236     // chunk.
2237     if (make_current) {
2238       // Set as the current chunk but otherwise treat as a humongous chunk.
2239       set_current_chunk(new_chunk);
2240     }
    // Link at head.  The _current_chunk only points to a humongous chunk for
    // the null class loader metaspace (class and data virtual space managers).
    // Since new humongous chunks are linked at the head, _current_chunk
    // will not point to the tail of the humongous chunks list.
2245     new_chunk->set_next(chunks_in_use(HumongousIndex));
2246     set_chunks_in_use(HumongousIndex, new_chunk);
2247 
2248     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2249   }
2250 
2251   // Add to the running sum of capacity
2252   inc_size_metrics(new_chunk->word_size());
2253 
2254   assert(new_chunk->is_empty(), "Not ready for reuse");
2255   if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print("SpaceManager::add_chunk: " SIZE_FORMAT ") ",
                        sum_count_in_chunks_in_use());
2258     new_chunk->print_on(gclog_or_tty);
2259     chunk_manager()->locked_print_free_chunks(gclog_or_tty);
2260   }
2261 }
2262 
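// When a new chunk becomes current, hand the unused tail of the old current
// chunk to the block freelists so it can still satisfy small allocations.
// Remainders smaller than TreeChunk<Metablock, FreeList>::min_size() cannot
// be tracked by the freelists and are left unused in the chunk.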
2263 void SpaceManager::retire_current_chunk() {
2264   if (current_chunk() != NULL) {
2265     size_t remaining_words = current_chunk()->free_word_size();
2266     if (remaining_words >= TreeChunk<Metablock, FreeList>::min_size()) {
2267       block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2268       inc_used_metrics(remaining_words);
2269     }
2270   }
2271 }
2272 
2273 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2274                                        size_t grow_chunks_by_words) {
2275   // Get a chunk from the chunk freelist
2276   Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2277 
2278   if (next == NULL) {
2279     next = vs_list()->get_new_chunk(word_size,
2280                                     grow_chunks_by_words,
2281                                     medium_chunk_bunch());
2282   }
2283 
2284   if (TraceMetadataHumongousAllocation && next != NULL &&
2285       SpaceManager::is_humongous(next->word_size())) {
    gclog_or_tty->print_cr("  new humongous chunk word size "
                           SIZE_FORMAT, next->word_size());
2288   }
2289 
2290   return next;
2291 }
2292 
2293 MetaWord* SpaceManager::allocate(size_t word_size) {
2294   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2295 
2296   size_t raw_word_size = get_raw_word_size(word_size);
2297   BlockFreelist* fl =  block_freelists();
2298   MetaWord* p = NULL;
2299   // Allocation from the dictionary is expensive in the sense that
2300   // the dictionary has to be searched for a size.  Don't allocate
2301   // from the dictionary until it starts to get fat.  Is this
  // a reasonable policy?  Maybe a skinny dictionary is fast enough
2303   // for allocations.  Do some profiling.  JJJ
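  // Until the freelists grow past allocation_from_dictionary_limit, every
  // request therefore goes straight to the bump-pointer path in allocate_work().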
2304   if (fl->total_size() > allocation_from_dictionary_limit) {
2305     p = fl->get_block(raw_word_size);
2306   }
2307   if (p == NULL) {
2308     p = allocate_work(raw_word_size);
2309   }
2310 
2311   return p;
2312 }
2313 
// Returns the address of the space allocated for "word_size".
// This method does not know about blocks (Metablocks)
2316 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2317   assert_lock_strong(_lock);
2318 #ifdef ASSERT
2319   if (Metadebug::test_metadata_failure()) {
2320     return NULL;
2321   }
2322 #endif
2323   // Is there space in the current chunk?
2324   MetaWord* result = NULL;
2325 
2326   // For DumpSharedSpaces, only allocate out of the current chunk which is
2327   // never null because we gave it the size we wanted.   Caller reports out
2328   // of memory if this returns null.
2329   if (DumpSharedSpaces) {
2330     assert(current_chunk() != NULL, "should never happen");
2331     inc_used_metrics(word_size);
2332     return current_chunk()->allocate(word_size); // caller handles null result
2333   }
2334 
2335   if (current_chunk() != NULL) {
2336     result = current_chunk()->allocate(word_size);
2337   }
2338 
2339   if (result == NULL) {
2340     result = grow_and_allocate(word_size);
2341   }
2342 
2343   if (result != NULL) {
2344     inc_used_metrics(word_size);
2345     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2346            "Head of the list is being allocated");
2347   }
2348 
2349   return result;
2350 }
2351 
2352 void SpaceManager::verify() {
2353   // If there are blocks in the dictionary, then
  // verification of chunks does not work since
2355   // being in the dictionary alters a chunk.
2356   if (block_freelists()->total_size() == 0) {
2357     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2358       Metachunk* curr = chunks_in_use(i);
2359       while (curr != NULL) {
2360         curr->verify();
2361         verify_chunk_size(curr);
2362         curr = curr->next();
2363       }
2364     }
2365   }
2366 }
2367 
2368 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2369   assert(is_humongous(chunk->word_size()) ||
2370          chunk->word_size() == medium_chunk_size() ||
2371          chunk->word_size() == small_chunk_size() ||
2372          chunk->word_size() == specialized_chunk_size(),
2373          "Chunk size is wrong");
2374   return;
2375 }
2376 
2377 #ifdef ASSERT
2378 void SpaceManager::verify_allocated_blocks_words() {
2379   // Verification is only guaranteed at a safepoint.
2380   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
    "Verification can fail if the application is running");
2382   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2383     err_msg("allocation total is not consistent " SIZE_FORMAT
2384             " vs " SIZE_FORMAT,
2385             allocated_blocks_words(), sum_used_in_chunks_in_use()));
2386 }
2387 
2388 #endif
2389 
2390 void SpaceManager::dump(outputStream* const out) const {
2391   size_t curr_total = 0;
2392   size_t waste = 0;
2393   uint i = 0;
2394   size_t used = 0;
2395   size_t capacity = 0;
2396 
2397   // Add up statistics for all chunks in this SpaceManager.
2398   for (ChunkIndex index = ZeroIndex;
2399        index < NumberOfInUseLists;
2400        index = next_chunk_index(index)) {
2401     for (Metachunk* curr = chunks_in_use(index);
2402          curr != NULL;
2403          curr = curr->next()) {
2404       out->print("%d) ", i++);
2405       curr->print_on(out);
2406       curr_total += curr->word_size();
2407       used += curr->used_word_size();
2408       capacity += curr->word_size();
      waste += curr->free_word_size() + curr->overhead();
2410     }
2411   }
2412 
2413   if (TraceMetadataChunkAllocation && Verbose) {
2414     block_freelists()->print_on(out);
2415   }
2416 
2417   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2418   // Free space isn't wasted.
2419   waste -= free;
2420 
2421   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2422                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2423                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2424 }
2425 
2426 #ifndef PRODUCT
2427 void SpaceManager::mangle_freed_chunks() {
2428   for (ChunkIndex index = ZeroIndex;
2429        index < NumberOfInUseLists;
2430        index = next_chunk_index(index)) {
2431     for (Metachunk* curr = chunks_in_use(index);
2432          curr != NULL;
2433          curr = curr->next()) {
2434       curr->mangle();
2435     }
2436   }
2437 }
2438 #endif // PRODUCT
2439 
2440 // MetaspaceAux
2441 
2442 
2443 size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
2444 size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
2445 
2446 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2447   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2448   return list == NULL ? 0 : list->free_bytes();
2449 }
2450 
2451 size_t MetaspaceAux::free_bytes() {
2452   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2453 }
2454 
2455 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2456   assert_lock_strong(SpaceManager::expand_lock());
2457   assert(words <= allocated_capacity_words(mdtype),
2458     err_msg("About to decrement below 0: words " SIZE_FORMAT
2459             " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
2460             words, mdtype, allocated_capacity_words(mdtype)));
2461   _allocated_capacity_words[mdtype] -= words;
2462 }
2463 
2464 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2465   assert_lock_strong(SpaceManager::expand_lock());
2466   // Needs to be atomic
2467   _allocated_capacity_words[mdtype] += words;
2468 }
2469 
2470 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2471   assert(words <= allocated_used_words(mdtype),
2472     err_msg("About to decrement below 0: words " SIZE_FORMAT
2473             " is greater than _allocated_used_words[%u] " SIZE_FORMAT,
2474             words, mdtype, allocated_used_words(mdtype)));
  // For CMS, deallocation of the Metaspaces occurs during the
  // sweep, which is a concurrent phase.  Protection by the expand_lock()
2477   // is not enough since allocation is on a per Metaspace basis
2478   // and protected by the Metaspace lock.
2479   jlong minus_words = (jlong) - (jlong) words;
2480   Atomic::add_ptr(minus_words, &_allocated_used_words[mdtype]);
2481 }
2482 
2483 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2484   // _allocated_used_words tracks allocations for
2485   // each piece of metadata.  Those allocations are
2486   // generally done concurrently by different application
2487   // threads so must be done atomically.
2488   Atomic::add_ptr(words, &_allocated_used_words[mdtype]);
2489 }
2490 
2491 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2492   size_t used = 0;
2493   ClassLoaderDataGraphMetaspaceIterator iter;
2494   while (iter.repeat()) {
2495     Metaspace* msp = iter.get_next();
2496     // Sum allocated_blocks_words for each metaspace
2497     if (msp != NULL) {
2498       used += msp->used_words_slow(mdtype);
2499     }
2500   }
2501   return used * BytesPerWord;
2502 }
2503 
2504 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2505   size_t free = 0;
2506   ClassLoaderDataGraphMetaspaceIterator iter;
2507   while (iter.repeat()) {
2508     Metaspace* msp = iter.get_next();
2509     if (msp != NULL) {
2510       free += msp->free_words_slow(mdtype);
2511     }
2512   }
2513   return free * BytesPerWord;
2514 }
2515 
2516 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2517   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2518     return 0;
2519   }
2520   // Don't count the space in the freelists.  That space will be
2521   // added to the capacity calculation as needed.
2522   size_t capacity = 0;
2523   ClassLoaderDataGraphMetaspaceIterator iter;
2524   while (iter.repeat()) {
2525     Metaspace* msp = iter.get_next();
2526     if (msp != NULL) {
2527       capacity += msp->capacity_words_slow(mdtype);
2528     }
2529   }
2530   return capacity * BytesPerWord;
2531 }
2532 
2533 size_t MetaspaceAux::capacity_bytes_slow() {
2534 #ifdef PRODUCT
2535   // Use allocated_capacity_bytes() in PRODUCT instead of this function.
2536   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2537 #endif
2538   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2539   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2540   assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
2541       err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
2542         " class_capacity + non_class_capacity " SIZE_FORMAT
2543         " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2544         allocated_capacity_bytes(), class_capacity + non_class_capacity,
2545         class_capacity, non_class_capacity));
2546 
2547   return class_capacity + non_class_capacity;
2548 }
2549 
2550 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2551   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2552   return list == NULL ? 0 : list->reserved_bytes();
2553 }
2554 
2555 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2556   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2557   return list == NULL ? 0 : list->committed_bytes();
2558 }
2559 
2560 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2561 
2562 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2563   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2564   if (chunk_manager == NULL) {
2565     return 0;
2566   }
2567   chunk_manager->slow_verify();
2568   return chunk_manager->free_chunks_total_words();
2569 }
2570 
2571 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2572   return free_chunks_total_words(mdtype) * BytesPerWord;
2573 }
2574 
2575 size_t MetaspaceAux::free_chunks_total_words() {
2576   return free_chunks_total_words(Metaspace::ClassType) +
2577          free_chunks_total_words(Metaspace::NonClassType);
2578 }
2579 
2580 size_t MetaspaceAux::free_chunks_total_bytes() {
2581   return free_chunks_total_words() * BytesPerWord;
2582 }
2583 
2584 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2585   gclog_or_tty->print(", [Metaspace:");
2586   if (PrintGCDetails && Verbose) {
2587     gclog_or_tty->print(" "  SIZE_FORMAT
2588                         "->" SIZE_FORMAT
2589                         "("  SIZE_FORMAT ")",
2590                         prev_metadata_used,
2591                         allocated_used_bytes(),
2592                         reserved_bytes());
2593   } else {
2594     gclog_or_tty->print(" "  SIZE_FORMAT "K"
2595                         "->" SIZE_FORMAT "K"
2596                         "("  SIZE_FORMAT "K)",
2597                         prev_metadata_used/K,
2598                         allocated_used_bytes()/K,
2599                         reserved_bytes()/K);
2600   }
2601 
2602   gclog_or_tty->print("]");
2603 }
2604 
2605 // This is printed when PrintGCDetails
2606 void MetaspaceAux::print_on(outputStream* out) {
2607   Metaspace::MetadataType nct = Metaspace::NonClassType;
2608 
2609   out->print_cr(" Metaspace       "
2610                 "used "      SIZE_FORMAT "K, "
2611                 "capacity "  SIZE_FORMAT "K, "
2612                 "committed " SIZE_FORMAT "K, "
2613                 "reserved "  SIZE_FORMAT "K",
2614                 allocated_used_bytes()/K,
2615                 allocated_capacity_bytes()/K,
2616                 committed_bytes()/K,
2617                 reserved_bytes()/K);
2618 
2619   if (Metaspace::using_class_space()) {
2620     Metaspace::MetadataType ct = Metaspace::ClassType;
2621     out->print_cr("  class space    "
2622                   "used "      SIZE_FORMAT "K, "
2623                   "capacity "  SIZE_FORMAT "K, "
2624                   "committed " SIZE_FORMAT "K, "
2625                   "reserved "  SIZE_FORMAT "K",
2626                   allocated_used_bytes(ct)/K,
2627                   allocated_capacity_bytes(ct)/K,
2628                   committed_bytes(ct)/K,
2629                   reserved_bytes(ct)/K);
2630   }
2631 }
2632 
2633 // Print information for class space and data space separately.
2634 // This is almost the same as above.
2635 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2636   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2637   size_t capacity_bytes = capacity_bytes_slow(mdtype);
2638   size_t used_bytes = used_bytes_slow(mdtype);
2639   size_t free_bytes = free_bytes_slow(mdtype);
2640   size_t used_and_free = used_bytes + free_bytes +
2641                            free_chunks_capacity_bytes;
2642   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
2643              "K + unused in chunks " SIZE_FORMAT "K  + "
2644              " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2645              "K  capacity in allocated chunks " SIZE_FORMAT "K",
2646              used_bytes / K,
2647              free_bytes / K,
2648              free_chunks_capacity_bytes / K,
2649              used_and_free / K,
2650              capacity_bytes / K);
2651   // Accounting can only be correct if we got the values during a safepoint
2652   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2653 }
2654 
2655 // Print total fragmentation for class metaspaces
2656 void MetaspaceAux::print_class_waste(outputStream* out) {
2657   assert(Metaspace::using_class_space(), "class metaspace not used");
2658   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2659   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2660   ClassLoaderDataGraphMetaspaceIterator iter;
2661   while (iter.repeat()) {
2662     Metaspace* msp = iter.get_next();
2663     if (msp != NULL) {
2664       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2665       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2666       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2667       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2668       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2669       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2670       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2671     }
2672   }
2673   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2674                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2675                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2676                 "large count " SIZE_FORMAT,
2677                 cls_specialized_count, cls_specialized_waste,
2678                 cls_small_count, cls_small_waste,
2679                 cls_medium_count, cls_medium_waste, cls_humongous_count);
2680 }
2681 
2682 // Print total fragmentation for data and class metaspaces separately
2683 void MetaspaceAux::print_waste(outputStream* out) {
2684   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2685   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2686 
2687   ClassLoaderDataGraphMetaspaceIterator iter;
2688   while (iter.repeat()) {
2689     Metaspace* msp = iter.get_next();
2690     if (msp != NULL) {
2691       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2692       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2693       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2694       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2695       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2696       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2697       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2698     }
2699   }
2700   out->print_cr("Total fragmentation waste (words) doesn't count free space");
2701   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2702                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2703                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2704                         "large count " SIZE_FORMAT,
2705              specialized_count, specialized_waste, small_count,
2706              small_waste, medium_count, medium_waste, humongous_count);
2707   if (Metaspace::using_class_space()) {
2708     print_class_waste(out);
2709   }
2710 }
2711 
2712 // Dump global metaspace things from the end of ClassLoaderDataGraph
2713 void MetaspaceAux::dump(outputStream* out) {
2714   out->print_cr("All Metaspace:");
2715   out->print("data space: "); print_on(out, Metaspace::NonClassType);
2716   out->print("class space: "); print_on(out, Metaspace::ClassType);
2717   print_waste(out);
2718 }
2719 
2720 void MetaspaceAux::verify_free_chunks() {
2721   Metaspace::chunk_manager_metadata()->verify();
2722   if (Metaspace::using_class_space()) {
2723     Metaspace::chunk_manager_class()->verify();
2724   }
2725 }
2726 
2727 void MetaspaceAux::verify_capacity() {
2728 #ifdef ASSERT
2729   size_t running_sum_capacity_bytes = allocated_capacity_bytes();
2730   // For purposes of the running sum of capacity, verify against capacity
2731   size_t capacity_in_use_bytes = capacity_bytes_slow();
2732   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2733     err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
2734             " capacity_bytes_slow()" SIZE_FORMAT,
2735             running_sum_capacity_bytes, capacity_in_use_bytes));
2736   for (Metaspace::MetadataType i = Metaspace::ClassType;
       i < Metaspace::MetadataTypeCount;
2738        i = (Metaspace::MetadataType)(i + 1)) {
2739     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2740     assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
2741       err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
2742               " capacity_bytes_slow(%u)" SIZE_FORMAT,
2743               i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
2744   }
2745 #endif
2746 }
2747 
2748 void MetaspaceAux::verify_used() {
2749 #ifdef ASSERT
2750   size_t running_sum_used_bytes = allocated_used_bytes();
2751   // For purposes of the running sum of used, verify against used
2752   size_t used_in_use_bytes = used_bytes_slow();
2753   assert(allocated_used_bytes() == used_in_use_bytes,
2754     err_msg("allocated_used_bytes() " SIZE_FORMAT
2755             " used_bytes_slow()" SIZE_FORMAT,
2756             allocated_used_bytes(), used_in_use_bytes));
2757   for (Metaspace::MetadataType i = Metaspace::ClassType;
       i < Metaspace::MetadataTypeCount;
2759        i = (Metaspace::MetadataType)(i + 1)) {
2760     size_t used_in_use_bytes = used_bytes_slow(i);
2761     assert(allocated_used_bytes(i) == used_in_use_bytes,
2762       err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
2763               " used_bytes_slow(%u)" SIZE_FORMAT,
2764               i, allocated_used_bytes(i), i, used_in_use_bytes));
2765   }
2766 #endif
2767 }
2768 
2769 void MetaspaceAux::verify_metrics() {
2770   verify_capacity();
2771   verify_used();
2772 }
2773 
2774 
2775 // Metaspace methods
2776 
2777 size_t Metaspace::_first_chunk_word_size = 0;
2778 size_t Metaspace::_first_class_chunk_word_size = 0;
2779 
2780 size_t Metaspace::_commit_alignment = 0;
2781 size_t Metaspace::_reserve_alignment = 0;
2782 
2783 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2784   initialize(lock, type);
2785 }
2786 
2787 Metaspace::~Metaspace() {
2788   delete _vsm;
2789   if (using_class_space()) {
2790     delete _class_vsm;
2791   }
2792 }
2793 
2794 VirtualSpaceList* Metaspace::_space_list = NULL;
2795 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2796 
2797 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
2798 ChunkManager* Metaspace::_chunk_manager_class = NULL;
2799 
2800 #define VIRTUALSPACEMULTIPLIER 2
2801 
2802 #ifdef _LP64
2803 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
2804   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
2805   // narrow_klass_base is the lower of the metaspace base and the cds base
2806   // (if cds is enabled).  The narrow_klass_shift depends on the distance
2807   // between the lower base and higher address.
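  // For example, if the whole [lower_base, higher_address) range fits within
  // 32 bits, a klass pointer can be encoded as an unscaled offset from
  // lower_base; otherwise the offset must be scaled by
  // (1 << LogKlassAlignmentInBytes) to extend the encodable range.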
2808   address lower_base;
2809   address higher_address;
2810   if (UseSharedSpaces) {
2811     higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2812                           (address)(metaspace_base + class_metaspace_size()));
2813     lower_base = MIN2(metaspace_base, cds_base);
2814   } else {
2815     higher_address = metaspace_base + class_metaspace_size();
2816     lower_base = metaspace_base;
2817   }
2818   Universe::set_narrow_klass_base(lower_base);
2819   if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) {
2820     Universe::set_narrow_klass_shift(0);
2821   } else {
2822     assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
2823     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
2824   }
2825 }
2826 
2827 // Return TRUE if the specified metaspace_base and cds_base are close enough
2828 // to work with compressed klass pointers.
2829 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
2830   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
2831   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2832   address lower_base = MIN2((address)metaspace_base, cds_base);
2833   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2834                                 (address)(metaspace_base + class_metaspace_size()));
2835   return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint);
2836 }
2837 
2838 // Try to allocate the metaspace at the requested addr.
2839 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
2840   assert(using_class_space(), "called improperly");
2841   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2842   assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
2843          "Metaspace size is too big");
2844   assert_is_ptr_aligned(requested_addr,          _reserve_alignment);
2845   assert_is_ptr_aligned(cds_base,                _reserve_alignment);
2846   assert_is_size_aligned(class_metaspace_size(), _reserve_alignment);
2847 
2848   // Don't use large pages for the class space.
2849   bool large_pages = false;
2850 
2851   ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
2852                                              _reserve_alignment,
2853                                              large_pages,
2854                                              requested_addr, 0);
2855   if (!metaspace_rs.is_reserved()) {
2856     if (UseSharedSpaces) {
2857       size_t increment = align_size_up(1*G, _reserve_alignment);
2858 
2859       // Keep trying to allocate the metaspace, increasing the requested_addr
2860       // by 1GB each time, until we reach an address that will no longer allow
2861       // use of CDS with compressed klass pointers.
2862       char *addr = requested_addr;
2863       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
2864              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
2865         addr = addr + increment;
2866         metaspace_rs = ReservedSpace(class_metaspace_size(),
2867                                      _reserve_alignment, large_pages, addr, 0);
2868       }
2869     }
2870 
    // If no allocation has succeeded yet, try to allocate the space anywhere.
    // If that also fails, exit with an out-of-memory error.  At this point we
    // cannot try allocating the metaspace as if UseCompressedClassPointers
    // were off, because too much initialization that depends on
    // UseCompressedClassPointers has already happened; the flag can no
    // longer be turned off.
2876     if (!metaspace_rs.is_reserved()) {
2877       metaspace_rs = ReservedSpace(class_metaspace_size(),
2878                                    _reserve_alignment, large_pages);
2879       if (!metaspace_rs.is_reserved()) {
2880         vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
2881                                               class_metaspace_size()));
2882       }
2883     }
2884   }
2885 
2886   // If we got here then the metaspace got allocated.
2887   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
2888 
2889   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
2890   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
2891     FileMapInfo::stop_sharing_and_unmap(
2892         "Could not allocate metaspace at a compatible address");
2893   }
2894 
2895   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
2896                                   UseSharedSpaces ? (address)cds_base : 0);
2897 
2898   initialize_class_space(metaspace_rs);
2899 
2900   if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
2901     gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
2902                             Universe::narrow_klass_base(), Universe::narrow_klass_shift());
2903     gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
2904                            class_metaspace_size(), metaspace_rs.base(), requested_addr);
2905   }
2906 }
2907 
2908 // For UseCompressedClassPointers the class space is reserved above the top of
2909 // the Java heap.  The argument passed in is at the base of the compressed space.
2910 void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, especially with UseLargePages.
2912   assert(rs.size() >= CompressedClassSpaceSize,
2913          err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
2914   assert(using_class_space(), "Must be using class space");
2915   _class_space_list = new VirtualSpaceList(rs);
2916   _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
2917 
2918   if (!_class_space_list->initialization_succeeded()) {
2919     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
2920   }
2921 }
2922 
2923 #endif
2924 
// Align down. If aligning results in 0, return 'alignment' instead.
2926 static size_t restricted_align_down(size_t size, size_t alignment) {
2927   return MAX2(alignment, align_size_down_(size, alignment));
2928 }
2929 
2930 void Metaspace::ergo_initialize() {
2931   if (DumpSharedSpaces) {
2932     // Using large pages when dumping the shared archive is currently not implemented.
2933     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
2934   }
2935 
2936   size_t page_size = os::vm_page_size();
2937   if (UseLargePages && UseLargePagesInMetaspace) {
2938     page_size = os::large_page_size();
2939   }
2940 
2941   _commit_alignment  = page_size;
2942   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
2943 
  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this would
  // overwrite the record of whether MaxMetaspaceSize was set on the command line.
2946   // This information is needed later to conform to the specification of the
2947   // java.lang.management.MemoryUsage API.
2948   //
2949   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
2950   // globals.hpp to the aligned value, but this is not possible, since the
2951   // alignment depends on other flags being parsed.
2952   MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, _reserve_alignment);
2953 
2954   if (MetaspaceSize > MaxMetaspaceSize) {
2955     MetaspaceSize = MaxMetaspaceSize;
2956   }
2957 
2958   MetaspaceSize = restricted_align_down(MetaspaceSize, _commit_alignment);
2959 
2960   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
2961 
2962   if (MetaspaceSize < 256*K) {
2963     vm_exit_during_initialization("Too small initial Metaspace size");
2964   }
2965 
2966   MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, _commit_alignment);
2967   MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _commit_alignment);
2968 
2969   CompressedClassSpaceSize = restricted_align_down(CompressedClassSpaceSize, _reserve_alignment);
2970   set_class_metaspace_size(CompressedClassSpaceSize);
2971 }
2972 
2973 void Metaspace::global_initialize() {
2974   // Initialize the alignment for shared spaces.
2975   int max_alignment = os::vm_page_size();
2976   size_t cds_total = 0;
2977 
2978   MetaspaceShared::set_max_alignment(max_alignment);
2979 
2980   if (DumpSharedSpaces) {
2981     SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
2982     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
2983     SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
2984     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
2985 
    // Initialize with the sum of the shared space sizes.  The read-only
    // and read-write metaspace chunks will be allocated out of this, and the
    // remainder is for the misc code and data chunks.
2989     cds_total = FileMapInfo::shared_spaces_size();
2990     cds_total = align_size_up(cds_total, _reserve_alignment);
2991     _space_list = new VirtualSpaceList(cds_total/wordSize);
2992     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
2993 
2994     if (!_space_list->initialization_succeeded()) {
2995       vm_exit_during_initialization("Unable to dump shared archive.", NULL);
2996     }
2997 
2998 #ifdef _LP64
2999     if (cds_total + class_metaspace_size() > (uint64_t)max_juint) {
3000       vm_exit_during_initialization("Unable to dump shared archive.",
3001           err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3002                   SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3003                   "klass limit: " SIZE_FORMAT, cds_total, class_metaspace_size(),
3004                   cds_total + class_metaspace_size(), (size_t)max_juint));
3005     }
3006 
3007     // Set the compressed klass pointer base so that decoding of these pointers works
3008     // properly when creating the shared archive.
3009     assert(UseCompressedOops && UseCompressedClassPointers,
3010       "UseCompressedOops and UseCompressedClassPointers must be set");
3011     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3012     if (TraceMetavirtualspaceAllocation && Verbose) {
3013       gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
3014                              _space_list->current_virtual_space()->bottom());
3015     }
3016 
3017     Universe::set_narrow_klass_shift(0);
3018 #endif
3019 
3020   } else {
3021     // If using shared space, open the file that contains the shared space
3022     // and map in the memory before initializing the rest of metaspace (so
3023     // the addresses don't conflict)
3024     address cds_address = NULL;
3025     if (UseSharedSpaces) {
3026       FileMapInfo* mapinfo = new FileMapInfo();
3027       memset(mapinfo, 0, sizeof(FileMapInfo));
3028 
      // Open the shared archive file, then read and validate the header.  If
      // initialization fails, shared spaces (UseSharedSpaces) are
      // disabled and the file is closed.  The spaces are also mapped in now.
3033       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3034         FileMapInfo::set_current_info(mapinfo);
3035         cds_total = FileMapInfo::shared_spaces_size();
3036         cds_address = (address)mapinfo->region_base(0);
3037       } else {
3038         assert(!mapinfo->is_open() && !UseSharedSpaces,
3039                "archive file not closed or shared spaces not disabled.");
3040       }
3041     }
3042 
3043 #ifdef _LP64
3044     // If UseCompressedClassPointers is set then allocate the metaspace area
3045     // above the heap and above the CDS area (if it exists).
3046     if (using_class_space()) {
3047       if (UseSharedSpaces) {
3048         char* cds_end = (char*)(cds_address + cds_total);
3049         cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3050         allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3051       } else {
3052         allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
3053       }
3054     }
3055 #endif
3056 
3057     // Initialize these before initializing the VirtualSpaceList
3058     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3059     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
    // Make the first class chunk bigger than a medium chunk so it's not put
    // on the medium chunk list.  The next chunk will be small and progress
    // from there.  This size was calibrated by running with -version.
3063     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3064                                        (CompressedClassSpaceSize/BytesPerWord)*2);
3065     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3066     // Arbitrarily set the initial virtual space to a multiple
3067     // of the boot class loader size.
3068     size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3069     word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3070 
3071     // Initialize the list of virtual spaces.
3072     _space_list = new VirtualSpaceList(word_size);
3073     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3074 
3075     if (!_space_list->initialization_succeeded()) {
3076       vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3077     }
3078   }
3079 
3080   MetaspaceGC::initialize();
3081 }
3082 
3083 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
3084                                                size_t chunk_word_size,
3085                                                size_t chunk_bunch) {
3086   // Get a chunk from the chunk freelist
3087   Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3088   if (chunk != NULL) {
3089     return chunk;
3090   }
3091 
3092   return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
3093 }
3094 
3095 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3096 
3097   assert(space_list() != NULL,
3098     "Metadata VirtualSpaceList has not been initialized");
3099   assert(chunk_manager_metadata() != NULL,
3100     "Metadata ChunkManager has not been initialized");
3101 
3102   _vsm = new SpaceManager(NonClassType, lock);
3103   if (_vsm == NULL) {
3104     return;
3105   }
3106   size_t word_size;
3107   size_t class_word_size;
3108   vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
3109 
  if (using_class_space()) {
    assert(class_space_list() != NULL,
           "Class VirtualSpaceList has not been initialized");
    assert(chunk_manager_class() != NULL,
           "Class ChunkManager has not been initialized");
3115 
3116     // Allocate SpaceManager for classes.
3117     _class_vsm = new SpaceManager(ClassType, lock);
3118     if (_class_vsm == NULL) {
3119       return;
3120     }
3121   }
3122 
3123   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3124 
3125   // Allocate chunk for metadata objects
3126   Metachunk* new_chunk = get_initialization_chunk(NonClassType,
3127                                                   word_size,
3128                                                   vsm()->medium_chunk_bunch());
3129   assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
3130   if (new_chunk != NULL) {
3131     // Add to this manager's list of chunks in use and current_chunk().
3132     vsm()->add_chunk(new_chunk, true);
3133   }
3134 
3135   // Allocate chunk for class metadata objects
3136   if (using_class_space()) {
3137     Metachunk* class_chunk = get_initialization_chunk(ClassType,
3138                                                       class_word_size,
3139                                                       class_vsm()->medium_chunk_bunch());
3140     if (class_chunk != NULL) {
3141       class_vsm()->add_chunk(class_chunk, true);
3142     }
3143   }
3144 
3145   _alloc_record_head = NULL;
3146   _alloc_record_tail = NULL;
3147 }
3148 
3149 size_t Metaspace::align_word_size_up(size_t word_size) {
3150   size_t byte_size = word_size * wordSize;
3151   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3152 }
3153 
3154 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3155   // DumpSharedSpaces doesn't use class metadata area (yet)
3156   // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3157   if (is_class_space_allocation(mdtype)) {
3158     return  class_vsm()->allocate(word_size);
3159   } else {
3160     return  vsm()->allocate(word_size);
3161   }
3162 }
3163 
3164 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3165   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3166   assert(delta_bytes > 0, "Must be");
3167 
3168   size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes);
3169   size_t before_inc = after_inc - delta_bytes;
3170 
3171   if (PrintGCDetails && Verbose) {
3172     gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
3173         " to " SIZE_FORMAT, before_inc, after_inc);
3174   }
3175 
3176   return allocate(word_size, mdtype);
3177 }
3178 
// Start of the space allocated in this Metaspace.  Only valid when dumping
// shared spaces, where all metadata is allocated from a single contiguous region.
3181 char* Metaspace::bottom() const {
3182   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3183   return (char*)vsm()->current_chunk()->bottom();
3184 }
3185 
3186 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3187   if (mdtype == ClassType) {
3188     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3189   } else {
3190     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3191   }
3192 }
3193 
3194 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3195   if (mdtype == ClassType) {
3196     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3197   } else {
3198     return vsm()->sum_free_in_chunks_in_use();
3199   }
3200 }
3201 
// Space capacity in the Metaspace.  It includes
// space in the lists of chunks from which allocations
// have been made.  It does not include space in the global
// chunk freelist; free space in the block dictionaries is not
// counted separately, since it is already included in some chunk.
3207 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3208   if (mdtype == ClassType) {
3209     return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3210   } else {
3211     return vsm()->sum_capacity_in_chunks_in_use();
3212   }
3213 }
3214 
3215 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3216   return used_words_slow(mdtype) * BytesPerWord;
3217 }
3218 
3219 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3220   return capacity_words_slow(mdtype) * BytesPerWord;
3221 }
3222 
void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  if (SafepointSynchronize::is_at_safepoint()) {
    assert(Thread::current()->is_VM_thread(), "should be the VM thread");
  }

  // Don't take Heap_lock
  MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);

  if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
    // Dark matter.  Too small for dictionary.
#ifdef ASSERT
    Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
#endif
    return;
  }
  if (is_class && using_class_space()) {
    class_vsm()->deallocate(ptr, word_size);
  } else {
    vsm()->deallocate(ptr, word_size);
  }
}
3257 
3258 
3259 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3260                               bool read_only, MetaspaceObj::Type type, TRAPS) {
3261   if (HAS_PENDING_EXCEPTION) {
3262     assert(false, "Should not allocate with exception pending");
3263     return NULL;  // caller does a CHECK_NULL too
3264   }
3265 
3266   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3267         "ClassLoaderData::the_null_class_loader_data() should have been used.");
3268 
  // Allocate in metaspaces without taking a lock, because doing so deadlocks
3270   // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
3271   // to revisit this for application class data sharing.
3272   if (DumpSharedSpaces) {
3273     assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3274     Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3275     MetaWord* result = space->allocate(word_size, NonClassType);
3276     if (result == NULL) {
3277       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3278     }
3279 
3280     space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3281 
3282     // Zero initialize.
3283     Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3284 
3285     return result;
3286   }
3287 
3288   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3289 
3290   // Try to allocate metadata.
3291   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3292 
3293   if (result == NULL) {
3294     // Allocation failed.
3295     if (is_init_completed()) {
3296       // Only start a GC if the bootstrapping has completed.
3297 
3298       // Try to clean out some memory and retry.
3299       result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3300           loader_data, word_size, mdtype);
3301     }
3302   }
3303 
3304   if (result == NULL) {
3305     report_metadata_oome(loader_data, word_size, mdtype, THREAD);
3306     // Will not reach here.
3307     return NULL;
3308   }
3309 
3310   // Zero initialize.
3311   Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3312 
3313   return result;
3314 }
3315 
3316 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) {
3317   // If result is still null, we are out of memory.
3318   if (Verbose && TraceMetadataChunkAllocation) {
3319     gclog_or_tty->print_cr("Metaspace allocation failed for size "
3320         SIZE_FORMAT, word_size);
3321     if (loader_data->metaspace_or_null() != NULL) {
3322       loader_data->dump(gclog_or_tty);
3323     }
3324     MetaspaceAux::dump(gclog_or_tty);
3325   }
3326 
3327   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3328   const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" :
3329                                                                  "Metadata space";
3330   report_java_out_of_memory(space_string);
3331 
3332   if (JvmtiExport::should_post_resource_exhausted()) {
3333     JvmtiExport::post_resource_exhausted(
3334         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3335         space_string);
3336   }
3337 
3338   if (!is_init_completed()) {
3339     vm_exit_during_initialization("OutOfMemoryError", space_string);
3340   }
3341 
3342   if (is_class_space_allocation(mdtype)) {
3343     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3344   } else {
3345     THROW_OOP(Universe::out_of_memory_error_metaspace());
3346   }
3347 }
3348 
3349 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3350   assert(DumpSharedSpaces, "sanity");
3351 
3352   AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
3353   if (_alloc_record_head == NULL) {
3354     _alloc_record_head = _alloc_record_tail = rec;
3355   } else {
3356     _alloc_record_tail->_next = rec;
3357     _alloc_record_tail = rec;
3358   }
3359 }
3360 
3361 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3362   assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3363 
3364   address last_addr = (address)bottom();
3365 
3366   for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3367     address ptr = rec->_ptr;
3368     if (last_addr < ptr) {
3369       closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3370     }
3371     closure->doit(ptr, rec->_type, rec->_byte_size);
3372     last_addr = ptr + rec->_byte_size;
3373   }
3374 
3375   address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3376   if (last_addr < top) {
3377     closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3378   }
3379 }
3380 
3381 void Metaspace::purge(MetadataType mdtype) {
3382   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3383 }
3384 
3385 void Metaspace::purge() {
3386   MutexLockerEx cl(SpaceManager::expand_lock(),
3387                    Mutex::_no_safepoint_check_flag);
3388   purge(NonClassType);
3389   if (using_class_space()) {
3390     purge(ClassType);
3391   }
3392 }
3393 
3394 void Metaspace::print_on(outputStream* out) const {
3395   // Print both class virtual space counts and metaspace.
3396   if (Verbose) {
3397     vsm()->print_on(out);
3398     if (using_class_space()) {
3399       class_vsm()->print_on(out);
3400     }
3401   }
3402 }
3403 
3404 bool Metaspace::contains(const void * ptr) {
3405   if (MetaspaceShared::is_in_shared_space(ptr)) {
3406     return true;
3407   }
3408   // This is checked while unlocked.  As long as the virtualspaces are added
3409   // at the end, the pointer will be in one of them.  The virtual spaces
3410   // aren't deleted presently.  When they are, some sort of locking might
3411   // be needed.  Note, locking this can cause inversion problems with the
3412   // caller in MetaspaceObj::is_metadata() function.
3413   return space_list()->contains(ptr) ||
3414          (using_class_space() && class_space_list()->contains(ptr));
3415 }
3416 
3417 void Metaspace::verify() {
3418   vsm()->verify();
3419   if (using_class_space()) {
3420     class_vsm()->verify();
3421   }
3422 }
3423 
3424 void Metaspace::dump(outputStream* const out) const {
3425   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
3426   vsm()->dump(out);
3427   if (using_class_space()) {
3428     out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
3429     class_vsm()->dump(out);
3430   }
3431 }
3432 
3433 /////////////// Unit tests ///////////////
3434 
3435 #ifndef PRODUCT
3436 
3437 class TestMetaspaceAuxTest : AllStatic {
3438  public:
3439   static void test_reserved() {
3440     size_t reserved = MetaspaceAux::reserved_bytes();
3441 
3442     assert(reserved > 0, "assert");
3443 
3444     size_t committed  = MetaspaceAux::committed_bytes();
3445     assert(committed <= reserved, "assert");
3446 
3447     size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3448     assert(reserved_metadata > 0, "assert");
3449     assert(reserved_metadata <= reserved, "assert");
3450 
3451     if (UseCompressedClassPointers) {
3452       size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3453       assert(reserved_class > 0, "assert");
3454       assert(reserved_class < reserved, "assert");
3455     }
3456   }
3457 
3458   static void test_committed() {
3459     size_t committed = MetaspaceAux::committed_bytes();
3460 
3461     assert(committed > 0, "assert");
3462 
3463     size_t reserved  = MetaspaceAux::reserved_bytes();
3464     assert(committed <= reserved, "assert");
3465 
3466     size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3467     assert(committed_metadata > 0, "assert");
3468     assert(committed_metadata <= committed, "assert");
3469 
3470     if (UseCompressedClassPointers) {
3471       size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3472       assert(committed_class > 0, "assert");
3473       assert(committed_class < committed, "assert");
3474     }
3475   }
3476 
3477   static void test_virtual_space_list_large_chunk() {
3478     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3479     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    // Use a size larger than VirtualSpaceSize (256k), then add one more page so
    // that the requested size is _not_ vm_allocation_granularity aligned on Windows.
3482     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3483     large_size += (os::vm_page_size()/BytesPerWord);
3484     vs_list->get_new_chunk(large_size, large_size, 0);
3485   }
3486 
3487   static void test() {
3488     test_reserved();
3489     test_committed();
3490     test_virtual_space_list_large_chunk();
3491   }
3492 };
3493 
3494 void TestMetaspaceAux_test() {
3495   TestMetaspaceAuxTest::test();
3496 }
3497 
3498 #endif