1 /*
   2  * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "gc_interface/collectedHeap.hpp"
  26 #include "memory/allocation.hpp"
  27 #include "memory/binaryTreeDictionary.hpp"
  28 #include "memory/freeList.hpp"
  29 #include "memory/collectorPolicy.hpp"
  30 #include "memory/filemap.hpp"
  31 #include "memory/freeList.hpp"
  32 #include "memory/gcLocker.hpp"
  33 #include "memory/metablock.hpp"
  34 #include "memory/metachunk.hpp"
  35 #include "memory/metaspace.hpp"
  36 #include "memory/metaspaceShared.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "memory/universe.hpp"
  39 #include "runtime/atomic.inline.hpp"
  40 #include "runtime/globals.hpp"
  41 #include "runtime/init.hpp"
  42 #include "runtime/java.hpp"
  43 #include "runtime/mutex.hpp"
  44 #include "runtime/orderAccess.hpp"
  45 #include "services/memTracker.hpp"
  46 #include "utilities/copy.hpp"
  47 #include "utilities/debug.hpp"
  48 
  49 typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
  50 typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
  51 // Set metaspace_slow_verify to true to enable slow integrity checking of
  52 // the free chunk lists.
  53 const bool metaspace_slow_verify = false;
  54 
  55 // Parameters for stress mode testing
  56 const uint metadata_deallocate_a_lot_block = 10;
  57 const uint metadata_deallocate_a_lock_chunk = 3;
  58 size_t const allocation_from_dictionary_limit = 4 * K;
  59 
  60 MetaWord* last_allocated = 0;
  61 
  62 size_t Metaspace::_class_metaspace_size;
  63 
  64 // Used in declarations in SpaceManager and ChunkManager
  65 enum ChunkIndex {
  66   ZeroIndex = 0,
  67   SpecializedIndex = ZeroIndex,
  68   SmallIndex = SpecializedIndex + 1,
  69   MediumIndex = SmallIndex + 1,
  70   HumongousIndex = MediumIndex + 1,
  71   NumberOfFreeLists = 3,
  72   NumberOfInUseLists = 4
  73 };
  74 
  75 enum ChunkSizes {    // in words.
  76   ClassSpecializedChunk = 128,
  77   SpecializedChunk = 128,
  78   ClassSmallChunk = 256,
  79   SmallChunk = 512,
  80   ClassMediumChunk = 4 * K,
  81   MediumChunk = 8 * K,
  82   HumongousChunkGranularity = 8
  83 };
  84 
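     // Returns the chunk index that follows 'i'.  Used to iterate over the
     // free/in-use chunk lists in index order.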
  85 static ChunkIndex next_chunk_index(ChunkIndex i) {
  86   assert(i < NumberOfInUseLists, "Out of bound");
  87   return (ChunkIndex) (i+1);
  88 }
  89 
  90 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
  91 uint MetaspaceGC::_shrink_factor = 0;
  92 bool MetaspaceGC::_should_concurrent_collect = false;
  93 
  94 // Blocks of space for metadata are allocated out of Metachunks.
  95 //
  96 // Metachunks are allocated out of MetadataVirtualspaces, and once
  97 // allocated there is no explicit link between a Metachunk and
  98 // the MetadataVirtualspace from which it was allocated.
  99 //
 100 // Each SpaceManager maintains a
 101 // list of the chunks it is using and the current chunk.  The current
 102 // chunk is the chunk from which allocations are done.  Space freed in
 103 // a chunk is placed on the free list of blocks (BlockFreelist) and
 104 // reused from there.
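     //
     // A rough sketch of these relationships (illustrative only):
     //
     //   SpaceManager --> current chunk + _chunks_in_use lists --> Metachunks
     //        |                                                        |
     //   BlockFreelist (freed blocks)            carved out of a VirtualSpaceNode
     //                                           owned by a global VirtualSpaceList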
 105 
 106 typedef class FreeList<Metachunk> ChunkList;
 107 
 108 // Manages the global free lists of chunks.
 109 // Has three lists of free chunks (specialized, small and medium), plus a
 110 // dictionary of humongous chunks, and a total size and count that cover all of them.
 111 
 112 class ChunkManager : public CHeapObj<mtInternal> {
 113 
 114   // Free list of chunks of different sizes.
 115   //   SpecializedChunk
 116   //   SmallChunk
 117   //   MediumChunk
 118   //   HumongousChunk
 119   ChunkList _free_chunks[NumberOfFreeLists];
 120 
 121 
 122   //   HumongousChunk
 123   ChunkTreeDictionary _humongous_dictionary;
 124 
 125   // Total size (in words) and number of free chunks across all of the above.
 126   size_t _free_chunks_total;
 127   size_t _free_chunks_count;
 128 
 129   void dec_free_chunks_total(size_t v) {
 130     assert(_free_chunks_count > 0 &&
 131              _free_chunks_total > 0,
 132              "About to go negative");
 133     Atomic::add_ptr(-1, &_free_chunks_count);
 134     jlong minus_v = (jlong) - (jlong) v;
 135     Atomic::add_ptr(minus_v, &_free_chunks_total);
 136   }
 137 
 138   // Debug support
 139 
 140   size_t sum_free_chunks();
 141   size_t sum_free_chunks_count();
 142 
 143   void locked_verify_free_chunks_total();
 144   void slow_locked_verify_free_chunks_total() {
 145     if (metaspace_slow_verify) {
 146       locked_verify_free_chunks_total();
 147     }
 148   }
 149   void locked_verify_free_chunks_count();
 150   void slow_locked_verify_free_chunks_count() {
 151     if (metaspace_slow_verify) {
 152       locked_verify_free_chunks_count();
 153     }
 154   }
 155   void verify_free_chunks_count();
 156 
 157  public:
 158 
 159   ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
 160       : _free_chunks_total(0), _free_chunks_count(0) {
 161     _free_chunks[SpecializedIndex].set_size(specialized_size);
 162     _free_chunks[SmallIndex].set_size(small_size);
 163     _free_chunks[MediumIndex].set_size(medium_size);
 164   }
 165 
 166   // Allocate (remove) a chunk from, or deallocate (return) a chunk to, the global freelist.
 167   Metachunk* chunk_freelist_allocate(size_t word_size);
 168   void chunk_freelist_deallocate(Metachunk* chunk);
 169 
 170   // Map a size to a list index assuming that there are lists
 171   // for special, small, medium, and humongous chunks.
 172   static ChunkIndex list_index(size_t size);
 173 
 174   // Remove the chunk from its freelist.  It is
 175   // expected to be on one of the _free_chunks[] lists.
 176   void remove_chunk(Metachunk* chunk);
 177 
 178   // Add the simple linked list of chunks to the freelist of chunks
 179   // of type index.
 180   void return_chunks(ChunkIndex index, Metachunk* chunks);
 181 
 182   // Total of the space in the free chunks list
 183   size_t free_chunks_total_words();
 184   size_t free_chunks_total_bytes();
 185 
 186   // Number of chunks in the free chunks list
 187   size_t free_chunks_count();
 188 
 189   void inc_free_chunks_total(size_t v, size_t count = 1) {
 190     Atomic::add_ptr(count, &_free_chunks_count);
 191     Atomic::add_ptr(v, &_free_chunks_total);
 192   }
 193   ChunkTreeDictionary* humongous_dictionary() {
 194     return &_humongous_dictionary;
 195   }
 196 
 197   ChunkList* free_chunks(ChunkIndex index);
 198 
 199   // Returns the list for the given chunk word size.
 200   ChunkList* find_free_chunks_list(size_t word_size);
 201 
 202   // Add and remove from a list by size.  Selects
 203   // list based on size of chunk.
 204   void free_chunks_put(Metachunk* chunk);
 205   Metachunk* free_chunks_get(size_t chunk_word_size);
 206 
 207   // Debug support
 208   void verify();
 209   void slow_verify() {
 210     if (metaspace_slow_verify) {
 211       verify();
 212     }
 213   }
 214   void locked_verify();
 215   void slow_locked_verify() {
 216     if (metaspace_slow_verify) {
 217       locked_verify();
 218     }
 219   }
 220   void verify_free_chunks_total();
 221 
 222   void locked_print_free_chunks(outputStream* st);
 223   void locked_print_sum_free_chunks(outputStream* st);
 224 
 225   void print_on(outputStream* st) const;
 226 };
 227 
 228 // Used to manage the free list of Metablocks (a block corresponds
 229 // to the allocation of a quantum of metadata).
 230 class BlockFreelist VALUE_OBJ_CLASS_SPEC {
 231   BlockTreeDictionary* _dictionary;
 232   static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);
 233 
 234   // Only allocate and split from freelist if the size of the allocation
 235   // is at least 1/4th the size of the available block.
 236   const static int WasteMultiplier = 4;
 237 
 238   // Accessors
 239   BlockTreeDictionary* dictionary() const { return _dictionary; }
 240 
 241  public:
 242   BlockFreelist();
 243   ~BlockFreelist();
 244 
 245   // Get a block from, and return a block to, the free list
 246   MetaWord* get_block(size_t word_size);
 247   void return_block(MetaWord* p, size_t word_size);
 248 
 249   size_t total_size() {
 250     if (dictionary() == NULL) {
 251       return 0;
 252     } else {
 253       return dictionary()->total_size();
 254     }
 255   }
 256 
 257   void print_on(outputStream* st) const;
 258 };
 259 
 260 class VirtualSpaceNode : public CHeapObj<mtClass> {
 261   friend class VirtualSpaceList;
 262 
 263   // Link to next VirtualSpaceNode
 264   VirtualSpaceNode* _next;
 265 
 266   // total in the VirtualSpace
 267   MemRegion _reserved;
 268   ReservedSpace _rs;
 269   VirtualSpace _virtual_space;
 270   MetaWord* _top;
 271   // count of chunks contained in this VirtualSpace
 272   uintx _container_count;
 273 
 274   // Convenience functions to access the _virtual_space
 275   char* low()  const { return virtual_space()->low(); }
 276   char* high() const { return virtual_space()->high(); }
 277 
 278   // The first Metachunk will be allocated at the bottom of the
 279   // VirtualSpace
 280   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 281 
 282  public:
 283 
 284   VirtualSpaceNode(size_t byte_size);
 285   VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
 286   ~VirtualSpaceNode();
 287 
 288   // Convenience functions for logical bottom and end
 289   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 290   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 291 
 292   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 293   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 294 
 295   bool is_pre_committed() const { return _virtual_space.special(); }
 296 
 297   // address of next available space in _virtual_space;
 298   // Accessors
 299   VirtualSpaceNode* next() { return _next; }
 300   void set_next(VirtualSpaceNode* v) { _next = v; }
 301 
 302   void set_reserved(MemRegion const v) { _reserved = v; }
 303   void set_top(MetaWord* v) { _top = v; }
 304 
 305   // Accessors
 306   MemRegion* reserved() { return &_reserved; }
 307   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 308 
 309   // Returns true if "word_size" is available in the VirtualSpace
 310   bool is_available(size_t word_size) { return _top + word_size <= end(); }
 311 
 312   MetaWord* top() const { return _top; }
 313   void inc_top(size_t word_size) { _top += word_size; }
 314 
 315   uintx container_count() { return _container_count; }
 316   void inc_container_count();
 317   void dec_container_count();
 318 #ifdef ASSERT
 319   uint container_count_slow();
 320   void verify_container_count();
 321 #endif
 322 
 323   // used and capacity in this single entry in the list
 324   size_t used_words_in_vs() const;
 325   size_t capacity_words_in_vs() const;
 326   size_t free_words_in_vs() const;
 327 
 328   bool initialize();
 329 
 330   // get space from the virtual space
 331   Metachunk* take_from_committed(size_t chunk_word_size);
 332 
 333   // Allocate a chunk from the virtual space and return it.
 334   Metachunk* get_chunk_vs(size_t chunk_word_size);
 335 
 336   // Expands/shrinks the committed space in a virtual space.  Delegates
 337   // to Virtualspace
 338   bool expand_by(size_t min_words, size_t preferred_words);
 339 
 340   // In preparation for deleting this node, remove all the chunks
 341   // in the node from any freelist.
 342   void purge(ChunkManager* chunk_manager);
 343 
 344 #ifdef ASSERT
 345   // Debug support
 346   void mangle();
 347 #endif
 348 
 349   void print_on(outputStream* st) const;
 350 };
 351 
 352 #define assert_is_ptr_aligned(ptr, alignment) \
 353   assert(is_ptr_aligned(ptr, alignment),      \
 354     err_msg(PTR_FORMAT " is not aligned to "  \
 355       SIZE_FORMAT, ptr, alignment))
 356 
 357 #define assert_is_size_aligned(size, alignment) \
 358   assert(is_size_aligned(size, alignment),      \
 359     err_msg(SIZE_FORMAT " is not aligned to "   \
 360        SIZE_FORMAT, size, alignment))
 361 
 362 
 363 // Decide if large pages should be committed when the memory is reserved.
 364 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 365   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 366     size_t words = bytes / BytesPerWord;
 367     bool is_class = false; // We never reserve large pages for the class space.
 368     if (MetaspaceGC::can_expand(words, is_class) &&
 369         MetaspaceGC::allowed_expansion() >= words) {
 370       return true;
 371     }
 372   }
 373 
 374   return false;
 375 }
 376 
 377 // 'bytes' is the size of the associated virtual space.
 378 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
 379   assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
 380 
 381   // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
 382   // a configurable address, generally at the top of the Java heap so other
 383   // memory addresses don't conflict.
 384   if (DumpSharedSpaces) {
 385     bool large_pages = false; // No large pages when dumping the CDS archive.
 386     char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
 387 
 388     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
 389     if (_rs.is_reserved()) {
 390       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
 391     } else {
 392       // Get a mmap region anywhere if the SharedBaseAddress fails.
 393       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 394     }
 395     MetaspaceShared::set_shared_rs(&_rs);
 396   } else {
 397     bool large_pages = should_commit_large_pages_when_reserving(bytes);
 398 
 399     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 400   }
 401 
 402   if (_rs.is_reserved()) {
 403     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 404     assert(_rs.size() != 0, "Catch if we get a 0 size");
 405     assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
 406     assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
 407 
 408     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 409   }
 410 }
 411 
 412 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 413   Metachunk* chunk = first_chunk();
 414   Metachunk* invalid_chunk = (Metachunk*) top();
 415   while (chunk < invalid_chunk) {
 416     assert(chunk->is_free(), "Should be marked free");
 417     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 418     chunk_manager->remove_chunk(chunk);
 419     assert(chunk->next() == NULL &&
 420            chunk->prev() == NULL,
 421            "Was not removed from its list");
 422     chunk = (Metachunk*) next;
 423   }
 424 }
 425 
 426 #ifdef ASSERT
 427 uint VirtualSpaceNode::container_count_slow() {
 428   uint count = 0;
 429   Metachunk* chunk = first_chunk();
 430   Metachunk* invalid_chunk = (Metachunk*) top();
 431   while (chunk < invalid_chunk ) {
 432     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 433     // Don't count the chunks on the free lists.  Those are
 434     // still part of the VirtualSpaceNode but not currently
 435     // counted.
 436     if (!chunk->is_free()) {
 437       count++;
 438     }
 439     chunk = (Metachunk*) next;
 440   }
 441   return count;
 442 }
 443 #endif
 444 
 445 // List of VirtualSpaces for metadata allocation.
 446 class VirtualSpaceList : public CHeapObj<mtClass> {
 447   friend class VirtualSpaceNode;
 448 
 449   enum VirtualSpaceSizes {
 450     VirtualSpaceSize = 256 * K
 451   };
 452 
 453   // Head of the list
 454   VirtualSpaceNode* _virtual_space_list;
 455   // virtual space currently being used for allocations
 456   VirtualSpaceNode* _current_virtual_space;
 457 
 458   // Is this VirtualSpaceList used for the compressed class space
 459   bool _is_class;
 460 
 461   // Sum of reserved and committed memory in the virtual spaces
 462   size_t _reserved_words;
 463   size_t _committed_words;
 464 
 465   // Number of virtual spaces
 466   size_t _virtual_space_count;
 467 
 468   ~VirtualSpaceList();
 469 
 470   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
 471 
 472   void set_virtual_space_list(VirtualSpaceNode* v) {
 473     _virtual_space_list = v;
 474   }
 475   void set_current_virtual_space(VirtualSpaceNode* v) {
 476     _current_virtual_space = v;
 477   }
 478 
 479   void link_vs(VirtualSpaceNode* new_entry);
 480 
 481   // Get another virtual space and add it to the list.  This
 482   // is typically prompted by a failed attempt to allocate a chunk
 483   // and is typically followed by the allocation of a chunk.
 484   bool create_new_virtual_space(size_t vs_word_size);
 485 
 486  public:
 487   VirtualSpaceList(size_t word_size);
 488   VirtualSpaceList(ReservedSpace rs);
 489 
 490   size_t free_bytes();
 491 
 492   Metachunk* get_new_chunk(size_t word_size,
 493                            size_t grow_chunks_by_words,
 494                            size_t medium_chunk_bunch);
 495 
 496   bool expand_node_by(VirtualSpaceNode* node,
 497                       size_t min_words,
 498                       size_t preferred_words);
 499 
 500   bool expand_by(size_t min_words,
 501                  size_t preferred_words);
 502 
 503   VirtualSpaceNode* current_virtual_space() {
 504     return _current_virtual_space;
 505   }
 506 
 507   bool is_class() const { return _is_class; }
 508 
 509   bool initialization_succeeded() { return _virtual_space_list != NULL; }
 510 
 511   size_t reserved_words()  { return _reserved_words; }
 512   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
 513   size_t committed_words() { return _committed_words; }
 514   size_t committed_bytes() { return committed_words() * BytesPerWord; }
 515 
 516   void inc_reserved_words(size_t v);
 517   void dec_reserved_words(size_t v);
 518   void inc_committed_words(size_t v);
 519   void dec_committed_words(size_t v);
 520   void inc_virtual_space_count();
 521   void dec_virtual_space_count();
 522 
 523   // Unlink empty VirtualSpaceNodes and free them.
 524   void purge(ChunkManager* chunk_manager);
 525 
 526   bool contains(const void *ptr);
 527 
 528   void print_on(outputStream* st) const;
 529 
 530   class VirtualSpaceListIterator : public StackObj {
 531     VirtualSpaceNode* _virtual_spaces;
 532    public:
 533     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
 534       _virtual_spaces(virtual_spaces) {}
 535 
 536     bool repeat() {
 537       return _virtual_spaces != NULL;
 538     }
 539 
 540     VirtualSpaceNode* get_next() {
 541       VirtualSpaceNode* result = _virtual_spaces;
 542       if (_virtual_spaces != NULL) {
 543         _virtual_spaces = _virtual_spaces->next();
 544       }
 545       return result;
 546     }
 547   };
 548 };
 549 
 550 class Metadebug : AllStatic {
 551   // Debugging support for Metaspaces
 552   static int _deallocate_block_a_lot_count;
 553   static int _deallocate_chunk_a_lot_count;
 554   static int _allocation_fail_alot_count;
 555 
 556  public:
 557   static int deallocate_block_a_lot_count() {
 558     return _deallocate_block_a_lot_count;
 559   }
 560   static void set_deallocate_block_a_lot_count(int v) {
 561     _deallocate_block_a_lot_count = v;
 562   }
 563   static void inc_deallocate_block_a_lot_count() {
 564     _deallocate_block_a_lot_count++;
 565   }
 566   static int deallocate_chunk_a_lot_count() {
 567     return _deallocate_chunk_a_lot_count;
 568   }
 569   static void reset_deallocate_chunk_a_lot_count() {
 570     _deallocate_chunk_a_lot_count = 1;
 571   }
 572   static void inc_deallocate_chunk_a_lot_count() {
 573     _deallocate_chunk_a_lot_count++;
 574   }
 575 
 576   static void init_allocation_fail_alot_count();
 577 #ifdef ASSERT
 578   static bool test_metadata_failure();
 579 #endif
 580 
 581   static void deallocate_chunk_a_lot(SpaceManager* sm,
 582                                      size_t chunk_word_size);
 583   static void deallocate_block_a_lot(SpaceManager* sm,
 584                                      size_t chunk_word_size);
 585 
 586 };
 587 
 588 int Metadebug::_deallocate_block_a_lot_count = 0;
 589 int Metadebug::_deallocate_chunk_a_lot_count = 0;
 590 int Metadebug::_allocation_fail_alot_count = 0;
 591 
 592 //  SpaceManager - used by Metaspace to handle allocations
 593 class SpaceManager : public CHeapObj<mtClass> {
 594   friend class Metaspace;
 595   friend class Metadebug;
 596 
 597  private:
 598 
 599   // protects allocations and contains.
 600   Mutex* const _lock;
 601 
 602   // Type of metadata allocated.
 603   Metaspace::MetadataType _mdtype;
 604 
 605   // List of chunks in use by this SpaceManager.  Allocations
 606   // are done from the current chunk.  The list is used for deallocating
 607   // chunks when the SpaceManager is freed.
 608   Metachunk* _chunks_in_use[NumberOfInUseLists];
 609   Metachunk* _current_chunk;
 610 
 611   // Number of small chunks to allocate to a manager
 612   // If class space manager, small chunks are unlimited
 613   static uint const _small_chunk_limit;
 614 
 615   // Sum of all space in allocated chunks
 616   size_t _allocated_blocks_words;
 617 
 618   // Sum of all allocated chunks
 619   size_t _allocated_chunks_words;
 620   size_t _allocated_chunks_count;
 621 
 622   // Free lists of blocks are per SpaceManager since they
 623   // are assumed to be in chunks in use by the SpaceManager
 624   // and all chunks in use by a SpaceManager are freed when
 625   // the class loader using the SpaceManager is collected.
 626   BlockFreelist _block_freelists;
 627 
 628   // protects virtualspace and chunk expansions
 629   static const char*  _expand_lock_name;
 630   static const int    _expand_lock_rank;
 631   static Mutex* const _expand_lock;
 632 
 633  private:
 634   // Accessors
 635   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
 636   void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }
 637 
 638   BlockFreelist* block_freelists() const {
 639     return (BlockFreelist*) &_block_freelists;
 640   }
 641 
 642   Metaspace::MetadataType mdtype() { return _mdtype; }
 643 
 644   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
 645   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
 646 
 647   Metachunk* current_chunk() const { return _current_chunk; }
 648   void set_current_chunk(Metachunk* v) {
 649     _current_chunk = v;
 650   }
 651 
 652   Metachunk* find_current_chunk(size_t word_size);
 653 
 654   // Add chunk to the list of chunks in use
 655   void add_chunk(Metachunk* v, bool make_current);
 656   void retire_current_chunk();
 657 
 658   Mutex* lock() const { return _lock; }
 659 
 660   const char* chunk_size_name(ChunkIndex index) const;
 661 
 662  protected:
 663   void initialize();
 664 
 665  public:
 666   SpaceManager(Metaspace::MetadataType mdtype,
 667                Mutex* lock);
 668   ~SpaceManager();
 669 
 670   enum ChunkMultiples {
 671     MediumChunkMultiple = 4
 672   };
 673 
 674   bool is_class() { return _mdtype == Metaspace::ClassType; }
 675 
 676   // Accessors
 677   size_t specialized_chunk_size() { return SpecializedChunk; }
 678   size_t small_chunk_size() { return (size_t) (is_class() ? ClassSmallChunk : SmallChunk); }
 679   size_t medium_chunk_size() { return (size_t) (is_class() ? ClassMediumChunk : MediumChunk); }
 680   size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
 681 
 682   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
 683   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
 684   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
 685   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
 686 
 687   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
 688 
 689   static Mutex* expand_lock() { return _expand_lock; }
 690 
 691   // Increment the per Metaspace and global running sums for Metachunks
 692   // by the given size.  This is used when a Metachunk is added to
 693   // the in-use list.
 694   void inc_size_metrics(size_t words);
 695   // Increment the per Metaspace and global running sums for Metablocks by the
 696   // given size.  This is used when a Metablock is allocated.
 697   void inc_used_metrics(size_t words);
 698   // Delete the portion of the running sums for this SpaceManager.  That is,
 699   // the global running sums for the Metachunks and Metablocks are
 700   // decremented for all the Metachunks in use by this SpaceManager.
 701   void dec_total_from_size_metrics();
 702 
 703   // Set the sizes for the initial chunks.
 704   void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
 705                                size_t* chunk_word_size,
 706                                size_t* class_chunk_word_size);
 707 
 708   size_t sum_capacity_in_chunks_in_use() const;
 709   size_t sum_used_in_chunks_in_use() const;
 710   size_t sum_free_in_chunks_in_use() const;
 711   size_t sum_waste_in_chunks_in_use() const;
 712   size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
 713 
 714   size_t sum_count_in_chunks_in_use();
 715   size_t sum_count_in_chunks_in_use(ChunkIndex i);
 716 
 717   Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);
 718 
 719   // Block allocation and deallocation.
 720   // Allocates a block from the current chunk
 721   MetaWord* allocate(size_t word_size);
 722 
 723   // Helper for allocations
 724   MetaWord* allocate_work(size_t word_size);
 725 
 726   // Returns a block to the per manager freelist
 727   void deallocate(MetaWord* p, size_t word_size);
 728 
 729   // Based on the allocation size and a minimum chunk size,
 730   // returns the chunk size to use (for expanding space for chunk allocation).
 731   size_t calc_chunk_size(size_t allocation_word_size);
 732 
 733   // Called when an allocation from the current chunk fails.
 734   // Gets a new chunk (may require getting a new virtual space),
 735   // and allocates from that chunk.
 736   MetaWord* grow_and_allocate(size_t word_size);
 737 
 738   // debugging support.
 739 
 740   void dump(outputStream* const out) const;
 741   void print_on(outputStream* st) const;
 742   void locked_print_chunks_in_use_on(outputStream* st) const;
 743 
 744   void verify();
 745   void verify_chunk_size(Metachunk* chunk);
 746   NOT_PRODUCT(void mangle_freed_chunks();)
 747 #ifdef ASSERT
 748   void verify_allocated_blocks_words();
 749 #endif
 750 
 751   size_t get_raw_word_size(size_t word_size) {
 752     // If only the dictionary is going to be used (i.e., no
 753     // indexed free list), then there is a minimum size requirement.
 754     // MinChunkSize is a placeholder for the real minimum size JJJ
 755     size_t byte_size = word_size * BytesPerWord;
 756 
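         // Round the request up to the freelist's minimum block size and then to the
         // arena alignment so that the block can later be returned to, and managed by,
         // the BlockFreelist dictionary.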
 757     size_t raw_bytes_size = MAX2(byte_size,
 758                                  Metablock::min_block_byte_size());
 759     raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
 760     size_t raw_word_size = raw_bytes_size / BytesPerWord;
 761     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
 762 
 763     return raw_word_size;
 764   }
 765 };
 766 
 767 uint const SpaceManager::_small_chunk_limit = 4;
 768 
 769 const char* SpaceManager::_expand_lock_name =
 770   "SpaceManager chunk allocation lock";
 771 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
 772 Mutex* const SpaceManager::_expand_lock =
 773   new Mutex(SpaceManager::_expand_lock_rank,
 774             SpaceManager::_expand_lock_name,
 775             Mutex::_allow_vm_block_flag);
 776 
 777 void VirtualSpaceNode::inc_container_count() {
 778   assert_lock_strong(SpaceManager::expand_lock());
 779   _container_count++;
 780   assert(_container_count == container_count_slow(),
 781          err_msg("Inconsistency in container_count: _container_count " SIZE_FORMAT
 782                  " container_count_slow() " SIZE_FORMAT,
 783                  _container_count, container_count_slow()));
 784 }
 785 
 786 void VirtualSpaceNode::dec_container_count() {
 787   assert_lock_strong(SpaceManager::expand_lock());
 788   _container_count--;
 789 }
 790 
 791 #ifdef ASSERT
 792 void VirtualSpaceNode::verify_container_count() {
 793   assert(_container_count == container_count_slow(),
 794     err_msg("Inconsistency in container_count: _container_count " SIZE_FORMAT
 795             " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
 796 }
 797 #endif
 798 
 799 // BlockFreelist methods
 800 
 801 BlockFreelist::BlockFreelist() : _dictionary(NULL) {}
 802 
 803 BlockFreelist::~BlockFreelist() {
 804   if (_dictionary != NULL) {
 805     if (Verbose && TraceMetadataChunkAllocation) {
 806       _dictionary->print_free_lists(gclog_or_tty);
 807     }
 808     delete _dictionary;
 809   }
 810 }
 811 
 812 Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
 813   Metablock* block = (Metablock*) p;
 814   block->set_word_size(word_size);
 815   block->set_prev(NULL);
 816   block->set_next(NULL);
 817 
 818   return block;
 819 }
 820 
 821 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
 822   Metablock* free_chunk = initialize_free_chunk(p, word_size);
 823   if (dictionary() == NULL) {
 824    _dictionary = new BlockTreeDictionary();
 825   }
 826   dictionary()->return_chunk(free_chunk);
 827 }
 828 
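     // Returns a block of at least 'word_size' words from the dictionary, or NULL
     // if nothing suitable is available.  Blocks more than WasteMultiplier times
     // larger than the request are put back rather than split; otherwise any unused
     // tail that is still large enough for the dictionary is returned to it.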
 829 MetaWord* BlockFreelist::get_block(size_t word_size) {
 830   if (dictionary() == NULL) {
 831     return NULL;
 832   }
 833 
 834   if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
 835     // Dark matter.  Too small for dictionary.
 836     return NULL;
 837   }
 838 
 839   Metablock* free_block =
 840     dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
 841   if (free_block == NULL) {
 842     return NULL;
 843   }
 844 
 845   const size_t block_size = free_block->size();
 846   if (block_size > WasteMultiplier * word_size) {
 847     return_block((MetaWord*)free_block, block_size);
 848     return NULL;
 849   }
 850 
 851   MetaWord* new_block = (MetaWord*)free_block;
 852   assert(block_size >= word_size, "Incorrect size of block from freelist");
 853   const size_t unused = block_size - word_size;
 854   if (unused >= TreeChunk<Metablock, FreeList>::min_size()) {
 855     return_block(new_block + word_size, unused);
 856   }
 857 
 858   return new_block;
 859 }
 860 
 861 void BlockFreelist::print_on(outputStream* st) const {
 862   if (dictionary() == NULL) {
 863     return;
 864   }
 865   dictionary()->print_free_lists(st);
 866 }
 867 
 868 // VirtualSpaceNode methods
 869 
 870 VirtualSpaceNode::~VirtualSpaceNode() {
 871   _rs.release();
 872 #ifdef ASSERT
 873   size_t word_size = sizeof(*this) / BytesPerWord;
 874   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
 875 #endif
 876 }
 877 
 878 size_t VirtualSpaceNode::used_words_in_vs() const {
 879   return pointer_delta(top(), bottom(), sizeof(MetaWord));
 880 }
 881 
 882 // Space committed in the VirtualSpace
 883 size_t VirtualSpaceNode::capacity_words_in_vs() const {
 884   return pointer_delta(end(), bottom(), sizeof(MetaWord));
 885 }
 886 
 887 size_t VirtualSpaceNode::free_words_in_vs() const {
 888   return pointer_delta(end(), top(), sizeof(MetaWord));
 889 }
 890 
 891 // Allocates the chunk from the virtual space only.
 892 // This interface is also used internally for debugging.  Not all
 893 // chunks removed here are necessarily used for allocation.
 894 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
 895   // Bottom of the new chunk
 896   MetaWord* chunk_limit = top();
 897   assert(chunk_limit != NULL, "Not safe to call this method");
 898 
 899   // The virtual spaces are always expanded by the
 900   // commit granularity to enforce the following condition.
 901   // Without this the is_available check will not work correctly.
 902   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
 903       "The committed memory doesn't match the expanded memory.");
 904 
 905   if (!is_available(chunk_word_size)) {
 906     if (TraceMetadataChunkAllocation) {
 907       gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
 908       // Dump some information about the virtual space that is nearly full
 909       print_on(gclog_or_tty);
 910     }
 911     return NULL;
 912   }
 913 
 914   // Take the space  (bump top on the current virtual space).
 915   inc_top(chunk_word_size);
 916 
 917   // Initialize the chunk
 918   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
 919   return result;
 920 }
 921 
 922 
 923 // Expand the virtual space (commit more of the reserved space)
 924 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
 925   size_t min_bytes = min_words * BytesPerWord;
 926   size_t preferred_bytes = preferred_words * BytesPerWord;
 927 
 928   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
 929 
 930   if (uncommitted < min_bytes) {
 931     return false;
 932   }  
 933 
 934   size_t commit = MIN2(preferred_bytes, uncommitted);
 935   bool result = virtual_space()->expand_by(commit, false);
 936   
 937   assert(result, "Failed to commit memory");
 938 
 939   return result;
 940 }
 941 
 942 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
 943   assert_lock_strong(SpaceManager::expand_lock());
 944   Metachunk* result = take_from_committed(chunk_word_size);
 945   if (result != NULL) {
 946     inc_container_count();
 947   }
 948   return result;
 949 }
 950 
 951 bool VirtualSpaceNode::initialize() {
 952 
 953   if (!_rs.is_reserved()) {
 954     return false;
 955   }
 956 
 957   // These are necessary restrictions to make sure that the virtual space always
 958   // grows in steps of Metaspace::commit_alignment(). If both base and size are
 959   // aligned, only the middle alignment of the VirtualSpace is used.
 960   assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
 961   assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
 962 
 963   // ReservedSpaces marked as special will have the entire memory
 964   // pre-committed. Setting a committed size will make sure that
 965   // committed_size and actual_committed_size agree.
 966   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
 967 
 968   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
 969                                             Metaspace::commit_alignment());
 970   if (result) {
 971     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
 972         "Checking that the pre-committed memory was registered by the VirtualSpace");
 973   
 974     set_top((MetaWord*)virtual_space()->low());
 975     set_reserved(MemRegion((HeapWord*)_rs.base(),
 976                  (HeapWord*)(_rs.base() + _rs.size())));
 977 
 978     assert(reserved()->start() == (HeapWord*) _rs.base(),
 979       err_msg("Reserved start was not set properly " PTR_FORMAT
 980         " != " PTR_FORMAT, reserved()->start(), _rs.base()));
 981     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
 982       err_msg("Reserved size was not set properly " SIZE_FORMAT
 983         " != " SIZE_FORMAT, reserved()->word_size(),
 984         _rs.size() / BytesPerWord));
 985   }
 986 
 987   return result;
 988 }
 989 
 990 void VirtualSpaceNode::print_on(outputStream* st) const {
 991   size_t used = used_words_in_vs();
 992   size_t capacity = capacity_words_in_vs();
 993   VirtualSpace* vs = virtual_space();
 994   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
 995            "[" PTR_FORMAT ", " PTR_FORMAT ", "
 996            PTR_FORMAT ", " PTR_FORMAT ")",
 997            vs, capacity / K,
 998            capacity == 0 ? 0 : used * 100 / capacity,
 999            bottom(), top(), end(),
1000            vs->high_boundary());
1001 }
1002 
1003 #ifdef ASSERT
1004 void VirtualSpaceNode::mangle() {
1005   size_t word_size = capacity_words_in_vs();
1006   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1007 }
1008 #endif // ASSERT
1009 
1010 // VirtualSpaceList methods
1011 // Space allocated from the VirtualSpace
1012 
1013 VirtualSpaceList::~VirtualSpaceList() {
1014   VirtualSpaceListIterator iter(virtual_space_list());
1015   while (iter.repeat()) {
1016     VirtualSpaceNode* vsl = iter.get_next();
1017     delete vsl;
1018   }
1019 }
1020 
1021 void VirtualSpaceList::inc_reserved_words(size_t v) {
1022   assert_lock_strong(SpaceManager::expand_lock());
1023   _reserved_words = _reserved_words + v;
1024 }
1025 void VirtualSpaceList::dec_reserved_words(size_t v) {
1026   assert_lock_strong(SpaceManager::expand_lock());
1027   _reserved_words = _reserved_words - v;
1028 }
1029 
1030 #define assert_committed_below_limit()                             \
1031   assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,      \
1032       err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
1033               " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1034           MetaspaceAux::committed_bytes(), MaxMetaspaceSize));
1035 
1036 void VirtualSpaceList::inc_committed_words(size_t v) {
1037   assert_lock_strong(SpaceManager::expand_lock());
1038   _committed_words = _committed_words + v;
1039 
1040   assert_committed_below_limit();
1041 }
1042 void VirtualSpaceList::dec_committed_words(size_t v) {
1043   assert_lock_strong(SpaceManager::expand_lock());
1044   _committed_words = _committed_words - v;
1045 
1046   assert_committed_below_limit();
1047 }
1048 
1049 void VirtualSpaceList::inc_virtual_space_count() {
1050   assert_lock_strong(SpaceManager::expand_lock());
1051   _virtual_space_count++;
1052 }
1053 void VirtualSpaceList::dec_virtual_space_count() {
1054   assert_lock_strong(SpaceManager::expand_lock());
1055   _virtual_space_count--;
1056 }
1057 
1058 void ChunkManager::remove_chunk(Metachunk* chunk) {
1059   size_t word_size = chunk->word_size();
1060   ChunkIndex index = list_index(word_size);
1061   if (index != HumongousIndex) {
1062     free_chunks(index)->remove_chunk(chunk);
1063   } else {
1064     humongous_dictionary()->remove_chunk(chunk);
1065   }
1066 
1067   // Chunk is being removed from the chunks free list.
1068   dec_free_chunks_total(chunk->capacity_word_size());
1069 }
1070 
1071 // Walk the list of VirtualSpaceNodes and delete
1072 // nodes with a 0 container_count.  Remove Metachunks in
1073 // the node from their respective freelists.
1074 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1075   assert_lock_strong(SpaceManager::expand_lock());
1076   // Don't use a VirtualSpaceListIterator because this
1077   // list is being changed and a straightforward use of an iterator is not safe.
1078   VirtualSpaceNode* purged_vsl = NULL;
1079   VirtualSpaceNode* prev_vsl = virtual_space_list();
1080   VirtualSpaceNode* next_vsl = prev_vsl;
1081   while (next_vsl != NULL) {
1082     VirtualSpaceNode* vsl = next_vsl;
1083     next_vsl = vsl->next();
1084     // Don't free the current virtual space since it will likely
1085     // be needed soon.
1086     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1087       // Unlink it from the list
1088       if (prev_vsl == vsl) {
1089         // This is the case of the current node being the first node.
1090         assert(vsl == virtual_space_list(), "Expected to be the first node");
1091         set_virtual_space_list(vsl->next());
1092       } else {
1093         prev_vsl->set_next(vsl->next());
1094       }
1095 
1096       vsl->purge(chunk_manager);
1097       dec_reserved_words(vsl->reserved_words());
1098       dec_committed_words(vsl->committed_words());
1099       dec_virtual_space_count();
1100       purged_vsl = vsl;
1101       delete vsl;
1102     } else {
1103       prev_vsl = vsl;
1104     }
1105   }
1106 #ifdef ASSERT
1107   if (purged_vsl != NULL) {
1108   // List should be stable enough to use an iterator here.
1109   VirtualSpaceListIterator iter(virtual_space_list());
1110     while (iter.repeat()) {
1111       VirtualSpaceNode* vsl = iter.get_next();
1112       assert(vsl != purged_vsl, "Purge of vsl failed");
1113     }
1114   }
1115 #endif
1116 }
1117 
1118 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
1119                                    _is_class(false),
1120                                    _virtual_space_list(NULL),
1121                                    _current_virtual_space(NULL),
1122                                    _reserved_words(0),
1123                                    _committed_words(0),
1124                                    _virtual_space_count(0) {
1125   MutexLockerEx cl(SpaceManager::expand_lock(),
1126                    Mutex::_no_safepoint_check_flag);
1127   create_new_virtual_space(word_size);
1128 }
1129 
1130 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
1131                                    _is_class(true),
1132                                    _virtual_space_list(NULL),
1133                                    _current_virtual_space(NULL),
1134                                    _reserved_words(0),
1135                                    _committed_words(0),
1136                                    _virtual_space_count(0) {
1137   MutexLockerEx cl(SpaceManager::expand_lock(),
1138                    Mutex::_no_safepoint_check_flag);
1139   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
1140   bool succeeded = class_entry->initialize();
1141   if (succeeded) {
1142     link_vs(class_entry);
1143   }
1144 }
1145 
1146 size_t VirtualSpaceList::free_bytes() {
1147   return virtual_space_list()->free_words_in_vs() * BytesPerWord;
1148 }
1149 
1150 // Allocate another meta virtual space and add it to the list.
1151 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
1152   assert_lock_strong(SpaceManager::expand_lock());
1153 
1154   if (is_class()) {
1155     assert(false, "We currently don't support more than one VirtualSpace for"
1156                   " the compressed class space. The initialization of the"
1157                   " CCS uses another code path and should not hit this path.");
1158     return false;
1159   }
1160 
1161   if (vs_word_size == 0) {
1162     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
1163     return false;
1164   }
1165 
1166   // Reserve the space
1167   size_t vs_byte_size = vs_word_size * BytesPerWord;
1168   assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());
1169 
1170   // Allocate the meta virtual space and initialize it.
1171   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
1172   if (!new_entry->initialize()) {
1173     delete new_entry;
1174     return false;
1175   } else {
1176     assert(new_entry->reserved_words() == vs_word_size,
1177         "Reserved memory size differs from requested memory size");
1178     // ensure lock-free iteration sees fully initialized node
1179     OrderAccess::storestore();
1180     link_vs(new_entry);
1181     return true;
1182   }
1183 }
1184 
1185 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1186   if (virtual_space_list() == NULL) {
1187       set_virtual_space_list(new_entry);
1188   } else {
1189     current_virtual_space()->set_next(new_entry);
1190   }
1191   set_current_virtual_space(new_entry);
1192   inc_reserved_words(new_entry->reserved_words());
1193   inc_committed_words(new_entry->committed_words());
1194   inc_virtual_space_count();
1195 #ifdef ASSERT
1196   new_entry->mangle();
1197 #endif
1198   if (TraceMetavirtualspaceAllocation && Verbose) {
1199     VirtualSpaceNode* vsl = current_virtual_space();
1200     vsl->print_on(gclog_or_tty);
1201   }
1202 }
1203 
1204 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1205                                       size_t min_words,
1206                                       size_t preferred_words) {
1207   size_t before = node->committed_words();
1208 
1209   bool result = node->expand_by(min_words, preferred_words);
1210 
1211   size_t after = node->committed_words();
1212 
1213   // after and before can be the same if the memory was pre-committed.
1214   assert(after >= before, "Inconsistency");
1215   inc_committed_words(after - before);
1216 
1217   return result;
1218 }
1219 
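     // Commit at least 'min_words' (and preferably 'preferred_words'), first by
     // expanding the current virtual space and, failing that, by creating a new
     // virtual space.  Expansion is subject to MetaspaceGC::can_expand() and
     // MetaspaceGC::allowed_expansion().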
1220 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
1221   assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
1222   assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
1223   assert(min_words <= preferred_words, "Invalid arguments");
1224 
1225   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
1226     return  false;
1227   }
1228 
1229   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
1230   if (allowed_expansion_words < min_words) {
1231     return false;
1232   }
1233 
1234   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
1235 
1236   // Commit more memory from the current virtual space.
1237   bool vs_expanded = expand_node_by(current_virtual_space(),
1238                                     min_words,
1239                                     max_expansion_words);
1240   if (vs_expanded) {
1241     return true;
1242   }
1243 
1244   // Get another virtual space.
1245   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
1246   grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
1247 
1248   if (create_new_virtual_space(grow_vs_words)) {
1249     if (current_virtual_space()->is_pre_committed()) {
1250       // The memory was pre-committed, so we are done here.
1251       assert(min_words <= current_virtual_space()->committed_words(),
1252           "The new VirtualSpace was pre-committed, so it "
1253           "should be large enough to fit the alloc request.");
1254       return true;
1255     }
1256 
1257     return expand_node_by(current_virtual_space(),
1258                           min_words,
1259                           max_expansion_words);
1260   }
1261 
1262   return false;
1263 }
1264 
1265 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
1266                                            size_t grow_chunks_by_words,
1267                                            size_t medium_chunk_bunch) {
1268 
1269   // Allocate a chunk out of the current virtual space.
1270   Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1271 
1272   if (next != NULL) {
1273     return next;
1274   }
1275 
1276   // The expand amount is currently only determined by the requested sizes
1277   // and not how much committed memory is left in the current virtual space.
1278 
1279   size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
1280   size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
1281   if (min_word_size >= preferred_word_size) {
1282     // Can happen when humongous chunks are allocated.
1283     preferred_word_size = min_word_size;
1284   }
1285 
1286   bool expanded = expand_by(min_word_size, preferred_word_size);
1287   if (expanded) {
1288     next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1289     assert(next != NULL, "The allocation was expected to succeed after the expansion");
1290   }
1291 
1292    return next;
1293 }
1294 
1295 void VirtualSpaceList::print_on(outputStream* st) const {
1296   if (TraceMetadataChunkAllocation && Verbose) {
1297     VirtualSpaceListIterator iter(virtual_space_list());
1298     while (iter.repeat()) {
1299       VirtualSpaceNode* node = iter.get_next();
1300       node->print_on(st);
1301     }
1302   }
1303 }
1304 
1305 bool VirtualSpaceList::contains(const void *ptr) {
1306   VirtualSpaceNode* list = virtual_space_list();
1307   VirtualSpaceListIterator iter(list);
1308   while (iter.repeat()) {
1309     VirtualSpaceNode* node = iter.get_next();
1310     if (node->reserved()->contains(ptr)) {
1311       return true;
1312     }
1313   }
1314   return false;
1315 }
1316 
1317 
1318 // MetaspaceGC methods
1319 
1320 // VM_CollectForMetadataAllocation is the vm operation used to GC.
1321 // Within the VM operation after the GC the attempt to allocate the metadata
1322 // should succeed.  If the GC did not free enough space for the metaspace
1323 // allocation, the HWM is increased so that another virtualspace will be
1324 // allocated for the metadata.  With perm gen the increase in the perm
1325 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
1326 // metaspace policy uses those as the small and large steps for the HWM.
1327 //
1328 // After the GC the compute_new_size() for MetaspaceGC is called to
1329 // resize the capacity of the metaspaces.  The current implementation
1330 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio that
1331 // some GCs use to resize the Java heap.  New flags can be implemented
1332 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
1333 // free space is desirable in the metaspace capacity to decide how much
1334 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
1335 // free space is desirable in the metaspace capacity before decreasing
1336 // the HWM.
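     //
     // As a rough illustration of delta_capacity_until_GC() below: a request whose
     // aligned size fits within MinMetaspaceExpansion raises the HWM by exactly
     // MinMetaspaceExpansion; one that fits within MaxMetaspaceExpansion raises it
     // by MaxMetaspaceExpansion; anything larger raises it by its own aligned size
     // plus MinMetaspaceExpansion.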
1337 
1338 // Calculate the amount to increase the high water mark (HWM).
1339 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1340 // another expansion is not requested too soon.  If that is not
1341 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
1342 // If that is still not enough, expand by the size of the allocation
1343 // plus some.
1344 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
1345   size_t min_delta = MinMetaspaceExpansion;
1346   size_t max_delta = MaxMetaspaceExpansion;
1347   size_t delta = align_size_up(bytes, Metaspace::commit_alignment());
1348 
1349   if (delta <= min_delta) {
1350     delta = min_delta;
1351   } else if (delta <= max_delta) {
1352     // Don't want to hit the high water mark on the next
1353     // allocation so make the delta greater than just enough
1354     // for this allocation.
1355     delta = max_delta;
1356   } else {
1357     // This allocation is large but the next ones are probably not
1358     // so increase by the minimum.
1359     delta = delta + min_delta;
1360   }
1361 
1362   assert_is_size_aligned(delta, Metaspace::commit_alignment());
1363 
1364   return delta;
1365 }
1366 
1367 size_t MetaspaceGC::capacity_until_GC() {
1368   size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
1369   assert(value >= MetaspaceSize, "Not initialized properly?");
1370   return value;
1371 }
1372 
1373 size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
1374   assert_is_size_aligned(v, Metaspace::commit_alignment());
1375 
1376   return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
1377 }
1378 
1379 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
1380   assert_is_size_aligned(v, Metaspace::commit_alignment());
1381 
1382   return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
1383 }
1384 
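     // Returns true if committing 'word_size' more words would stay within both the
     // CompressedClassSpaceSize limit (for class metadata) and the user-imposed
     // MaxMetaspaceSize limit.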
1385 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
1386   // Check if the compressed class space is full.
1387   if (is_class && Metaspace::using_class_space()) {
1388     size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
1389     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
1390       return false;
1391     }
1392   }
1393 
1394   // Check if the user has imposed a limit on the metaspace memory.
1395   size_t committed_bytes = MetaspaceAux::committed_bytes();
1396   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
1397     return false;
1398   }
1399 
1400   return true;
1401 }
1402 
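     // Returns the number of words that may still be committed before hitting
     // either the GC high-water mark or MaxMetaspaceSize.  During VM initialization,
     // or while the GC_locker is blocking GCs, only MaxMetaspaceSize constrains
     // the result.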
1403 size_t MetaspaceGC::allowed_expansion() {
1404   size_t committed_bytes = MetaspaceAux::committed_bytes();
1405 
1406   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
1407 
1408   // Always grant expansion if we are initializing the JVM,
1409   // or if the GC_locker is preventing GCs.
1410   if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) {
1411     return left_until_max / BytesPerWord;
1412   }
1413 
1414   size_t capacity_until_gc = capacity_until_GC();
1415 
1416   if (capacity_until_gc <= committed_bytes) {
1417     return 0;
1418   }
1419 
1420   size_t left_until_GC = capacity_until_gc - committed_bytes;
1421   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
1422 
1423   return left_to_commit / BytesPerWord;
1424 }
1425 
1426 void MetaspaceGC::compute_new_size() {
1427   assert(_shrink_factor <= 100, "invalid shrink factor");
1428   uint current_shrink_factor = _shrink_factor;
1429   _shrink_factor = 0;
1430 
1431   const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
1432   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1433 
1434   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1435   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1436 
1437   const double min_tmp = used_after_gc / maximum_used_percentage;
1438   size_t minimum_desired_capacity =
1439     (size_t)MIN2(min_tmp, double(max_uintx));
1440   // Don't shrink below the initial metaspace size (MetaspaceSize)
1441   minimum_desired_capacity = MAX2(minimum_desired_capacity,
1442                                   MetaspaceSize);
1443 
1444   if (PrintGCDetails && Verbose) {
1445     gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
1446     gclog_or_tty->print_cr("  "
1447                   "  minimum_free_percentage: %6.2f"
1448                   "  maximum_used_percentage: %6.2f",
1449                   minimum_free_percentage,
1450                   maximum_used_percentage);
1451     gclog_or_tty->print_cr("  "
1452                   "   used_after_gc       : %6.1fKB",
1453                   used_after_gc / (double) K);
1454   }
1455 
1456 
1457   size_t shrink_bytes = 0;
1458   if (capacity_until_GC < minimum_desired_capacity) {
1459     // If the capacity below the metaspace HWM is less than desired,
1460     // increase the HWM.
1461     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1462     expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
1463     // Don't expand unless it's significant
1464     if (expand_bytes >= MinMetaspaceExpansion) {
1465       MetaspaceGC::inc_capacity_until_GC(expand_bytes);
1466     }
1467     if (PrintGCDetails && Verbose) {
1468       size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC();
1469       gclog_or_tty->print_cr("    expanding:"
1470                     "  minimum_desired_capacity: %6.1fKB"
1471                     "  expand_bytes: %6.1fKB"
1472                     "  MinMetaspaceExpansion: %6.1fKB"
1473                     "  new metaspace HWM:  %6.1fKB",
1474                     minimum_desired_capacity / (double) K,
1475                     expand_bytes / (double) K,
1476                     MinMetaspaceExpansion / (double) K,
1477                     new_capacity_until_GC / (double) K);
1478     }
1479     return;
1480   }
1481 
1482   // No expansion, now see if we want to shrink
1483   // We would never want to shrink more than this
1484   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1485   assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
1486     max_shrink_bytes));
1487 
1488   // Should shrinking be considered?
1489   if (MaxMetaspaceFreeRatio < 100) {
1490     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1491     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1492     const double max_tmp = used_after_gc / minimum_used_percentage;
1493     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1494     maximum_desired_capacity = MAX2(maximum_desired_capacity,
1495                                     MetaspaceSize);
1496     if (PrintGCDetails && Verbose) {
1497       gclog_or_tty->print_cr("  "
1498                              "  maximum_free_percentage: %6.2f"
1499                              "  minimum_used_percentage: %6.2f",
1500                              maximum_free_percentage,
1501                              minimum_used_percentage);
1502       gclog_or_tty->print_cr("  "
1503                              "  minimum_desired_capacity: %6.1fKB"
1504                              "  maximum_desired_capacity: %6.1fKB",
1505                              minimum_desired_capacity / (double) K,
1506                              maximum_desired_capacity / (double) K);
1507     }
1508 
1509     assert(minimum_desired_capacity <= maximum_desired_capacity,
1510            "sanity check");
1511 
1512     if (capacity_until_GC > maximum_desired_capacity) {
1513       // Capacity too large, compute shrinking size
1514       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1515       // We don't want to shrink all the way back to initSize if people call
1516       // System.gc(), because some programs do that between "phases" and then
1517       // we'd just have to grow the metaspace up again for the next phase.  So
1518       // we damp the shrinking: 0% on the first call, 10% on the second call,
1519       // 40% on the third call, and 100% by the fourth call.  But if we
1520       // recompute size without shrinking, it goes back to 0%.
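           // Illustrative example (hypothetical numbers): if capacity_until_GC
           // exceeds maximum_desired_capacity by 1000*K and this is the third
           // consecutive shrink opportunity (current_shrink_factor == 40), only
           // 1000*K / 100 * 40 == 400*K is shrunk, before the alignment below.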
1521       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1522 
1523       shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1524 
1525       assert(shrink_bytes <= max_shrink_bytes,
1526         err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1527           shrink_bytes, max_shrink_bytes));
1528       if (current_shrink_factor == 0) {
1529         _shrink_factor = 10;
1530       } else {
1531         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1532       }
1533       if (PrintGCDetails && Verbose) {
1534         gclog_or_tty->print_cr("  "
1535                       "  shrinking:"
1536                       "  initSize: %.1fK"
1537                       "  maximum_desired_capacity: %.1fK",
1538                       MetaspaceSize / (double) K,
1539                       maximum_desired_capacity / (double) K);
1540         gclog_or_tty->print_cr("  "
1541                       "  shrink_bytes: %.1fK"
1542                       "  current_shrink_factor: %d"
1543                       "  new shrink factor: %d"
1544                       "  MinMetaspaceExpansion: %.1fK",
1545                       shrink_bytes / (double) K,
1546                       current_shrink_factor,
1547                       _shrink_factor,
1548                       MinMetaspaceExpansion / (double) K);
1549       }
1550     }
1551   }
1552 
1553   // Don't shrink unless it's significant
1554   if (shrink_bytes >= MinMetaspaceExpansion &&
1555       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1556     MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1557   }
1558 }
1559 
1560 // Metadebug methods
1561 
1562 void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
1563                                        size_t chunk_word_size){
1564 #ifdef ASSERT
1565   VirtualSpaceList* vsl = sm->vs_list();
1566   if (MetaDataDeallocateALot &&
1567       Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
1568     Metadebug::reset_deallocate_chunk_a_lot_count();
1569     for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) {
1570       Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
1571       if (dummy_chunk == NULL) {
1572         break;
1573       }
1574       sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
1575 
1576       if (TraceMetadataChunkAllocation && Verbose) {
1577         gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
1578                                sm->sum_count_in_chunks_in_use());
1579         dummy_chunk->print_on(gclog_or_tty);
1580         gclog_or_tty->print_cr("  Free chunks total %d  count %d",
1581                                sm->chunk_manager()->free_chunks_total_words(),
1582                                sm->chunk_manager()->free_chunks_count());
1583       }
1584     }
1585   } else {
1586     Metadebug::inc_deallocate_chunk_a_lot_count();
1587   }
1588 #endif
1589 }
1590 
1591 void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
1592                                        size_t raw_word_size){
1593 #ifdef ASSERT
1594   if (MetaDataDeallocateALot &&
1595         Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
1596     Metadebug::set_deallocate_block_a_lot_count(0);
1597     for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
1598       MetaWord* dummy_block = sm->allocate_work(raw_word_size);
1599       if (dummy_block == 0) {
1600         break;
1601       }
1602       sm->deallocate(dummy_block, raw_word_size);
1603     }
1604   } else {
1605     Metadebug::inc_deallocate_block_a_lot_count();
1606   }
1607 #endif
1608 }
1609 
1610 void Metadebug::init_allocation_fail_alot_count() {
1611   if (MetadataAllocationFailALot) {
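         // The expression below is meant to pick a pseudo-random count in
         // roughly [1, MetadataAllocationFailALotInterval]:
         // os::random()/(max_jint+1.0) is a value in [0, 1).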
1612     _allocation_fail_alot_count =
1613       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1614   }
1615 }
1616 
1617 #ifdef ASSERT
1618 bool Metadebug::test_metadata_failure() {
1619   if (MetadataAllocationFailALot &&
1620       Threads::is_vm_complete()) {
1621     if (_allocation_fail_alot_count > 0) {
1622       _allocation_fail_alot_count--;
1623     } else {
1624       if (TraceMetadataChunkAllocation && Verbose) {
1625         gclog_or_tty->print_cr("Metadata allocation failing for "
1626                                "MetadataAllocationFailALot");
1627       }
1628       init_allocation_fail_alot_count();
1629       return true;
1630     }
1631   }
1632   return false;
1633 }
1634 #endif
1635 
1636 // ChunkManager methods
1637 
1638 size_t ChunkManager::free_chunks_total_words() {
1639   return _free_chunks_total;
1640 }
1641 
1642 size_t ChunkManager::free_chunks_total_bytes() {
1643   return free_chunks_total_words() * BytesPerWord;
1644 }
1645 
1646 size_t ChunkManager::free_chunks_count() {
1647 #ifdef ASSERT
1648   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1649     MutexLockerEx cl(SpaceManager::expand_lock(),
1650                      Mutex::_no_safepoint_check_flag);
1651     // This lock is only needed in debug because the verification
1652     // of the _free_chunks_totals walks the list of free chunks
1653     slow_locked_verify_free_chunks_count();
1654   }
1655 #endif
1656   return _free_chunks_count;
1657 }
1658 
1659 void ChunkManager::locked_verify_free_chunks_total() {
1660   assert_lock_strong(SpaceManager::expand_lock());
1661   assert(sum_free_chunks() == _free_chunks_total,
1662     err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
1663            " same as sum " SIZE_FORMAT, _free_chunks_total,
1664            sum_free_chunks()));
1665 }
1666 
1667 void ChunkManager::verify_free_chunks_total() {
1668   MutexLockerEx cl(SpaceManager::expand_lock(),
1669                      Mutex::_no_safepoint_check_flag);
1670   locked_verify_free_chunks_total();
1671 }
1672 
1673 void ChunkManager::locked_verify_free_chunks_count() {
1674   assert_lock_strong(SpaceManager::expand_lock());
1675   assert(sum_free_chunks_count() == _free_chunks_count,
1676     err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
1677            " same as sum " SIZE_FORMAT, _free_chunks_count,
1678            sum_free_chunks_count()));
1679 }
1680 
1681 void ChunkManager::verify_free_chunks_count() {
1682 #ifdef ASSERT
1683   MutexLockerEx cl(SpaceManager::expand_lock(),
1684                      Mutex::_no_safepoint_check_flag);
1685   locked_verify_free_chunks_count();
1686 #endif
1687 }
1688 
1689 void ChunkManager::verify() {
1690   MutexLockerEx cl(SpaceManager::expand_lock(),
1691                      Mutex::_no_safepoint_check_flag);
1692   locked_verify();
1693 }
1694 
1695 void ChunkManager::locked_verify() {
1696   locked_verify_free_chunks_count();
1697   locked_verify_free_chunks_total();
1698 }
1699 
1700 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1701   assert_lock_strong(SpaceManager::expand_lock());
1702   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1703                 _free_chunks_total, _free_chunks_count);
1704 }
1705 
1706 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1707   assert_lock_strong(SpaceManager::expand_lock());
1708   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1709                 sum_free_chunks(), sum_free_chunks_count());
1710 }
1711 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1712   return &_free_chunks[index];
1713 }
1714 
1715 // These methods that sum the free chunk lists are used in printing
1716 // methods that are used in product builds.
1717 size_t ChunkManager::sum_free_chunks() {
1718   assert_lock_strong(SpaceManager::expand_lock());
1719   size_t result = 0;
1720   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1721     ChunkList* list = free_chunks(i);
1722 
1723     if (list == NULL) {
1724       continue;
1725     }
1726 
1727     result = result + list->count() * list->size();
1728   }
1729   result = result + humongous_dictionary()->total_size();
1730   return result;
1731 }
1732 
1733 size_t ChunkManager::sum_free_chunks_count() {
1734   assert_lock_strong(SpaceManager::expand_lock());
1735   size_t count = 0;
1736   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1737     ChunkList* list = free_chunks(i);
1738     if (list == NULL) {
1739       continue;
1740     }
1741     count = count + list->count();
1742   }
1743   count = count + humongous_dictionary()->total_free_blocks();
1744   return count;
1745 }
1746 
1747 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1748   ChunkIndex index = list_index(word_size);
1749   assert(index < HumongousIndex, "No humongous list");
1750   return free_chunks(index);
1751 }
1752 
1753 void ChunkManager::free_chunks_put(Metachunk* chunk) {
1754   assert_lock_strong(SpaceManager::expand_lock());
1755   ChunkList* free_list = find_free_chunks_list(chunk->word_size());
1756   chunk->set_next(free_list->head());
1757   free_list->set_head(chunk);
1758   // chunk is being returned to the chunk free list
1759   inc_free_chunks_total(chunk->capacity_word_size());
1760   slow_locked_verify();
1761 }
1762 
1763 void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
1764   // The deallocation of a chunk originates in the freelist
1765   // management code for a Metaspace and does not hold the
1766   // lock.
1767   assert(chunk != NULL, "Deallocating NULL");
1768   assert_lock_strong(SpaceManager::expand_lock());
1769   slow_locked_verify();
1770   if (TraceMetadataChunkAllocation) {
1771     gclog_or_tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
1772                            PTR_FORMAT "  size " SIZE_FORMAT,
1773                            chunk, chunk->word_size());
1774   }
1775   free_chunks_put(chunk);
1776 }
1777 
1778 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1779   assert_lock_strong(SpaceManager::expand_lock());
1780 
1781   slow_locked_verify();
1782 
1783   Metachunk* chunk = NULL;
1784   if (list_index(word_size) != HumongousIndex) {
1785     ChunkList* free_list = find_free_chunks_list(word_size);
1786     assert(free_list != NULL, "Sanity check");
1787 
1788     chunk = free_list->head();
1789 
1790     if (chunk == NULL) {
1791       return NULL;
1792     }
1793 
1794     // Remove the chunk as the head of the list.
1795     free_list->remove_chunk(chunk);
1796 
1797     if (TraceMetadataChunkAllocation && Verbose) {
1798       gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1799                              PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1800                              free_list, chunk, chunk->word_size());
1801     }
1802   } else {
1803     chunk = humongous_dictionary()->get_chunk(
1804       word_size,
1805       FreeBlockDictionary<Metachunk>::atLeast);
1806 
1807     if (chunk == NULL) {
1808       return NULL;
1809     }
1810 
1811     if (TraceMetadataHumongousAllocation) {
1812       size_t waste = chunk->word_size() - word_size;
1813       gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1814                              SIZE_FORMAT " for requested size " SIZE_FORMAT
1815                              " waste " SIZE_FORMAT,
1816                              chunk->word_size(), word_size, waste);
1817     }
1818   }
1819 
1820   // Chunk is being removed from the chunks free list.
1821   dec_free_chunks_total(chunk->capacity_word_size());
1822 
1823   // Remove it from the links to this freelist
1824   chunk->set_next(NULL);
1825   chunk->set_prev(NULL);
1826 #ifdef ASSERT
1827   // Chunk is no longer on any freelist. Clearing is_free makes
1828   // container_count_slow() work.
1829   chunk->set_is_free(false);
1830 #endif
1831   chunk->container()->inc_container_count();
1832 
1833   slow_locked_verify();
1834   return chunk;
1835 }
1836 
1837 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1838   assert_lock_strong(SpaceManager::expand_lock());
1839   slow_locked_verify();
1840 
1841   // Take from the beginning of the list
1842   Metachunk* chunk = free_chunks_get(word_size);
1843   if (chunk == NULL) {
1844     return NULL;
1845   }
1846 
1847   assert((word_size <= chunk->word_size()) ||
1848          (list_index(chunk->word_size()) == HumongousIndex),
1849          "Non-humongous variable sized chunk");
1850   if (TraceMetadataChunkAllocation) {
1851     size_t list_count;
1852     if (list_index(word_size) < HumongousIndex) {
1853       ChunkList* list = find_free_chunks_list(word_size);
1854       list_count = list->count();
1855     } else {
1856       list_count = humongous_dictionary()->total_count();
1857     }
1858     gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1859                         PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1860                         this, chunk, chunk->word_size(), list_count);
1861     locked_print_free_chunks(gclog_or_tty);
1862   }
1863 
1864   return chunk;
1865 }
1866 
1867 void ChunkManager::print_on(outputStream* out) const {
1868   if (PrintFLSStatistics != 0) {
1869     const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
1870   }
1871 }
1872 
1873 // SpaceManager methods
1874 
1875 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1876                                            size_t* chunk_word_size,
1877                                            size_t* class_chunk_word_size) {
1878   switch (type) {
1879   case Metaspace::BootMetaspaceType:
1880     *chunk_word_size = Metaspace::first_chunk_word_size();
1881     *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1882     break;
1883   case Metaspace::ROMetaspaceType:
1884     *chunk_word_size = SharedReadOnlySize / wordSize;
1885     *class_chunk_word_size = ClassSpecializedChunk;
1886     break;
1887   case Metaspace::ReadWriteMetaspaceType:
1888     *chunk_word_size = SharedReadWriteSize / wordSize;
1889     *class_chunk_word_size = ClassSpecializedChunk;
1890     break;
1891   case Metaspace::AnonymousMetaspaceType:
1892   case Metaspace::ReflectionMetaspaceType:
1893     *chunk_word_size = SpecializedChunk;
1894     *class_chunk_word_size = ClassSpecializedChunk;
1895     break;
1896   default:
1897     *chunk_word_size = SmallChunk;
1898     *class_chunk_word_size = ClassSmallChunk;
1899     break;
1900   }
1901   assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1902     err_msg("Initial chunk sizes bad: data  " SIZE_FORMAT
1903             " class " SIZE_FORMAT,
1904             *chunk_word_size, *class_chunk_word_size));
1905 }
1906 
1907 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1908   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1909   size_t free = 0;
1910   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1911     Metachunk* chunk = chunks_in_use(i);
1912     while (chunk != NULL) {
1913       free += chunk->free_word_size();
1914       chunk = chunk->next();
1915     }
1916   }
1917   return free;
1918 }
1919 
1920 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1921   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1922   size_t result = 0;
1923   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1924    result += sum_waste_in_chunks_in_use(i);
1925   }
1926 
1927   return result;
1928 }
1929 
1930 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1931   size_t result = 0;
1932   Metachunk* chunk = chunks_in_use(index);
1933   // Count the free space in all the chunks except the
1934   // current chunk, from which allocations are still being done.
1935   while (chunk != NULL) {
1936     if (chunk != current_chunk()) {
1937       result += chunk->free_word_size();
1938     }
1939     chunk = chunk->next();
1940   }
1941   return result;
1942 }
1943 
1944 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1945   // For CMS use "allocated_chunks_words()", which does not need the
1946   // Metaspace lock.  For the other collectors sum over the chunk
1947   // lists.  Use both methods as a check that "allocated_chunks_words()"
1948   // is correct.  That is, sum_capacity_in_chunks_in_use() is too
1949   // expensive to use in the product, so allocated_chunks_words() should
1950   // be used there, but walking the lists allows checking that
1951   // allocated_chunks_words() returns the same value as
1952   // sum_capacity_in_chunks_in_use(), which is the definitive answer.
1953   if (UseConcMarkSweepGC) {
1954     return allocated_chunks_words();
1955   } else {
1956     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1957     size_t sum = 0;
1958     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1959       Metachunk* chunk = chunks_in_use(i);
1960       while (chunk != NULL) {
1961         sum += chunk->capacity_word_size();
1962         chunk = chunk->next();
1963       }
1964     }
1965   return sum;
1966   }
1967 }
1968 
1969 size_t SpaceManager::sum_count_in_chunks_in_use() {
1970   size_t count = 0;
1971   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1972     count = count + sum_count_in_chunks_in_use(i);
1973   }
1974 
1975   return count;
1976 }
1977 
1978 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
1979   size_t count = 0;
1980   Metachunk* chunk = chunks_in_use(i);
1981   while (chunk != NULL) {
1982     count++;
1983     chunk = chunk->next();
1984   }
1985   return count;
1986 }
1987 
1988 
1989 size_t SpaceManager::sum_used_in_chunks_in_use() const {
1990   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1991   size_t used = 0;
1992   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1993     Metachunk* chunk = chunks_in_use(i);
1994     while (chunk != NULL) {
1995       used += chunk->used_word_size();
1996       chunk = chunk->next();
1997     }
1998   }
1999   return used;
2000 }
2001 
2002 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
2003 
2004   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2005     Metachunk* chunk = chunks_in_use(i);
2006     st->print("SpaceManager: %s " PTR_FORMAT,
2007                  chunk_size_name(i), chunk);
2008     if (chunk != NULL) {
2009       st->print_cr(" free " SIZE_FORMAT,
2010                    chunk->free_word_size());
2011     } else {
2012       st->print_cr("");
2013     }
2014   }
2015 
2016   chunk_manager()->locked_print_free_chunks(st);
2017   chunk_manager()->locked_print_sum_free_chunks(st);
2018 }
2019 
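     // Illustrative sizing (hypothetical request sizes): while no medium chunk
     // exists and fewer than _small_chunk_limit small chunks are in use, a small
     // request gets a small chunk; once word_size + Metachunk::overhead() no
     // longer fits a small chunk, a medium chunk is used; and a request larger
     // than medium_chunk_size() gets a humongous chunk of word_size plus
     // overhead, rounded up to a multiple of HumongousChunkGranularity words.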
2020 size_t SpaceManager::calc_chunk_size(size_t word_size) {
2021 
2022   // Decide between a small chunk and a medium chunk.  Up to
2023   // _small_chunk_limit small chunks can be allocated but
2024   // once a medium chunk has been allocated, no more small
2025   // chunks will be allocated.
2026   size_t chunk_word_size;
2027   if (chunks_in_use(MediumIndex) == NULL &&
2028       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2029     chunk_word_size = (size_t) small_chunk_size();
2030     if (word_size + Metachunk::overhead() > small_chunk_size()) {
2031       chunk_word_size = medium_chunk_size();
2032     }
2033   } else {
2034     chunk_word_size = medium_chunk_size();
2035   }
2036 
2037   // Might still need a humongous chunk.  Enforce an
2038   // eight word granularity to facilitate reuse (some
2039   // wastage but better chance of reuse).
2040   size_t if_humongous_sized_chunk =
2041     align_size_up(word_size + Metachunk::overhead(),
2042                   HumongousChunkGranularity);
2043   chunk_word_size =
2044     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2045 
2046   assert(!SpaceManager::is_humongous(word_size) ||
2047          chunk_word_size == if_humongous_sized_chunk,
2048          err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
2049                  " chunk_word_size " SIZE_FORMAT,
2050                  word_size, chunk_word_size));
2051   if (TraceMetadataHumongousAllocation &&
2052       SpaceManager::is_humongous(word_size)) {
2053     gclog_or_tty->print_cr("Metadata humongous allocation:");
2054     gclog_or_tty->print_cr("  word_size " PTR_FORMAT, word_size);
2055     gclog_or_tty->print_cr("  chunk_word_size " PTR_FORMAT,
2056                            chunk_word_size);
2057     gclog_or_tty->print_cr("    chunk overhead " PTR_FORMAT,
2058                            Metachunk::overhead());
2059   }
2060   return chunk_word_size;
2061 }
2062 
2063 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2064   assert(vs_list()->current_virtual_space() != NULL,
2065          "Should have been set");
2066   assert(current_chunk() == NULL ||
2067          current_chunk()->allocate(word_size) == NULL,
2068          "Don't need to expand");
2069   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2070 
2071   if (TraceMetadataChunkAllocation && Verbose) {
2072     size_t words_left = 0;
2073     size_t words_used = 0;
2074     if (current_chunk() != NULL) {
2075       words_left = current_chunk()->free_word_size();
2076       words_used = current_chunk()->used_word_size();
2077     }
2078     gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
2079                            " words " SIZE_FORMAT " words used " SIZE_FORMAT
2080                            " words left",
2081                             word_size, words_used, words_left);
2082   }
2083 
2084   // Get another chunk out of the virtual space
2085   size_t grow_chunks_by_words = calc_chunk_size(word_size);
2086   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2087 
2088   if (next != NULL) {
2089     Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
2090   }
2091 
2092   MetaWord* mem = NULL;
2093 
2094   // If a chunk was available, add it to the in-use chunk list
2095   // and do an allocation from it.
2096   if (next != NULL) {
2097     // Add to this manager's list of chunks in use.
2098     add_chunk(next, false);
2099     mem = next->allocate(word_size);
2100   }
2101 
2102   return mem;
2103 }
2104 
2105 void SpaceManager::print_on(outputStream* st) const {
2106 
2107   for (ChunkIndex i = ZeroIndex;
2108        i < NumberOfInUseLists ;
2109        i = next_chunk_index(i) ) {
2110     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
2111                  chunks_in_use(i),
2112                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2113   }
2114   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2115                " Humongous " SIZE_FORMAT,
2116                sum_waste_in_chunks_in_use(SmallIndex),
2117                sum_waste_in_chunks_in_use(MediumIndex),
2118                sum_waste_in_chunks_in_use(HumongousIndex));
2119   // block free lists
2120   if (block_freelists() != NULL) {
2121     st->print_cr("total in block free lists " SIZE_FORMAT,
2122       block_freelists()->total_size());
2123   }
2124 }
2125 
2126 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2127                            Mutex* lock) :
2128   _mdtype(mdtype),
2129   _allocated_blocks_words(0),
2130   _allocated_chunks_words(0),
2131   _allocated_chunks_count(0),
2132   _lock(lock)
2133 {
2134   initialize();
2135 }
2136 
2137 void SpaceManager::inc_size_metrics(size_t words) {
2138   assert_lock_strong(SpaceManager::expand_lock());
2139   // Running totals of allocated Metachunk words and allocated Metachunk
2140   // count for this SpaceManager.
2141   _allocated_chunks_words = _allocated_chunks_words + words;
2142   _allocated_chunks_count++;
2143   // Global total of capacity in allocated Metachunks
2144   MetaspaceAux::inc_capacity(mdtype(), words);
2145   // Global total of allocated Metablocks.
2146   // used_words_slow() includes the overhead of each
2147   // Metachunk, so account for that overhead in the used
2148   // total when the Metachunk is first added (so it is
2149   // only added once per Metachunk).
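       // For illustration (hypothetical numbers): adding a 512-word chunk
       // increases _allocated_chunks_words by 512, while the global used total
       // grows only by Metachunk::overhead(); the words actually handed out
       // from the chunk are added to used later via inc_used_metrics().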
2150   MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2151 }
2152 
2153 void SpaceManager::inc_used_metrics(size_t words) {
2154   // Add to the per SpaceManager total
2155   Atomic::add_ptr(words, &_allocated_blocks_words);
2156   // Add to the global total
2157   MetaspaceAux::inc_used(mdtype(), words);
2158 }
2159 
2160 void SpaceManager::dec_total_from_size_metrics() {
2161   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2162   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2163   // Also deduct the overhead per Metachunk
2164   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2165 }
2166 
2167 void SpaceManager::initialize() {
2168   Metadebug::init_allocation_fail_alot_count();
2169   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2170     _chunks_in_use[i] = NULL;
2171   }
2172   _current_chunk = NULL;
2173   if (TraceMetadataChunkAllocation && Verbose) {
2174     gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
2175   }
2176 }
2177 
2178 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
2179   if (chunks == NULL) {
2180     return;
2181   }
2182   ChunkList* list = free_chunks(index);
2183   assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
2184   assert_lock_strong(SpaceManager::expand_lock());
2185   Metachunk* cur = chunks;
2186 
2187   // This returns chunks one at a time.  If a new
2188   // class List can be created that is a base class
2189   // of FreeList then something like FreeList::prepend()
2190   // can be used in place of this loop
2191   while (cur != NULL) {
2192     assert(cur->container() != NULL, "Container should have been set");
2193     cur->container()->dec_container_count();
2194     // Capture the next link before it is changed
2195     // by the call to return_chunk_at_head();
2196     Metachunk* next = cur->next();
2197     cur->set_is_free(true);
2198     list->return_chunk_at_head(cur);
2199     cur = next;
2200   }
2201 }
2202 
2203 SpaceManager::~SpaceManager() {
2204   // The call below takes this->_lock, which can't be done while holding expand_lock().
2205   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2206     err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2207             " allocated_chunks_words() " SIZE_FORMAT,
2208             sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
2209 
2210   MutexLockerEx fcl(SpaceManager::expand_lock(),
2211                     Mutex::_no_safepoint_check_flag);
2212 
2213   chunk_manager()->slow_locked_verify();
2214 
2215   dec_total_from_size_metrics();
2216 
2217   if (TraceMetadataChunkAllocation && Verbose) {
2218     gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
2219     locked_print_chunks_in_use_on(gclog_or_tty);
2220   }
2221 
2222   // Do not mangle freed Metachunks.  The chunk size inside Metachunks
2223   // is still needed during the freeing of VirtualSpaceNodes.
2224 
2225   // Have to update before the chunks_in_use lists are emptied
2226   // below.
2227   chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2228                                          sum_count_in_chunks_in_use());
2229 
2230   // Add all the chunks in use by this space manager
2231   // to the global list of free chunks.
2232 
2233   // Follow each list of chunks-in-use and add them to the
2234   // free lists.  Each list is NULL terminated.
2235 
2236   for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2237     if (TraceMetadataChunkAllocation && Verbose) {
2238       gclog_or_tty->print_cr("returned %d %s chunks to freelist",
2239                              sum_count_in_chunks_in_use(i),
2240                              chunk_size_name(i));
2241     }
2242     Metachunk* chunks = chunks_in_use(i);
2243     chunk_manager()->return_chunks(i, chunks);
2244     set_chunks_in_use(i, NULL);
2245     if (TraceMetadataChunkAllocation && Verbose) {
2246       gclog_or_tty->print_cr("updated freelist count %d %s",
2247                              chunk_manager()->free_chunks(i)->count(),
2248                              chunk_size_name(i));
2249     }
2250     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2251   }
2252 
2253   // The medium chunk case may be optimized by passing the head and
2254   // tail of the medium chunk list to add_at_head().  The tail is often
2255   // the current chunk but there are probably exceptions.
2256 
2257   // Humongous chunks
2258   if (TraceMetadataChunkAllocation && Verbose) {
2259     gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
2260                             sum_count_in_chunks_in_use(HumongousIndex),
2261                             chunk_size_name(HumongousIndex));
2262     gclog_or_tty->print("Humongous chunk dictionary: ");
2263   }
2264   // Humongous chunks are never the current chunk.
2265   Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2266 
2267   while (humongous_chunks != NULL) {
2268 #ifdef ASSERT
2269     humongous_chunks->set_is_free(true);
2270 #endif
2271     if (TraceMetadataChunkAllocation && Verbose) {
2272       gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2273                           humongous_chunks,
2274                           humongous_chunks->word_size());
2275     }
2276     assert(humongous_chunks->word_size() == (size_t)
2277            align_size_up(humongous_chunks->word_size(),
2278                              HumongousChunkGranularity),
2279            err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2280                    " granularity %d",
2281                    humongous_chunks->word_size(), HumongousChunkGranularity));
2282     Metachunk* next_humongous_chunks = humongous_chunks->next();
2283     humongous_chunks->container()->dec_container_count();
2284     chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2285     humongous_chunks = next_humongous_chunks;
2286   }
2287   if (TraceMetadataChunkAllocation && Verbose) {
2288     gclog_or_tty->print_cr("");
2289     gclog_or_tty->print_cr("updated dictionary count %d %s",
2290                      chunk_manager()->humongous_dictionary()->total_count(),
2291                      chunk_size_name(HumongousIndex));
2292   }
2293   chunk_manager()->slow_locked_verify();
2294 }
2295 
2296 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2297   switch (index) {
2298     case SpecializedIndex:
2299       return "Specialized";
2300     case SmallIndex:
2301       return "Small";
2302     case MediumIndex:
2303       return "Medium";
2304     case HumongousIndex:
2305       return "Humongous";
2306     default:
2307       return NULL;
2308   }
2309 }
2310 
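     // Maps an exact chunk size to its free list index.  Non-humongous chunks
     // come only in the fixed sizes from the ChunkSizes enum, e.g. (in words)
     // 128 -> SpecializedIndex, 256 or 512 -> SmallIndex, 4*K or 8*K ->
     // MediumIndex; any other size falls through to HumongousIndex.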
2311 ChunkIndex ChunkManager::list_index(size_t size) {
2312   switch (size) {
2313     case SpecializedChunk:
2314       assert(SpecializedChunk == ClassSpecializedChunk,
2315              "Need branch for ClassSpecializedChunk");
2316       return SpecializedIndex;
2317     case SmallChunk:
2318     case ClassSmallChunk:
2319       return SmallIndex;
2320     case MediumChunk:
2321     case ClassMediumChunk:
2322       return MediumIndex;
2323     default:
2324       assert(size > MediumChunk || size > ClassMediumChunk,
2325              "Not a humongous chunk");
2326       return HumongousIndex;
2327   }
2328 }
2329 
2330 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2331   assert_lock_strong(_lock);
2332   size_t raw_word_size = get_raw_word_size(word_size);
2333   size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
2334   assert(raw_word_size >= min_size,
2335          err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
2336   block_freelists()->return_block(p, raw_word_size);
2337 }
2338 
2339 // Adds a chunk to the list of chunks in use.
2340 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2341 
2342   assert(new_chunk != NULL, "Should not be NULL");
2343   assert(new_chunk->next() == NULL, "Should not be on a list");
2344 
2345   new_chunk->reset_empty();
2346 
2347   // Find the correct list and set the current
2348   // chunk for that list.
2349   ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2350 
2351   if (index != HumongousIndex) {
2352     retire_current_chunk();
2353     set_current_chunk(new_chunk);
2354     new_chunk->set_next(chunks_in_use(index));
2355     set_chunks_in_use(index, new_chunk);
2356   } else {
2357     // For null class loader data and DumpSharedSpaces, the first chunk isn't
2358     // small, so small will be null.  Link this first chunk as the current
2359     // chunk.
2360     if (make_current) {
2361       // Set as the current chunk but otherwise treat as a humongous chunk.
2362       set_current_chunk(new_chunk);
2363     }
2364     // Link at head.  _current_chunk only points to a humongous chunk for
2365     // the null class loader metaspace (the class and data virtual space
2366     // managers), so it never needs to point to the tail of the
2367     // humongous chunks list.
2368     new_chunk->set_next(chunks_in_use(HumongousIndex));
2369     set_chunks_in_use(HumongousIndex, new_chunk);
2370 
2371     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2372   }
2373 
2374   // Add to the running sum of capacity
2375   inc_size_metrics(new_chunk->word_size());
2376 
2377   assert(new_chunk->is_empty(), "Not ready for reuse");
2378   if (TraceMetadataChunkAllocation && Verbose) {
2379     gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
2380                         sum_count_in_chunks_in_use());
2381     new_chunk->print_on(gclog_or_tty);
2382     chunk_manager()->locked_print_free_chunks(gclog_or_tty);
2383   }
2384 }
2385 
2386 void SpaceManager::retire_current_chunk() {
2387   if (current_chunk() != NULL) {
2388     size_t remaining_words = current_chunk()->free_word_size();
2389     if (remaining_words >= TreeChunk<Metablock, FreeList>::min_size()) {
2390       block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2391       inc_used_metrics(remaining_words);
2392     }
2393   }
2394 }
2395 
2396 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2397                                        size_t grow_chunks_by_words) {
2398   // Get a chunk from the chunk freelist
2399   Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2400 
2401   if (next == NULL) {
2402     next = vs_list()->get_new_chunk(word_size,
2403                                     grow_chunks_by_words,
2404                                     medium_chunk_bunch());
2405   }
2406 
2407   if (TraceMetadataHumongousAllocation && next != NULL &&
2408       SpaceManager::is_humongous(next->word_size())) {
2409     gclog_or_tty->print_cr("  new humongous chunk word size "
2410                            PTR_FORMAT, next->word_size());
2411   }
2412 
2413   return next;
2414 }
2415 
2416 MetaWord* SpaceManager::allocate(size_t word_size) {
2417   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2418 
2419   size_t raw_word_size = get_raw_word_size(word_size);
2420   BlockFreelist* fl =  block_freelists();
2421   MetaWord* p = NULL;
2422   // Allocation from the dictionary is expensive in the sense that
2423   // the dictionary has to be searched for a size.  Don't allocate
2424   // from the dictionary until it starts to get fat.  Is this
2425   // a reasonable policy?  Maybe a skinny dictionary is fast enough
2426   // for allocations.  Do some profiling.  JJJ
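       // (allocation_from_dictionary_limit is the constant defined near the top
       // of this file: until the freed-block dictionary grows beyond it, freed
       // blocks are left in place and new allocations are carved from the
       // current chunk instead.)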
2427   if (fl->total_size() > allocation_from_dictionary_limit) {
2428     p = fl->get_block(raw_word_size);
2429   }
2430   if (p == NULL) {
2431     p = allocate_work(raw_word_size);
2432   }
2433   Metadebug::deallocate_block_a_lot(this, raw_word_size);
2434 
2435   return p;
2436 }
2437 
2438 // Returns the address of space allocated for "word_size" words.
2439 // This method does not know about blocks (Metablocks).
2440 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2441   assert_lock_strong(_lock);
2442 #ifdef ASSERT
2443   if (Metadebug::test_metadata_failure()) {
2444     return NULL;
2445   }
2446 #endif
2447   // Is there space in the current chunk?
2448   MetaWord* result = NULL;
2449 
2450   // For DumpSharedSpaces, only allocate out of the current chunk which is
2451   // never null because we gave it the size we wanted.   Caller reports out
2452   // of memory if this returns null.
2453   if (DumpSharedSpaces) {
2454     assert(current_chunk() != NULL, "should never happen");
2455     inc_used_metrics(word_size);
2456     return current_chunk()->allocate(word_size); // caller handles null result
2457   }
2458 
2459   if (current_chunk() != NULL) {
2460     result = current_chunk()->allocate(word_size);
2461   }
2462 
2463   if (result == NULL) {
2464     result = grow_and_allocate(word_size);
2465   }
2466 
2467   if (result != NULL) {
2468     inc_used_metrics(word_size);
2469     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2470            "Head of the list is being allocated");
2471   }
2472 
2473   return result;
2474 }
2475 
2476 void SpaceManager::verify() {
2477   // If there are blocks in the dictionary, then
2478   // verification of chunks does not work since
2479   // being in the dictionary alters a chunk.
2480   if (block_freelists()->total_size() == 0) {
2481     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2482       Metachunk* curr = chunks_in_use(i);
2483       while (curr != NULL) {
2484         curr->verify();
2485         verify_chunk_size(curr);
2486         curr = curr->next();
2487       }
2488     }
2489   }
2490 }
2491 
2492 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2493   assert(is_humongous(chunk->word_size()) ||
2494          chunk->word_size() == medium_chunk_size() ||
2495          chunk->word_size() == small_chunk_size() ||
2496          chunk->word_size() == specialized_chunk_size(),
2497          "Chunk size is wrong");
2498   return;
2499 }
2500 
2501 #ifdef ASSERT
2502 void SpaceManager::verify_allocated_blocks_words() {
2503   // Verification is only guaranteed at a safepoint.
2504   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2505     "Verification can fail if the application is running");
2506   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2507     err_msg("allocation total is not consistent " SIZE_FORMAT
2508             " vs " SIZE_FORMAT,
2509             allocated_blocks_words(), sum_used_in_chunks_in_use()));
2510 }
2511 
2512 #endif
2513 
2514 void SpaceManager::dump(outputStream* const out) const {
2515   size_t curr_total = 0;
2516   size_t waste = 0;
2517   uint i = 0;
2518   size_t used = 0;
2519   size_t capacity = 0;
2520 
2521   // Add up statistics for all chunks in this SpaceManager.
2522   for (ChunkIndex index = ZeroIndex;
2523        index < NumberOfInUseLists;
2524        index = next_chunk_index(index)) {
2525     for (Metachunk* curr = chunks_in_use(index);
2526          curr != NULL;
2527          curr = curr->next()) {
2528       out->print("%d) ", i++);
2529       curr->print_on(out);
2530       curr_total += curr->word_size();
2531       used += curr->used_word_size();
2532       capacity += curr->capacity_word_size();
2533       waste += curr->free_word_size() + curr->overhead();
2534     }
2535   }
2536 
2537   if (TraceMetadataChunkAllocation && Verbose) {
2538     block_freelists()->print_on(out);
2539   }
2540 
2541   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2542   // Free space isn't wasted.
2543   waste -= free;
2544 
2545   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2546                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2547                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2548 }
2549 
2550 #ifndef PRODUCT
2551 void SpaceManager::mangle_freed_chunks() {
2552   for (ChunkIndex index = ZeroIndex;
2553        index < NumberOfInUseLists;
2554        index = next_chunk_index(index)) {
2555     for (Metachunk* curr = chunks_in_use(index);
2556          curr != NULL;
2557          curr = curr->next()) {
2558       curr->mangle();
2559     }
2560   }
2561 }
2562 #endif // PRODUCT
2563 
2564 // MetaspaceAux
2565 
2566 
2567 size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
2568 size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
2569 
2570 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2571   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2572   return list == NULL ? 0 : list->free_bytes();
2573 }
2574 
2575 size_t MetaspaceAux::free_bytes() {
2576   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2577 }
2578 
2579 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2580   assert_lock_strong(SpaceManager::expand_lock());
2581   assert(words <= allocated_capacity_words(mdtype),
2582     err_msg("About to decrement below 0: words " SIZE_FORMAT
2583             " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
2584             words, mdtype, allocated_capacity_words(mdtype)));
2585   _allocated_capacity_words[mdtype] -= words;
2586 }
2587 
2588 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2589   assert_lock_strong(SpaceManager::expand_lock());
2590   // Needs to be atomic
2591   _allocated_capacity_words[mdtype] += words;
2592 }
2593 
2594 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2595   assert(words <= allocated_used_words(mdtype),
2596     err_msg("About to decrement below 0: words " SIZE_FORMAT
2597             " is greater than _allocated_used_words[%u] " SIZE_FORMAT,
2598             words, mdtype, allocated_used_words(mdtype)));
2599   // For CMS, deallocation of Metaspaces occurs during the
2600   // sweep, which is a concurrent phase.  Protection by the expand_lock()
2601   // is not enough since allocation is done on a per-Metaspace basis
2602   // and is protected by the Metaspace lock.
2603   jlong minus_words = (jlong) - (jlong) words;
2604   Atomic::add_ptr(minus_words, &_allocated_used_words[mdtype]);
2605 }
2606 
2607 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2608   // _allocated_used_words tracks allocations for
2609   // each piece of metadata.  Those allocations are
2610   // generally done concurrently by different application
2611   // threads so must be done atomically.
2612   Atomic::add_ptr(words, &_allocated_used_words[mdtype]);
2613 }
2614 
2615 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2616   size_t used = 0;
2617   ClassLoaderDataGraphMetaspaceIterator iter;
2618   while (iter.repeat()) {
2619     Metaspace* msp = iter.get_next();
2620     // Sum allocated_blocks_words for each metaspace
2621     if (msp != NULL) {
2622       used += msp->used_words_slow(mdtype);
2623     }
2624   }
2625   return used * BytesPerWord;
2626 }
2627 
2628 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2629   size_t free = 0;
2630   ClassLoaderDataGraphMetaspaceIterator iter;
2631   while (iter.repeat()) {
2632     Metaspace* msp = iter.get_next();
2633     if (msp != NULL) {
2634       free += msp->free_words_slow(mdtype);
2635     }
2636   }
2637   return free * BytesPerWord;
2638 }
2639 
2640 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2641   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2642     return 0;
2643   }
2644   // Don't count the space in the freelists.  That space will be
2645   // added to the capacity calculation as needed.
2646   size_t capacity = 0;
2647   ClassLoaderDataGraphMetaspaceIterator iter;
2648   while (iter.repeat()) {
2649     Metaspace* msp = iter.get_next();
2650     if (msp != NULL) {
2651       capacity += msp->capacity_words_slow(mdtype);
2652     }
2653   }
2654   return capacity * BytesPerWord;
2655 }
2656 
2657 size_t MetaspaceAux::capacity_bytes_slow() {
2658 #ifdef PRODUCT
2659   // Use allocated_capacity_bytes() in PRODUCT instead of this function.
2660   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2661 #endif
2662   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2663   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2664   assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
2665       err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
2666         " class_capacity + non_class_capacity " SIZE_FORMAT
2667         " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2668         allocated_capacity_bytes(), class_capacity + non_class_capacity,
2669         class_capacity, non_class_capacity));
2670 
2671   return class_capacity + non_class_capacity;
2672 }
2673 
2674 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2675   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2676   return list == NULL ? 0 : list->reserved_bytes();
2677 }
2678 
2679 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2680   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2681   return list == NULL ? 0 : list->committed_bytes();
2682 }
2683 
2684 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2685 
2686 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2687   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2688   if (chunk_manager == NULL) {
2689     return 0;
2690   }
2691   chunk_manager->slow_verify();
2692   return chunk_manager->free_chunks_total_words();
2693 }
2694 
2695 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2696   return free_chunks_total_words(mdtype) * BytesPerWord;
2697 }
2698 
2699 size_t MetaspaceAux::free_chunks_total_words() {
2700   return free_chunks_total_words(Metaspace::ClassType) +
2701          free_chunks_total_words(Metaspace::NonClassType);
2702 }
2703 
2704 size_t MetaspaceAux::free_chunks_total_bytes() {
2705   return free_chunks_total_words() * BytesPerWord;
2706 }
2707 
2708 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2709   gclog_or_tty->print(", [Metaspace:");
2710   if (PrintGCDetails && Verbose) {
2711     gclog_or_tty->print(" "  SIZE_FORMAT
2712                         "->" SIZE_FORMAT
2713                         "("  SIZE_FORMAT ")",
2714                         prev_metadata_used,
2715                         allocated_used_bytes(),
2716                         reserved_bytes());
2717   } else {
2718     gclog_or_tty->print(" "  SIZE_FORMAT "K"
2719                         "->" SIZE_FORMAT "K"
2720                         "("  SIZE_FORMAT "K)",
2721                         prev_metadata_used/K,
2722                         allocated_used_bytes()/K,
2723                         reserved_bytes()/K);
2724   }
2725 
2726   gclog_or_tty->print("]");
2727 }
2728 
2729 // This is printed when PrintGCDetails is enabled.
2730 void MetaspaceAux::print_on(outputStream* out) {
2731   Metaspace::MetadataType nct = Metaspace::NonClassType;
2732 
2733   out->print_cr(" Metaspace       "
2734                 "used "      SIZE_FORMAT "K, "
2735                 "capacity "  SIZE_FORMAT "K, "
2736                 "committed " SIZE_FORMAT "K, "
2737                 "reserved "  SIZE_FORMAT "K",
2738                 allocated_used_bytes()/K,
2739                 allocated_capacity_bytes()/K,
2740                 committed_bytes()/K,
2741                 reserved_bytes()/K);
2742 
2743   if (Metaspace::using_class_space()) {
2744     Metaspace::MetadataType ct = Metaspace::ClassType;
2745     out->print_cr("  class space    "
2746                   "used "      SIZE_FORMAT "K, "
2747                   "capacity "  SIZE_FORMAT "K, "
2748                   "committed " SIZE_FORMAT "K, "
2749                   "reserved "  SIZE_FORMAT "K",
2750                   allocated_used_bytes(ct)/K,
2751                   allocated_capacity_bytes(ct)/K,
2752                   committed_bytes(ct)/K,
2753                   reserved_bytes(ct)/K);
2754   }
2755 }
2756 
2757 // Print information for class space and data space separately.
2758 // This is almost the same as above.
2759 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2760   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2761   size_t capacity_bytes = capacity_bytes_slow(mdtype);
2762   size_t used_bytes = used_bytes_slow(mdtype);
2763   size_t free_bytes = free_bytes_slow(mdtype);
2764   size_t used_and_free = used_bytes + free_bytes +
2765                            free_chunks_capacity_bytes;
2766   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
2767              "K + unused in chunks " SIZE_FORMAT "K  + "
2768              " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2769              "K  capacity in allocated chunks " SIZE_FORMAT "K",
2770              used_bytes / K,
2771              free_bytes / K,
2772              free_chunks_capacity_bytes / K,
2773              used_and_free / K,
2774              capacity_bytes / K);
2775   // Accounting can only be correct if we got the values during a safepoint
2776   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2777 }
2778 
2779 // Print total fragmentation for class metaspaces
2780 void MetaspaceAux::print_class_waste(outputStream* out) {
2781   assert(Metaspace::using_class_space(), "class metaspace not used");
2782   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2783   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2784   ClassLoaderDataGraphMetaspaceIterator iter;
2785   while (iter.repeat()) {
2786     Metaspace* msp = iter.get_next();
2787     if (msp != NULL) {
2788       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2789       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2790       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2791       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2792       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2793       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2794       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2795     }
2796   }
2797   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2798                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2799                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2800                 "large count " SIZE_FORMAT,
2801                 cls_specialized_count, cls_specialized_waste,
2802                 cls_small_count, cls_small_waste,
2803                 cls_medium_count, cls_medium_waste, cls_humongous_count);
2804 }
2805 
2806 // Print total fragmentation for data and class metaspaces separately
2807 void MetaspaceAux::print_waste(outputStream* out) {
2808   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2809   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2810 
2811   ClassLoaderDataGraphMetaspaceIterator iter;
2812   while (iter.repeat()) {
2813     Metaspace* msp = iter.get_next();
2814     if (msp != NULL) {
2815       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2816       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2817       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2818       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2819       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2820       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2821       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2822     }
2823   }
2824   out->print_cr("Total fragmentation waste (words) doesn't count free space");
2825   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2826                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2827                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2828                         "large count " SIZE_FORMAT,
2829              specialized_count, specialized_waste, small_count,
2830              small_waste, medium_count, medium_waste, humongous_count);
2831   if (Metaspace::using_class_space()) {
2832     print_class_waste(out);
2833   }
2834 }
2835 
2836 // Dump global metaspace things from the end of ClassLoaderDataGraph
2837 void MetaspaceAux::dump(outputStream* out) {
2838   out->print_cr("All Metaspace:");
2839   out->print("data space: "); print_on(out, Metaspace::NonClassType);
2840   out->print("class space: "); print_on(out, Metaspace::ClassType);
2841   print_waste(out);
2842 }
2843 
2844 void MetaspaceAux::verify_free_chunks() {
2845   Metaspace::chunk_manager_metadata()->verify();
2846   if (Metaspace::using_class_space()) {
2847     Metaspace::chunk_manager_class()->verify();
2848   }
2849 }
2850 
2851 void MetaspaceAux::verify_capacity() {
2852 #ifdef ASSERT
2853   size_t running_sum_capacity_bytes = allocated_capacity_bytes();
2854   // For purposes of the running sum of capacity, verify against capacity
2855   size_t capacity_in_use_bytes = capacity_bytes_slow();
2856   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2857     err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
2858             " capacity_bytes_slow() " SIZE_FORMAT,
2859             running_sum_capacity_bytes, capacity_in_use_bytes));
2860   for (Metaspace::MetadataType i = Metaspace::ClassType;
2861        i < Metaspace::MetadataTypeCount;
2862        i = (Metaspace::MetadataType)(i + 1)) {
2863     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2864     assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
2865       err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
2866               " capacity_bytes_slow(%u) " SIZE_FORMAT,
2867               i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
2868   }
2869 #endif
2870 }
2871 
2872 void MetaspaceAux::verify_used() {
2873 #ifdef ASSERT
2874   size_t running_sum_used_bytes = allocated_used_bytes();
2875   // For purposes of the running sum of used, verify against used
2876   size_t used_in_use_bytes = used_bytes_slow();
2877   assert(allocated_used_bytes() == used_in_use_bytes,
2878     err_msg("allocated_used_bytes() " SIZE_FORMAT
2879             " used_bytes_slow() " SIZE_FORMAT,
2880             allocated_used_bytes(), used_in_use_bytes));
2881   for (Metaspace::MetadataType i = Metaspace::ClassType;
2882        i < Metaspace::MetadataTypeCount;
2883        i = (Metaspace::MetadataType)(i + 1)) {
2884     size_t used_in_use_bytes = used_bytes_slow(i);
2885     assert(allocated_used_bytes(i) == used_in_use_bytes,
2886       err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
2887               " used_bytes_slow(%u) " SIZE_FORMAT,
2888               i, allocated_used_bytes(i), i, used_in_use_bytes));
2889   }
2890 #endif
2891 }
2892 
2893 void MetaspaceAux::verify_metrics() {
2894   verify_capacity();
2895   verify_used();
2896 }
2897 
2898 
2899 // Metaspace methods
2900 
2901 size_t Metaspace::_first_chunk_word_size = 0;
2902 size_t Metaspace::_first_class_chunk_word_size = 0;
2903 
2904 size_t Metaspace::_commit_alignment = 0;
2905 size_t Metaspace::_reserve_alignment = 0;
2906 
2907 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2908   initialize(lock, type);
2909 }
2910 
2911 Metaspace::~Metaspace() {
2912   delete _vsm;
2913   if (using_class_space()) {
2914     delete _class_vsm;
2915   }
2916 }
2917 
2918 VirtualSpaceList* Metaspace::_space_list = NULL;
2919 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2920 
2921 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
2922 ChunkManager* Metaspace::_chunk_manager_class = NULL;
2923 
2924 #define VIRTUALSPACEMULTIPLIER 2
2925 
2926 #ifdef _LP64
2927 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
2928   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
2929   // narrow_klass_base is the lower of the metaspace base and the cds base
2930   // (if cds is enabled).  The narrow_klass_shift depends on the distance
2931   // between the lower base and higher address.
2932   address lower_base;
2933   address higher_address;
2934   if (UseSharedSpaces) {
2935     higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2936                           (address)(metaspace_base + class_metaspace_size()));
2937     lower_base = MIN2(metaspace_base, cds_base);
2938   } else {
2939     higher_address = metaspace_base + class_metaspace_size();
2940     lower_base = metaspace_base;
2941   }
2942   Universe::set_narrow_klass_base(lower_base);
2943   if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) {
2944     Universe::set_narrow_klass_shift(0);
2945   } else {
2946     assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
2947     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
2948   }
2949 }
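
     // Illustrative sketch of the encoding choice above (addresses are
     // hypothetical): if the class space is reserved at 0x0000000800000000 with
     // class_metaspace_size() == 1*G and CDS is off, then lower_base is that
     // address and higher_address == lower_base + 1*G.  The 1*G span fits in
     // 32 bits, so the shift is 0 and a narrow klass pointer is simply the
     // 32-bit offset of the Klass from the base.  If the span were 4*G or more,
     // klass pointers would instead be shifted by LogKlassAlignmentInBytes.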
2950 
2951 // Return TRUE if the specified metaspace_base and cds_base are close enough
2952 // to work with compressed klass pointers.
2953 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
2954   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
2955   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2956   address lower_base = MIN2((address)metaspace_base, cds_base);
2957   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2958                                 (address)(metaspace_base + class_metaspace_size()));
2959   return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint);
2960 }
2961 
2962 // Try to allocate the metaspace at the requested addr.
2963 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
2964   assert(using_class_space(), "called improperly");
2965   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2966   assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
2967          "Metaspace size is too big");
2968   assert_is_ptr_aligned(requested_addr,          _reserve_alignment);
2969   assert_is_ptr_aligned(cds_base,                _reserve_alignment);
2970   assert_is_size_aligned(class_metaspace_size(), _reserve_alignment);
2971 
2972   // Don't use large pages for the class space.
2973   bool large_pages = false;
2974 
2975   ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
2976                                              _reserve_alignment,
2977                                              large_pages, 
2978                                              requested_addr, 0);
2979   if (!metaspace_rs.is_reserved()) {
2980     if (UseSharedSpaces) {
2981       size_t increment = align_size_up(1*G, _reserve_alignment);
2982 
2983       // Keep trying to allocate the metaspace, increasing the requested_addr
2984       // by 1GB each time, until we reach an address that will no longer allow
2985       // use of CDS with compressed klass pointers.
2986       char *addr = requested_addr;
2987       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
2988              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
2989         addr = addr + increment;
2990         metaspace_rs = ReservedSpace(class_metaspace_size(),
2991                                      _reserve_alignment, large_pages, addr, 0);
2992       }
2993     }
2994 
2995     // If no successful allocation then try to allocate the space anywhere.  If
2996     // that fails then OOM doom.  At this point we cannot try allocating the
2997     // metaspace as if UseCompressedClassPointers is off because too much
2998     // initialization has happened that depends on UseCompressedClassPointers.
2999     // So, UseCompressedClassPointers cannot be turned off at this point.
3000     if (!metaspace_rs.is_reserved()) {
3001       metaspace_rs = ReservedSpace(class_metaspace_size(),
3002                                    _reserve_alignment, large_pages);
3003       if (!metaspace_rs.is_reserved()) {
3004         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
3005                                               class_metaspace_size()));
3006       }
3007     }
3008   }
3009 
3010   // If we got here then the metaspace got allocated.
3011   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3012 
3013   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3014   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3015     FileMapInfo::stop_sharing_and_unmap(
3016         "Could not allocate metaspace at a compatible address");
3017   }
3018 
3019   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3020                                   UseSharedSpaces ? (address)cds_base : 0);
3021 
3022   initialize_class_space(metaspace_rs);
3023 
3024   if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
3025     gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
3026                             Universe::narrow_klass_base(), Universe::narrow_klass_shift());
3027     gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
3028                            class_metaspace_size(), metaspace_rs.base(), requested_addr);
3029   }
3030 }
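
     // Retry sketch for the reservation loop above (addresses are hypothetical):
     // if CDS is mapped with cds_base == 0x0000000800000000 and the reservation
     // at requested_addr fails, the loop probes requested_addr + 1*G, + 2*G, ...
     // and stops as soon as a reservation succeeds or the probed address can no
     // longer be encoded together with cds_base in the 32-bit narrow klass range.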
3031 
3032 // For UseCompressedClassPointers the class space is reserved above the top of
3033 // the Java heap.  The argument passed in is at the base of the compressed space.
3034 void Metaspace::initialize_class_space(ReservedSpace rs) {
3035   // The reserved space size may be bigger because of alignment, esp with UseLargePages
3036   assert(rs.size() >= CompressedClassSpaceSize,
3037          err_msg("reserved size " SIZE_FORMAT " < CompressedClassSpaceSize " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
3038   assert(using_class_space(), "Must be using class space");
3039   _class_space_list = new VirtualSpaceList(rs);
3040   _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3041 
3042   if (!_class_space_list->initialization_succeeded()) {
3043     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3044   }
3045 }
3046 
3047 #endif
3048 
3049 // Align the size down to the given alignment. If the result would be 0, return 'alignment' instead.
3050 static size_t restricted_align_down(size_t size, size_t alignment) {
3051   return MAX2(alignment, align_size_down_(size, alignment));
3052 }
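
     // Worked examples (assuming alignment == 4*K):
     //   restricted_align_down(10*K, 4*K) == 8*K   (plain align-down)
     //   restricted_align_down( 2*K, 4*K) == 4*K   (would align down to 0, so 'alignment' is returned)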
3053 
3054 void Metaspace::ergo_initialize() {
3055   if (DumpSharedSpaces) {
3056     // Using large pages when dumping the shared archive is currently not implemented.
3057     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3058   }
3059 
3060   size_t page_size = os::vm_page_size();
3061   if (UseLargePages && UseLargePagesInMetaspace) {
3062     page_size = os::large_page_size();
3063   }
3064 
3065   _commit_alignment  = page_size;
3066   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
3067 
3068   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since doing so would
3069   // overwrite the record of whether MaxMetaspaceSize was set on the command line.
3070   // This information is needed later to conform to the specification of the
3071   // java.lang.management.MemoryUsage API.
3072   //
3073   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3074   // globals.hpp to the aligned value, but this is not possible, since the
3075   // alignment depends on other flags being parsed.
3076   MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, _reserve_alignment);
3077 
3078   if (MetaspaceSize > MaxMetaspaceSize) {
3079     MetaspaceSize = MaxMetaspaceSize;
3080   }
3081 
3082   MetaspaceSize = restricted_align_down(MetaspaceSize, _commit_alignment);
3083 
3084   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3085 
3086   if (MetaspaceSize < 256*K) {
3087     vm_exit_during_initialization("Too small initial Metaspace size");
3088   }
3089 
3090   MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, _commit_alignment);
3091   MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _commit_alignment);
3092 
3093   CompressedClassSpaceSize = restricted_align_down(CompressedClassSpaceSize, _reserve_alignment);
3094   set_class_metaspace_size(CompressedClassSpaceSize);
3095 }
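
     // Sketch of the resulting alignments (assuming 4K pages, a 64K allocation
     // granularity and large pages disabled; actual values are platform dependent):
     //   _commit_alignment  == 4*K
     //   _reserve_alignment == 64*K
     // MaxMetaspaceSize, MetaspaceSize, the expansion flags and
     // CompressedClassSpaceSize are then rounded down to these alignments, but
     // never below the alignment itself (see restricted_align_down()).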
3096 
3097 void Metaspace::global_initialize() {
3098   // Initialize the alignment for shared spaces.
3099   int max_alignment = os::vm_page_size();
3100   size_t cds_total = 0;
3101 
3102   MetaspaceShared::set_max_alignment(max_alignment);
3103 
3104   if (DumpSharedSpaces) {
3105     SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
3106     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
3107     SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
3108     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
3109 
3110     // Initialize with the sum of the shared space sizes.  The read-only
3111     // and read-write metaspace chunks will be allocated out of this, and the
3112     // remainder goes to the misc code and data chunks.
3113     cds_total = FileMapInfo::shared_spaces_size();
3114     cds_total = align_size_up(cds_total, _reserve_alignment);
3115     _space_list = new VirtualSpaceList(cds_total/wordSize);
3116     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3117 
3118     if (!_space_list->initialization_succeeded()) {
3119       vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3120     }
3121 
3122 #ifdef _LP64
3123     if (cds_total + class_metaspace_size() > (uint64_t)max_juint) {
3124       vm_exit_during_initialization("Unable to dump shared archive.",
3125           err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3126                   SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3127                   "klass limit: " SIZE_FORMAT, cds_total, class_metaspace_size(),
3128                   cds_total + class_metaspace_size(), (size_t)max_juint));
3129     }
3130 
3131     // Set the compressed klass pointer base so that decoding of these pointers works
3132     // properly when creating the shared archive.
3133     assert(UseCompressedOops && UseCompressedClassPointers,
3134       "UseCompressedOops and UseCompressedClassPointers must be set");
3135     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3136     if (TraceMetavirtualspaceAllocation && Verbose) {
3137       gclog_or_tty->print_cr("Setting narrow klass base to address: " PTR_FORMAT,
3138                              _space_list->current_virtual_space()->bottom());
3139     }
3140 
3141     Universe::set_narrow_klass_shift(0);
3142 #endif
3143 
3144   } else {
3145     // If using shared space, open the file that contains the shared space
3146     // and map in the memory before initializing the rest of metaspace (so
3147     // the addresses don't conflict)
3148     address cds_address = NULL;
3149     if (UseSharedSpaces) {
3150       FileMapInfo* mapinfo = new FileMapInfo();
3151       memset(mapinfo, 0, sizeof(FileMapInfo));
3152 
3153       // Open the shared archive file, read and validate the header. If
3154       // initialization fails, shared spaces [UseSharedSpaces] are
3155       // disabled and the file is closed.
3156       // Map in spaces now also
3157       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3158         FileMapInfo::set_current_info(mapinfo);
3159         cds_total = FileMapInfo::shared_spaces_size();
3160         cds_address = (address)mapinfo->region_base(0);
3161       } else {
3162         assert(!mapinfo->is_open() && !UseSharedSpaces,
3163                "archive file not closed or shared spaces not disabled.");
3164       }
3165     }
3166 
3167 #ifdef _LP64
3168     // If UseCompressedClassPointers is set then allocate the metaspace area
3169     // above the heap and above the CDS area (if it exists).
3170     if (using_class_space()) {
3171       if (UseSharedSpaces) {
3172         char* cds_end = (char*)(cds_address + cds_total);
3173         cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3174         allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3175       } else {
3176         allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
3177       }
3178     }
3179 #endif
3180 
3181     // Initialize these before initializing the VirtualSpaceList
3182     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3183     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3184     // Make the first class chunk bigger than a medium chunk so it's not put
3185     // on the medium chunk list.  The next chunk will be small and progress
3186     // from there.  This size was determined empirically by running -version.
3187     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3188                                        (CompressedClassSpaceSize/BytesPerWord)*2);
3189     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3190     // Arbitrarily set the initial virtual space to a multiple
3191     // of the boot class loader size.
3192     size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3193     word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3194 
3195     // Initialize the list of virtual spaces.
3196     _space_list = new VirtualSpaceList(word_size);
3197     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3198 
3199     if (!_space_list->initialization_succeeded()) {
3200       vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3201     }
3202   }
3203 
3204   MetaspaceGC::initialize();
3205 }
3206 
3207 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
3208                                                size_t chunk_word_size,
3209                                                size_t chunk_bunch) {
3210   // Get a chunk from the chunk freelist
3211   Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3212   if (chunk != NULL) {
3213     return chunk;
3214   }
3215 
3216   return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
3217 }
3218 
3219 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3220 
3221   assert(space_list() != NULL,
3222     "Metadata VirtualSpaceList has not been initialized");
3223   assert(chunk_manager_metadata() != NULL,
3224     "Metadata ChunkManager has not been initialized");
3225 
3226   _vsm = new SpaceManager(NonClassType, lock);
3227   if (_vsm == NULL) {
3228     return;
3229   }
3230   size_t word_size;
3231   size_t class_word_size;
3232   vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
3233 
3234   if (using_class_space()) {
3235     assert(class_space_list() != NULL,
3236       "Class VirtualSpaceList has not been initialized");
3237     assert(chunk_manager_class() != NULL,
3238       "Class ChunkManager has not been initialized");
3239 
3240     // Allocate SpaceManager for classes.
3241     _class_vsm = new SpaceManager(ClassType, lock);
3242     if (_class_vsm == NULL) {
3243       return;
3244     }
3245   }
3246 
3247   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3248 
3249   // Allocate chunk for metadata objects
3250   Metachunk* new_chunk = get_initialization_chunk(NonClassType,
3251                                                   word_size,
3252                                                   vsm()->medium_chunk_bunch());
3253   assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
3254   if (new_chunk != NULL) {
3255     // Add to this manager's list of chunks in use and current_chunk().
3256     vsm()->add_chunk(new_chunk, true);
3257   }
3258 
3259   // Allocate chunk for class metadata objects
3260   if (using_class_space()) {
3261     Metachunk* class_chunk = get_initialization_chunk(ClassType,
3262                                                       class_word_size,
3263                                                       class_vsm()->medium_chunk_bunch());
3264     if (class_chunk != NULL) {
3265       class_vsm()->add_chunk(class_chunk, true);
3266     }
3267   }
3268 
3269   _alloc_record_head = NULL;
3270   _alloc_record_tail = NULL;
3271 }
3272 
3273 size_t Metaspace::align_word_size_up(size_t word_size) {
3274   size_t byte_size = word_size * wordSize;
3275   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3276 }
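
     // Sketch (values are hypothetical): with 8-byte words and a 64K allocation
     // alignment, align_word_size_up(5) rounds 40 bytes up to 64*K bytes and
     // returns 8*K words.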
3277 
3278 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3279   // DumpSharedSpaces doesn't use class metadata area (yet)
3280   // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3281   if (is_class_space_allocation(mdtype)) {
3282     return class_vsm()->allocate(word_size);
3283   } else {
3284     return vsm()->allocate(word_size);
3285   }
3286 }
3287 
3288 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3289   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3290   assert(delta_bytes > 0, "Must be");
3291 
3292   size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes);
3293   size_t before_inc = after_inc - delta_bytes;
3294 
3295   if (PrintGCDetails && Verbose) {
3296     gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
3297         " to " SIZE_FORMAT, before_inc, after_inc);
3298   }
3299 
3300   return allocate(word_size, mdtype);
3301 }
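
     // Caller sketch (simplified, not a verbatim call chain): when a regular
     // Metaspace::allocate() fails after bootstrapping, the collector policy may
     // run a GC and, if the allocation still fails, call expand_and_allocate()
     // to raise the capacity-until-GC watermark and retry the allocation.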
3302 
3303 // Space allocated in the Metaspace.  This may
3304 // be across several metadata virtual spaces.
3305 char* Metaspace::bottom() const {
3306   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3307   return (char*)vsm()->current_chunk()->bottom();
3308 }
3309 
3310 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3311   if (mdtype == ClassType) {
3312     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3313   } else {
3314     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3315   }
3316 }
3317 
3318 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3319   if (mdtype == ClassType) {
3320     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3321   } else {
3322     return vsm()->sum_free_in_chunks_in_use();
3323   }
3324 }
3325 
3326 // Space capacity in the Metaspace.  It includes
3327 // space in the list of chunks from which allocations
3328 // have been made.  It does not include space in the global
3329 // freelist or space available in the dictionary, since that
3330 // space is already counted in some chunk.
3331 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3332   if (mdtype == ClassType) {
3333     return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3334   } else {
3335     return vsm()->sum_capacity_in_chunks_in_use();
3336   }
3337 }
3338 
3339 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3340   return used_words_slow(mdtype) * BytesPerWord;
3341 }
3342 
3343 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3344   return capacity_words_slow(mdtype) * BytesPerWord;
3345 }
3346 
3347 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3348   if (SafepointSynchronize::is_at_safepoint()) {
3349     assert(Thread::current()->is_VM_thread(), "should be the VM thread");
3350     // Don't take Heap_lock
3351     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3352     if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
3353       // Dark matter.  Too small for dictionary.
3354 #ifdef ASSERT
3355       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3356 #endif
3357       return;
3358     }
3359     if (is_class && using_class_space()) {
3360       class_vsm()->deallocate(ptr, word_size);
3361     } else {
3362       vsm()->deallocate(ptr, word_size);
3363     }
3364   } else {
3365     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3366 
3367     if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
3368       // Dark matter.  Too small for dictionary.
3369 #ifdef ASSERT
3370       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3371 #endif
3372       return;
3373     }
3374     if (is_class && using_class_space()) {
3375       class_vsm()->deallocate(ptr, word_size);
3376     } else {
3377       vsm()->deallocate(ptr, word_size);
3378     }
3379   }
3380 }
3381 
3382 
3383 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3384                               bool read_only, MetaspaceObj::Type type, TRAPS) {
3385   if (HAS_PENDING_EXCEPTION) {
3386     assert(false, "Should not allocate with exception pending");
3387     return NULL;  // caller does a CHECK_NULL too
3388   }
3389 
3390   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3391         "ClassLoaderData::the_null_class_loader_data() should have been used.");
3392 
3393   // Allocate in metaspaces without taking out a lock, because taking the lock
3394   // deadlocks with the SymbolTable_lock.  Dumping is single-threaded for now.
3395   // We'll have to revisit this for application class data sharing.
3396   if (DumpSharedSpaces) {
3397     assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3398     Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3399     MetaWord* result = space->allocate(word_size, NonClassType);
3400     if (result == NULL) {
3401       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3402     } else {
3403       space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3404     }
3405     return Metablock::initialize(result, word_size);
3406   }
3407 
3408   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3409 
3410   // Try to allocate metadata.
3411   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3412 
3413   if (result == NULL) {
3414     // Allocation failed.
3415     if (is_init_completed()) {
3416       // Only start a GC if the bootstrapping has completed.
3417 
3418       // Try to clean out some memory and retry.
3419       result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3420           loader_data, word_size, mdtype);
3421     }
3422   }
3423 
3424   if (result == NULL) {
3425     report_metadata_oome(loader_data, word_size, mdtype, THREAD);
3426     // Will not reach here.
3427     return NULL;
3428   }
3429 
3430   return Metablock::initialize(result, word_size);
3431 }
3432 
3433 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) {
3434   // If result is still null, we are out of memory.
3435   if (Verbose && TraceMetadataChunkAllocation) {
3436     gclog_or_tty->print_cr("Metaspace allocation failed for size "
3437         SIZE_FORMAT, word_size);
3438     if (loader_data->metaspace_or_null() != NULL) {
3439       loader_data->dump(gclog_or_tty);
3440     }
3441     MetaspaceAux::dump(gclog_or_tty);
3442   }
3443 
3444   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3445   const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" :
3446                                                                  "Metadata space";
3447   report_java_out_of_memory(space_string);
3448 
3449   if (JvmtiExport::should_post_resource_exhausted()) {
3450     JvmtiExport::post_resource_exhausted(
3451         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3452         space_string);
3453   }
3454 
3455   if (!is_init_completed()) {
3456     vm_exit_during_initialization("OutOfMemoryError", space_string);
3457   }
3458 
3459   if (is_class_space_allocation(mdtype)) {
3460     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3461   } else {
3462     THROW_OOP(Universe::out_of_memory_error_metaspace());
3463   }
3464 }
3465 
3466 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3467   assert(DumpSharedSpaces, "sanity");
3468 
3469   AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
3470   if (_alloc_record_head == NULL) {
3471     _alloc_record_head = _alloc_record_tail = rec;
3472   } else {
3473     _alloc_record_tail->_next = rec;
3474     _alloc_record_tail = rec;
3475   }
3476 }
3477 
3478 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3479   assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3480 
3481   address last_addr = (address)bottom();
3482 
3483   for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3484     address ptr = rec->_ptr;
3485     if (last_addr < ptr) {
3486       closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3487     }
3488     closure->doit(ptr, rec->_type, rec->_byte_size);
3489     last_addr = ptr + rec->_byte_size;
3490   }
3491 
3492   address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3493   if (last_addr < top) {
3494     closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3495   }
3496 }
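
     // Minimal usage sketch for iterate() (hypothetical closure; only meaningful
     // when dumping the shared archive, since allocation records are kept only
     // then):
     //
     //   class PrintAllocRecords : public Metaspace::AllocRecordClosure {
     //    public:
     //     virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) {
     //       tty->print_cr(PTR_FORMAT " type=%d size=%d", ptr, (int)type, byte_size);
     //     }
     //   };
     //
     //   PrintAllocRecords closure;
     //   ro_space->iterate(&closure);   // 'ro_space' is a hypothetical Metaspace*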
3497 
3498 void Metaspace::purge(MetadataType mdtype) {
3499   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3500 }
3501 
3502 void Metaspace::purge() {
3503   MutexLockerEx cl(SpaceManager::expand_lock(),
3504                    Mutex::_no_safepoint_check_flag);
3505   purge(NonClassType);
3506   if (using_class_space()) {
3507     purge(ClassType);
3508   }
3509 }
3510 
3511 void Metaspace::print_on(outputStream* out) const {
3512   // Print both class virtual space counts and metaspace.
3513   if (Verbose) {
3514     vsm()->print_on(out);
3515     if (using_class_space()) {
3516       class_vsm()->print_on(out);
3517     }
3518   }
3519 }
3520 
3521 bool Metaspace::contains(const void * ptr) {
3522   if (MetaspaceShared::is_in_shared_space(ptr)) {
3523     return true;
3524   }
3525   // This is checked while unlocked.  As long as the virtual spaces are added
3526   // at the end, the pointer will be in one of them.  The virtual spaces
3527   // aren't deleted presently.  When they are, some sort of locking might
3528   // be needed.  Note that locking here can cause lock-ordering (inversion)
3529   // problems with the caller, MetaspaceObj::is_metadata().
3530   return space_list()->contains(ptr) ||
3531          (using_class_space() && class_space_list()->contains(ptr));
3532 }
3533 
3534 void Metaspace::verify() {
3535   vsm()->verify();
3536   if (using_class_space()) {
3537     class_vsm()->verify();
3538   }
3539 }
3540 
3541 void Metaspace::dump(outputStream* const out) const {
3542   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
3543   vsm()->dump(out);
3544   if (using_class_space()) {
3545     out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
3546     class_vsm()->dump(out);
3547   }
3548 }
3549 
3550 /////////////// Unit tests ///////////////
3551 
3552 #ifndef PRODUCT
3553 
3554 class TestMetaspaceAuxTest : AllStatic {
3555  public:
3556   static void test_reserved() {
3557     size_t reserved = MetaspaceAux::reserved_bytes();
3558 
3559     assert(reserved > 0, "assert");
3560 
3561     size_t committed  = MetaspaceAux::committed_bytes();
3562     assert(committed <= reserved, "assert");
3563 
3564     size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3565     assert(reserved_metadata > 0, "assert");
3566     assert(reserved_metadata <= reserved, "assert");
3567 
3568     if (UseCompressedClassPointers) {
3569       size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3570       assert(reserved_class > 0, "assert");
3571       assert(reserved_class < reserved, "assert");
3572     }
3573   }
3574 
3575   static void test_committed() {
3576     size_t committed = MetaspaceAux::committed_bytes();
3577 
3578     assert(committed > 0, "assert");
3579 
3580     size_t reserved  = MetaspaceAux::reserved_bytes();
3581     assert(committed <= reserved, "assert");
3582 
3583     size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3584     assert(committed_metadata > 0, "assert");
3585     assert(committed_metadata <= committed, "assert");
3586 
3587     if (UseCompressedClassPointers) {
3588       size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3589       assert(committed_class > 0, "assert");
3590       assert(committed_class < committed, "assert");
3591     }
3592   }
3593 
3594   static void test_virtual_space_list_large_chunk() {
3595     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3596     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3597     // Use a size larger than VirtualSpaceSize (256k), plus one page, so that it
3598     // is _not_ vm_allocation_granularity aligned on Windows.
3599     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3600     large_size += (os::vm_page_size()/BytesPerWord);
3601     vs_list->get_new_chunk(large_size, large_size, 0);
3602   }
3603 
3604   static void test() {
3605     test_reserved();
3606     test_committed();
3607     test_virtual_space_list_large_chunk();
3608   }
3609 };
3610 
3611 void TestMetaspaceAux_test() {
3612   TestMetaspaceAuxTest::test();
3613 }
3614 
3615 #endif