1 /*
   2  * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "gc_interface/collectedHeap.hpp"
  26 #include "memory/allocation.hpp"
  27 #include "memory/binaryTreeDictionary.hpp"
  28 #include "memory/freeList.hpp"
  29 #include "memory/collectorPolicy.hpp"
  30 #include "memory/filemap.hpp"
  31 #include "memory/freeList.hpp"
  32 #include "memory/gcLocker.hpp"
  33 #include "memory/metachunk.hpp"
  34 #include "memory/metaspace.hpp"
  35 #include "memory/metaspaceShared.hpp"
  36 #include "memory/resourceArea.hpp"
  37 #include "memory/universe.hpp"
  38 #include "runtime/atomic.inline.hpp"
  39 #include "runtime/globals.hpp"
  40 #include "runtime/init.hpp"
  41 #include "runtime/java.hpp"
  42 #include "runtime/mutex.hpp"
  43 #include "runtime/orderAccess.hpp"
  44 #include "services/memTracker.hpp"
  45 #include "services/memoryService.hpp"
  46 #include "utilities/copy.hpp"
  47 #include "utilities/debug.hpp"
  48 
  49 typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
  50 typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
  51 
  52 // Set this constant to enable slow integrity checking of the free chunk lists
  53 const bool metaspace_slow_verify = false;
  54 
  55 // Parameters for stress mode testing
  56 const uint metadata_deallocate_a_lot_block = 10;
  57 const uint metadata_deallocate_a_lock_chunk = 3;
  58 size_t const allocation_from_dictionary_limit = 4 * K;
  59 
  60 MetaWord* last_allocated = 0;
  61 
  62 size_t Metaspace::_class_metaspace_size;
  63 
  64 // Used in declarations in SpaceManager and ChunkManager
  65 enum ChunkIndex {
  66   ZeroIndex = 0,
  67   SpecializedIndex = ZeroIndex,
  68   SmallIndex = SpecializedIndex + 1,
  69   MediumIndex = SmallIndex + 1,
  70   HumongousIndex = MediumIndex + 1,
  71   NumberOfFreeLists = 3,
  72   NumberOfInUseLists = 4
  73 };
  74 
  75 enum ChunkSizes {    // in words.
  76   ClassSpecializedChunk = 128,
  77   SpecializedChunk = 128,
  78   ClassSmallChunk = 256,
  79   SmallChunk = 512,
  80   ClassMediumChunk = 4 * K,
  81   MediumChunk = 8 * K,
  82   HumongousChunkGranularity = 8
  83 };
  84 
  85 static ChunkIndex next_chunk_index(ChunkIndex i) {
  86   assert(i < NumberOfInUseLists, "Out of bound");
  87   return (ChunkIndex) (i+1);
  88 }
  89 
  90 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
  91 uint MetaspaceGC::_shrink_factor = 0;
  92 bool MetaspaceGC::_should_concurrent_collect = false;
  93 
  94 typedef class FreeList<Metachunk> ChunkList;
  95 
  96 // Manages the global free lists of chunks.
  97 class ChunkManager : public CHeapObj<mtInternal> {
  98 
  99   // Free list of chunks of different sizes.
 100   //   SpecializedChunk
 101   //   SmallChunk
 102   //   MediumChunk
 103   //   HumongousChunk
 104   ChunkList _free_chunks[NumberOfFreeLists];
 105 
 106   //   HumongousChunk
 107   ChunkTreeDictionary _humongous_dictionary;
 108 
 109   // ChunkManager in all lists of this type
 110   size_t _free_chunks_total;
 111   size_t _free_chunks_count;
 112 
 113   void dec_free_chunks_total(size_t v) {
 114     assert(_free_chunks_count > 0 &&
 115              _free_chunks_total > 0,
 116              "About to go negative");
 117     Atomic::add_ptr(-1, &_free_chunks_count);
 118     jlong minus_v = (jlong) - (jlong) v;
 119     Atomic::add_ptr(minus_v, &_free_chunks_total);
 120   }
 121 
 122   // Debug support
 123 
 124   size_t sum_free_chunks();
 125   size_t sum_free_chunks_count();
 126 
 127   void locked_verify_free_chunks_total();
 128   void slow_locked_verify_free_chunks_total() {
 129     if (metaspace_slow_verify) {
 130       locked_verify_free_chunks_total();
 131     }
 132   }
 133   void locked_verify_free_chunks_count();
 134   void slow_locked_verify_free_chunks_count() {
 135     if (metaspace_slow_verify) {
 136       locked_verify_free_chunks_count();
 137     }
 138   }
 139   void verify_free_chunks_count();
 140 
 141  public:
 142 
 143   ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
 144       : _free_chunks_total(0), _free_chunks_count(0) {
 145     _free_chunks[SpecializedIndex].set_size(specialized_size);
 146     _free_chunks[SmallIndex].set_size(small_size);
 147     _free_chunks[MediumIndex].set_size(medium_size);
 148   }
 149 
 150   // add or delete (return) a chunk to the global freelist.
 151   Metachunk* chunk_freelist_allocate(size_t word_size);
 152   void chunk_freelist_deallocate(Metachunk* chunk);
 153 
 154   // Map a size to a list index assuming that there are lists
 155   // for special, small, medium, and humongous chunks.
 156   static ChunkIndex list_index(size_t size);
 157 
 158   // Remove the chunk from its freelist.  It is
 159   // expected to be on one of the _free_chunks[] lists.
 160   void remove_chunk(Metachunk* chunk);
 161 
 162   // Add the simple linked list of chunks to the freelist of chunks
 163   // of type index.
 164   void return_chunks(ChunkIndex index, Metachunk* chunks);
 165 
 166   // Total of the space in the free chunks list
 167   size_t free_chunks_total_words();
 168   size_t free_chunks_total_bytes();
 169 
 170   // Number of chunks in the free chunks list
 171   size_t free_chunks_count();
 172 
 173   void inc_free_chunks_total(size_t v, size_t count = 1) {
 174     Atomic::add_ptr(count, &_free_chunks_count);
 175     Atomic::add_ptr(v, &_free_chunks_total);
 176   }
 177   ChunkTreeDictionary* humongous_dictionary() {
 178     return &_humongous_dictionary;
 179   }
 180 
 181   ChunkList* free_chunks(ChunkIndex index);
 182 
 183   // Returns the list for the given chunk word size.
 184   ChunkList* find_free_chunks_list(size_t word_size);
 185 
 186   // Add and remove from a list by size.  Selects
 187   // list based on size of chunk.
 188   void free_chunks_put(Metachunk* chuck);
 189   Metachunk* free_chunks_get(size_t chunk_word_size);
 190 
 191   // Debug support
 192   void verify();
 193   void slow_verify() {
 194     if (metaspace_slow_verify) {
 195       verify();
 196     }
 197   }
 198   void locked_verify();
 199   void slow_locked_verify() {
 200     if (metaspace_slow_verify) {
 201       locked_verify();
 202     }
 203   }
 204   void verify_free_chunks_total();
 205 
 206   void locked_print_free_chunks(outputStream* st);
 207   void locked_print_sum_free_chunks(outputStream* st);
 208 
 209   void print_on(outputStream* st) const;
 210 };
 211 
 212 // Used to manage the free list of Metablocks (a block corresponds
 213 // to the allocation of a quantum of metadata).
 214 class BlockFreelist VALUE_OBJ_CLASS_SPEC {
 215   BlockTreeDictionary* _dictionary;
 216 
 217   // Only allocate and split from freelist if the size of the allocation
 218   // is at least 1/4th the size of the available block.
 219   const static int WasteMultiplier = 4;
 220 
 221   // Accessors
 222   BlockTreeDictionary* dictionary() const { return _dictionary; }
 223 
 224  public:
 225   BlockFreelist();
 226   ~BlockFreelist();
 227 
 228   // Get and return a block to the free list
 229   MetaWord* get_block(size_t word_size);
 230   void return_block(MetaWord* p, size_t word_size);
 231 
 232   size_t total_size() {
 233   if (dictionary() == NULL) {
 234     return 0;
 235   } else {
 236     return dictionary()->total_size();
 237   }
 238 }
 239 
 240   void print_on(outputStream* st) const;
 241 };
 242 
 243 // A VirtualSpaceList node.
 244 class VirtualSpaceNode : public CHeapObj<mtClass> {
 245   friend class VirtualSpaceList;
 246 
 247   // Link to next VirtualSpaceNode
 248   VirtualSpaceNode* _next;
 249 
 250   // total in the VirtualSpace
 251   MemRegion _reserved;
 252   ReservedSpace _rs;
 253   VirtualSpace _virtual_space;
 254   MetaWord* _top;
 255   // count of chunks contained in this VirtualSpace
 256   uintx _container_count;
 257 
 258   // Convenience functions to access the _virtual_space
 259   char* low()  const { return virtual_space()->low(); }
 260   char* high() const { return virtual_space()->high(); }
 261 
 262   // The first Metachunk will be allocated at the bottom of the
 263   // VirtualSpace
 264   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 265 
 266  public:
 267 
 268   VirtualSpaceNode(size_t byte_size);
 269   VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
 270   ~VirtualSpaceNode();
 271 
 272   // Convenience functions for logical bottom and end
 273   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 274   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 275 
 276   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 277   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 278 
 279   bool is_pre_committed() const { return _virtual_space.special(); }
 280 
 281   // address of next available space in _virtual_space;
 282   // Accessors
 283   VirtualSpaceNode* next() { return _next; }
 284   void set_next(VirtualSpaceNode* v) { _next = v; }
 285 
 286   void set_reserved(MemRegion const v) { _reserved = v; }
 287   void set_top(MetaWord* v) { _top = v; }
 288 
 289   // Accessors
 290   MemRegion* reserved() { return &_reserved; }
 291   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 292 
 293   // Returns true if "word_size" is available in the VirtualSpace
 294   bool is_available(size_t word_size) { return _top + word_size <= end(); }
 295 
 296   MetaWord* top() const { return _top; }
 297   void inc_top(size_t word_size) { _top += word_size; }
 298 
 299   uintx container_count() { return _container_count; }
 300   void inc_container_count();
 301   void dec_container_count();
 302 #ifdef ASSERT
 303   uint container_count_slow();
 304   void verify_container_count();
 305 #endif
 306 
 307   // used and capacity in this single entry in the list
 308   size_t used_words_in_vs() const;
 309   size_t capacity_words_in_vs() const;
 310   size_t free_words_in_vs() const;
 311 
 312   bool initialize();
 313 
 314   // get space from the virtual space
 315   Metachunk* take_from_committed(size_t chunk_word_size);
 316 
 317   // Allocate a chunk from the virtual space and return it.
 318   Metachunk* get_chunk_vs(size_t chunk_word_size);
 319 
 320   // Expands/shrinks the committed space in a virtual space.  Delegates
 321   // to Virtualspace
 322   bool expand_by(size_t min_words, size_t preferred_words);
 323 
 324   // In preparation for deleting this node, remove all the chunks
 325   // in the node from any freelist.
 326   void purge(ChunkManager* chunk_manager);
 327 
 328 #ifdef ASSERT
 329   // Debug support
 330   void mangle();
 331 #endif
 332 
 333   void print_on(outputStream* st) const;
 334 };
 335 
 336 #define assert_is_ptr_aligned(ptr, alignment) \
 337   assert(is_ptr_aligned(ptr, alignment),      \
 338     err_msg(PTR_FORMAT " is not aligned to "  \
 339       SIZE_FORMAT, ptr, alignment))
 340 
 341 #define assert_is_size_aligned(size, alignment) \
 342   assert(is_size_aligned(size, alignment),      \
 343     err_msg(SIZE_FORMAT " is not aligned to "   \
 344        SIZE_FORMAT, size, alignment))
 345 
 346 
 347 // Decide if large pages should be committed when the memory is reserved.
 348 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 349   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 350     size_t words = bytes / BytesPerWord;
 351     bool is_class = false; // We never reserve large pages for the class space.
 352     if (MetaspaceGC::can_expand(words, is_class) &&
 353         MetaspaceGC::allowed_expansion() >= words) {
 354       return true;
 355     }
 356   }
 357 
 358   return false;
 359 }
 360 
 361   // byte_size is the size of the associated virtualspace.
 362 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
 363   assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
 364 
 365   // This allocates memory with mmap.  For DumpSharedspaces, try to reserve
 366   // configurable address, generally at the top of the Java heap so other
 367   // memory addresses don't conflict.
 368   if (DumpSharedSpaces) {
 369     bool large_pages = false; // No large pages when dumping the CDS archive.
 370     char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
 371 
 372     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
 373     if (_rs.is_reserved()) {
 374       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
 375     } else {
 376       // Get a mmap region anywhere if the SharedBaseAddress fails.
 377       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 378     }
 379     MetaspaceShared::set_shared_rs(&_rs);
 380   } else {
 381     bool large_pages = should_commit_large_pages_when_reserving(bytes);
 382 
 383     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 384   }
 385 
 386   if (_rs.is_reserved()) {
 387     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 388     assert(_rs.size() != 0, "Catch if we get a 0 size");
 389     assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
 390     assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
 391 
 392     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 393   }
 394 }
 395 
 396 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 397   Metachunk* chunk = first_chunk();
 398   Metachunk* invalid_chunk = (Metachunk*) top();
 399   while (chunk < invalid_chunk ) {
 400     assert(chunk->is_marked_free(), "Should be marked free");
 401     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 402     chunk_manager->remove_chunk(chunk);
 403     assert(chunk->next() == NULL &&
 404                chunk->prev() == NULL,
 405            "Was not removed from its list");
 406     chunk = (Metachunk*) next;
 407   }
 408 }
 409 
 410 #ifdef ASSERT
 411 uint VirtualSpaceNode::container_count_slow() {
 412   uint count = 0;
 413   Metachunk* chunk = first_chunk();
 414   Metachunk* invalid_chunk = (Metachunk*) top();
 415   while (chunk < invalid_chunk ) {
 416     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 417     // Don't count the chunks on the free lists.  Those are
 418     // still part of the VirtualSpaceNode but not currently
 419     // counted.
 420     if (!chunk->is_marked_free()) {
 421       count++;
 422     }
 423     chunk = (Metachunk*) next;
 424   }
 425   return count;
 426 }
 427 #endif
 428 
 429 // List of VirtualSpaces for metadata allocation.
 430 class VirtualSpaceList : public CHeapObj<mtClass> {
 431   friend class VirtualSpaceNode;
 432 
 433   enum VirtualSpaceSizes {
 434     VirtualSpaceSize = 256 * K
 435   };
 436 
 437   // Head of the list
 438   VirtualSpaceNode* _virtual_space_list;
 439   // virtual space currently being used for allocations
 440   VirtualSpaceNode* _current_virtual_space;
 441 
 442   // Is this VirtualSpaceList used for the compressed class space
 443   bool _is_class;
 444 
 445   // Sum of reserved and committed memory in the virtual spaces
 446   size_t _reserved_words;
 447   size_t _committed_words;
 448 
 449   // Number of virtual spaces
 450   size_t _virtual_space_count;
 451 
 452   ~VirtualSpaceList();
 453 
 454   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
 455 
 456   void set_virtual_space_list(VirtualSpaceNode* v) {
 457     _virtual_space_list = v;
 458   }
 459   void set_current_virtual_space(VirtualSpaceNode* v) {
 460     _current_virtual_space = v;
 461   }
 462 
 463   void link_vs(VirtualSpaceNode* new_entry);
 464 
 465   // Get another virtual space and add it to the list.  This
 466   // is typically prompted by a failed attempt to allocate a chunk
 467   // and is typically followed by the allocation of a chunk.
 468   bool create_new_virtual_space(size_t vs_word_size);
 469 
 470  public:
 471   VirtualSpaceList(size_t word_size);
 472   VirtualSpaceList(ReservedSpace rs);
 473 
 474   size_t free_bytes();
 475 
 476   Metachunk* get_new_chunk(size_t word_size,
 477                            size_t grow_chunks_by_words,
 478                            size_t medium_chunk_bunch);
 479 
 480   bool expand_node_by(VirtualSpaceNode* node,
 481                       size_t min_words,
 482                       size_t preferred_words);
 483 
 484   bool expand_by(size_t min_words,
 485                  size_t preferred_words);
 486 
 487   VirtualSpaceNode* current_virtual_space() {
 488     return _current_virtual_space;
 489   }
 490 
 491   bool is_class() const { return _is_class; }
 492 
 493   bool initialization_succeeded() { return _virtual_space_list != NULL; }
 494 
 495   size_t reserved_words()  { return _reserved_words; }
 496   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
 497   size_t committed_words() { return _committed_words; }
 498   size_t committed_bytes() { return committed_words() * BytesPerWord; }
 499 
 500   void inc_reserved_words(size_t v);
 501   void dec_reserved_words(size_t v);
 502   void inc_committed_words(size_t v);
 503   void dec_committed_words(size_t v);
 504   void inc_virtual_space_count();
 505   void dec_virtual_space_count();
 506 
 507   // Unlink empty VirtualSpaceNodes and free it.
 508   void purge(ChunkManager* chunk_manager);
 509 
 510   bool contains(const void *ptr);
 511 
 512   void print_on(outputStream* st) const;
 513 
 514   class VirtualSpaceListIterator : public StackObj {
 515     VirtualSpaceNode* _virtual_spaces;
 516    public:
 517     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
 518       _virtual_spaces(virtual_spaces) {}
 519 
 520     bool repeat() {
 521       return _virtual_spaces != NULL;
 522     }
 523 
 524     VirtualSpaceNode* get_next() {
 525       VirtualSpaceNode* result = _virtual_spaces;
 526       if (_virtual_spaces != NULL) {
 527         _virtual_spaces = _virtual_spaces->next();
 528       }
 529       return result;
 530     }
 531   };
 532 };
 533 
 534 class Metadebug : AllStatic {
 535   // Debugging support for Metaspaces
 536   static int _deallocate_block_a_lot_count;
 537   static int _deallocate_chunk_a_lot_count;
 538   static int _allocation_fail_alot_count;
 539 
 540  public:
 541   static int deallocate_block_a_lot_count() {
 542     return _deallocate_block_a_lot_count;
 543   }
 544   static void set_deallocate_block_a_lot_count(int v) {
 545     _deallocate_block_a_lot_count = v;
 546   }
 547   static void inc_deallocate_block_a_lot_count() {
 548     _deallocate_block_a_lot_count++;
 549   }
 550   static int deallocate_chunk_a_lot_count() {
 551     return _deallocate_chunk_a_lot_count;
 552   }
 553   static void reset_deallocate_chunk_a_lot_count() {
 554     _deallocate_chunk_a_lot_count = 1;
 555   }
 556   static void inc_deallocate_chunk_a_lot_count() {
 557     _deallocate_chunk_a_lot_count++;
 558   }
 559 
 560   static void init_allocation_fail_alot_count();
 561 #ifdef ASSERT
 562   static bool test_metadata_failure();
 563 #endif
 564 
 565   static void deallocate_chunk_a_lot(SpaceManager* sm,
 566                                      size_t chunk_word_size);
 567   static void deallocate_block_a_lot(SpaceManager* sm,
 568                                      size_t chunk_word_size);
 569 
 570 };
 571 
 572 int Metadebug::_deallocate_block_a_lot_count = 0;
 573 int Metadebug::_deallocate_chunk_a_lot_count = 0;
 574 int Metadebug::_allocation_fail_alot_count = 0;
 575 
 576 //  SpaceManager - used by Metaspace to handle allocations
 577 class SpaceManager : public CHeapObj<mtClass> {
 578   friend class Metaspace;
 579   friend class Metadebug;
 580 
 581  private:
 582 
 583   // protects allocations and contains.
 584   Mutex* const _lock;
 585 
 586   // Type of metadata allocated.
 587   Metaspace::MetadataType _mdtype;
 588 
 589   // List of chunks in use by this SpaceManager.  Allocations
 590   // are done from the current chunk.  The list is used for deallocating
 591   // chunks when the SpaceManager is freed.
 592   Metachunk* _chunks_in_use[NumberOfInUseLists];
 593   Metachunk* _current_chunk;
 594 
 595   // Number of small chunks to allocate to a manager
 596   // If class space manager, small chunks are unlimited
 597   static uint const _small_chunk_limit;
 598 
 599   // Sum of all space in allocated chunks
 600   size_t _allocated_blocks_words;
 601 
 602   // Sum of all allocated chunks
 603   size_t _allocated_chunks_words;
 604   size_t _allocated_chunks_count;
 605 
 606   // Free lists of blocks are per SpaceManager since they
 607   // are assumed to be in chunks in use by the SpaceManager
 608   // and all chunks in use by a SpaceManager are freed when
 609   // the class loader using the SpaceManager is collected.
 610   BlockFreelist _block_freelists;
 611 
 612   // protects virtualspace and chunk expansions
 613   static const char*  _expand_lock_name;
 614   static const int    _expand_lock_rank;
 615   static Mutex* const _expand_lock;
 616 
 617  private:
 618   // Accessors
 619   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
 620   void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }
 621 
 622   BlockFreelist* block_freelists() const {
 623     return (BlockFreelist*) &_block_freelists;
 624   }
 625 
 626   Metaspace::MetadataType mdtype() { return _mdtype; }
 627 
 628   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
 629   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
 630 
 631   Metachunk* current_chunk() const { return _current_chunk; }
 632   void set_current_chunk(Metachunk* v) {
 633     _current_chunk = v;
 634   }
 635 
 636   Metachunk* find_current_chunk(size_t word_size);
 637 
 638   // Add chunk to the list of chunks in use
 639   void add_chunk(Metachunk* v, bool make_current);
 640   void retire_current_chunk();
 641 
 642   Mutex* lock() const { return _lock; }
 643 
 644   const char* chunk_size_name(ChunkIndex index) const;
 645 
 646  protected:
 647   void initialize();
 648 
 649  public:
 650   SpaceManager(Metaspace::MetadataType mdtype,
 651                Mutex* lock);
 652   ~SpaceManager();
 653 
 654   enum ChunkMultiples {
 655     MediumChunkMultiple = 4
 656   };
 657 
 658   bool is_class() { return _mdtype == Metaspace::ClassType; }
 659 
 660   // Accessors
 661   size_t specialized_chunk_size() { return SpecializedChunk; }
 662   size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
 663   size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
 664   size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
 665 
 666   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
 667   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
 668   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
 669   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
 670 
 671   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
 672 
 673   static Mutex* expand_lock() { return _expand_lock; }
 674 
 675   // Increment the per Metaspace and global running sums for Metachunks
 676   // by the given size.  This is used when a Metachunk to added to
 677   // the in-use list.
 678   void inc_size_metrics(size_t words);
 679   // Increment the per Metaspace and global running sums Metablocks by the given
 680   // size.  This is used when a Metablock is allocated.
 681   void inc_used_metrics(size_t words);
 682   // Delete the portion of the running sums for this SpaceManager. That is,
 683   // the globals running sums for the Metachunks and Metablocks are
 684   // decremented for all the Metachunks in-use by this SpaceManager.
 685   void dec_total_from_size_metrics();
 686 
 687   // Set the sizes for the initial chunks.
 688   void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
 689                                size_t* chunk_word_size,
 690                                size_t* class_chunk_word_size);
 691 
 692   size_t sum_capacity_in_chunks_in_use() const;
 693   size_t sum_used_in_chunks_in_use() const;
 694   size_t sum_free_in_chunks_in_use() const;
 695   size_t sum_waste_in_chunks_in_use() const;
 696   size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
 697 
 698   size_t sum_count_in_chunks_in_use();
 699   size_t sum_count_in_chunks_in_use(ChunkIndex i);
 700 
 701   Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);
 702 
 703   // Block allocation and deallocation.
 704   // Allocates a block from the current chunk
 705   MetaWord* allocate(size_t word_size);
 706 
 707   // Helper for allocations
 708   MetaWord* allocate_work(size_t word_size);
 709 
 710   // Returns a block to the per manager freelist
 711   void deallocate(MetaWord* p, size_t word_size);
 712 
 713   // Based on the allocation size and a minimum chunk size,
 714   // returned chunk size (for expanding space for chunk allocation).
 715   size_t calc_chunk_size(size_t allocation_word_size);
 716 
 717   // Called when an allocation from the current chunk fails.
 718   // Gets a new chunk (may require getting a new virtual space),
 719   // and allocates from that chunk.
 720   MetaWord* grow_and_allocate(size_t word_size);
 721 
 722   // Notify memory usage to MemoryService.
 723   void track_metaspace_memory_usage();
 724 
 725   // debugging support.
 726 
 727   void dump(outputStream* const out) const;
 728   void print_on(outputStream* st) const;
 729   void locked_print_chunks_in_use_on(outputStream* st) const;
 730 
 731   void verify();
 732   void verify_chunk_size(Metachunk* chunk);
 733   NOT_PRODUCT(void mangle_freed_chunks();)
 734 #ifdef ASSERT
 735   void verify_allocated_blocks_words();
 736 #endif
 737 
 738   size_t get_raw_word_size(size_t word_size) {
 739     size_t byte_size = word_size * BytesPerWord;
 740 
 741     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
 742     raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());
 743 
 744     size_t raw_word_size = raw_bytes_size / BytesPerWord;
 745     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
 746 
 747     return raw_word_size;
 748   }
 749 };
 750 
 751 uint const SpaceManager::_small_chunk_limit = 4;
 752 
 753 const char* SpaceManager::_expand_lock_name =
 754   "SpaceManager chunk allocation lock";
 755 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
 756 Mutex* const SpaceManager::_expand_lock =
 757   new Mutex(SpaceManager::_expand_lock_rank,
 758             SpaceManager::_expand_lock_name,
 759             Mutex::_allow_vm_block_flag);
 760 
 761 void VirtualSpaceNode::inc_container_count() {
 762   assert_lock_strong(SpaceManager::expand_lock());
 763   _container_count++;
 764   assert(_container_count == container_count_slow(),
 765          err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
 766                  " container_count_slow() " SIZE_FORMAT,
 767                  _container_count, container_count_slow()));
 768 }
 769 
 770 void VirtualSpaceNode::dec_container_count() {
 771   assert_lock_strong(SpaceManager::expand_lock());
 772   _container_count--;
 773 }
 774 
 775 #ifdef ASSERT
 776 void VirtualSpaceNode::verify_container_count() {
 777   assert(_container_count == container_count_slow(),
 778     err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
 779             " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
 780 }
 781 #endif
 782 
 783 // BlockFreelist methods
 784 
 785 BlockFreelist::BlockFreelist() : _dictionary(NULL) {}
 786 
 787 BlockFreelist::~BlockFreelist() {
 788   if (_dictionary != NULL) {
 789     if (Verbose && TraceMetadataChunkAllocation) {
 790       _dictionary->print_free_lists(gclog_or_tty);
 791     }
 792     delete _dictionary;
 793   }
 794 }
 795 
 796 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
 797   Metablock* free_chunk = ::new (p) Metablock(word_size);
 798   if (dictionary() == NULL) {
 799    _dictionary = new BlockTreeDictionary();
 800   }
 801   dictionary()->return_chunk(free_chunk);
 802 }
 803 
 804 MetaWord* BlockFreelist::get_block(size_t word_size) {
 805   if (dictionary() == NULL) {
 806     return NULL;
 807   }
 808 
 809   if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
 810     // Dark matter.  Too small for dictionary.
 811     return NULL;
 812   }
 813 
 814   Metablock* free_block =
 815     dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
 816   if (free_block == NULL) {
 817     return NULL;
 818   }
 819 
 820   const size_t block_size = free_block->size();
 821   if (block_size > WasteMultiplier * word_size) {
 822     return_block((MetaWord*)free_block, block_size);
 823     return NULL;
 824   }
 825 
 826   MetaWord* new_block = (MetaWord*)free_block;
 827   assert(block_size >= word_size, "Incorrect size of block from freelist");
 828   const size_t unused = block_size - word_size;
 829   if (unused >= TreeChunk<Metablock, FreeList>::min_size()) {
 830     return_block(new_block + word_size, unused);
 831   }
 832 
 833   return new_block;
 834 }
 835 
 836 void BlockFreelist::print_on(outputStream* st) const {
 837   if (dictionary() == NULL) {
 838     return;
 839   }
 840   dictionary()->print_free_lists(st);
 841 }
 842 
 843 // VirtualSpaceNode methods
 844 
 845 VirtualSpaceNode::~VirtualSpaceNode() {
 846   _rs.release();
 847 #ifdef ASSERT
 848   size_t word_size = sizeof(*this) / BytesPerWord;
 849   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
 850 #endif
 851 }
 852 
 853 size_t VirtualSpaceNode::used_words_in_vs() const {
 854   return pointer_delta(top(), bottom(), sizeof(MetaWord));
 855 }
 856 
 857 // Space committed in the VirtualSpace
 858 size_t VirtualSpaceNode::capacity_words_in_vs() const {
 859   return pointer_delta(end(), bottom(), sizeof(MetaWord));
 860 }
 861 
 862 size_t VirtualSpaceNode::free_words_in_vs() const {
 863   return pointer_delta(end(), top(), sizeof(MetaWord));
 864 }
 865 
 866 // Allocates the chunk from the virtual space only.
 867 // This interface is also used internally for debugging.  Not all
 868 // chunks removed here are necessarily used for allocation.
 869 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
 870   // Bottom of the new chunk
 871   MetaWord* chunk_limit = top();
 872   assert(chunk_limit != NULL, "Not safe to call this method");
 873 
 874   // The virtual spaces are always expanded by the
 875   // commit granularity to enforce the following condition.
 876   // Without this the is_available check will not work correctly.
 877   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
 878       "The committed memory doesn't match the expanded memory.");
 879 
 880   if (!is_available(chunk_word_size)) {
 881     if (TraceMetadataChunkAllocation) {
 882       gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
 883       // Dump some information about the virtual space that is nearly full
 884       print_on(gclog_or_tty);
 885     }
 886     return NULL;
 887   }
 888 
 889   // Take the space  (bump top on the current virtual space).
 890   inc_top(chunk_word_size);
 891 
 892   // Initialize the chunk
 893   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
 894   return result;
 895 }
 896 
 897 
 898 // Expand the virtual space (commit more of the reserved space)
 899 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
 900   size_t min_bytes = min_words * BytesPerWord;
 901   size_t preferred_bytes = preferred_words * BytesPerWord;
 902 
 903   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
 904 
 905   if (uncommitted < min_bytes) {
 906     return false;
 907   }
 908 
 909   size_t commit = MIN2(preferred_bytes, uncommitted);
 910   bool result = virtual_space()->expand_by(commit, false);
 911 
 912   assert(result, "Failed to commit memory");
 913 
 914   return result;
 915 }
 916 
 917 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
 918   assert_lock_strong(SpaceManager::expand_lock());
 919   Metachunk* result = take_from_committed(chunk_word_size);
 920   if (result != NULL) {
 921     inc_container_count();
 922   }
 923   return result;
 924 }
 925 
 926 bool VirtualSpaceNode::initialize() {
 927 
 928   if (!_rs.is_reserved()) {
 929     return false;
 930   }
 931 
 932   // These are necessary restriction to make sure that the virtual space always
 933   // grows in steps of Metaspace::commit_alignment(). If both base and size are
 934   // aligned only the middle alignment of the VirtualSpace is used.
 935   assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
 936   assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
 937 
 938   // ReservedSpaces marked as special will have the entire memory
 939   // pre-committed. Setting a committed size will make sure that
 940   // committed_size and actual_committed_size agrees.
 941   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
 942 
 943   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
 944                                             Metaspace::commit_alignment());
 945   if (result) {
 946     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
 947         "Checking that the pre-committed memory was registered by the VirtualSpace");
 948 
 949     set_top((MetaWord*)virtual_space()->low());
 950     set_reserved(MemRegion((HeapWord*)_rs.base(),
 951                  (HeapWord*)(_rs.base() + _rs.size())));
 952 
 953     assert(reserved()->start() == (HeapWord*) _rs.base(),
 954       err_msg("Reserved start was not set properly " PTR_FORMAT
 955         " != " PTR_FORMAT, reserved()->start(), _rs.base()));
 956     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
 957       err_msg("Reserved size was not set properly " SIZE_FORMAT
 958         " != " SIZE_FORMAT, reserved()->word_size(),
 959         _rs.size() / BytesPerWord));
 960   }
 961 
 962   return result;
 963 }
 964 
 965 void VirtualSpaceNode::print_on(outputStream* st) const {
 966   size_t used = used_words_in_vs();
 967   size_t capacity = capacity_words_in_vs();
 968   VirtualSpace* vs = virtual_space();
 969   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
 970            "[" PTR_FORMAT ", " PTR_FORMAT ", "
 971            PTR_FORMAT ", " PTR_FORMAT ")",
 972            vs, capacity / K,
 973            capacity == 0 ? 0 : used * 100 / capacity,
 974            bottom(), top(), end(),
 975            vs->high_boundary());
 976 }
 977 
 978 #ifdef ASSERT
 979 void VirtualSpaceNode::mangle() {
 980   size_t word_size = capacity_words_in_vs();
 981   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
 982 }
 983 #endif // ASSERT
 984 
 985 // VirtualSpaceList methods
 986 // Space allocated from the VirtualSpace
 987 
 988 VirtualSpaceList::~VirtualSpaceList() {
 989   VirtualSpaceListIterator iter(virtual_space_list());
 990   while (iter.repeat()) {
 991     VirtualSpaceNode* vsl = iter.get_next();
 992     delete vsl;
 993   }
 994 }
 995 
 996 void VirtualSpaceList::inc_reserved_words(size_t v) {
 997   assert_lock_strong(SpaceManager::expand_lock());
 998   _reserved_words = _reserved_words + v;
 999 }
1000 void VirtualSpaceList::dec_reserved_words(size_t v) {
1001   assert_lock_strong(SpaceManager::expand_lock());
1002   _reserved_words = _reserved_words - v;
1003 }
1004 
1005 #define assert_committed_below_limit()                             \
1006   assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,      \
1007       err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
1008               " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1009           MetaspaceAux::committed_bytes(), MaxMetaspaceSize));
1010 
1011 void VirtualSpaceList::inc_committed_words(size_t v) {
1012   assert_lock_strong(SpaceManager::expand_lock());
1013   _committed_words = _committed_words + v;
1014 
1015   assert_committed_below_limit();
1016 }
1017 void VirtualSpaceList::dec_committed_words(size_t v) {
1018   assert_lock_strong(SpaceManager::expand_lock());
1019   _committed_words = _committed_words - v;
1020 
1021   assert_committed_below_limit();
1022 }
1023 
1024 void VirtualSpaceList::inc_virtual_space_count() {
1025   assert_lock_strong(SpaceManager::expand_lock());
1026   _virtual_space_count++;
1027 }
1028 void VirtualSpaceList::dec_virtual_space_count() {
1029   assert_lock_strong(SpaceManager::expand_lock());
1030   _virtual_space_count--;
1031 }
1032 
1033 void ChunkManager::remove_chunk(Metachunk* chunk) {
1034   size_t word_size = chunk->word_size();
1035   ChunkIndex index = list_index(word_size);
1036   if (index != HumongousIndex) {
1037     free_chunks(index)->remove_chunk(chunk);
1038   } else {
1039     humongous_dictionary()->remove_chunk(chunk);
1040   }
1041 
1042   // Chunk is being removed from the chunks free list.
1043   dec_free_chunks_total(chunk->word_size());
1044 }
1045 
1046 // Walk the list of VirtualSpaceNodes and delete
1047 // nodes with a 0 container_count.  Remove Metachunks in
1048 // the node from their respective freelists.
1049 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1050   assert_lock_strong(SpaceManager::expand_lock());
1051   // Don't use a VirtualSpaceListIterator because this
1052   // list is being changed and a straightforward use of an iterator is not safe.
1053   VirtualSpaceNode* purged_vsl = NULL;
1054   VirtualSpaceNode* prev_vsl = virtual_space_list();
1055   VirtualSpaceNode* next_vsl = prev_vsl;
1056   while (next_vsl != NULL) {
1057     VirtualSpaceNode* vsl = next_vsl;
1058     next_vsl = vsl->next();
1059     // Don't free the current virtual space since it will likely
1060     // be needed soon.
1061     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1062       // Unlink it from the list
1063       if (prev_vsl == vsl) {
1064         // This is the case of the current node being the first node.
1065         assert(vsl == virtual_space_list(), "Expected to be the first node");
1066         set_virtual_space_list(vsl->next());
1067       } else {
1068         prev_vsl->set_next(vsl->next());
1069       }
1070 
1071       vsl->purge(chunk_manager);
1072       dec_reserved_words(vsl->reserved_words());
1073       dec_committed_words(vsl->committed_words());
1074       dec_virtual_space_count();
1075       purged_vsl = vsl;
1076       delete vsl;
1077     } else {
1078       prev_vsl = vsl;
1079     }
1080   }
1081 #ifdef ASSERT
1082   if (purged_vsl != NULL) {
1083   // List should be stable enough to use an iterator here.
1084   VirtualSpaceListIterator iter(virtual_space_list());
1085     while (iter.repeat()) {
1086       VirtualSpaceNode* vsl = iter.get_next();
1087       assert(vsl != purged_vsl, "Purge of vsl failed");
1088     }
1089   }
1090 #endif
1091 }
1092 
1093 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
1094                                    _is_class(false),
1095                                    _virtual_space_list(NULL),
1096                                    _current_virtual_space(NULL),
1097                                    _reserved_words(0),
1098                                    _committed_words(0),
1099                                    _virtual_space_count(0) {
1100   MutexLockerEx cl(SpaceManager::expand_lock(),
1101                    Mutex::_no_safepoint_check_flag);
1102   create_new_virtual_space(word_size);
1103 }
1104 
1105 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
1106                                    _is_class(true),
1107                                    _virtual_space_list(NULL),
1108                                    _current_virtual_space(NULL),
1109                                    _reserved_words(0),
1110                                    _committed_words(0),
1111                                    _virtual_space_count(0) {
1112   MutexLockerEx cl(SpaceManager::expand_lock(),
1113                    Mutex::_no_safepoint_check_flag);
1114   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
1115   bool succeeded = class_entry->initialize();
1116   if (succeeded) {
1117     link_vs(class_entry);
1118   }
1119 }
1120 
1121 size_t VirtualSpaceList::free_bytes() {
1122   return virtual_space_list()->free_words_in_vs() * BytesPerWord;
1123 }
1124 
1125 // Allocate another meta virtual space and add it to the list.
1126 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
1127   assert_lock_strong(SpaceManager::expand_lock());
1128 
1129   if (is_class()) {
1130     assert(false, "We currently don't support more than one VirtualSpace for"
1131                   " the compressed class space. The initialization of the"
1132                   " CCS uses another code path and should not hit this path.");
1133     return false;
1134   }
1135 
1136   if (vs_word_size == 0) {
1137     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
1138     return false;
1139   }
1140 
1141   // Reserve the space
1142   size_t vs_byte_size = vs_word_size * BytesPerWord;
1143   assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());
1144 
1145   // Allocate the meta virtual space and initialize it.
1146   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
1147   if (!new_entry->initialize()) {
1148     delete new_entry;
1149     return false;
1150   } else {
1151     assert(new_entry->reserved_words() == vs_word_size,
1152         "Reserved memory size differs from requested memory size");
1153     // ensure lock-free iteration sees fully initialized node
1154     OrderAccess::storestore();
1155     link_vs(new_entry);
1156     return true;
1157   }
1158 }
1159 
1160 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1161   if (virtual_space_list() == NULL) {
1162       set_virtual_space_list(new_entry);
1163   } else {
1164     current_virtual_space()->set_next(new_entry);
1165   }
1166   set_current_virtual_space(new_entry);
1167   inc_reserved_words(new_entry->reserved_words());
1168   inc_committed_words(new_entry->committed_words());
1169   inc_virtual_space_count();
1170 #ifdef ASSERT
1171   new_entry->mangle();
1172 #endif
1173   if (TraceMetavirtualspaceAllocation && Verbose) {
1174     VirtualSpaceNode* vsl = current_virtual_space();
1175     vsl->print_on(gclog_or_tty);
1176   }
1177 }
1178 
1179 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1180                                       size_t min_words,
1181                                       size_t preferred_words) {
1182   size_t before = node->committed_words();
1183 
1184   bool result = node->expand_by(min_words, preferred_words);
1185 
1186   size_t after = node->committed_words();
1187 
1188   // after and before can be the same if the memory was pre-committed.
1189   assert(after >= before, "Inconsistency");
1190   inc_committed_words(after - before);
1191 
1192   return result;
1193 }
1194 
1195 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
1196   assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
1197   assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
1198   assert(min_words <= preferred_words, "Invalid arguments");
1199 
1200   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
1201     return  false;
1202   }
1203 
1204   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
1205   if (allowed_expansion_words < min_words) {
1206     return false;
1207   }
1208 
1209   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
1210 
1211   // Commit more memory from the the current virtual space.
1212   bool vs_expanded = expand_node_by(current_virtual_space(),
1213                                     min_words,
1214                                     max_expansion_words);
1215   if (vs_expanded) {
1216     return true;
1217   }
1218 
1219   // Get another virtual space.
1220   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
1221   grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
1222 
1223   if (create_new_virtual_space(grow_vs_words)) {
1224     if (current_virtual_space()->is_pre_committed()) {
1225       // The memory was pre-committed, so we are done here.
1226       assert(min_words <= current_virtual_space()->committed_words(),
1227           "The new VirtualSpace was pre-committed, so it"
1228           "should be large enough to fit the alloc request.");
1229       return true;
1230     }
1231 
1232     return expand_node_by(current_virtual_space(),
1233                           min_words,
1234                           max_expansion_words);
1235   }
1236 
1237   return false;
1238 }
1239 
1240 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
1241                                            size_t grow_chunks_by_words,
1242                                            size_t medium_chunk_bunch) {
1243 
1244   // Allocate a chunk out of the current virtual space.
1245   Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1246 
1247   if (next != NULL) {
1248     return next;
1249   }
1250 
1251   // The expand amount is currently only determined by the requested sizes
1252   // and not how much committed memory is left in the current virtual space.
1253 
1254   size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
1255   size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
1256   if (min_word_size >= preferred_word_size) {
1257     // Can happen when humongous chunks are allocated.
1258     preferred_word_size = min_word_size;
1259   }
1260 
1261   bool expanded = expand_by(min_word_size, preferred_word_size);
1262   if (expanded) {
1263     next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1264     assert(next != NULL, "The allocation was expected to succeed after the expansion");
1265   }
1266 
1267    return next;
1268 }
1269 
1270 void VirtualSpaceList::print_on(outputStream* st) const {
1271   if (TraceMetadataChunkAllocation && Verbose) {
1272     VirtualSpaceListIterator iter(virtual_space_list());
1273     while (iter.repeat()) {
1274       VirtualSpaceNode* node = iter.get_next();
1275       node->print_on(st);
1276     }
1277   }
1278 }
1279 
1280 bool VirtualSpaceList::contains(const void *ptr) {
1281   VirtualSpaceNode* list = virtual_space_list();
1282   VirtualSpaceListIterator iter(list);
1283   while (iter.repeat()) {
1284     VirtualSpaceNode* node = iter.get_next();
1285     if (node->reserved()->contains(ptr)) {
1286       return true;
1287     }
1288   }
1289   return false;
1290 }
1291 
1292 
1293 // MetaspaceGC methods
1294 
1295 // VM_CollectForMetadataAllocation is the vm operation used to GC.
1296 // Within the VM operation after the GC the attempt to allocate the metadata
1297 // should succeed.  If the GC did not free enough space for the metaspace
1298 // allocation, the HWM is increased so that another virtualspace will be
1299 // allocated for the metadata.  With perm gen the increase in the perm
1300 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
1301 // metaspace policy uses those as the small and large steps for the HWM.
1302 //
1303 // After the GC the compute_new_size() for MetaspaceGC is called to
1304 // resize the capacity of the metaspaces.  The current implementation
1305 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
1306 // to resize the Java heap by some GC's.  New flags can be implemented
1307 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
1308 // free space is desirable in the metaspace capacity to decide how much
1309 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
1310 // free space is desirable in the metaspace capacity before decreasing
1311 // the HWM.
1312 
1313 // Calculate the amount to increase the high water mark (HWM).
1314 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1315 // another expansion is not requested too soon.  If that is not
1316 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
1317 // If that is still not enough, expand by the size of the allocation
1318 // plus some.
1319 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
1320   size_t min_delta = MinMetaspaceExpansion;
1321   size_t max_delta = MaxMetaspaceExpansion;
1322   size_t delta = align_size_up(bytes, Metaspace::commit_alignment());
1323 
1324   if (delta <= min_delta) {
1325     delta = min_delta;
1326   } else if (delta <= max_delta) {
1327     // Don't want to hit the high water mark on the next
1328     // allocation so make the delta greater than just enough
1329     // for this allocation.
1330     delta = max_delta;
1331   } else {
1332     // This allocation is large but the next ones are probably not
1333     // so increase by the minimum.
1334     delta = delta + min_delta;
1335   }
1336 
1337   assert_is_size_aligned(delta, Metaspace::commit_alignment());
1338 
1339   return delta;
1340 }
1341 
1342 size_t MetaspaceGC::capacity_until_GC() {
1343   size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
1344   assert(value >= MetaspaceSize, "Not initialied properly?");
1345   return value;
1346 }
1347 
1348 size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
1349   assert_is_size_aligned(v, Metaspace::commit_alignment());
1350 
1351   return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
1352 }
1353 
1354 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
1355   assert_is_size_aligned(v, Metaspace::commit_alignment());
1356 
1357   return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
1358 }
1359 
1360 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
1361   // Check if the compressed class space is full.
1362   if (is_class && Metaspace::using_class_space()) {
1363     size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
1364     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
1365       return false;
1366     }
1367   }
1368 
1369   // Check if the user has imposed a limit on the metaspace memory.
1370   size_t committed_bytes = MetaspaceAux::committed_bytes();
1371   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
1372     return false;
1373   }
1374 
1375   return true;
1376 }
1377 
1378 size_t MetaspaceGC::allowed_expansion() {
1379   size_t committed_bytes = MetaspaceAux::committed_bytes();
1380 
1381   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
1382 
1383   // Always grant expansion if we are initiating the JVM,
1384   // or if the GC_locker is preventing GCs.
1385   if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) {
1386     return left_until_max / BytesPerWord;
1387   }
1388 
1389   size_t capacity_until_gc = capacity_until_GC();
1390 
1391   if (capacity_until_gc <= committed_bytes) {
1392     return 0;
1393   }
1394 
1395   size_t left_until_GC = capacity_until_gc - committed_bytes;
1396   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
1397 
1398   return left_to_commit / BytesPerWord;
1399 }
1400 
1401 void MetaspaceGC::compute_new_size() {
1402   assert(_shrink_factor <= 100, "invalid shrink factor");
1403   uint current_shrink_factor = _shrink_factor;
1404   _shrink_factor = 0;
1405 
1406   const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
1407   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1408 
1409   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1410   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1411 
1412   const double min_tmp = used_after_gc / maximum_used_percentage;
1413   size_t minimum_desired_capacity =
1414     (size_t)MIN2(min_tmp, double(max_uintx));
1415   // Don't shrink less than the initial generation size
1416   minimum_desired_capacity = MAX2(minimum_desired_capacity,
1417                                   MetaspaceSize);
1418 
1419   if (PrintGCDetails && Verbose) {
1420     gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
1421     gclog_or_tty->print_cr("  "
1422                   "  minimum_free_percentage: %6.2f"
1423                   "  maximum_used_percentage: %6.2f",
1424                   minimum_free_percentage,
1425                   maximum_used_percentage);
1426     gclog_or_tty->print_cr("  "
1427                   "   used_after_gc       : %6.1fKB",
1428                   used_after_gc / (double) K);
1429   }
1430 
1431 
1432   size_t shrink_bytes = 0;
1433   if (capacity_until_GC < minimum_desired_capacity) {
1434     // If we have less capacity below the metaspace HWM, then
1435     // increment the HWM.
1436     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1437     expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
1438     // Don't expand unless it's significant
1439     if (expand_bytes >= MinMetaspaceExpansion) {
1440       MetaspaceGC::inc_capacity_until_GC(expand_bytes);
1441     }
1442     if (PrintGCDetails && Verbose) {
1443       size_t new_capacity_until_GC = capacity_until_GC;
1444       gclog_or_tty->print_cr("    expanding:"
1445                     "  minimum_desired_capacity: %6.1fKB"
1446                     "  expand_bytes: %6.1fKB"
1447                     "  MinMetaspaceExpansion: %6.1fKB"
1448                     "  new metaspace HWM:  %6.1fKB",
1449                     minimum_desired_capacity / (double) K,
1450                     expand_bytes / (double) K,
1451                     MinMetaspaceExpansion / (double) K,
1452                     new_capacity_until_GC / (double) K);
1453     }
1454     return;
1455   }
1456 
1457   // No expansion, now see if we want to shrink
1458   // We would never want to shrink more than this
1459   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1460   assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
1461     max_shrink_bytes));
1462 
1463   // Should shrinking be considered?
1464   if (MaxMetaspaceFreeRatio < 100) {
1465     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1466     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1467     const double max_tmp = used_after_gc / minimum_used_percentage;
1468     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1469     maximum_desired_capacity = MAX2(maximum_desired_capacity,
1470                                     MetaspaceSize);
1471     if (PrintGCDetails && Verbose) {
1472       gclog_or_tty->print_cr("  "
1473                              "  maximum_free_percentage: %6.2f"
1474                              "  minimum_used_percentage: %6.2f",
1475                              maximum_free_percentage,
1476                              minimum_used_percentage);
1477       gclog_or_tty->print_cr("  "
1478                              "  minimum_desired_capacity: %6.1fKB"
1479                              "  maximum_desired_capacity: %6.1fKB",
1480                              minimum_desired_capacity / (double) K,
1481                              maximum_desired_capacity / (double) K);
1482     }
1483 
1484     assert(minimum_desired_capacity <= maximum_desired_capacity,
1485            "sanity check");
1486 
1487     if (capacity_until_GC > maximum_desired_capacity) {
1488       // Capacity too large, compute shrinking size
1489       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1490       // We don't want shrink all the way back to initSize if people call
1491       // System.gc(), because some programs do that between "phases" and then
1492       // we'd just have to grow the heap up again for the next phase.  So we
1493       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1494       // on the third call, and 100% by the fourth call.  But if we recompute
1495       // size without shrinking, it goes back to 0%.
1496       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1497 
1498       shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1499 
1500       assert(shrink_bytes <= max_shrink_bytes,
1501         err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1502           shrink_bytes, max_shrink_bytes));
1503       if (current_shrink_factor == 0) {
1504         _shrink_factor = 10;
1505       } else {
1506         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1507       }
1508       if (PrintGCDetails && Verbose) {
1509         gclog_or_tty->print_cr("  "
1510                       "  shrinking:"
1511                       "  initSize: %.1fK"
1512                       "  maximum_desired_capacity: %.1fK",
1513                       MetaspaceSize / (double) K,
1514                       maximum_desired_capacity / (double) K);
1515         gclog_or_tty->print_cr("  "
1516                       "  shrink_bytes: %.1fK"
1517                       "  current_shrink_factor: %d"
1518                       "  new shrink factor: %d"
1519                       "  MinMetaspaceExpansion: %.1fK",
1520                       shrink_bytes / (double) K,
1521                       current_shrink_factor,
1522                       _shrink_factor,
1523                       MinMetaspaceExpansion / (double) K);
1524       }
1525     }
1526   }
1527 
1528   // Don't shrink unless it's significant
1529   if (shrink_bytes >= MinMetaspaceExpansion &&
1530       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1531     MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1532   }
1533 }
1534 
1535 // Metadebug methods
1536 
1537 void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
1538                                        size_t chunk_word_size){
1539 #ifdef ASSERT
1540   VirtualSpaceList* vsl = sm->vs_list();
1541   if (MetaDataDeallocateALot &&
1542       Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
1543     Metadebug::reset_deallocate_chunk_a_lot_count();
1544     for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) {
1545       Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
1546       if (dummy_chunk == NULL) {
1547         break;
1548       }
1549       sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
1550 
1551       if (TraceMetadataChunkAllocation && Verbose) {
1552         gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
1553                                sm->sum_count_in_chunks_in_use());
1554         dummy_chunk->print_on(gclog_or_tty);
1555         gclog_or_tty->print_cr("  Free chunks total %d  count %d",
1556                                sm->chunk_manager()->free_chunks_total_words(),
1557                                sm->chunk_manager()->free_chunks_count());
1558       }
1559     }
1560   } else {
1561     Metadebug::inc_deallocate_chunk_a_lot_count();
1562   }
1563 #endif
1564 }
1565 
1566 void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
1567                                        size_t raw_word_size){
1568 #ifdef ASSERT
1569   if (MetaDataDeallocateALot &&
1570         Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
1571     Metadebug::set_deallocate_block_a_lot_count(0);
1572     for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
1573       MetaWord* dummy_block = sm->allocate_work(raw_word_size);
1574       if (dummy_block == 0) {
1575         break;
1576       }
1577       sm->deallocate(dummy_block, raw_word_size);
1578     }
1579   } else {
1580     Metadebug::inc_deallocate_block_a_lot_count();
1581   }
1582 #endif
1583 }
1584 
1585 void Metadebug::init_allocation_fail_alot_count() {
1586   if (MetadataAllocationFailALot) {
1587     _allocation_fail_alot_count =
1588       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1589   }
1590 }
1591 
1592 #ifdef ASSERT
1593 bool Metadebug::test_metadata_failure() {
1594   if (MetadataAllocationFailALot &&
1595       Threads::is_vm_complete()) {
1596     if (_allocation_fail_alot_count > 0) {
1597       _allocation_fail_alot_count--;
1598     } else {
1599       if (TraceMetadataChunkAllocation && Verbose) {
1600         gclog_or_tty->print_cr("Metadata allocation failing for "
1601                                "MetadataAllocationFailALot");
1602       }
1603       init_allocation_fail_alot_count();
1604       return true;
1605     }
1606   }
1607   return false;
1608 }
1609 #endif
1610 
1611 // ChunkManager methods
1612 
1613 size_t ChunkManager::free_chunks_total_words() {
1614   return _free_chunks_total;
1615 }
1616 
1617 size_t ChunkManager::free_chunks_total_bytes() {
1618   return free_chunks_total_words() * BytesPerWord;
1619 }
1620 
1621 size_t ChunkManager::free_chunks_count() {
1622 #ifdef ASSERT
1623   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1624     MutexLockerEx cl(SpaceManager::expand_lock(),
1625                      Mutex::_no_safepoint_check_flag);
1626     // This lock is only needed in debug because the verification
1627     // of the _free_chunks_totals walks the list of free chunks
1628     slow_locked_verify_free_chunks_count();
1629   }
1630 #endif
1631   return _free_chunks_count;
1632 }
1633 
1634 void ChunkManager::locked_verify_free_chunks_total() {
1635   assert_lock_strong(SpaceManager::expand_lock());
1636   assert(sum_free_chunks() == _free_chunks_total,
1637     err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
1638            " same as sum " SIZE_FORMAT, _free_chunks_total,
1639            sum_free_chunks()));
1640 }
1641 
1642 void ChunkManager::verify_free_chunks_total() {
1643   MutexLockerEx cl(SpaceManager::expand_lock(),
1644                      Mutex::_no_safepoint_check_flag);
1645   locked_verify_free_chunks_total();
1646 }
1647 
1648 void ChunkManager::locked_verify_free_chunks_count() {
1649   assert_lock_strong(SpaceManager::expand_lock());
1650   assert(sum_free_chunks_count() == _free_chunks_count,
1651     err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
1652            " same as sum " SIZE_FORMAT, _free_chunks_count,
1653            sum_free_chunks_count()));
1654 }
1655 
1656 void ChunkManager::verify_free_chunks_count() {
1657 #ifdef ASSERT
1658   MutexLockerEx cl(SpaceManager::expand_lock(),
1659                      Mutex::_no_safepoint_check_flag);
1660   locked_verify_free_chunks_count();
1661 #endif
1662 }
1663 
1664 void ChunkManager::verify() {
1665   MutexLockerEx cl(SpaceManager::expand_lock(),
1666                      Mutex::_no_safepoint_check_flag);
1667   locked_verify();
1668 }
1669 
1670 void ChunkManager::locked_verify() {
1671   locked_verify_free_chunks_count();
1672   locked_verify_free_chunks_total();
1673 }
1674 
1675 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1676   assert_lock_strong(SpaceManager::expand_lock());
1677   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1678                 _free_chunks_total, _free_chunks_count);
1679 }
1680 
1681 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1682   assert_lock_strong(SpaceManager::expand_lock());
1683   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1684                 sum_free_chunks(), sum_free_chunks_count());
1685 }
1686 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1687   return &_free_chunks[index];
1688 }
1689 
1690 // These methods that sum the free chunk lists are used in printing
1691 // methods that are used in product builds.
1692 size_t ChunkManager::sum_free_chunks() {
1693   assert_lock_strong(SpaceManager::expand_lock());
1694   size_t result = 0;
1695   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1696     ChunkList* list = free_chunks(i);
1697 
1698     if (list == NULL) {
1699       continue;
1700     }
1701 
1702     result = result + list->count() * list->size();
1703   }
1704   result = result + humongous_dictionary()->total_size();
1705   return result;
1706 }
1707 
1708 size_t ChunkManager::sum_free_chunks_count() {
1709   assert_lock_strong(SpaceManager::expand_lock());
1710   size_t count = 0;
1711   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1712     ChunkList* list = free_chunks(i);
1713     if (list == NULL) {
1714       continue;
1715     }
1716     count = count + list->count();
1717   }
1718   count = count + humongous_dictionary()->total_free_blocks();
1719   return count;
1720 }
1721 
1722 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1723   ChunkIndex index = list_index(word_size);
1724   assert(index < HumongousIndex, "No humongous list");
1725   return free_chunks(index);
1726 }
1727 
1728 void ChunkManager::free_chunks_put(Metachunk* chunk) {
1729   assert_lock_strong(SpaceManager::expand_lock());
1730   ChunkList* free_list = find_free_chunks_list(chunk->word_size());
1731   chunk->set_next(free_list->head());
1732   free_list->set_head(chunk);
1733   // chunk is being returned to the chunk free list
1734   inc_free_chunks_total(chunk->word_size());
1735   slow_locked_verify();
1736 }
1737 
1738 void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
1739   // The deallocation of a chunk originates in the freelist
1740   // manangement code for a Metaspace and does not hold the
1741   // lock.
1742   assert(chunk != NULL, "Deallocating NULL");
1743   assert_lock_strong(SpaceManager::expand_lock());
1744   slow_locked_verify();
1745   if (TraceMetadataChunkAllocation) {
1746     gclog_or_tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
1747                            PTR_FORMAT "  size " SIZE_FORMAT,
1748                            chunk, chunk->word_size());
1749   }
1750   free_chunks_put(chunk);
1751 }
1752 
1753 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1754   assert_lock_strong(SpaceManager::expand_lock());
1755 
1756   slow_locked_verify();
1757 
1758   Metachunk* chunk = NULL;
1759   if (list_index(word_size) != HumongousIndex) {
1760     ChunkList* free_list = find_free_chunks_list(word_size);
1761     assert(free_list != NULL, "Sanity check");
1762 
1763     chunk = free_list->head();
1764 
1765     if (chunk == NULL) {
1766       return NULL;
1767     }
1768 
1769     // Remove the chunk as the head of the list.
1770     free_list->remove_chunk(chunk);
1771 
1772     if (TraceMetadataChunkAllocation && Verbose) {
1773       gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1774                              PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1775                              free_list, chunk, chunk->word_size());
1776     }
1777   } else {
1778     chunk = humongous_dictionary()->get_chunk(
1779       word_size,
1780       FreeBlockDictionary<Metachunk>::atLeast);
1781 
1782     if (chunk == NULL) {
1783       return NULL;
1784     }
1785 
1786     if (TraceMetadataHumongousAllocation) {
1787       size_t waste = chunk->word_size() - word_size;
1788       gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1789                              SIZE_FORMAT " for requested size " SIZE_FORMAT
1790                              " waste " SIZE_FORMAT,
1791                              chunk->word_size(), word_size, waste);
1792     }
1793   }
1794 
1795   // Chunk is being removed from the chunks free list.
1796   dec_free_chunks_total(chunk->word_size());
1797 
1798   // Remove it from the links to this freelist
1799   chunk->set_next(NULL);
1800   chunk->set_prev(NULL);
1801 #ifdef ASSERT
1802   // Chunk is no longer on any freelist. Setting to false make container_count_slow()
1803   // work.
1804   chunk->set_is_marked_free(false);
1805 #endif
1806   chunk->container()->inc_container_count();
1807 
1808   slow_locked_verify();
1809   return chunk;
1810 }
1811 
1812 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1813   assert_lock_strong(SpaceManager::expand_lock());
1814   slow_locked_verify();
1815 
1816   // Take from the beginning of the list
1817   Metachunk* chunk = free_chunks_get(word_size);
1818   if (chunk == NULL) {
1819     return NULL;
1820   }
1821 
1822   assert((word_size <= chunk->word_size()) ||
1823          list_index(chunk->word_size() == HumongousIndex),
1824          "Non-humongous variable sized chunk");
1825   if (TraceMetadataChunkAllocation) {
1826     size_t list_count;
1827     if (list_index(word_size) < HumongousIndex) {
1828       ChunkList* list = find_free_chunks_list(word_size);
1829       list_count = list->count();
1830     } else {
1831       list_count = humongous_dictionary()->total_count();
1832     }
1833     gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1834                         PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1835                         this, chunk, chunk->word_size(), list_count);
1836     locked_print_free_chunks(gclog_or_tty);
1837   }
1838 
1839   return chunk;
1840 }
1841 
1842 void ChunkManager::print_on(outputStream* out) const {
1843   if (PrintFLSStatistics != 0) {
1844     const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
1845   }
1846 }
1847 
1848 // SpaceManager methods
1849 
1850 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1851                                            size_t* chunk_word_size,
1852                                            size_t* class_chunk_word_size) {
1853   switch (type) {
1854   case Metaspace::BootMetaspaceType:
1855     *chunk_word_size = Metaspace::first_chunk_word_size();
1856     *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1857     break;
1858   case Metaspace::ROMetaspaceType:
1859     *chunk_word_size = SharedReadOnlySize / wordSize;
1860     *class_chunk_word_size = ClassSpecializedChunk;
1861     break;
1862   case Metaspace::ReadWriteMetaspaceType:
1863     *chunk_word_size = SharedReadWriteSize / wordSize;
1864     *class_chunk_word_size = ClassSpecializedChunk;
1865     break;
1866   case Metaspace::AnonymousMetaspaceType:
1867   case Metaspace::ReflectionMetaspaceType:
1868     *chunk_word_size = SpecializedChunk;
1869     *class_chunk_word_size = ClassSpecializedChunk;
1870     break;
1871   default:
1872     *chunk_word_size = SmallChunk;
1873     *class_chunk_word_size = ClassSmallChunk;
1874     break;
1875   }
1876   assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1877     err_msg("Initial chunks sizes bad: data  " SIZE_FORMAT
1878             " class " SIZE_FORMAT,
1879             *chunk_word_size, *class_chunk_word_size));
1880 }
1881 
1882 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1883   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1884   size_t free = 0;
1885   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1886     Metachunk* chunk = chunks_in_use(i);
1887     while (chunk != NULL) {
1888       free += chunk->free_word_size();
1889       chunk = chunk->next();
1890     }
1891   }
1892   return free;
1893 }
1894 
1895 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1896   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1897   size_t result = 0;
1898   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1899    result += sum_waste_in_chunks_in_use(i);
1900   }
1901 
1902   return result;
1903 }
1904 
1905 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1906   size_t result = 0;
1907   Metachunk* chunk = chunks_in_use(index);
1908   // Count the free space in all the chunk but not the
1909   // current chunk from which allocations are still being done.
1910   while (chunk != NULL) {
1911     if (chunk != current_chunk()) {
1912       result += chunk->free_word_size();
1913     }
1914     chunk = chunk->next();
1915   }
1916   return result;
1917 }
1918 
1919 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1920   // For CMS use "allocated_chunks_words()" which does not need the
1921   // Metaspace lock.  For the other collectors sum over the
1922   // lists.  Use both methods as a check that "allocated_chunks_words()"
1923   // is correct.  That is, sum_capacity_in_chunks() is too expensive
1924   // to use in the product and allocated_chunks_words() should be used
1925   // but allow for  checking that allocated_chunks_words() returns the same
1926   // value as sum_capacity_in_chunks_in_use() which is the definitive
1927   // answer.
1928   if (UseConcMarkSweepGC) {
1929     return allocated_chunks_words();
1930   } else {
1931     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1932     size_t sum = 0;
1933     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1934       Metachunk* chunk = chunks_in_use(i);
1935       while (chunk != NULL) {
1936         sum += chunk->word_size();
1937         chunk = chunk->next();
1938       }
1939     }
1940   return sum;
1941   }
1942 }
1943 
1944 size_t SpaceManager::sum_count_in_chunks_in_use() {
1945   size_t count = 0;
1946   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1947     count = count + sum_count_in_chunks_in_use(i);
1948   }
1949 
1950   return count;
1951 }
1952 
1953 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
1954   size_t count = 0;
1955   Metachunk* chunk = chunks_in_use(i);
1956   while (chunk != NULL) {
1957     count++;
1958     chunk = chunk->next();
1959   }
1960   return count;
1961 }
1962 
1963 
1964 size_t SpaceManager::sum_used_in_chunks_in_use() const {
1965   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1966   size_t used = 0;
1967   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1968     Metachunk* chunk = chunks_in_use(i);
1969     while (chunk != NULL) {
1970       used += chunk->used_word_size();
1971       chunk = chunk->next();
1972     }
1973   }
1974   return used;
1975 }
1976 
1977 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
1978 
1979   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1980     Metachunk* chunk = chunks_in_use(i);
1981     st->print("SpaceManager: %s " PTR_FORMAT,
1982                  chunk_size_name(i), chunk);
1983     if (chunk != NULL) {
1984       st->print_cr(" free " SIZE_FORMAT,
1985                    chunk->free_word_size());
1986     } else {
1987       st->print_cr("");
1988     }
1989   }
1990 
1991   chunk_manager()->locked_print_free_chunks(st);
1992   chunk_manager()->locked_print_sum_free_chunks(st);
1993 }
1994 
1995 size_t SpaceManager::calc_chunk_size(size_t word_size) {
1996 
1997   // Decide between a small chunk and a medium chunk.  Up to
1998   // _small_chunk_limit small chunks can be allocated but
1999   // once a medium chunk has been allocated, no more small
2000   // chunks will be allocated.
2001   size_t chunk_word_size;
2002   if (chunks_in_use(MediumIndex) == NULL &&
2003       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2004     chunk_word_size = (size_t) small_chunk_size();
2005     if (word_size + Metachunk::overhead() > small_chunk_size()) {
2006       chunk_word_size = medium_chunk_size();
2007     }
2008   } else {
2009     chunk_word_size = medium_chunk_size();
2010   }
2011 
2012   // Might still need a humongous chunk.  Enforce an
2013   // eight word granularity to facilitate reuse (some
2014   // wastage but better chance of reuse).
2015   size_t if_humongous_sized_chunk =
2016     align_size_up(word_size + Metachunk::overhead(),
2017                   HumongousChunkGranularity);
2018   chunk_word_size =
2019     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2020 
2021   assert(!SpaceManager::is_humongous(word_size) ||
2022          chunk_word_size == if_humongous_sized_chunk,
2023          err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
2024                  " chunk_word_size " SIZE_FORMAT,
2025                  word_size, chunk_word_size));
2026   if (TraceMetadataHumongousAllocation &&
2027       SpaceManager::is_humongous(word_size)) {
2028     gclog_or_tty->print_cr("Metadata humongous allocation:");
2029     gclog_or_tty->print_cr("  word_size " PTR_FORMAT, word_size);
2030     gclog_or_tty->print_cr("  chunk_word_size " PTR_FORMAT,
2031                            chunk_word_size);
2032     gclog_or_tty->print_cr("    chunk overhead " PTR_FORMAT,
2033                            Metachunk::overhead());
2034   }
2035   return chunk_word_size;
2036 }
2037 
2038 void SpaceManager::track_metaspace_memory_usage() {
2039   if (is_init_completed()) {
2040     if (is_class()) {
2041       MemoryService::track_compressed_class_memory_usage();
2042     }
2043     MemoryService::track_metaspace_memory_usage();
2044   }
2045 }
2046 
2047 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2048   assert(vs_list()->current_virtual_space() != NULL,
2049          "Should have been set");
2050   assert(current_chunk() == NULL ||
2051          current_chunk()->allocate(word_size) == NULL,
2052          "Don't need to expand");
2053   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2054 
2055   if (TraceMetadataChunkAllocation && Verbose) {
2056     size_t words_left = 0;
2057     size_t words_used = 0;
2058     if (current_chunk() != NULL) {
2059       words_left = current_chunk()->free_word_size();
2060       words_used = current_chunk()->used_word_size();
2061     }
2062     gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
2063                            " words " SIZE_FORMAT " words used " SIZE_FORMAT
2064                            " words left",
2065                             word_size, words_used, words_left);
2066   }
2067 
2068   // Get another chunk out of the virtual space
2069   size_t grow_chunks_by_words = calc_chunk_size(word_size);
2070   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2071 
2072   if (next != NULL) {
2073     Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
2074   }
2075 
2076   MetaWord* mem = NULL;
2077 
2078   // If a chunk was available, add it to the in-use chunk list
2079   // and do an allocation from it.
2080   if (next != NULL) {
2081     // Add to this manager's list of chunks in use.
2082     add_chunk(next, false);
2083     mem = next->allocate(word_size);
2084   }
2085 
2086   // Track metaspace memory usage statistic.
2087   track_metaspace_memory_usage();
2088 
2089   return mem;
2090 }
2091 
2092 void SpaceManager::print_on(outputStream* st) const {
2093 
2094   for (ChunkIndex i = ZeroIndex;
2095        i < NumberOfInUseLists ;
2096        i = next_chunk_index(i) ) {
2097     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
2098                  chunks_in_use(i),
2099                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2100   }
2101   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2102                " Humongous " SIZE_FORMAT,
2103                sum_waste_in_chunks_in_use(SmallIndex),
2104                sum_waste_in_chunks_in_use(MediumIndex),
2105                sum_waste_in_chunks_in_use(HumongousIndex));
2106   // block free lists
2107   if (block_freelists() != NULL) {
2108     st->print_cr("total in block free lists " SIZE_FORMAT,
2109       block_freelists()->total_size());
2110   }
2111 }
2112 
2113 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2114                            Mutex* lock) :
2115   _mdtype(mdtype),
2116   _allocated_blocks_words(0),
2117   _allocated_chunks_words(0),
2118   _allocated_chunks_count(0),
2119   _lock(lock)
2120 {
2121   initialize();
2122 }
2123 
2124 void SpaceManager::inc_size_metrics(size_t words) {
2125   assert_lock_strong(SpaceManager::expand_lock());
2126   // Total of allocated Metachunks and allocated Metachunks count
2127   // for each SpaceManager
2128   _allocated_chunks_words = _allocated_chunks_words + words;
2129   _allocated_chunks_count++;
2130   // Global total of capacity in allocated Metachunks
2131   MetaspaceAux::inc_capacity(mdtype(), words);
2132   // Global total of allocated Metablocks.
2133   // used_words_slow() includes the overhead in each
2134   // Metachunk so include it in the used when the
2135   // Metachunk is first added (so only added once per
2136   // Metachunk).
2137   MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2138 }
2139 
2140 void SpaceManager::inc_used_metrics(size_t words) {
2141   // Add to the per SpaceManager total
2142   Atomic::add_ptr(words, &_allocated_blocks_words);
2143   // Add to the global total
2144   MetaspaceAux::inc_used(mdtype(), words);
2145 }
2146 
2147 void SpaceManager::dec_total_from_size_metrics() {
2148   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2149   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2150   // Also deduct the overhead per Metachunk
2151   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2152 }
2153 
2154 void SpaceManager::initialize() {
2155   Metadebug::init_allocation_fail_alot_count();
2156   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2157     _chunks_in_use[i] = NULL;
2158   }
2159   _current_chunk = NULL;
2160   if (TraceMetadataChunkAllocation && Verbose) {
2161     gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
2162   }
2163 }
2164 
2165 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
2166   if (chunks == NULL) {
2167     return;
2168   }
2169   ChunkList* list = free_chunks(index);
2170   assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
2171   assert_lock_strong(SpaceManager::expand_lock());
2172   Metachunk* cur = chunks;
2173 
2174   // This returns chunks one at a time.  If a new
2175   // class List can be created that is a base class
2176   // of FreeList then something like FreeList::prepend()
2177   // can be used in place of this loop
2178   while (cur != NULL) {
2179     assert(cur->container() != NULL, "Container should have been set");
2180     cur->container()->dec_container_count();
2181     // Capture the next link before it is changed
2182     // by the call to return_chunk_at_head();
2183     Metachunk* next = cur->next();
2184     DEBUG_ONLY(cur->set_is_marked_free(true);)
2185     list->return_chunk_at_head(cur);
2186     cur = next;
2187   }
2188 }
2189 
2190 SpaceManager::~SpaceManager() {
2191   // This call this->_lock which can't be done while holding expand_lock()
2192   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2193     err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2194             " allocated_chunks_words() " SIZE_FORMAT,
2195             sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
2196 
2197   MutexLockerEx fcl(SpaceManager::expand_lock(),
2198                     Mutex::_no_safepoint_check_flag);
2199 
2200   chunk_manager()->slow_locked_verify();
2201 
2202   dec_total_from_size_metrics();
2203 
2204   if (TraceMetadataChunkAllocation && Verbose) {
2205     gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
2206     locked_print_chunks_in_use_on(gclog_or_tty);
2207   }
2208 
2209   // Do not mangle freed Metachunks.  The chunk size inside Metachunks
2210   // is during the freeing of a VirtualSpaceNodes.
2211 
2212   // Have to update before the chunks_in_use lists are emptied
2213   // below.
2214   chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2215                                          sum_count_in_chunks_in_use());
2216 
2217   // Add all the chunks in use by this space manager
2218   // to the global list of free chunks.
2219 
2220   // Follow each list of chunks-in-use and add them to the
2221   // free lists.  Each list is NULL terminated.
2222 
2223   for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2224     if (TraceMetadataChunkAllocation && Verbose) {
2225       gclog_or_tty->print_cr("returned %d %s chunks to freelist",
2226                              sum_count_in_chunks_in_use(i),
2227                              chunk_size_name(i));
2228     }
2229     Metachunk* chunks = chunks_in_use(i);
2230     chunk_manager()->return_chunks(i, chunks);
2231     set_chunks_in_use(i, NULL);
2232     if (TraceMetadataChunkAllocation && Verbose) {
2233       gclog_or_tty->print_cr("updated freelist count %d %s",
2234                              chunk_manager()->free_chunks(i)->count(),
2235                              chunk_size_name(i));
2236     }
2237     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2238   }
2239 
2240   // The medium chunk case may be optimized by passing the head and
2241   // tail of the medium chunk list to add_at_head().  The tail is often
2242   // the current chunk but there are probably exceptions.
2243 
2244   // Humongous chunks
2245   if (TraceMetadataChunkAllocation && Verbose) {
2246     gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
2247                             sum_count_in_chunks_in_use(HumongousIndex),
2248                             chunk_size_name(HumongousIndex));
2249     gclog_or_tty->print("Humongous chunk dictionary: ");
2250   }
2251   // Humongous chunks are never the current chunk.
2252   Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2253 
2254   while (humongous_chunks != NULL) {
2255 #ifdef ASSERT
2256     humongous_chunks->set_is_marked_free(true);
2257 #endif
2258     if (TraceMetadataChunkAllocation && Verbose) {
2259       gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2260                           humongous_chunks,
2261                           humongous_chunks->word_size());
2262     }
2263     assert(humongous_chunks->word_size() == (size_t)
2264            align_size_up(humongous_chunks->word_size(),
2265                              HumongousChunkGranularity),
2266            err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2267                    " granularity %d",
2268                    humongous_chunks->word_size(), HumongousChunkGranularity));
2269     Metachunk* next_humongous_chunks = humongous_chunks->next();
2270     humongous_chunks->container()->dec_container_count();
2271     chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2272     humongous_chunks = next_humongous_chunks;
2273   }
2274   if (TraceMetadataChunkAllocation && Verbose) {
2275     gclog_or_tty->print_cr("");
2276     gclog_or_tty->print_cr("updated dictionary count %d %s",
2277                      chunk_manager()->humongous_dictionary()->total_count(),
2278                      chunk_size_name(HumongousIndex));
2279   }
2280   chunk_manager()->slow_locked_verify();
2281 }
2282 
2283 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2284   switch (index) {
2285     case SpecializedIndex:
2286       return "Specialized";
2287     case SmallIndex:
2288       return "Small";
2289     case MediumIndex:
2290       return "Medium";
2291     case HumongousIndex:
2292       return "Humongous";
2293     default:
2294       return NULL;
2295   }
2296 }
2297 
2298 ChunkIndex ChunkManager::list_index(size_t size) {
2299   switch (size) {
2300     case SpecializedChunk:
2301       assert(SpecializedChunk == ClassSpecializedChunk,
2302              "Need branch for ClassSpecializedChunk");
2303       return SpecializedIndex;
2304     case SmallChunk:
2305     case ClassSmallChunk:
2306       return SmallIndex;
2307     case MediumChunk:
2308     case ClassMediumChunk:
2309       return MediumIndex;
2310     default:
2311       assert(size > MediumChunk || size > ClassMediumChunk,
2312              "Not a humongous chunk");
2313       return HumongousIndex;
2314   }
2315 }
2316 
2317 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2318   assert_lock_strong(_lock);
2319   size_t raw_word_size = get_raw_word_size(word_size);
2320   size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
2321   assert(raw_word_size >= min_size,
2322          err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
2323   block_freelists()->return_block(p, raw_word_size);
2324 }
2325 
2326 // Adds a chunk to the list of chunks in use.
2327 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2328 
2329   assert(new_chunk != NULL, "Should not be NULL");
2330   assert(new_chunk->next() == NULL, "Should not be on a list");
2331 
2332   new_chunk->reset_empty();
2333 
2334   // Find the correct list and and set the current
2335   // chunk for that list.
2336   ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2337 
2338   if (index != HumongousIndex) {
2339     retire_current_chunk();
2340     set_current_chunk(new_chunk);
2341     new_chunk->set_next(chunks_in_use(index));
2342     set_chunks_in_use(index, new_chunk);
2343   } else {
2344     // For null class loader data and DumpSharedSpaces, the first chunk isn't
2345     // small, so small will be null.  Link this first chunk as the current
2346     // chunk.
2347     if (make_current) {
2348       // Set as the current chunk but otherwise treat as a humongous chunk.
2349       set_current_chunk(new_chunk);
2350     }
2351     // Link at head.  The _current_chunk only points to a humongous chunk for
2352     // the null class loader metaspace (class and data virtual space managers)
2353     // any humongous chunks so will not point to the tail
2354     // of the humongous chunks list.
2355     new_chunk->set_next(chunks_in_use(HumongousIndex));
2356     set_chunks_in_use(HumongousIndex, new_chunk);
2357 
2358     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2359   }
2360 
2361   // Add to the running sum of capacity
2362   inc_size_metrics(new_chunk->word_size());
2363 
2364   assert(new_chunk->is_empty(), "Not ready for reuse");
2365   if (TraceMetadataChunkAllocation && Verbose) {
2366     gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
2367                         sum_count_in_chunks_in_use());
2368     new_chunk->print_on(gclog_or_tty);
2369     chunk_manager()->locked_print_free_chunks(gclog_or_tty);
2370   }
2371 }
2372 
2373 void SpaceManager::retire_current_chunk() {
2374   if (current_chunk() != NULL) {
2375     size_t remaining_words = current_chunk()->free_word_size();
2376     if (remaining_words >= TreeChunk<Metablock, FreeList>::min_size()) {
2377       block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2378       inc_used_metrics(remaining_words);
2379     }
2380   }
2381 }
2382 
2383 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2384                                        size_t grow_chunks_by_words) {
2385   // Get a chunk from the chunk freelist
2386   Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2387 
2388   if (next == NULL) {
2389     next = vs_list()->get_new_chunk(word_size,
2390                                     grow_chunks_by_words,
2391                                     medium_chunk_bunch());
2392   }
2393 
2394   if (TraceMetadataHumongousAllocation && next != NULL &&
2395       SpaceManager::is_humongous(next->word_size())) {
2396     gclog_or_tty->print_cr("  new humongous chunk word size "
2397                            PTR_FORMAT, next->word_size());
2398   }
2399 
2400   return next;
2401 }
2402 
2403 MetaWord* SpaceManager::allocate(size_t word_size) {
2404   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2405 
2406   size_t raw_word_size = get_raw_word_size(word_size);
2407   BlockFreelist* fl =  block_freelists();
2408   MetaWord* p = NULL;
2409   // Allocation from the dictionary is expensive in the sense that
2410   // the dictionary has to be searched for a size.  Don't allocate
2411   // from the dictionary until it starts to get fat.  Is this
2412   // a reasonable policy?  Maybe an skinny dictionary is fast enough
2413   // for allocations.  Do some profiling.  JJJ
2414   if (fl->total_size() > allocation_from_dictionary_limit) {
2415     p = fl->get_block(raw_word_size);
2416   }
2417   if (p == NULL) {
2418     p = allocate_work(raw_word_size);
2419   }
2420   Metadebug::deallocate_block_a_lot(this, raw_word_size);
2421 
2422   return p;
2423 }
2424 
2425 // Returns the address of spaced allocated for "word_size".
2426 // This methods does not know about blocks (Metablocks)
2427 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2428   assert_lock_strong(_lock);
2429 #ifdef ASSERT
2430   if (Metadebug::test_metadata_failure()) {
2431     return NULL;
2432   }
2433 #endif
2434   // Is there space in the current chunk?
2435   MetaWord* result = NULL;
2436 
2437   // For DumpSharedSpaces, only allocate out of the current chunk which is
2438   // never null because we gave it the size we wanted.   Caller reports out
2439   // of memory if this returns null.
2440   if (DumpSharedSpaces) {
2441     assert(current_chunk() != NULL, "should never happen");
2442     inc_used_metrics(word_size);
2443     return current_chunk()->allocate(word_size); // caller handles null result
2444   }
2445 
2446   if (current_chunk() != NULL) {
2447     result = current_chunk()->allocate(word_size);
2448   }
2449 
2450   if (result == NULL) {
2451     result = grow_and_allocate(word_size);
2452   }
2453 
2454   if (result != NULL) {
2455     inc_used_metrics(word_size);
2456     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2457            "Head of the list is being allocated");
2458   }
2459 
2460   return result;
2461 }
2462 
2463 void SpaceManager::verify() {
2464   // If there are blocks in the dictionary, then
2465   // verfication of chunks does not work since
2466   // being in the dictionary alters a chunk.
2467   if (block_freelists()->total_size() == 0) {
2468     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2469       Metachunk* curr = chunks_in_use(i);
2470       while (curr != NULL) {
2471         curr->verify();
2472         verify_chunk_size(curr);
2473         curr = curr->next();
2474       }
2475     }
2476   }
2477 }
2478 
2479 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2480   assert(is_humongous(chunk->word_size()) ||
2481          chunk->word_size() == medium_chunk_size() ||
2482          chunk->word_size() == small_chunk_size() ||
2483          chunk->word_size() == specialized_chunk_size(),
2484          "Chunk size is wrong");
2485   return;
2486 }
2487 
2488 #ifdef ASSERT
2489 void SpaceManager::verify_allocated_blocks_words() {
2490   // Verification is only guaranteed at a safepoint.
2491   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2492     "Verification can fail if the applications is running");
2493   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2494     err_msg("allocation total is not consistent " SIZE_FORMAT
2495             " vs " SIZE_FORMAT,
2496             allocated_blocks_words(), sum_used_in_chunks_in_use()));
2497 }
2498 
2499 #endif
2500 
2501 void SpaceManager::dump(outputStream* const out) const {
2502   size_t curr_total = 0;
2503   size_t waste = 0;
2504   uint i = 0;
2505   size_t used = 0;
2506   size_t capacity = 0;
2507 
2508   // Add up statistics for all chunks in this SpaceManager.
2509   for (ChunkIndex index = ZeroIndex;
2510        index < NumberOfInUseLists;
2511        index = next_chunk_index(index)) {
2512     for (Metachunk* curr = chunks_in_use(index);
2513          curr != NULL;
2514          curr = curr->next()) {
2515       out->print("%d) ", i++);
2516       curr->print_on(out);
2517       curr_total += curr->word_size();
2518       used += curr->used_word_size();
2519       capacity += curr->word_size();
2520       waste += curr->free_word_size() + curr->overhead();;
2521     }
2522   }
2523 
2524   if (TraceMetadataChunkAllocation && Verbose) {
2525     block_freelists()->print_on(out);
2526   }
2527 
2528   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2529   // Free space isn't wasted.
2530   waste -= free;
2531 
2532   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2533                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2534                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2535 }
2536 
2537 #ifndef PRODUCT
2538 void SpaceManager::mangle_freed_chunks() {
2539   for (ChunkIndex index = ZeroIndex;
2540        index < NumberOfInUseLists;
2541        index = next_chunk_index(index)) {
2542     for (Metachunk* curr = chunks_in_use(index);
2543          curr != NULL;
2544          curr = curr->next()) {
2545       curr->mangle();
2546     }
2547   }
2548 }
2549 #endif // PRODUCT
2550 
2551 // MetaspaceAux
2552 
2553 
2554 size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
2555 size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
2556 
2557 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2558   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2559   return list == NULL ? 0 : list->free_bytes();
2560 }
2561 
2562 size_t MetaspaceAux::free_bytes() {
2563   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2564 }
2565 
2566 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2567   assert_lock_strong(SpaceManager::expand_lock());
2568   assert(words <= allocated_capacity_words(mdtype),
2569     err_msg("About to decrement below 0: words " SIZE_FORMAT
2570             " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
2571             words, mdtype, allocated_capacity_words(mdtype)));
2572   _allocated_capacity_words[mdtype] -= words;
2573 }
2574 
2575 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2576   assert_lock_strong(SpaceManager::expand_lock());
2577   // Needs to be atomic
2578   _allocated_capacity_words[mdtype] += words;
2579 }
2580 
2581 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2582   assert(words <= allocated_used_words(mdtype),
2583     err_msg("About to decrement below 0: words " SIZE_FORMAT
2584             " is greater than _allocated_used_words[%u] " SIZE_FORMAT,
2585             words, mdtype, allocated_used_words(mdtype)));
2586   // For CMS deallocation of the Metaspaces occurs during the
2587   // sweep which is a concurrent phase.  Protection by the expand_lock()
2588   // is not enough since allocation is on a per Metaspace basis
2589   // and protected by the Metaspace lock.
2590   jlong minus_words = (jlong) - (jlong) words;
2591   Atomic::add_ptr(minus_words, &_allocated_used_words[mdtype]);
2592 }
2593 
2594 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2595   // _allocated_used_words tracks allocations for
2596   // each piece of metadata.  Those allocations are
2597   // generally done concurrently by different application
2598   // threads so must be done atomically.
2599   Atomic::add_ptr(words, &_allocated_used_words[mdtype]);
2600 }
2601 
2602 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2603   size_t used = 0;
2604   ClassLoaderDataGraphMetaspaceIterator iter;
2605   while (iter.repeat()) {
2606     Metaspace* msp = iter.get_next();
2607     // Sum allocated_blocks_words for each metaspace
2608     if (msp != NULL) {
2609       used += msp->used_words_slow(mdtype);
2610     }
2611   }
2612   return used * BytesPerWord;
2613 }
2614 
2615 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2616   size_t free = 0;
2617   ClassLoaderDataGraphMetaspaceIterator iter;
2618   while (iter.repeat()) {
2619     Metaspace* msp = iter.get_next();
2620     if (msp != NULL) {
2621       free += msp->free_words_slow(mdtype);
2622     }
2623   }
2624   return free * BytesPerWord;
2625 }
2626 
2627 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2628   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2629     return 0;
2630   }
2631   // Don't count the space in the freelists.  That space will be
2632   // added to the capacity calculation as needed.
2633   size_t capacity = 0;
2634   ClassLoaderDataGraphMetaspaceIterator iter;
2635   while (iter.repeat()) {
2636     Metaspace* msp = iter.get_next();
2637     if (msp != NULL) {
2638       capacity += msp->capacity_words_slow(mdtype);
2639     }
2640   }
2641   return capacity * BytesPerWord;
2642 }
2643 
2644 size_t MetaspaceAux::capacity_bytes_slow() {
2645 #ifdef PRODUCT
2646   // Use allocated_capacity_bytes() in PRODUCT instead of this function.
2647   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2648 #endif
2649   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2650   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2651   assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
2652       err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
2653         " class_capacity + non_class_capacity " SIZE_FORMAT
2654         " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2655         allocated_capacity_bytes(), class_capacity + non_class_capacity,
2656         class_capacity, non_class_capacity));
2657 
2658   return class_capacity + non_class_capacity;
2659 }
2660 
2661 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2662   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2663   return list == NULL ? 0 : list->reserved_bytes();
2664 }
2665 
2666 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2667   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2668   return list == NULL ? 0 : list->committed_bytes();
2669 }
2670 
2671 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2672 
2673 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2674   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2675   if (chunk_manager == NULL) {
2676     return 0;
2677   }
2678   chunk_manager->slow_verify();
2679   return chunk_manager->free_chunks_total_words();
2680 }
2681 
2682 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2683   return free_chunks_total_words(mdtype) * BytesPerWord;
2684 }
2685 
2686 size_t MetaspaceAux::free_chunks_total_words() {
2687   return free_chunks_total_words(Metaspace::ClassType) +
2688          free_chunks_total_words(Metaspace::NonClassType);
2689 }
2690 
2691 size_t MetaspaceAux::free_chunks_total_bytes() {
2692   return free_chunks_total_words() * BytesPerWord;
2693 }
2694 
2695 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2696   gclog_or_tty->print(", [Metaspace:");
2697   if (PrintGCDetails && Verbose) {
2698     gclog_or_tty->print(" "  SIZE_FORMAT
2699                         "->" SIZE_FORMAT
2700                         "("  SIZE_FORMAT ")",
2701                         prev_metadata_used,
2702                         allocated_used_bytes(),
2703                         reserved_bytes());
2704   } else {
2705     gclog_or_tty->print(" "  SIZE_FORMAT "K"
2706                         "->" SIZE_FORMAT "K"
2707                         "("  SIZE_FORMAT "K)",
2708                         prev_metadata_used/K,
2709                         allocated_used_bytes()/K,
2710                         reserved_bytes()/K);
2711   }
2712 
2713   gclog_or_tty->print("]");
2714 }
2715 
2716 // This is printed when PrintGCDetails
2717 void MetaspaceAux::print_on(outputStream* out) {
2718   Metaspace::MetadataType nct = Metaspace::NonClassType;
2719 
2720   out->print_cr(" Metaspace       "
2721                 "used "      SIZE_FORMAT "K, "
2722                 "capacity "  SIZE_FORMAT "K, "
2723                 "committed " SIZE_FORMAT "K, "
2724                 "reserved "  SIZE_FORMAT "K",
2725                 allocated_used_bytes()/K,
2726                 allocated_capacity_bytes()/K,
2727                 committed_bytes()/K,
2728                 reserved_bytes()/K);
2729 
2730   if (Metaspace::using_class_space()) {
2731     Metaspace::MetadataType ct = Metaspace::ClassType;
2732     out->print_cr("  class space    "
2733                   "used "      SIZE_FORMAT "K, "
2734                   "capacity "  SIZE_FORMAT "K, "
2735                   "committed " SIZE_FORMAT "K, "
2736                   "reserved "  SIZE_FORMAT "K",
2737                   allocated_used_bytes(ct)/K,
2738                   allocated_capacity_bytes(ct)/K,
2739                   committed_bytes(ct)/K,
2740                   reserved_bytes(ct)/K);
2741   }
2742 }
2743 
2744 // Print information for class space and data space separately.
2745 // This is almost the same as above.
2746 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2747   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2748   size_t capacity_bytes = capacity_bytes_slow(mdtype);
2749   size_t used_bytes = used_bytes_slow(mdtype);
2750   size_t free_bytes = free_bytes_slow(mdtype);
2751   size_t used_and_free = used_bytes + free_bytes +
2752                            free_chunks_capacity_bytes;
2753   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
2754              "K + unused in chunks " SIZE_FORMAT "K  + "
2755              " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2756              "K  capacity in allocated chunks " SIZE_FORMAT "K",
2757              used_bytes / K,
2758              free_bytes / K,
2759              free_chunks_capacity_bytes / K,
2760              used_and_free / K,
2761              capacity_bytes / K);
2762   // Accounting can only be correct if we got the values during a safepoint
2763   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2764 }
2765 
2766 // Print total fragmentation for class metaspaces
2767 void MetaspaceAux::print_class_waste(outputStream* out) {
2768   assert(Metaspace::using_class_space(), "class metaspace not used");
2769   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2770   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2771   ClassLoaderDataGraphMetaspaceIterator iter;
2772   while (iter.repeat()) {
2773     Metaspace* msp = iter.get_next();
2774     if (msp != NULL) {
2775       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2776       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2777       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2778       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2779       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2780       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2781       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2782     }
2783   }
2784   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2785                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2786                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2787                 "large count " SIZE_FORMAT,
2788                 cls_specialized_count, cls_specialized_waste,
2789                 cls_small_count, cls_small_waste,
2790                 cls_medium_count, cls_medium_waste, cls_humongous_count);
2791 }
2792 
2793 // Print total fragmentation for data and class metaspaces separately
2794 void MetaspaceAux::print_waste(outputStream* out) {
2795   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2796   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2797 
2798   ClassLoaderDataGraphMetaspaceIterator iter;
2799   while (iter.repeat()) {
2800     Metaspace* msp = iter.get_next();
2801     if (msp != NULL) {
2802       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2803       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2804       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2805       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2806       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2807       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2808       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2809     }
2810   }
2811   out->print_cr("Total fragmentation waste (words) doesn't count free space");
2812   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2813                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2814                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2815                         "large count " SIZE_FORMAT,
2816              specialized_count, specialized_waste, small_count,
2817              small_waste, medium_count, medium_waste, humongous_count);
2818   if (Metaspace::using_class_space()) {
2819     print_class_waste(out);
2820   }
2821 }
2822 
2823 // Dump global metaspace things from the end of ClassLoaderDataGraph
2824 void MetaspaceAux::dump(outputStream* out) {
2825   out->print_cr("All Metaspace:");
2826   out->print("data space: "); print_on(out, Metaspace::NonClassType);
2827   out->print("class space: "); print_on(out, Metaspace::ClassType);
2828   print_waste(out);
2829 }
2830 
2831 void MetaspaceAux::verify_free_chunks() {
2832   Metaspace::chunk_manager_metadata()->verify();
2833   if (Metaspace::using_class_space()) {
2834     Metaspace::chunk_manager_class()->verify();
2835   }
2836 }
2837 
2838 void MetaspaceAux::verify_capacity() {
2839 #ifdef ASSERT
2840   size_t running_sum_capacity_bytes = allocated_capacity_bytes();
2841   // For purposes of the running sum of capacity, verify against capacity
2842   size_t capacity_in_use_bytes = capacity_bytes_slow();
2843   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2844     err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
2845             " capacity_bytes_slow()" SIZE_FORMAT,
2846             running_sum_capacity_bytes, capacity_in_use_bytes));
2847   for (Metaspace::MetadataType i = Metaspace::ClassType;
2848        i < Metaspace:: MetadataTypeCount;
2849        i = (Metaspace::MetadataType)(i + 1)) {
2850     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2851     assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
2852       err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
2853               " capacity_bytes_slow(%u)" SIZE_FORMAT,
2854               i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
2855   }
2856 #endif
2857 }
2858 
2859 void MetaspaceAux::verify_used() {
2860 #ifdef ASSERT
2861   size_t running_sum_used_bytes = allocated_used_bytes();
2862   // For purposes of the running sum of used, verify against used
2863   size_t used_in_use_bytes = used_bytes_slow();
2864   assert(allocated_used_bytes() == used_in_use_bytes,
2865     err_msg("allocated_used_bytes() " SIZE_FORMAT
2866             " used_bytes_slow()" SIZE_FORMAT,
2867             allocated_used_bytes(), used_in_use_bytes));
2868   for (Metaspace::MetadataType i = Metaspace::ClassType;
2869        i < Metaspace:: MetadataTypeCount;
2870        i = (Metaspace::MetadataType)(i + 1)) {
2871     size_t used_in_use_bytes = used_bytes_slow(i);
2872     assert(allocated_used_bytes(i) == used_in_use_bytes,
2873       err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
2874               " used_bytes_slow(%u)" SIZE_FORMAT,
2875               i, allocated_used_bytes(i), i, used_in_use_bytes));
2876   }
2877 #endif
2878 }
2879 
2880 void MetaspaceAux::verify_metrics() {
2881   verify_capacity();
2882   verify_used();
2883 }
2884 
2885 
2886 // Metaspace methods
2887 
2888 size_t Metaspace::_first_chunk_word_size = 0;
2889 size_t Metaspace::_first_class_chunk_word_size = 0;
2890 
2891 size_t Metaspace::_commit_alignment = 0;
2892 size_t Metaspace::_reserve_alignment = 0;
2893 
2894 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2895   initialize(lock, type);
2896 }
2897 
2898 Metaspace::~Metaspace() {
2899   delete _vsm;
2900   if (using_class_space()) {
2901     delete _class_vsm;
2902   }
2903 }
2904 
2905 VirtualSpaceList* Metaspace::_space_list = NULL;
2906 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2907 
2908 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
2909 ChunkManager* Metaspace::_chunk_manager_class = NULL;
2910 
2911 #define VIRTUALSPACEMULTIPLIER 2
2912 
2913 #ifdef _LP64
2914 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
2915   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
2916   // narrow_klass_base is the lower of the metaspace base and the cds base
2917   // (if cds is enabled).  The narrow_klass_shift depends on the distance
2918   // between the lower base and higher address.
2919   address lower_base;
2920   address higher_address;
2921   if (UseSharedSpaces) {
2922     higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2923                           (address)(metaspace_base + class_metaspace_size()));
2924     lower_base = MIN2(metaspace_base, cds_base);
2925   } else {
2926     higher_address = metaspace_base + class_metaspace_size();
2927     lower_base = metaspace_base;
2928   }
2929   Universe::set_narrow_klass_base(lower_base);
2930   if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) {
2931     Universe::set_narrow_klass_shift(0);
2932   } else {
2933     assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
2934     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
2935   }
2936 }
2937 
2938 // Return TRUE if the specified metaspace_base and cds_base are close enough
2939 // to work with compressed klass pointers.
2940 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
2941   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
2942   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2943   address lower_base = MIN2((address)metaspace_base, cds_base);
2944   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2945                                 (address)(metaspace_base + class_metaspace_size()));
2946   return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint);
2947 }
2948 
2949 // Try to allocate the metaspace at the requested addr.
2950 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
2951   assert(using_class_space(), "called improperly");
2952   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2953   assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
2954          "Metaspace size is too big");
2955   assert_is_ptr_aligned(requested_addr,          _reserve_alignment);
2956   assert_is_ptr_aligned(cds_base,                _reserve_alignment);
2957   assert_is_size_aligned(class_metaspace_size(), _reserve_alignment);
2958 
2959   // Don't use large pages for the class space.
2960   bool large_pages = false;
2961 
2962   ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
2963                                              _reserve_alignment,
2964                                              large_pages,
2965                                              requested_addr, 0);
2966   if (!metaspace_rs.is_reserved()) {
2967     if (UseSharedSpaces) {
2968       size_t increment = align_size_up(1*G, _reserve_alignment);
2969 
2970       // Keep trying to allocate the metaspace, increasing the requested_addr
2971       // by 1GB each time, until we reach an address that will no longer allow
2972       // use of CDS with compressed klass pointers.
2973       char *addr = requested_addr;
2974       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
2975              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
2976         addr = addr + increment;
2977         metaspace_rs = ReservedSpace(class_metaspace_size(),
2978                                      _reserve_alignment, large_pages, addr, 0);
2979       }
2980     }
2981 
2982     // If no successful allocation then try to allocate the space anywhere.  If
2983     // that fails then OOM doom.  At this point we cannot try allocating the
2984     // metaspace as if UseCompressedClassPointers is off because too much
2985     // initialization has happened that depends on UseCompressedClassPointers.
2986     // So, UseCompressedClassPointers cannot be turned off at this point.
2987     if (!metaspace_rs.is_reserved()) {
2988       metaspace_rs = ReservedSpace(class_metaspace_size(),
2989                                    _reserve_alignment, large_pages);
2990       if (!metaspace_rs.is_reserved()) {
2991         vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
2992                                               class_metaspace_size()));
2993       }
2994     }
2995   }
2996 
2997   // If we got here then the metaspace got allocated.
2998   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
2999 
3000   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3001   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3002     FileMapInfo::stop_sharing_and_unmap(
3003         "Could not allocate metaspace at a compatible address");
3004   }
3005 
3006   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3007                                   UseSharedSpaces ? (address)cds_base : 0);
3008 
3009   initialize_class_space(metaspace_rs);
3010 
3011   if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
3012     gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
3013                             Universe::narrow_klass_base(), Universe::narrow_klass_shift());
3014     gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
3015                            class_metaspace_size(), metaspace_rs.base(), requested_addr);
3016   }
3017 }
3018 
3019 // For UseCompressedClassPointers the class space is reserved above the top of
3020 // the Java heap.  The argument passed in is at the base of the compressed space.
3021 void Metaspace::initialize_class_space(ReservedSpace rs) {
3022   // The reserved space size may be bigger because of alignment, esp with UseLargePages
3023   assert(rs.size() >= CompressedClassSpaceSize,
3024          err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
3025   assert(using_class_space(), "Must be using class space");
3026   _class_space_list = new VirtualSpaceList(rs);
3027   _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3028 
3029   if (!_class_space_list->initialization_succeeded()) {
3030     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3031   }
3032 }
3033 
3034 #endif
3035 
3036 // Align down. If the aligning result in 0, return 'alignment'.
3037 static size_t restricted_align_down(size_t size, size_t alignment) {
3038   return MAX2(alignment, align_size_down_(size, alignment));
3039 }
3040 
3041 void Metaspace::ergo_initialize() {
3042   if (DumpSharedSpaces) {
3043     // Using large pages when dumping the shared archive is currently not implemented.
3044     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3045   }
3046 
3047   size_t page_size = os::vm_page_size();
3048   if (UseLargePages && UseLargePagesInMetaspace) {
3049     page_size = os::large_page_size();
3050   }
3051 
3052   _commit_alignment  = page_size;
3053   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
3054 
3055   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
3056   // override if MaxMetaspaceSize was set on the command line or not.
3057   // This information is needed later to conform to the specification of the
3058   // java.lang.management.MemoryUsage API.
3059   //
3060   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3061   // globals.hpp to the aligned value, but this is not possible, since the
3062   // alignment depends on other flags being parsed.
3063   MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, _reserve_alignment);
3064 
3065   if (MetaspaceSize > MaxMetaspaceSize) {
3066     MetaspaceSize = MaxMetaspaceSize;
3067   }
3068 
3069   MetaspaceSize = restricted_align_down(MetaspaceSize, _commit_alignment);
3070 
3071   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3072 
3073   if (MetaspaceSize < 256*K) {
3074     vm_exit_during_initialization("Too small initial Metaspace size");
3075   }
3076 
3077   MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, _commit_alignment);
3078   MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _commit_alignment);
3079 
3080   CompressedClassSpaceSize = restricted_align_down(CompressedClassSpaceSize, _reserve_alignment);
3081   set_class_metaspace_size(CompressedClassSpaceSize);
3082 }
3083 
3084 void Metaspace::global_initialize() {
3085   // Initialize the alignment for shared spaces.
3086   int max_alignment = os::vm_page_size();
3087   size_t cds_total = 0;
3088 
3089   MetaspaceShared::set_max_alignment(max_alignment);
3090 
3091   if (DumpSharedSpaces) {
3092     SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
3093     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
3094     SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
3095     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
3096 
3097     // Initialize with the sum of the shared space sizes.  The read-only
3098     // and read write metaspace chunks will be allocated out of this and the
3099     // remainder is the misc code and data chunks.
3100     cds_total = FileMapInfo::shared_spaces_size();
3101     cds_total = align_size_up(cds_total, _reserve_alignment);
3102     _space_list = new VirtualSpaceList(cds_total/wordSize);
3103     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3104 
3105     if (!_space_list->initialization_succeeded()) {
3106       vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3107     }
3108 
3109 #ifdef _LP64
3110     if (cds_total + class_metaspace_size() > (uint64_t)max_juint) {
3111       vm_exit_during_initialization("Unable to dump shared archive.",
3112           err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3113                   SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3114                   "klass limit: " SIZE_FORMAT, cds_total, class_metaspace_size(),
3115                   cds_total + class_metaspace_size(), (size_t)max_juint));
3116     }
3117 
3118     // Set the compressed klass pointer base so that decoding of these pointers works
3119     // properly when creating the shared archive.
3120     assert(UseCompressedOops && UseCompressedClassPointers,
3121       "UseCompressedOops and UseCompressedClassPointers must be set");
3122     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3123     if (TraceMetavirtualspaceAllocation && Verbose) {
3124       gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
3125                              _space_list->current_virtual_space()->bottom());
3126     }
3127 
3128     Universe::set_narrow_klass_shift(0);
3129 #endif
3130 
3131   } else {
3132     // If using shared space, open the file that contains the shared space
3133     // and map in the memory before initializing the rest of metaspace (so
3134     // the addresses don't conflict)
3135     address cds_address = NULL;
3136     if (UseSharedSpaces) {
3137       FileMapInfo* mapinfo = new FileMapInfo();
3138       memset(mapinfo, 0, sizeof(FileMapInfo));
3139 
3140       // Open the shared archive file, read and validate the header. If
3141       // initialization fails, shared spaces [UseSharedSpaces] are
3142       // disabled and the file is closed.
3143       // Map in spaces now also
3144       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3145         FileMapInfo::set_current_info(mapinfo);
3146         cds_total = FileMapInfo::shared_spaces_size();
3147         cds_address = (address)mapinfo->region_base(0);
3148       } else {
3149         assert(!mapinfo->is_open() && !UseSharedSpaces,
3150                "archive file not closed or shared spaces not disabled.");
3151       }
3152     }
3153 
3154 #ifdef _LP64
3155     // If UseCompressedClassPointers is set then allocate the metaspace area
3156     // above the heap and above the CDS area (if it exists).
3157     if (using_class_space()) {
3158       if (UseSharedSpaces) {
3159         char* cds_end = (char*)(cds_address + cds_total);
3160         cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3161         allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3162       } else {
3163         allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
3164       }
3165     }
3166 #endif
3167 
3168     // Initialize these before initializing the VirtualSpaceList
3169     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3170     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3171     // Make the first class chunk bigger than a medium chunk so it's not put
3172     // on the medium chunk list.   The next chunk will be small and progress
3173     // from there.  This size calculated by -version.
3174     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3175                                        (CompressedClassSpaceSize/BytesPerWord)*2);
3176     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3177     // Arbitrarily set the initial virtual space to a multiple
3178     // of the boot class loader size.
3179     size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3180     word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3181 
3182     // Initialize the list of virtual spaces.
3183     _space_list = new VirtualSpaceList(word_size);
3184     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3185 
3186     if (!_space_list->initialization_succeeded()) {
3187       vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3188     }
3189   }
3190 
3191   MetaspaceGC::initialize();
3192 }
3193 
3194 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
3195                                                size_t chunk_word_size,
3196                                                size_t chunk_bunch) {
3197   // Get a chunk from the chunk freelist
3198   Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3199   if (chunk != NULL) {
3200     return chunk;
3201   }
3202 
3203   return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
3204 }
3205 
3206 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3207 
3208   assert(space_list() != NULL,
3209     "Metadata VirtualSpaceList has not been initialized");
3210   assert(chunk_manager_metadata() != NULL,
3211     "Metadata ChunkManager has not been initialized");
3212 
3213   _vsm = new SpaceManager(NonClassType, lock);
3214   if (_vsm == NULL) {
3215     return;
3216   }
3217   size_t word_size;
3218   size_t class_word_size;
3219   vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
3220 
3221   if (using_class_space()) {
3222   assert(class_space_list() != NULL,
3223     "Class VirtualSpaceList has not been initialized");
3224   assert(chunk_manager_class() != NULL,
3225     "Class ChunkManager has not been initialized");
3226 
3227     // Allocate SpaceManager for classes.
3228     _class_vsm = new SpaceManager(ClassType, lock);
3229     if (_class_vsm == NULL) {
3230       return;
3231     }
3232   }
3233 
3234   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3235 
3236   // Allocate chunk for metadata objects
3237   Metachunk* new_chunk = get_initialization_chunk(NonClassType,
3238                                                   word_size,
3239                                                   vsm()->medium_chunk_bunch());
3240   assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
3241   if (new_chunk != NULL) {
3242     // Add to this manager's list of chunks in use and current_chunk().
3243     vsm()->add_chunk(new_chunk, true);
3244   }
3245 
3246   // Allocate chunk for class metadata objects
3247   if (using_class_space()) {
3248     Metachunk* class_chunk = get_initialization_chunk(ClassType,
3249                                                       class_word_size,
3250                                                       class_vsm()->medium_chunk_bunch());
3251     if (class_chunk != NULL) {
3252       class_vsm()->add_chunk(class_chunk, true);
3253     }
3254   }
3255 
3256   _alloc_record_head = NULL;
3257   _alloc_record_tail = NULL;
3258 }
3259 
3260 size_t Metaspace::align_word_size_up(size_t word_size) {
3261   size_t byte_size = word_size * wordSize;
3262   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3263 }
3264 
3265 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3266   // DumpSharedSpaces doesn't use class metadata area (yet)
3267   // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3268   if (is_class_space_allocation(mdtype)) {
3269     return  class_vsm()->allocate(word_size);
3270   } else {
3271     return  vsm()->allocate(word_size);
3272   }
3273 }
3274 
3275 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3276   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3277   assert(delta_bytes > 0, "Must be");
3278 
3279   size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes);
3280   size_t before_inc = after_inc - delta_bytes;
3281 
3282   if (PrintGCDetails && Verbose) {
3283     gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
3284         " to " SIZE_FORMAT, before_inc, after_inc);
3285   }
3286 
3287   return allocate(word_size, mdtype);
3288 }
3289 
3290 // Space allocated in the Metaspace.  This may
3291 // be across several metadata virtual spaces.
3292 char* Metaspace::bottom() const {
3293   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3294   return (char*)vsm()->current_chunk()->bottom();
3295 }
3296 
3297 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3298   if (mdtype == ClassType) {
3299     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3300   } else {
3301     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3302   }
3303 }
3304 
3305 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3306   if (mdtype == ClassType) {
3307     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3308   } else {
3309     return vsm()->sum_free_in_chunks_in_use();
3310   }
3311 }
3312 
3313 // Space capacity in the Metaspace.  It includes
3314 // space in the list of chunks from which allocations
3315 // have been made. Don't include space in the global freelist and
3316 // in the space available in the dictionary which
3317 // is already counted in some chunk.
3318 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3319   if (mdtype == ClassType) {
3320     return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3321   } else {
3322     return vsm()->sum_capacity_in_chunks_in_use();
3323   }
3324 }
3325 
3326 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3327   return used_words_slow(mdtype) * BytesPerWord;
3328 }
3329 
3330 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3331   return capacity_words_slow(mdtype) * BytesPerWord;
3332 }
3333 
3334 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3335   if (SafepointSynchronize::is_at_safepoint()) {
3336     assert(Thread::current()->is_VM_thread(), "should be the VM thread");
3337     // Don't take Heap_lock
3338     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3339     if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
3340       // Dark matter.  Too small for dictionary.
3341 #ifdef ASSERT
3342       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3343 #endif
3344       return;
3345     }
3346     if (is_class && using_class_space()) {
3347       class_vsm()->deallocate(ptr, word_size);
3348     } else {
3349       vsm()->deallocate(ptr, word_size);
3350     }
3351   } else {
3352     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3353 
3354     if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
3355       // Dark matter.  Too small for dictionary.
3356 #ifdef ASSERT
3357       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3358 #endif
3359       return;
3360     }
3361     if (is_class && using_class_space()) {
3362       class_vsm()->deallocate(ptr, word_size);
3363     } else {
3364       vsm()->deallocate(ptr, word_size);
3365     }
3366   }
3367 }
3368 
3369 
3370 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3371                               bool read_only, MetaspaceObj::Type type, TRAPS) {
3372   if (HAS_PENDING_EXCEPTION) {
3373     assert(false, "Should not allocate with exception pending");
3374     return NULL;  // caller does a CHECK_NULL too
3375   }
3376 
3377   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3378         "ClassLoaderData::the_null_class_loader_data() should have been used.");
3379 
3380   // Allocate in metaspaces without taking out a lock, because it deadlocks
3381   // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
3382   // to revisit this for application class data sharing.
3383   if (DumpSharedSpaces) {
3384     assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3385     Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3386     MetaWord* result = space->allocate(word_size, NonClassType);
3387     if (result == NULL) {
3388       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3389     }
3390 
3391     space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3392 
3393     // Zero initialize.
3394     Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3395 
3396     return result;
3397   }
3398 
3399   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3400 
3401   // Try to allocate metadata.
3402   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3403 
3404   if (result == NULL) {
3405     // Allocation failed.
3406     if (is_init_completed()) {
3407       // Only start a GC if the bootstrapping has completed.
3408 
3409       // Try to clean out some memory and retry.
3410       result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3411           loader_data, word_size, mdtype);
3412     }
3413   }
3414 
3415   if (result == NULL) {
3416     report_metadata_oome(loader_data, word_size, mdtype, THREAD);
3417     // Will not reach here.
3418     return NULL;
3419   }
3420 
3421   // Zero initialize.
3422   Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3423 
3424   return result;
3425 }
3426 
3427 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) {
3428   // If result is still null, we are out of memory.
3429   if (Verbose && TraceMetadataChunkAllocation) {
3430     gclog_or_tty->print_cr("Metaspace allocation failed for size "
3431         SIZE_FORMAT, word_size);
3432     if (loader_data->metaspace_or_null() != NULL) {
3433       loader_data->dump(gclog_or_tty);
3434     }
3435     MetaspaceAux::dump(gclog_or_tty);
3436   }
3437 
3438   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3439   const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" :
3440                                                                  "Metadata space";
3441   report_java_out_of_memory(space_string);
3442 
3443   if (JvmtiExport::should_post_resource_exhausted()) {
3444     JvmtiExport::post_resource_exhausted(
3445         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3446         space_string);
3447   }
3448 
3449   if (!is_init_completed()) {
3450     vm_exit_during_initialization("OutOfMemoryError", space_string);
3451   }
3452 
3453   if (is_class_space_allocation(mdtype)) {
3454     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3455   } else {
3456     THROW_OOP(Universe::out_of_memory_error_metaspace());
3457   }
3458 }
3459 
3460 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3461   assert(DumpSharedSpaces, "sanity");
3462 
3463   AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
3464   if (_alloc_record_head == NULL) {
3465     _alloc_record_head = _alloc_record_tail = rec;
3466   } else {
3467     _alloc_record_tail->_next = rec;
3468     _alloc_record_tail = rec;
3469   }
3470 }
3471 
3472 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3473   assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3474 
3475   address last_addr = (address)bottom();
3476 
3477   for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3478     address ptr = rec->_ptr;
3479     if (last_addr < ptr) {
3480       closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3481     }
3482     closure->doit(ptr, rec->_type, rec->_byte_size);
3483     last_addr = ptr + rec->_byte_size;
3484   }
3485 
3486   address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3487   if (last_addr < top) {
3488     closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3489   }
3490 }
3491 
3492 void Metaspace::purge(MetadataType mdtype) {
3493   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3494 }
3495 
3496 void Metaspace::purge() {
3497   MutexLockerEx cl(SpaceManager::expand_lock(),
3498                    Mutex::_no_safepoint_check_flag);
3499   purge(NonClassType);
3500   if (using_class_space()) {
3501     purge(ClassType);
3502   }
3503 }
3504 
3505 void Metaspace::print_on(outputStream* out) const {
3506   // Print both class virtual space counts and metaspace.
3507   if (Verbose) {
3508     vsm()->print_on(out);
3509     if (using_class_space()) {
3510       class_vsm()->print_on(out);
3511     }
3512   }
3513 }
3514 
3515 bool Metaspace::contains(const void * ptr) {
3516   if (MetaspaceShared::is_in_shared_space(ptr)) {
3517     return true;
3518   }
3519   // This is checked while unlocked.  As long as the virtualspaces are added
3520   // at the end, the pointer will be in one of them.  The virtual spaces
3521   // aren't deleted presently.  When they are, some sort of locking might
3522   // be needed.  Note, locking this can cause inversion problems with the
3523   // caller in MetaspaceObj::is_metadata() function.
3524   return space_list()->contains(ptr) ||
3525          (using_class_space() && class_space_list()->contains(ptr));
3526 }
3527 
3528 void Metaspace::verify() {
3529   vsm()->verify();
3530   if (using_class_space()) {
3531     class_vsm()->verify();
3532   }
3533 }
3534 
3535 void Metaspace::dump(outputStream* const out) const {
3536   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
3537   vsm()->dump(out);
3538   if (using_class_space()) {
3539     out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
3540     class_vsm()->dump(out);
3541   }
3542 }
3543 
3544 /////////////// Unit tests ///////////////
3545 
3546 #ifndef PRODUCT
3547 
3548 class TestMetaspaceAuxTest : AllStatic {
3549  public:
3550   static void test_reserved() {
3551     size_t reserved = MetaspaceAux::reserved_bytes();
3552 
3553     assert(reserved > 0, "assert");
3554 
3555     size_t committed  = MetaspaceAux::committed_bytes();
3556     assert(committed <= reserved, "assert");
3557 
3558     size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3559     assert(reserved_metadata > 0, "assert");
3560     assert(reserved_metadata <= reserved, "assert");
3561 
3562     if (UseCompressedClassPointers) {
3563       size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3564       assert(reserved_class > 0, "assert");
3565       assert(reserved_class < reserved, "assert");
3566     }
3567   }
3568 
3569   static void test_committed() {
3570     size_t committed = MetaspaceAux::committed_bytes();
3571 
3572     assert(committed > 0, "assert");
3573 
3574     size_t reserved  = MetaspaceAux::reserved_bytes();
3575     assert(committed <= reserved, "assert");
3576 
3577     size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3578     assert(committed_metadata > 0, "assert");
3579     assert(committed_metadata <= committed, "assert");
3580 
3581     if (UseCompressedClassPointers) {
3582       size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3583       assert(committed_class > 0, "assert");
3584       assert(committed_class < committed, "assert");
3585     }
3586   }
3587 
3588   static void test_virtual_space_list_large_chunk() {
3589     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3590     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3591     // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be
3592     // vm_allocation_granularity aligned on Windows.
3593     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3594     large_size += (os::vm_page_size()/BytesPerWord);
3595     vs_list->get_new_chunk(large_size, large_size, 0);
3596   }
3597 
3598   static void test() {
3599     test_reserved();
3600     test_committed();
3601     test_virtual_space_list_large_chunk();
3602   }
3603 };
3604 
3605 void TestMetaspaceAux_test() {
3606   TestMetaspaceAuxTest::test();
3607 }
3608 
3609 #endif