/*
 * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/filemap.hpp"
#include "memory/metablock.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"

typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
// Set this constant to true to enable slow integrity checking of
// the free chunk lists
const bool metaspace_slow_verify = false;


// Parameters for stress mode testing
const uint metadata_deallocate_a_lot_block = 10;
const uint metadata_deallocate_a_lot_chunk = 3;
const size_t allocation_from_dictionary_limit = 64 * K;
const size_t metadata_deallocate = 0xf5f5f5f5;

MetaWord* last_allocated = 0;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 1 * K,
  MediumChunk = 8 * K,
  HumongousChunkGranularity = 8
};
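
// For illustration only: with 8-byte words (a 64-bit VM) these correspond to
//   SpecializedChunk: 128 words  =  1K bytes
//   SmallChunk:       512 words  =  4K bytes
//   MediumChunk:      8K  words  = 64K bytes
//   ClassMediumChunk: 1K  words  =  8K bytes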

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bounds");
  return (ChunkIndex) (i+1);
}

// Originally _capacity_until_GC was set to MetaspaceSize here but
// the default MetaspaceSize before argument processing was being
// used which was not the desired value.  See the code
// in should_expand() to see how the initialization is handled
// now.
size_t MetaspaceGC::_capacity_until_GC = 0;
bool MetaspaceGC::_expand_after_GC = false;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

// Blocks of space for metadata are allocated out of Metachunks.
//
// Metachunks are allocated out of MetadataVirtualspaces and once
// allocated there is no explicit link between a Metachunk and
// the MetadataVirtualspace from which it was allocated.
//
// Each SpaceManager maintains a list of the chunks it is using and
// the current chunk.  The current chunk is the chunk from which
// allocations are done.  Space freed in a chunk is placed on the
// free list of blocks (BlockFreelist) and reused from there.
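//
// A simplified sketch of the allocation path implemented below (the real
// code adds locking and debug-only stress testing):
//
//   SpaceManager::allocate(word_size)
//     -> block_freelists()->get_block(word_size)   // reuse a freed block, or
//     -> current_chunk()->allocate(word_size)      // bump-allocate in the
//                                                  // current chunk, or
//     -> grow_and_allocate(word_size)              // get a new chunk, possibly
//                                                  // from a new virtual space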

// A singly-linked list of Metachunks.
class ChunkList VALUE_OBJ_CLASS_SPEC {
  // List of free chunks
  Metachunk* _head;

 public:
  // Constructor
  ChunkList() : _head(NULL) {}

  // Accessors
  Metachunk* head() { return _head; }
  void set_head(Metachunk* v) { _head = v; }

  // Link at head of the list
  void add_at_head(Metachunk* head, Metachunk* tail);
  void add_at_head(Metachunk* head);

  size_t sum_list_size();
  size_t sum_list_count();
  size_t sum_list_capacity();
};

// Manages the global free lists of chunks.
// Has three linked lists of free chunks (specialized, small and medium),
// plus a dictionary of humongous chunks, and a total size and
// count that cover all of them.

class ChunkManager VALUE_OBJ_CLASS_SPEC {

  // Free lists of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  ChunkList _free_chunks[NumberOfFreeLists];


  // Humongous chunks, kept in a dictionary keyed by size
  ChunkTreeDictionary _humongous_dictionary;

  // Total size (in words) and number of chunks in all of the
  // free lists and the humongous dictionary
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
             _free_chunks_total > 0,
             "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}

  // Add a chunk to or remove (and return) a chunk from the global free list.
  Metachunk* chunk_freelist_allocate(size_t word_size);
  void chunk_freelist_deallocate(Metachunk* chunk);

  // Map a size to a list index assuming that there are lists
  // for specialized, small, medium, and humongous chunks.
  static ChunkIndex list_index(size_t size);

  // Total of the space in the free chunks lists
  size_t free_chunks_total();
  size_t free_chunks_total_in_bytes();

  // Number of chunks in the free chunks lists
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Add and remove from a list by size.  Selects
  // the list based on the size of the chunk.
  void free_chunks_put(Metachunk* chunk);
  Metachunk* free_chunks_get(size_t chunk_word_size);

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st);
};


// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  BlockTreeDictionary* _dictionary;
  static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get and return a block to the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() {
    if (dictionary() == NULL) {
      return 0;
    } else {
      return dictionary()->total_size();
    }
  }

  void print_on(outputStream* st) const;
};

class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total space in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  // Address of the next available space in _virtual_space
  MetaWord* _top;

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _next(NULL), _rs(rs), _top(NULL) {}
  ~VirtualSpaceNode();

  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the virtual space
  bool is_available(size_t word_size) { return _top + word_size <= end(); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);
  Metachunk* get_chunk_vs_with_expand(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to VirtualSpace
  bool expand_by(size_t words, bool pre_touch = false);
  bool shrink_by(size_t words);

#ifdef ASSERT
  // Debug support
  static void verify_virtual_space_total();
  static void verify_virtual_space_count();
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

// byte_size is the size of the associated virtual space.
VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _next(NULL), _rs(0), _top(NULL) {
  // This allocates memory with mmap.  For DumpSharedSpaces, allocate the
  // space at low memory so that other shared images don't conflict.
  // This is the same address as memory needed for UseCompressedOops but
  // compressed oops don't work with CDS (offsets in metadata are wrong), so
  // borrow the same address.
  if (DumpSharedSpaces) {
    char* shared_base = (char*)HeapBaseMinAddress;
    _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
    if (_rs.is_reserved()) {
      assert(_rs.base() == shared_base, "should match");
    } else {
      // If we are dumping the heap, then allocate a wasted block of address
      // space in order to push the heap to a lower address.  This extra
      // address range allows for other (or larger) libraries to be loaded
      // without them occupying the space required for the shared spaces.
      uintx reserved = 0;
      uintx block_size = 64*1024*1024;
      while (reserved < SharedDummyBlockSize) {
        char* dummy = os::reserve_memory(block_size);
        reserved += block_size;
      }
      _rs = ReservedSpace(byte_size);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else {
    _rs = ReservedSpace(byte_size);
  }

  MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
}

// List of VirtualSpaces for metadata allocation.
// It has a _next link for the singly linked list and a MemRegion
// for the total space in the VirtualSpace.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Global list of virtual spaces
  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;
  // Free chunk list for all other metadata
  ChunkManager      _chunk_manager;

  // Can this virtual list allocate more than one space?  Also used to
  // determine whether to allocate unlimited small chunks in this virtual space
  bool _is_class;
  bool can_grow() const { return !is_class() || !UseCompressedKlassPointers; }

  // Sum of space in all virtual spaces and number of virtual spaces
  size_t _virtual_space_total;
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool grow_vs(size_t vs_word_size);

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  // Get the first chunk for a Metaspace.  Used for
  // special cases such as the boot class loader, reflection
  // class loader and anonymous class loader.
  Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  ChunkManager* chunk_manager() { return &_chunk_manager; }
  bool is_class() const { return _is_class; }

  // Allocate the first virtual space.
  void initialize(size_t word_size);

  size_t virtual_space_total() { return _virtual_space_total; }
  void inc_virtual_space_total(size_t v) {
    Atomic::add_ptr(v, &_virtual_space_total);
  }

  size_t virtual_space_count() { return _virtual_space_count; }
  void inc_virtual_space_count() {
    Atomic::inc_ptr(&_virtual_space_count);
  }

  // Used and capacity in the entire list of virtual spaces.
  // These are global values shared by all Metaspaces
  size_t capacity_words_sum();
  size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
  size_t used_words_sum();
  size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }

  bool contains(const void *ptr);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _deallocate_block_a_lot_count;
  static int _deallocate_chunk_a_lot_count;
  static int _allocation_fail_alot_count;

 public:
  static int deallocate_block_a_lot_count() {
    return _deallocate_block_a_lot_count;
  }
  static void set_deallocate_block_a_lot_count(int v) {
    _deallocate_block_a_lot_count = v;
  }
  static void inc_deallocate_block_a_lot_count() {
    _deallocate_block_a_lot_count++;
  }
  static int deallocate_chunk_a_lot_count() {
    return _deallocate_chunk_a_lot_count;
  }
  static void reset_deallocate_chunk_a_lot_count() {
    _deallocate_chunk_a_lot_count = 1;
  }
  static void inc_deallocate_chunk_a_lot_count() {
    _deallocate_chunk_a_lot_count++;
  }

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif

  static void deallocate_chunk_a_lot(SpaceManager* sm,
                                     size_t chunk_word_size);
  static void deallocate_block_a_lot(SpaceManager* sm,
                                     size_t chunk_word_size);

};

int Metadebug::_deallocate_block_a_lot_count = 0;
int Metadebug::_deallocate_chunk_a_lot_count = 0;
int Metadebug::_allocation_fail_alot_count = 0;

//  SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // Protects allocations and contains() queries
  Mutex* const _lock;

  // Chunk related size
  size_t _medium_chunk_bunch;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Virtual space where allocation comes from.
  VirtualSpaceList* _vs_list;

  // Number of small chunks to allocate to a manager
  // If class space manager, small chunks are unlimited
  static uint const _small_chunk_limit;
  bool has_small_chunk_limit() { return !vs_list()->is_class(); }

  // Sum of all space in allocated chunks
  size_t _allocation_total;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // Protects virtual space and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }

  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  VirtualSpaceList* vs_list() const    { return _vs_list; }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Mutex* lock,
               VirtualSpaceList* vs_list);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  // Accessors
  size_t specialized_chunk_size() { return SpecializedChunk; }
  size_t small_chunk_size() { return (size_t) (vs_list()->is_class() ? ClassSmallChunk : SmallChunk); }
  size_t medium_chunk_size() { return (size_t) (vs_list()->is_class() ? ClassMediumChunk : MediumChunk); }
  size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }

  size_t allocation_total() const { return _allocation_total; }
  void inc_allocation_total(size_t v) { Atomic::add_ptr(v, &_allocation_total); }
  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // returns the chunk size to use for expanding the space available
  // for chunk allocation.
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
  NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
  void verify_allocation_total();
#endif
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag);

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(NULL) {}

BlockFreelist::~BlockFreelist() {
  if (_dictionary != NULL) {
    if (Verbose && TraceMetadataChunkAllocation) {
      _dictionary->print_free_lists(gclog_or_tty);
    }
    delete _dictionary;
  }
}

Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
  Metablock* block = (Metablock*) p;
  block->set_word_size(word_size);
  block->set_prev(NULL);
  block->set_next(NULL);

  return block;
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  Metablock* free_chunk = initialize_free_chunk(p, word_size);
  if (dictionary() == NULL) {
    _dictionary = new BlockTreeDictionary();
  }
  dictionary()->return_chunk(free_chunk, false);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  if (dictionary() == NULL) {
    return NULL;
  }

  if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
    // Dark matter.  Too small for the dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::exactly);
  if (free_block == NULL) {
    return NULL;
  }

  return (MetaWord*) free_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  if (dictionary() == NULL) {
    return;
  }
  dictionary()->print_free_lists(st);
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}


// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  if (!is_available(chunk_word_size)) {
    if (TraceMetadataChunkAllocation) {
      tty->print("VirtualSpaceNode::take_from_committed() not available "
                 SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(tty);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Point the chunk at the space
  Metachunk* result = Metachunk::initialize(chunk_limit, chunk_word_size);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
  size_t bytes = words * BytesPerWord;
  bool result =  virtual_space()->expand_by(bytes, pre_touch);
  if (TraceMetavirtualspaceAllocation && !result) {
    gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
                           "for byte size " SIZE_FORMAT, bytes);
    virtual_space()->print();
  }
  return result;
}

// Shrink the virtual space (uncommit some of the committed space)
bool VirtualSpaceNode::shrink_by(size_t words) {
  size_t bytes = words * BytesPerWord;
  virtual_space()->shrink_by(bytes);
  return true;
}

// Allocate a chunk from the committed space of this virtual space.

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  return take_from_committed(chunk_word_size);
}

Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  Metachunk* new_chunk = get_chunk_vs(chunk_word_size);

  if (new_chunk == NULL) {
    // Only a small part of the virtual space is committed when it is first
    // allocated, so committing more here can be expected.
    size_t page_size_words = os::vm_page_size() / BytesPerWord;
    size_t aligned_expand_vs_by_words = align_size_up(chunk_word_size,
                                                      page_size_words);
    expand_by(aligned_expand_vs_by_words, false);
    new_chunk = get_chunk_vs(chunk_word_size);
  }
  return new_chunk;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // An allocation out of this VirtualSpace that is larger
  // than an initial commit size can waste that initial committed
  // space.
  size_t committed_byte_size = 0;
  bool result = virtual_space()->initialize(_rs, committed_byte_size);
  if (result) {
    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                 (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
      err_msg("Reserved start was not set properly " PTR_FORMAT
        " != " PTR_FORMAT, reserved()->start(), _rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
      err_msg("Reserved size was not set properly " SIZE_FORMAT
        " != " SIZE_FORMAT, reserved()->word_size(),
        _rs.size() / BytesPerWord));
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3)
           "%% used [" PTR_FORMAT ", " PTR_FORMAT ", "
           PTR_FORMAT ", " PTR_FORMAT ")",
           vs, capacity / K,
           capacity == 0 ? 0 : used * 100 / capacity,
           bottom(), top(), end(),
           vs->high_boundary());
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

size_t VirtualSpaceList::used_words_sum() {
  size_t allocated_by_vs = 0;
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    // Sum used region [bottom, top) in each virtual space
    allocated_by_vs += vsl->used_words_in_vs();
  }
  assert(allocated_by_vs >= chunk_manager()->free_chunks_total(),
    err_msg("Total in free chunks " SIZE_FORMAT
            " greater than total from virtual_spaces " SIZE_FORMAT,
            allocated_by_vs, chunk_manager()->free_chunks_total()));
  size_t used =
    allocated_by_vs - chunk_manager()->free_chunks_total();
  return used;
}

// Space available in all MetadataVirtualspaces allocated
// for metadata.  This is the upper limit on the capacity
// of chunks allocated out of all the MetadataVirtualspaces.
size_t VirtualSpaceList::capacity_words_sum() {
  size_t capacity = 0;
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    capacity += vsl->capacity_words_in_vs();
  }
  return capacity;
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _virtual_space_total(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  bool initialization_succeeded = grow_vs(word_size);

  assert(initialization_succeeded,
    "VirtualSpaceList initialization should not fail");
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _virtual_space_total(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  assert(succeeded, "VirtualSpaceList initialization should not fail");
  link_vs(class_entry, rs.size()/BytesPerWord);
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  if (vs_word_size == 0) {
    return false;
  }
  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    // Ensure lock-free iteration sees a fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry, vs_word_size);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_virtual_space_total(vs_word_size);
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (TraceMetavirtualspaceAllocation && Verbose) {
    VirtualSpaceNode* vsl = current_virtual_space();
    vsl->print_on(tty);
  }
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Get a chunk from the chunk freelist
  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);

  // Allocate a chunk out of the current virtual space.
  if (next == NULL) {
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  }

  if (next == NULL) {
    // Not enough room in current virtual space.  Try to commit
    // more space.
    size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
                                     grow_chunks_by_words);
    size_t page_size_words = os::vm_page_size() / BytesPerWord;
    size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
                                                      page_size_words);
    bool vs_expanded =
      current_virtual_space()->expand_by(aligned_expand_vs_by_words, false);
    if (!vs_expanded) {
      // Should the capacity of the metaspaces be expanded for
      // this allocation?  If it's the virtual space for classes and is
      // being used for CompressedHeaders, don't allocate a new virtual space.
      if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
        // Get another virtual space.
        size_t grow_vs_words =
          MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
        if (grow_vs(grow_vs_words)) {
          // Got it.  It's on the list now.  Get a chunk from it.
          next = current_virtual_space()->get_chunk_vs_with_expand(grow_chunks_by_words);
        }
      } else {
        // Allocation will fail and induce a GC
        if (TraceMetadataChunkAllocation && Verbose) {
          gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
            " Fail instead of expand the metaspace");
        }
      }
    } else {
      // The virtual space expanded, get a new chunk
      next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
      assert(next != NULL, "Just expanded, should succeed");
    }
  }

  assert(next == NULL || (next->next() == NULL && next->prev() == NULL),
         "New chunk is still on some list");
  return next;
}

Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
                                                      size_t chunk_bunch) {
  // Get a chunk from the chunk freelist
  Metachunk* new_chunk = get_new_chunk(chunk_word_size,
                                       chunk_word_size,
                                       chunk_bunch);
  return new_chunk;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  if (TraceMetadataChunkAllocation && Verbose) {
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* node = iter.get_next();
      node->print_on(st);
    }
  }
}

bool VirtualSpaceList::contains(const void *ptr) {
  VirtualSpaceNode* list = virtual_space_list();
  VirtualSpaceListIterator iter(list);
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    if (node->reserved()->contains(ptr)) {
      return true;
    }
  }
  return false;
}


// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With the perm gen, the increase in the perm
// gen was bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinHeapFreeRatio and MaxHeapFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
// if really needed.  MinHeapFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxHeapFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.
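//
// For example (values for illustration only): with MinHeapFreeRatio == 40
// and used_after_gc == 60M, compute_new_size() computes
//   minimum_desired_capacity = 60M / (1 - 40/100) = 100M
// and the HWM is raised if the current capacity is below that.  Similarly,
// with MaxHeapFreeRatio == 70,
//   maximum_desired_capacity = 60M / (1 - 70/100) = 200M
// and shrinking is considered only when the capacity exceeds that.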

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation (i.e. big enough for a word_size
// allocation), increase by MaxMetaspaceExpansion.  If that is still
// not enough, expand by the size of the allocation (word_size) plus
// some.
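//
// Worked example (flag values for illustration only): on a 64-bit VM with
// 4K pages, MinMetaspaceExpansion == 256K bytes (min_delta_words == 32K) and
// MaxMetaspaceExpansion == 4M bytes (max_delta_words == 512K):
//   word_size ==   1K words -> delta ==  32K words (the minimum step)
//   word_size == 100K words -> delta == 512K words (the maximum step)
//   word_size == 600K words -> delta == 632K words (word_size plus minimum)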
size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
  size_t before_inc = MetaspaceGC::capacity_until_GC();
  size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
  size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
  size_t page_size_words = os::vm_page_size() / BytesPerWord;
  size_t size_delta_words = align_size_up(word_size, page_size_words);
  size_t delta_words = MAX2(size_delta_words, min_delta_words);
  if (delta_words > min_delta_words) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta_words = MAX2(delta_words, max_delta_words);
    if (delta_words > max_delta_words) {
      // This allocation is large but the next ones are probably not
      // so increase by the minimum.
      delta_words = delta_words + min_delta_words;
    }
  }
  return delta_words;
}

bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {

  // Class virtual space should always be expanded.  Call GC for the other
  // metadata virtual space.
  if (vsl == Metaspace::class_space_list()) return true;

  // If the user wants a limit, impose one.
  size_t max_metaspace_size_words = MaxMetaspaceSize / BytesPerWord;
  size_t metaspace_size_words = MetaspaceSize / BytesPerWord;
  if (!FLAG_IS_DEFAULT(MaxMetaspaceSize) &&
      vsl->capacity_words_sum() >= max_metaspace_size_words) {
    return false;
  }

  // If this is part of an allocation after a GC, expand
  // unconditionally.
  if (MetaspaceGC::expand_after_GC()) {
    return true;
  }

  // If the capacity is below the minimum capacity, allow the
  // expansion.  Also set the high-water-mark (capacity_until_GC)
  // to that minimum capacity so that a GC will not be induced
  // until that minimum capacity is exceeded.
  if (vsl->capacity_words_sum() < metaspace_size_words ||
      capacity_until_GC() == 0) {
    set_capacity_until_GC(metaspace_size_words);
    return true;
  } else {
    if (vsl->capacity_words_sum() < capacity_until_GC()) {
      return true;
    } else {
      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
                        "  capacity_until_GC " SIZE_FORMAT
                        "  capacity_words_sum " SIZE_FORMAT
                        "  used_words_sum " SIZE_FORMAT
                        "  free chunks " SIZE_FORMAT
                        "  free chunks count " SIZE_FORMAT,
                        word_size,
                        capacity_until_GC(),
                        vsl->capacity_words_sum(),
                        vsl->used_words_sum(),
                        vsl->chunk_manager()->free_chunks_total(),
                        vsl->chunk_manager()->free_chunks_count());
      }
      return false;
    }
  }
}

// Variables are in bytes

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  VirtualSpaceList *vsl = Metaspace::space_list();

  size_t capacity_after_gc = vsl->capacity_bytes_sum();
  // Check to see if these two can be calculated without walking the CLDG
  size_t used_after_gc = vsl->used_bytes_sum();
  size_t capacity_until_GC = vsl->capacity_bytes_sum();
  size_t free_after_gc = capacity_until_GC - used_after_gc;

  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  if (PrintGCDetails && Verbose) {
    const double free_percentage = ((double)free_after_gc) / capacity_until_GC;
    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                  "  minimum_free_percentage: %6.2f"
                  "  maximum_used_percentage: %6.2f",
                  minimum_free_percentage,
                  maximum_used_percentage);
    gclog_or_tty->print_cr("  "
                  "   free_after_gc       : %6.1fK"
                  "   used_after_gc       : %6.1fK"
                  "   capacity_after_gc   : %6.1fK"
                  "   metaspace HWM       : %6.1fK",
                  free_after_gc / (double) K,
                  used_after_gc / (double) K,
                  capacity_after_gc / (double) K,
                  capacity_until_GC / (double) K);
    gclog_or_tty->print_cr("  "
                  "   free_percentage: %6.2f",
                  free_percentage);
  }


  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t expand_words = expand_bytes / BytesPerWord;
      MetaspaceGC::inc_capacity_until_GC(expand_words);
    }
    if (PrintGCDetails && Verbose) {
      size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes();
      gclog_or_tty->print_cr("    expanding:"
                    "  minimum_desired_capacity: %6.1fK"
                    "  expand_bytes: %6.1fK"
                    "  MinMetaspaceExpansion: %6.1fK"
                    "  new metaspace HWM:  %6.1fK",
                    minimum_desired_capacity / (double) K,
                    expand_bytes / (double) K,
                    MinMetaspaceExpansion / (double) K,
                    new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  size_t shrink_words = 0;
  // We would never want to shrink more than this
  size_t max_shrink_words = capacity_until_GC - minimum_desired_capacity;
  assert(max_shrink_words >= 0, err_msg("max_shrink_words " SIZE_FORMAT,
    max_shrink_words));

  // Should shrinking be considered?
  if (MaxHeapFreeRatio < 100) {
    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  maximum_free_percentage: %6.2f"
                             "  minimum_used_percentage: %6.2f",
                             maximum_free_percentage,
                             minimum_used_percentage);
      gclog_or_tty->print_cr("  "
                             "  capacity_until_GC: %6.1fK"
                             "  minimum_desired_capacity: %6.1fK"
                             "  maximum_desired_capacity: %6.1fK",
                             capacity_until_GC / (double) K,
                             minimum_desired_capacity / (double) K,
                             maximum_desired_capacity / (double) K);
    }

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_words = capacity_until_GC - maximum_desired_capacity;
      // We don't want shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
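      // The factor progression implemented below: current_shrink_factor goes
      // 0 -> 10 -> 40 -> 100 over successive shrinking calls
      // (MIN2(10 * 4, 100) == 40, MIN2(40 * 4, 100) == 100); _shrink_factor
      // is reset to 0 at the top of compute_new_size(), so a call that does
      // not shrink restarts the sequence.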
      shrink_words = shrink_words / 100 * current_shrink_factor;
      assert(shrink_words <= max_shrink_words,
        err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
          shrink_words, max_shrink_words));
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("  "
                      "  shrinking:"
                      "  initSize: %.1fK"
                      "  maximum_desired_capacity: %.1fK",
                      MetaspaceSize / (double) K,
                      maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr("  "
                      "  shrink_words: %.1fK"
                      "  current_shrink_factor: %d"
                      "  new shrink factor: %d"
                      "  MinMetaspaceExpansion: %.1fK",
                      shrink_words / (double) K,
                      current_shrink_factor,
                      _shrink_factor,
                      MinMetaspaceExpansion / (double) K);
      }
    }
  }


  // Don't shrink unless it's significant
  if (shrink_words >= MinMetaspaceExpansion) {
    VirtualSpaceNode* csp = vsl->current_virtual_space();
    size_t available_to_shrink = csp->capacity_words_in_vs() -
      csp->used_words_in_vs();
    shrink_words = MIN2(shrink_words, available_to_shrink);
    csp->shrink_by(shrink_words);
    MetaspaceGC::dec_capacity_until_GC(shrink_words);
    if (PrintGCDetails && Verbose) {
      size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes();
      gclog_or_tty->print_cr("  metaspace HWM: %.1fK", new_capacity_until_GC / (double) K);
    }
  }
  assert(vsl->used_bytes_sum() == used_after_gc &&
         used_after_gc <= vsl->capacity_bytes_sum(),
         "sanity check");

}

// Metadebug methods

void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
                                       size_t chunk_word_size) {
#ifdef ASSERT
  VirtualSpaceList* vsl = sm->vs_list();
  if (MetaDataDeallocateALot &&
      Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
    Metadebug::reset_deallocate_chunk_a_lot_count();
    for (uint i = 0; i < metadata_deallocate_a_lot_chunk; i++) {
      Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
      if (dummy_chunk == NULL) {
        break;
      }
      vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);

      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: " SIZE_FORMAT ") ",
                               sm->sum_count_in_chunks_in_use());
        dummy_chunk->print_on(gclog_or_tty);
        gclog_or_tty->print_cr("  Free chunks total " SIZE_FORMAT
                               "  count " SIZE_FORMAT,
                               vsl->chunk_manager()->free_chunks_total(),
                               vsl->chunk_manager()->free_chunks_count());
      }
    }
  } else {
    Metadebug::inc_deallocate_chunk_a_lot_count();
  }
#endif
}

void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
                                       size_t raw_word_size) {
#ifdef ASSERT
  if (MetaDataDeallocateALot &&
        Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
    Metadebug::set_deallocate_block_a_lot_count(0);
    for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
      MetaWord* dummy_block = sm->allocate_work(raw_word_size);
      if (dummy_block == 0) {
        break;
      }
      sm->deallocate(dummy_block, raw_word_size);
    }
  } else {
    Metadebug::inc_deallocate_block_a_lot_count();
  }
#endif
}

void Metadebug::init_allocation_fail_alot_count() {
  if (MetadataAllocationFailALot) {
    _allocation_fail_alot_count =
      1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
  }
}

#ifdef ASSERT
bool Metadebug::test_metadata_failure() {
  if (MetadataAllocationFailALot &&
      Threads::is_vm_complete()) {
    if (_allocation_fail_alot_count > 0) {
      _allocation_fail_alot_count--;
    } else {
      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print_cr("Metadata allocation failing for "
                               "MetadataAllocationFailALot");
      }
      init_allocation_fail_alot_count();
      return true;
    }
  }
  return false;
}
#endif

// ChunkList methods

size_t ChunkList::sum_list_size() {
  size_t result = 0;
  Metachunk* cur = head();
  while (cur != NULL) {
    result += cur->word_size();
    cur = cur->next();
  }
  return result;
}

size_t ChunkList::sum_list_count() {
  size_t result = 0;
  Metachunk* cur = head();
  while (cur != NULL) {
    result++;
    cur = cur->next();
  }
  return result;
}

size_t ChunkList::sum_list_capacity() {
  size_t result = 0;
  Metachunk* cur = head();
  while (cur != NULL) {
    result += cur->capacity_word_size();
    cur = cur->next();
  }
  return result;
}

void ChunkList::add_at_head(Metachunk* head, Metachunk* tail) {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(head == tail || tail->next() == NULL,
         "tail should be the end of the list being added");

  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print("ChunkList::add_at_head(head, tail): ");
    Metachunk* cur = head;
    while (cur != NULL) {
      gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ", cur, cur->word_size());
      cur = cur->next();
    }
    gclog_or_tty->print_cr("");
  }

  if (tail != NULL) {
    tail->set_next(_head);
  }
  set_head(head);
}

void ChunkList::add_at_head(Metachunk* list) {
  if (list == NULL) {
    // Nothing to add
    return;
  }
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* head = list;
  Metachunk* tail = list;
  Metachunk* cur = head->next();
  // Search for the tail since it is not passed.
  while (cur != NULL) {
    tail = cur;
    cur = cur->next();
  }
  add_at_head(head, tail);
}
1463 
1464 // ChunkManager methods
1465 
1466 // Verification of _free_chunks_total and _free_chunks_count does not
1467 // work with the CMS collector because its use of additional locks
1468 // complicates the mutex deadlock detection, but it can still be useful
1469 // for detecting errors in the chunk accounting with other collectors.
1470 
1471 size_t ChunkManager::free_chunks_total() {
1472 #ifdef ASSERT
1473   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1474     MutexLockerEx cl(SpaceManager::expand_lock(),
1475                      Mutex::_no_safepoint_check_flag);
1476     slow_locked_verify_free_chunks_total();
1477   }
1478 #endif
1479   return _free_chunks_total;
1480 }
1481 
1482 size_t ChunkManager::free_chunks_total_in_bytes() {
1483   return free_chunks_total() * BytesPerWord;
1484 }
1485 
1486 size_t ChunkManager::free_chunks_count() {
1487 #ifdef ASSERT
1488   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1489     MutexLockerEx cl(SpaceManager::expand_lock(),
1490                      Mutex::_no_safepoint_check_flag);
1491     // This lock is only needed in debug because the verification
1492     // of _free_chunks_count walks the lists of free chunks
1493     slow_locked_verify_free_chunks_count();
1494   }
1495 #endif
1496   return _free_chunks_count;
1497 }
1498 
1499 void ChunkManager::locked_verify_free_chunks_total() {
1500   assert_lock_strong(SpaceManager::expand_lock());
1501   assert(sum_free_chunks() == _free_chunks_total,
1502     err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
1503            " same as sum " SIZE_FORMAT, _free_chunks_total,
1504            sum_free_chunks()));
1505 }
1506 
1507 void ChunkManager::verify_free_chunks_total() {
1508   MutexLockerEx cl(SpaceManager::expand_lock(),
1509                      Mutex::_no_safepoint_check_flag);
1510   locked_verify_free_chunks_total();
1511 }
1512 
1513 void ChunkManager::locked_verify_free_chunks_count() {
1514   assert_lock_strong(SpaceManager::expand_lock());
1515   assert(sum_free_chunks_count() == _free_chunks_count,
1516     err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
1517            " same as sum " SIZE_FORMAT, _free_chunks_count,
1518            sum_free_chunks_count()));
1519 }
1520 
1521 void ChunkManager::verify_free_chunks_count() {
1522 #ifdef ASSERT
1523   MutexLockerEx cl(SpaceManager::expand_lock(),
1524                      Mutex::_no_safepoint_check_flag);
1525   locked_verify_free_chunks_count();
1526 #endif
1527 }
1528 
1529 void ChunkManager::verify() {
1530   MutexLockerEx cl(SpaceManager::expand_lock(),
1531                      Mutex::_no_safepoint_check_flag);
1532   locked_verify();
1533 }
1534 
1535 void ChunkManager::locked_verify() {
1536   locked_verify_free_chunks_count();
1537   locked_verify_free_chunks_total();
1538 }
1539 
1540 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1541   assert_lock_strong(SpaceManager::expand_lock());
1542   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1543                 _free_chunks_total, _free_chunks_count);
1544 }
1545 
1546 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1547   assert_lock_strong(SpaceManager::expand_lock());
1548   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1549                 sum_free_chunks(), sum_free_chunks_count());
1550 }

1551 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1552   return &_free_chunks[index];
1553 }
1554 
1555 // These methods sum the free chunk lists (and the humongous chunk
1556 // dictionary); they back printing methods used in product builds.
1557 size_t ChunkManager::sum_free_chunks() {
1558   assert_lock_strong(SpaceManager::expand_lock());
1559   size_t result = 0;
1560   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1561     ChunkList* list = free_chunks(i);
1562 
1563     if (list == NULL) {
1564       continue;
1565     }
1566 
1567     result = result + list->sum_list_capacity();
1568   }
1569   result = result + humongous_dictionary()->total_size();
1570   return result;
1571 }
1572 
1573 size_t ChunkManager::sum_free_chunks_count() {
1574   assert_lock_strong(SpaceManager::expand_lock());
1575   size_t count = 0;
1576   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1577     ChunkList* list = free_chunks(i);
1578     if (list == NULL) {
1579       continue;
1580     }
1581     count = count + list->sum_list_count();
1582   }
1583   count = count + humongous_dictionary()->total_free_blocks();
1584   return count;
1585 }
1586 
1587 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1588   ChunkIndex index = list_index(word_size);
1589   assert(index < HumongousIndex, "No humongous list");
1590   return free_chunks(index);
1591 }
1592 
1593 void ChunkManager::free_chunks_put(Metachunk* chunk) {
1594   assert_lock_strong(SpaceManager::expand_lock());
1595   ChunkList* free_list = find_free_chunks_list(chunk->word_size());
1596   chunk->set_next(free_list->head());
1597   free_list->set_head(chunk);
1598   // chunk is being returned to the chunk free list
1599   inc_free_chunks_total(chunk->capacity_word_size());
1600   slow_locked_verify();
1601 }
1602 
1603 void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
1604   // The deallocation of a chunk originates in the freelist
1605   // management code for a Metaspace; the caller is expected to
1606   // hold the expand lock (see the assert below).
1607   assert(chunk != NULL, "Deallocating NULL");
1608   assert_lock_strong(SpaceManager::expand_lock());
1609   slow_locked_verify();
1610   if (TraceMetadataChunkAllocation) {
1611     tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
1612                   PTR_FORMAT "  size " SIZE_FORMAT,
1613                   chunk, chunk->word_size());
1614   }
1615   free_chunks_put(chunk);
1616 }
1617 
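     // Takes a chunk of at least word_size words off the free lists.
     // Non-humongous sizes come from the exact-size freelist for that
     // index; humongous sizes are satisfied by a best-fit (atLeast)
     // search of the humongous dictionary.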
1618 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1619   assert_lock_strong(SpaceManager::expand_lock());
1620 
1621   slow_locked_verify();
1622 
1623   Metachunk* chunk = NULL;
1624   if (list_index(word_size) != HumongousIndex) {
1625     ChunkList* free_list = find_free_chunks_list(word_size);
1626     assert(free_list != NULL, "Sanity check");
1627 
1628     chunk = free_list->head();
1629     debug_only(Metachunk* debug_head = chunk;)
1630 
1631     if (chunk == NULL) {
1632       return NULL;
1633     }
1634 
1635     // Remove the chunk as the head of the list.
1636     free_list->set_head(chunk->next());
1637 
1638     // Chunk is being removed from the chunks free list.
1639     dec_free_chunks_total(chunk->capacity_word_size());
1640 
1641     if (TraceMetadataChunkAllocation && Verbose) {
1642       tty->print_cr("ChunkManager::free_chunks_get: free_list "
1643                     PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1644                     free_list, chunk, chunk->word_size());
1645     }
1646   } else {
1647     chunk = humongous_dictionary()->get_chunk(
1648       word_size,
1649       FreeBlockDictionary<Metachunk>::atLeast);
1650 
1651     if (chunk != NULL) {
1652       if (TraceMetadataHumongousAllocation) {
1653         size_t waste = chunk->word_size() - word_size;
1654         tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
1655                       " for requested size " SIZE_FORMAT
1656                       " waste " SIZE_FORMAT,
1657                       chunk->word_size(), word_size, waste);
1658       }
1659       // Chunk is being removed from the chunks free list.
1660       dec_free_chunks_total(chunk->capacity_word_size());
1661 #ifdef ASSERT
1662       chunk->set_is_free(false);
1663 #endif
1664     } else {
1665       return NULL;
1666     }
1667   }
1668 
1669   // Clear the links; the chunk is no longer on any freelist.
1670   chunk->set_next(NULL);
1671   chunk->set_prev(NULL);
1672   slow_locked_verify();
1673   return chunk;
1674 }
1675 
1676 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1677   assert_lock_strong(SpaceManager::expand_lock());
1678   slow_locked_verify();
1679 
1680   // Take from the beginning of the list
1681   Metachunk* chunk = free_chunks_get(word_size);
1682   if (chunk == NULL) {
1683     return NULL;
1684   }
1685 
1686   assert((word_size <= chunk->word_size()) ||
1687          list_index(chunk->word_size()) == HumongousIndex,
1688          "Non-humongous variable sized chunk");
1689   if (TraceMetadataChunkAllocation) {
1690     size_t list_count;
1691     if (list_index(word_size) < HumongousIndex) {
1692       ChunkList* list = find_free_chunks_list(word_size);
1693       list_count = list->sum_list_count();
1694     } else {
1695       list_count = humongous_dictionary()->total_count();
1696     }
1697     tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1698                PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1699                this, chunk, chunk->word_size(), list_count);
1700     locked_print_free_chunks(tty);
1701   }
1702 
1703   return chunk;
1704 }
1705 
1706 void ChunkManager::print_on(outputStream* out) {
1707   if (PrintFLSStatistics != 0) {
1708     humongous_dictionary()->report_statistics();
1709   }
1710 }
1711 
1712 // SpaceManager methods
1713 
1714 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1715                                            size_t* chunk_word_size,
1716                                            size_t* class_chunk_word_size) {
1717   switch (type) {
1718   case Metaspace::BootMetaspaceType:
1719     *chunk_word_size = Metaspace::first_chunk_word_size();
1720     *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1721     break;
1722   case Metaspace::ROMetaspaceType:
1723     *chunk_word_size = SharedReadOnlySize / wordSize;
1724     *class_chunk_word_size = ClassSpecializedChunk;
1725     break;
1726   case Metaspace::ReadWriteMetaspaceType:
1727     *chunk_word_size = SharedReadWriteSize / wordSize;
1728     *class_chunk_word_size = ClassSpecializedChunk;
1729     break;
1730   case Metaspace::AnonymousMetaspaceType:
1731   case Metaspace::ReflectionMetaspaceType:
1732     *chunk_word_size = SpecializedChunk;
1733     *class_chunk_word_size = ClassSpecializedChunk;
1734     break;
1735   default:
1736     *chunk_word_size = SmallChunk;
1737     *class_chunk_word_size = ClassSmallChunk;
1738     break;
1739   }
1740   assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1741     err_msg("Initial chunks sizes bad: data  " SIZE_FORMAT
1742             " class " SIZE_FORMAT,
1743             *chunk_word_size, *class_chunk_word_size));
1744 }
1745 
1746 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1747   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1748   size_t free = 0;
1749   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1750     Metachunk* chunk = chunks_in_use(i);
1751     while (chunk != NULL) {
1752       free += chunk->free_word_size();
1753       chunk = chunk->next();
1754     }
1755   }
1756   return free;
1757 }
1758 
1759 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1760   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1761   size_t result = 0;
1762   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1763    result += sum_waste_in_chunks_in_use(i);
1764   }
1765 
1766   return result;
1767 }
1768 
1769 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1770   size_t result = 0;
1771   Metachunk* chunk = chunks_in_use(index);
1772   // Count the free space in all the chunks except the
1773   // current chunk, from which allocations are still being done.
1774   if (chunk != NULL) {
1775     while (chunk != NULL && chunk != current_chunk()) {
1776       result += chunk->free_word_size();
1777       chunk = chunk->next();
1778     }
1779   }
1782   return result;
1783 }
1784 
1785 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1786   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1787   size_t sum = 0;
1788   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1789     Metachunk* chunk = chunks_in_use(i);
1790     while (chunk != NULL) {
1793       sum += chunk->capacity_word_size();
1794       chunk = chunk->next();
1795     }
1796   }
1797   return sum;
1798 }
1799 
1800 size_t SpaceManager::sum_count_in_chunks_in_use() {
1801   size_t count = 0;
1802   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1803     count = count + sum_count_in_chunks_in_use(i);
1804   }
1805 
1806   return count;
1807 }
1808 
1809 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
1810   size_t count = 0;
1811   Metachunk* chunk = chunks_in_use(i);
1812   while (chunk != NULL) {
1813     count++;
1814     chunk = chunk->next();
1815   }
1816   return count;
1817 }
1818 
1819 
1820 size_t SpaceManager::sum_used_in_chunks_in_use() const {
1821   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1822   size_t used = 0;
1823   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1824     Metachunk* chunk = chunks_in_use(i);
1825     while (chunk != NULL) {
1826       used += chunk->used_word_size();
1827       chunk = chunk->next();
1828     }
1829   }
1830   return used;
1831 }
1832 
1833 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
1834 
1835   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1836     Metachunk* chunk = chunks_in_use(i);
1837     st->print("SpaceManager: %s " PTR_FORMAT,
1838                  chunk_size_name(i), chunk);
1839     if (chunk != NULL) {
1840       st->print_cr(" free " SIZE_FORMAT,
1841                    chunk->free_word_size());
1842     } else {
1843       st->print_cr("");
1844     }
1845   }
1846 
1847   vs_list()->chunk_manager()->locked_print_free_chunks(st);
1848   vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
1849 }
1850 
1851 size_t SpaceManager::calc_chunk_size(size_t word_size) {
1852 
1853   // Decide between a small chunk and a medium chunk.  Up to
1854   // _small_chunk_limit small chunks can be allocated but
1855   // once a medium chunk has been allocated, no more small
1856   // chunks will be allocated.
1857   size_t chunk_word_size;
1858   if (chunks_in_use(MediumIndex) == NULL &&
1859       (!has_small_chunk_limit() ||
1860        sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit)) {
1861     chunk_word_size = (size_t) small_chunk_size();
1862     if (word_size + Metachunk::overhead() > small_chunk_size()) {
1863       chunk_word_size = medium_chunk_size();
1864     }
1865   } else {
1866     chunk_word_size = medium_chunk_size();
1867   }
1868 
1869   // Might still need a humongous chunk.  Enforce an
1870   // eight word granularity to facilitate reuse (some
1871   // wastage but better chance of reuse).
1872   size_t if_humongous_sized_chunk =
1873     align_size_up(word_size + Metachunk::overhead(),
1874                   HumongousChunkGranularity);
1875   chunk_word_size =
1876     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
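       // Illustrative arithmetic (hypothetical numbers, assuming an 8-word
       // granularity): a request where word_size + Metachunk::overhead()
       // comes to 16402 words is rounded up to a 16408-word chunk.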
1877 
1878   assert(!SpaceManager::is_humongous(word_size) ||
1879          chunk_word_size == if_humongous_sized_chunk,
1880          err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
1881                  " chunk_word_size " SIZE_FORMAT,
1882                  word_size, chunk_word_size));
1883   if (TraceMetadataHumongousAllocation &&
1884       SpaceManager::is_humongous(word_size)) {
1885     gclog_or_tty->print_cr("Metadata humongous allocation:");
1886     gclog_or_tty->print_cr("  word_size " PTR_FORMAT, word_size);
1887     gclog_or_tty->print_cr("  chunk_word_size " PTR_FORMAT,
1888                            chunk_word_size);
1889     gclog_or_tty->print_cr("    chunk overhead " PTR_FORMAT,
1890                            Metachunk::overhead());
1891   }
1892   return chunk_word_size;
1893 }
1894 
1895 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
1896   assert(vs_list()->current_virtual_space() != NULL,
1897          "Should have been set");
1898   assert(current_chunk() == NULL ||
1899          current_chunk()->allocate(word_size) == NULL,
1900          "Don't need to expand");
1901   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
1902 
1903   if (TraceMetadataChunkAllocation && Verbose) {
1904     size_t words_left = 0;
1905     size_t words_used = 0;
1906     if (current_chunk() != NULL) {
1907       words_left = current_chunk()->free_word_size();
1908       words_used = current_chunk()->used_word_size();
1909     }
1910     gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
1911                            " words " SIZE_FORMAT " words used " SIZE_FORMAT
1912                            " words left",
1913                             word_size, words_used, words_left);
1914   }
1915 
1916   // Get another chunk out of the virtual space
1917   size_t grow_chunks_by_words = calc_chunk_size(word_size);
1918   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
1919 
1920   // If a chunk was available, add it to the in-use chunk list
1921   // and do an allocation from it.
1922   if (next != NULL) {
1923     Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
1924     // Add to this manager's list of chunks in use.
1925     add_chunk(next, false);
1926     return next->allocate(word_size);
1927   }
1928   return NULL;
1929 }
1930 
1931 void SpaceManager::print_on(outputStream* st) const {
1932 
1933   for (ChunkIndex i = ZeroIndex;
1934        i < NumberOfInUseLists ;
1935        i = next_chunk_index(i) ) {
1936     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
1937                  chunks_in_use(i),
1938                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
1939   }
1940   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
1941                " Humongous " SIZE_FORMAT,
1942                sum_waste_in_chunks_in_use(SmallIndex),
1943                sum_waste_in_chunks_in_use(MediumIndex),
1944                sum_waste_in_chunks_in_use(HumongousIndex));
1945   // block free lists
1946   if (block_freelists() != NULL) {
1947     st->print_cr("total in block free lists " SIZE_FORMAT,
1948       block_freelists()->total_size());
1949   }
1950 }
1951 
1952 SpaceManager::SpaceManager(Mutex* lock,
1953                            VirtualSpaceList* vs_list) :
1954   _vs_list(vs_list),
1955   _allocation_total(0),
1956   _lock(lock)
1957 {
1958   initialize();
1959 }
1960 
1961 void SpaceManager::initialize() {
1962   Metadebug::init_allocation_fail_alot_count();
1963   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1964     _chunks_in_use[i] = NULL;
1965   }
1966   _current_chunk = NULL;
1967   if (TraceMetadataChunkAllocation && Verbose) {
1968     gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
1969   }
1970 }
1971 
1972 SpaceManager::~SpaceManager() {
1973   MutexLockerEx fcl(SpaceManager::expand_lock(),
1974                     Mutex::_no_safepoint_check_flag);
1975 
1976   ChunkManager* chunk_manager = vs_list()->chunk_manager();
1977 
1978   chunk_manager->slow_locked_verify();
1979 
1980   if (TraceMetadataChunkAllocation && Verbose) {
1981     gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
1982     locked_print_chunks_in_use_on(gclog_or_tty);
1983   }
1984 
1985   // Mangle freed memory.
1986   NOT_PRODUCT(mangle_freed_chunks();)
1987 
1988   // Have to update before the chunks_in_use lists are emptied
1989   // below.
1990   chunk_manager->inc_free_chunks_total(sum_capacity_in_chunks_in_use(),
1991                                        sum_count_in_chunks_in_use());
1992 
1993   // Add all the chunks in use by this space manager
1994   // to the global list of free chunks.
1995 
1996   // Follow each list of chunks-in-use and add them to the
1997   // free lists.  Each list is NULL terminated.
1998 
1999   for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2000     if (TraceMetadataChunkAllocation && Verbose) {
2001       gclog_or_tty->print_cr("returned %d %s chunks to freelist",
2002                              sum_count_in_chunks_in_use(i),
2003                              chunk_size_name(i));
2004     }
2005     Metachunk* chunks = chunks_in_use(i);
2006     chunk_manager->free_chunks(i)->add_at_head(chunks);
2007     set_chunks_in_use(i, NULL);
2008     if (TraceMetadataChunkAllocation && Verbose) {
2009       gclog_or_tty->print_cr("updated freelist count %d %s",
2010                              chunk_manager->free_chunks(i)->sum_list_count(),
2011                              chunk_size_name(i));
2012     }
2013     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2014   }
2015 
2016   // The medium chunk case may be optimized by passing the head and
2017   // tail of the medium chunk list to add_at_head().  The tail is often
2018   // the current chunk but there are probably exceptions.
2019 
2020   // Humongous chunks
2021   if (TraceMetadataChunkAllocation && Verbose) {
2022     gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
2023                             sum_count_in_chunks_in_use(HumongousIndex),
2024                             chunk_size_name(HumongousIndex));
2025     gclog_or_tty->print("Humongous chunk dictionary: ");
2026   }
2027   // Humongous chunks are never the current chunk.
2028   Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2029 
2030   while (humongous_chunks != NULL) {
2031 #ifdef ASSERT
2032     humongous_chunks->set_is_free(true);
2033 #endif
2034     if (TraceMetadataChunkAllocation && Verbose) {
2035       gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2036                           humongous_chunks,
2037                           humongous_chunks->word_size());
2038     }
2039     assert(humongous_chunks->word_size() == (size_t)
2040            align_size_up(humongous_chunks->word_size(),
2041                              HumongousChunkGranularity),
2042            err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2043                    " granularity " SIZE_FORMAT,
2044                    humongous_chunks->word_size(), HumongousChunkGranularity));
2045     Metachunk* next_humongous_chunks = humongous_chunks->next();
2046     chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks, false);
2047     humongous_chunks = next_humongous_chunks;
2048   }
2049   if (TraceMetadataChunkAllocation && Verbose) {
2050     gclog_or_tty->print_cr("");
2051     gclog_or_tty->print_cr("updated dictionary count %d %s",
2052                      chunk_manager->humongous_dictionary()->total_count(),
2053                      chunk_size_name(HumongousIndex));
2054   }
2055   set_chunks_in_use(HumongousIndex, NULL);
2056   chunk_manager->slow_locked_verify();
2057 }
2058 
2059 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2060   switch (index) {
2061     case SpecializedIndex:
2062       return "Specialized";
2063     case SmallIndex:
2064       return "Small";
2065     case MediumIndex:
2066       return "Medium";
2067     case HumongousIndex:
2068       return "Humongous";
2069     default:
2070       return NULL;
2071   }
2072 }
2073 
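     // Maps an exact chunk size to its free-list index.  Only the fixed
     // sizes (SpecializedChunk, SmallChunk/ClassSmallChunk,
     // MediumChunk/ClassMediumChunk) have dedicated lists; every other
     // size falls through to HumongousIndex.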
2074 ChunkIndex ChunkManager::list_index(size_t size) {
2075   switch (size) {
2076     case SpecializedChunk:
2077       assert(SpecializedChunk == ClassSpecializedChunk,
2078              "Need branch for ClassSpecializedChunk");
2079       return SpecializedIndex;
2080     case SmallChunk:
2081     case ClassSmallChunk:
2082       return SmallIndex;
2083     case MediumChunk:
2084     case ClassMediumChunk:
2085       return MediumIndex;
2086     default:
2087       assert(size > MediumChunk || size > ClassMediumChunk,
2088              "Not a humongous chunk");
2089       return HumongousIndex;
2090   }
2091 }
2092 
2093 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2094   assert_lock_strong(_lock);
2095   size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
2096   assert(word_size >= min_size,
2097     err_msg("Should not deallocate dark matter " SIZE_FORMAT, word_size));
2098   block_freelists()->return_block(p, word_size);
2099 }
2100 
2101 // Adds a chunk to the list of chunks in use.
2102 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2103 
2104   assert(new_chunk != NULL, "Should not be NULL");
2105   assert(new_chunk->next() == NULL, "Should not be on a list");
2106 
2107   new_chunk->reset_empty();
2108 
2109   // Find the correct list and set the current
2110   // chunk for that list.
2111   ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2112 
2113   if (index != HumongousIndex) {
2114     set_current_chunk(new_chunk);
2115     new_chunk->set_next(chunks_in_use(index));
2116     set_chunks_in_use(index, new_chunk);
2117   } else {
2118     // For null class loader data and DumpSharedSpaces, the first chunk isn't
2119     // small, so the small chunk list will be empty.  Link this first chunk
2120     // in as the current chunk.
2121     if (make_current) {
2122       // Set as the current chunk but otherwise treat as a humongous chunk.
2123       set_current_chunk(new_chunk);
2124     }
2125     // Link at head.  _current_chunk points to a humongous chunk only
2126     // for the null class loader metaspace (the class and data virtual
2127     // space managers); since new humongous chunks are linked at the
2128     // head, it will never point to the tail of the humongous chunks list.
2129     new_chunk->set_next(chunks_in_use(HumongousIndex));
2130     set_chunks_in_use(HumongousIndex, new_chunk);
2131 
2132     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2133   }
2134 
2135   assert(new_chunk->is_empty(), "Not ready for reuse");
2136   if (TraceMetadataChunkAllocation && Verbose) {
2137     gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
2138                         sum_count_in_chunks_in_use());
2139     new_chunk->print_on(gclog_or_tty);
2140     vs_list()->chunk_manager()->locked_print_free_chunks(tty);
2141   }
2142 }
2143 
2144 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2145                                        size_t grow_chunks_by_words) {
2146 
2147   Metachunk* next = vs_list()->get_new_chunk(word_size,
2148                                              grow_chunks_by_words,
2149                                              medium_chunk_bunch());
2150 
2151   if (TraceMetadataHumongousAllocation && next != NULL &&
2152       SpaceManager::is_humongous(next->word_size())) {
2153     gclog_or_tty->print_cr("  new humongous chunk word size " SIZE_FORMAT,
2154                            next->word_size());
2155   }
2156 
2157   return next;
2158 }
2159 
2160 MetaWord* SpaceManager::allocate(size_t word_size) {
2161   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2162 
2163   // If only the dictionary is going to be used (i.e., no
2164   // indexed free list), then there is a minimum size requirement.
2165   // MinChunkSize is a placeholder for the real minimum size JJJ
2166   size_t byte_size = word_size * BytesPerWord;
2167 
2168   size_t byte_size_with_overhead = byte_size + Metablock::overhead();
2169 
2170   size_t raw_bytes_size = MAX2(byte_size_with_overhead,
2171                                Metablock::min_block_byte_size());
2172   raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
2173   size_t raw_word_size = raw_bytes_size / BytesPerWord;
2174   assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
2175 
2176   BlockFreelist* fl =  block_freelists();
2177   MetaWord* p = NULL;
2178   // Allocation from the dictionary is expensive in the sense that
2179   // the dictionary has to be searched for a size.  Don't allocate
2180   // from the dictionary until it starts to get fat.  Is this
2181   // a reasonable policy?  Maybe an skinny dictionary is fast enough
2182   // for allocations.  Do some profiling.  JJJ
2183   if (fl->total_size() > allocation_from_dictionary_limit) {
2184     p = fl->get_block(raw_word_size);
2185   }
2186   if (p == NULL) {
2187     p = allocate_work(raw_word_size);
2188   }
2189   Metadebug::deallocate_block_a_lot(this, raw_word_size);
2190 
2191   return p;
2192 }
2193 
2194 // Returns the address of spaced allocated for "word_size".
2195 // This methods does not know about blocks (Metablocks)
2196 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2197   assert_lock_strong(_lock);
2198 #ifdef ASSERT
2199   if (Metadebug::test_metadata_failure()) {
2200     return NULL;
2201   }
2202 #endif
2203   // Is there space in the current chunk?
2204   MetaWord* result = NULL;
2205 
2206   // For DumpSharedSpaces, only allocate out of the current chunk which is
2207   // never null because we gave it the size we wanted.   Caller reports out
2208   // of memory if this returns null.
2209   if (DumpSharedSpaces) {
2210     assert(current_chunk() != NULL, "should never happen");
2211     inc_allocation_total(word_size);
2212     return current_chunk()->allocate(word_size); // caller handles null result
2213   }
2214   if (current_chunk() != NULL) {
2215     result = current_chunk()->allocate(word_size);
2216   }
2217 
2218   if (result == NULL) {
2219     result = grow_and_allocate(word_size);
2220   }
2221   if (result != NULL) {
2222     inc_allocation_total(word_size);
2223     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2224            "Head of the list is being allocated");
2225   }
2226 
2227   return result;
2228 }
2229 
2230 void SpaceManager::verify() {
2231   // If there are blocks in the dictionary, then
2232   // verification of chunks does not work, since
2233   // being in the dictionary alters a chunk.
2234   if (block_freelists()->total_size() == 0) {
2235     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2236       Metachunk* curr = chunks_in_use(i);
2237       while (curr != NULL) {
2238         curr->verify();
2239         verify_chunk_size(curr);
2240         curr = curr->next();
2241       }
2242     }
2243   }
2244 }
2245 
2246 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2247   assert(is_humongous(chunk->word_size()) ||
2248          chunk->word_size() == medium_chunk_size() ||
2249          chunk->word_size() == small_chunk_size() ||
2250          chunk->word_size() == specialized_chunk_size(),
2251          "Chunk size is wrong");
2252   return;
2253 }
2254 
2255 #ifdef ASSERT
2256 void SpaceManager::verify_allocation_total() {
2257   // Verification is only guaranteed at a safepoint.
2258   if (SafepointSynchronize::is_at_safepoint()) {
2259     gclog_or_tty->print_cr("Chunk " PTR_FORMAT " allocation_total " SIZE_FORMAT
2260                            " sum_used_in_chunks_in_use " SIZE_FORMAT,
2261                            this,
2262                            allocation_total(),
2263                            sum_used_in_chunks_in_use());
2264   }
2265   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2266   assert(allocation_total() == sum_used_in_chunks_in_use(),
2267     err_msg("allocation total is not consistent %d vs %d",
2268             allocation_total(), sum_used_in_chunks_in_use()));
2269 }
2270 
2271 #endif
2272 
2273 void SpaceManager::dump(outputStream* const out) const {
2274   size_t curr_total = 0;
2275   size_t waste = 0;
2276   uint i = 0;
2277   size_t used = 0;
2278   size_t capacity = 0;
2279 
2280   // Add up statistics for all chunks in this SpaceManager.
2281   for (ChunkIndex index = ZeroIndex;
2282        index < NumberOfInUseLists;
2283        index = next_chunk_index(index)) {
2284     for (Metachunk* curr = chunks_in_use(index);
2285          curr != NULL;
2286          curr = curr->next()) {
2287       out->print("%d) ", i++);
2288       curr->print_on(out);
2289       if (TraceMetadataChunkAllocation && Verbose) {
2290         block_freelists()->print_on(out);
2291       }
2292       curr_total += curr->word_size();
2293       used += curr->used_word_size();
2294       capacity += curr->capacity_word_size();
2295       waste += curr->free_word_size() + curr->overhead();
2296     }
2297   }
2298 
2299   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2300   // Free space isn't wasted.
2301   waste -= free;
2302 
2303   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2304                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2305                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2306 }
2307 
2308 #ifndef PRODUCT
2309 void SpaceManager::mangle_freed_chunks() {
2310   for (ChunkIndex index = ZeroIndex;
2311        index < NumberOfInUseLists;
2312        index = next_chunk_index(index)) {
2313     for (Metachunk* curr = chunks_in_use(index);
2314          curr != NULL;
2315          curr = curr->next()) {
2316       curr->mangle();
2317     }
2318   }
2319 }
2320 #endif // PRODUCT
2321 
2322 // MetaspaceAux
2323 
2324 size_t MetaspaceAux::used_in_bytes(Metaspace::MetadataType mdtype) {
2325   size_t used = 0;
2326   ClassLoaderDataGraphMetaspaceIterator iter;
2327   while (iter.repeat()) {
2328     Metaspace* msp = iter.get_next();
2329     // Sum allocation_total for each metaspace
2330     if (msp != NULL) {
2331       used += msp->used_words(mdtype);
2332     }
2333   }
2334   return used * BytesPerWord;
2335 }
2336 
2337 size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
2338   size_t free = 0;
2339   ClassLoaderDataGraphMetaspaceIterator iter;
2340   while (iter.repeat()) {
2341     Metaspace* msp = iter.get_next();
2342     if (msp != NULL) {
2343       free += msp->free_words(mdtype);
2344     }
2345   }
2346   return free * BytesPerWord;
2347 }
2348 
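     // Capacity is the space in chunks already handed out to metaspaces
     // plus the capacity still sitting in the global free chunk lists.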
2349 size_t MetaspaceAux::capacity_in_bytes(Metaspace::MetadataType mdtype) {
2350   size_t capacity = free_chunks_total(mdtype);
2351   ClassLoaderDataGraphMetaspaceIterator iter;
2352   while (iter.repeat()) {
2353     Metaspace* msp = iter.get_next();
2354     if (msp != NULL) {
2355       capacity += msp->capacity_words(mdtype);
2356     }
2357   }
2358   return capacity * BytesPerWord;
2359 }
2360 
2361 size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
2362   size_t reserved = (mdtype == Metaspace::ClassType) ?
2363                        Metaspace::class_space_list()->virtual_space_total() :
2364                        Metaspace::space_list()->virtual_space_total();
2365   return reserved * BytesPerWord;
2366 }
2367 
2368 size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }
2369 
2370 size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
2371   ChunkManager* chunk_manager = (mdtype == Metaspace::ClassType) ?
2372                             Metaspace::class_space_list()->chunk_manager() :
2373                             Metaspace::space_list()->chunk_manager();
2374   chunk_manager->slow_verify();
2375   return chunk_manager->free_chunks_total();
2376 }
2377 
2378 size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
2379   return free_chunks_total(mdtype) * BytesPerWord;
2380 }
2381 
2382 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2383   gclog_or_tty->print(", [Metaspace:");
2384   if (PrintGCDetails && Verbose) {
2385     gclog_or_tty->print(" "  SIZE_FORMAT
2386                         "->" SIZE_FORMAT
2387                         "("  SIZE_FORMAT "/" SIZE_FORMAT ")",
2388                         prev_metadata_used,
2389                         used_in_bytes(),
2390                         capacity_in_bytes(),
2391                         reserved_in_bytes());
2392   } else {
2393     gclog_or_tty->print(" "  SIZE_FORMAT "K"
2394                         "->" SIZE_FORMAT "K"
2395                         "("  SIZE_FORMAT "K/" SIZE_FORMAT "K)",
2396                         prev_metadata_used / K,
2397                         used_in_bytes()/ K,
2398                         capacity_in_bytes()/K,
2399                         reserved_in_bytes()/ K);
2400   }
2401 
2402   gclog_or_tty->print("]");
2403 }
2404 
2405 // This is printed when PrintGCDetails is enabled.
2406 void MetaspaceAux::print_on(outputStream* out) {
2407   Metaspace::MetadataType ct = Metaspace::ClassType;
2408   Metaspace::MetadataType nct = Metaspace::NonClassType;
2409 
2410   out->print_cr(" Metaspace total "
2411                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2412                 " reserved " SIZE_FORMAT "K",
2413                 capacity_in_bytes()/K, used_in_bytes()/K, reserved_in_bytes()/K);
2414   out->print_cr("  data space     "
2415                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2416                 " reserved " SIZE_FORMAT "K",
2417                 capacity_in_bytes(nct)/K, used_in_bytes(nct)/K, reserved_in_bytes(nct)/K);
2418   out->print_cr("  class space    "
2419                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2420                 " reserved " SIZE_FORMAT "K",
2421                 capacity_in_bytes(ct)/K, used_in_bytes(ct)/K, reserved_in_bytes(ct)/K);
2422 }
2423 
2424 // Print information for class space and data space separately.
2425 // This is almost the same as above.
2426 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2427   size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
2428   size_t capacity_bytes = capacity_in_bytes(mdtype);
2429   size_t used_bytes = used_in_bytes(mdtype);
2430   size_t free_bytes = free_in_bytes(mdtype);
2431   size_t used_and_free = used_bytes + free_bytes +
2432                            free_chunks_capacity_bytes;
2433   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
2434              "K + unused in chunks " SIZE_FORMAT "K  + "
2435              " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2436              "K  capacity in allocated chunks " SIZE_FORMAT "K",
2437              used_bytes / K,
2438              free_bytes / K,
2439              free_chunks_capacity_bytes / K,
2440              used_and_free / K,
2441              capacity_bytes / K);
2442   assert(used_and_free == capacity_bytes, "Accounting is wrong");
2443 }
2444 
2445 // Print total fragmentation for class and data metaspaces separately
2446 void MetaspaceAux::print_waste(outputStream* out) {
2447 
2448   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0, large_waste = 0;
2449   size_t specialized_count = 0, small_count = 0, medium_count = 0, large_count = 0;
2450   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0, cls_large_waste = 0;
2451   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_large_count = 0;
2452 
2453   ClassLoaderDataGraphMetaspaceIterator iter;
2454   while (iter.repeat()) {
2455     Metaspace* msp = iter.get_next();
2456     if (msp != NULL) {
2457       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2458       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2459       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2460       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2461       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2462       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2463       large_waste += msp->vsm()->sum_waste_in_chunks_in_use(HumongousIndex);
2464       large_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2465 
2466       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2467       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2468       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2469       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2470       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2471       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2472       cls_large_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(HumongousIndex);
2473       cls_large_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2474     }
2475   }
2476   out->print_cr("Total fragmentation waste (words) doesn't count free space");
2477   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2478                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2479                         SIZE_FORMAT " medium(s) " SIZE_FORMAT,
2480              specialized_count, specialized_waste, small_count,
2481              small_waste, medium_count, medium_waste);
2482   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2483                            SIZE_FORMAT " small(s) " SIZE_FORMAT,
2484              cls_specialized_count, cls_specialized_waste,
2485              cls_small_count, cls_small_waste);
2486 }
2487 
2488 // Dump global metaspace things from the end of ClassLoaderDataGraph
2489 void MetaspaceAux::dump(outputStream* out) {
2490   out->print_cr("All Metaspace:");
2491   out->print("data space: "); print_on(out, Metaspace::NonClassType);
2492   out->print("class space: "); print_on(out, Metaspace::ClassType);
2493   print_waste(out);
2494 }
2495 
2496 void MetaspaceAux::verify_free_chunks() {
2497   Metaspace::space_list()->chunk_manager()->verify();
2498   Metaspace::class_space_list()->chunk_manager()->verify();
2499 }
2500 
2501 // Metaspace methods
2502 
2503 size_t Metaspace::_first_chunk_word_size = 0;
2504 size_t Metaspace::_first_class_chunk_word_size = 0;
2505 
2506 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2507   initialize(lock, type);
2508 }
2509 
2510 Metaspace::~Metaspace() {
2511   delete _vsm;
2512   delete _class_vsm;
2513 }
2514 
2515 VirtualSpaceList* Metaspace::_space_list = NULL;
2516 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2517 
2518 #define VIRTUALSPACEMULTIPLIER 2
2519 
2520 void Metaspace::global_initialize() {
2521   // Initialize the alignment for shared spaces.
2522   int max_alignment = os::vm_page_size();
2523   MetaspaceShared::set_max_alignment(max_alignment);
2524 
2525   if (DumpSharedSpaces) {
2526     SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
2527     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
2528     SharedMiscDataSize  = align_size_up(SharedMiscDataSize, max_alignment);
2529     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize, max_alignment);
2530 
2531     // Initialize with the sum of the shared space sizes.  The read-only
2532     // and read write metaspace chunks will be allocated out of this and the
2533     // remainder is the misc code and data chunks.
2534     size_t total = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
2535                                  SharedMiscDataSize + SharedMiscCodeSize,
2536                                  os::vm_allocation_granularity());
2537     size_t word_size = total/wordSize;
2538     _space_list = new VirtualSpaceList(word_size);
2539   } else {
2540     // If using shared space, open the file that contains the shared space
2541     // and map in the memory before initializing the rest of metaspace (so
2542     // the addresses don't conflict)
2543     if (UseSharedSpaces) {
2544       FileMapInfo* mapinfo = new FileMapInfo();
2545       memset(mapinfo, 0, sizeof(FileMapInfo));
2546 
2547       // Open the shared archive file, read and validate the header. If
2548       // initialization fails, shared spaces [UseSharedSpaces] are
2549       // disabled and the file is closed.
2550       // Map in spaces now also
2551       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
2552         FileMapInfo::set_current_info(mapinfo);
2553       } else {
2554         assert(!mapinfo->is_open() && !UseSharedSpaces,
2555                "archive file not closed or shared spaces not disabled.");
2556       }
2557     }
2558 
2559     // Initialize these before initializing the VirtualSpaceList
2560     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
2561     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
2562     // Make the first class chunk bigger than a medium chunk so it's not put
2563     // on the medium chunk list.  The next chunk will be small and progress
2564     // from there.  This size was determined by running with -version.
2565     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
2566                                        (ClassMetaspaceSize/BytesPerWord)*2);
2567     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
2568     // Arbitrarily set the initial virtual space to a multiple
2569     // of the boot class loader size.
2570     size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
2571     // Initialize the list of virtual spaces.
2572     _space_list = new VirtualSpaceList(word_size);
2573   }
2574 }
2575 
2576 // For UseCompressedKlassPointers the class space is reserved as a piece of the
2577 // Java heap because the compression algorithm is the same for each.  The
2578 // argument passed in is at the top of the compressed space
2579 void Metaspace::initialize_class_space(ReservedSpace rs) {
2580   // The reserved space size may be bigger because of alignment, esp with UseLargePages
2581   assert(rs.size() >= ClassMetaspaceSize, err_msg("%d != %d", rs.size(), ClassMetaspaceSize));
2582   _class_space_list = new VirtualSpaceList(rs);
2583 }
2584 
2585 void Metaspace::initialize(Mutex* lock,
2586                            MetaspaceType type) {
2587 
2588   assert(space_list() != NULL,
2589     "Metadata VirtualSpaceList has not been initialized");
2590 
2591   _vsm = new SpaceManager(lock, space_list());
2592   if (_vsm == NULL) {
2593     return;
2594   }
2595   size_t word_size;
2596   size_t class_word_size;
2597   vsm()->get_initial_chunk_sizes(type,
2598                                  &word_size,
2599                                  &class_word_size);
2600 
2601   assert(class_space_list() != NULL,
2602     "Class VirtualSpaceList has not been initialized");
2603 
2604   // Allocate SpaceManager for classes.
2605   _class_vsm = new SpaceManager(lock, class_space_list());
2606   if (_class_vsm == NULL) {
2607     return;
2608   }
2609 
2610   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2611 
2612   // Allocate chunk for metadata objects
2613   Metachunk* new_chunk =
2614      space_list()->get_initialization_chunk(word_size,
2615                                             vsm()->medium_chunk_bunch());
2616   assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
2617   if (new_chunk != NULL) {
2618     // Add to this manager's list of chunks in use and current_chunk().
2619     vsm()->add_chunk(new_chunk, true);
2620   }
2621 
2622   // Allocate chunk for class metadata objects
2623   Metachunk* class_chunk =
2624      class_space_list()->get_initialization_chunk(class_word_size,
2625                                                   class_vsm()->medium_chunk_bunch());
2626   if (class_chunk != NULL) {
2627     class_vsm()->add_chunk(class_chunk, true);
2628   }
2629 }
2630 
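     // Rounds a word count up so that the corresponding byte size is
     // aligned for ReservedSpace allocation, then converts back to words.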
2631 size_t Metaspace::align_word_size_up(size_t word_size) {
2632   size_t byte_size = word_size * wordSize;
2633   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
2634 }
2635 
2636 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
2637   // DumpSharedSpaces doesn't use class metadata area (yet)
2638   if (mdtype == ClassType && !DumpSharedSpaces) {
2639     return  class_vsm()->allocate(word_size);
2640   } else {
2641     return  vsm()->allocate(word_size);
2642   }
2643 }
2644 
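     // Slow-path allocation: raise the GC trigger (capacity_until_GC) by
     // enough for this request so the virtual space list is allowed to
     // expand, then retry the allocation.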
2645 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
2646   MetaWord* result;
2647   MetaspaceGC::set_expand_after_GC(true);
2648   size_t before_inc = MetaspaceGC::capacity_until_GC();
2649   size_t delta_words = MetaspaceGC::delta_capacity_until_GC(word_size);
2650   MetaspaceGC::inc_capacity_until_GC(delta_words);
2651   if (PrintGCDetails && Verbose) {
2652     gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
2653       " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
2654   }
2655 
2656   result = allocate(word_size, mdtype);
2657 
2658   return result;
2659 }
2660 
2661 // Space allocated in the Metaspace.  This may
2662 // be across several metadata virtual spaces.
2663 char* Metaspace::bottom() const {
2664   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
2665   return (char*)vsm()->current_chunk()->bottom();
2666 }
2667 
2668 size_t Metaspace::used_words(MetadataType mdtype) const {
2670   return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() :
2671                                vsm()->sum_used_in_chunks_in_use();  // includes overhead!
2672 }
2673 
2674 size_t Metaspace::free_words(MetadataType mdtype) const {
2675   return mdtype == ClassType ? class_vsm()->sum_free_in_chunks_in_use() :
2676                                vsm()->sum_free_in_chunks_in_use();
2677 }
2678 
2679 // Space capacity in the Metaspace.  It includes
2680 // space in the list of chunks from which allocations
2681 // have been made.  It does not include space in the global
2682 // freelist or the space available in the dictionary, which
2683 // is already counted in some chunk.
2684 size_t Metaspace::capacity_words(MetadataType mdtype) const {
2685   return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() :
2686                                vsm()->sum_capacity_in_chunks_in_use();
2687 }
2688 
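     // Blocks smaller than the tree dictionary's minimum chunk are "dark
     // matter": too small to track on a freelist, so in debug builds they
     // are filled with a recognizable pattern and otherwise dropped.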
2689 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
2690   if (SafepointSynchronize::is_at_safepoint()) {
2691     assert(Thread::current()->is_VM_thread(), "should be the VM thread");
2692     // Don't take Heap_lock
2693     MutexLocker ml(vsm()->lock());
2694     if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
2695       // Dark matter.  Too small for dictionary.
2696 #ifdef ASSERT
2697       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
2698 #endif
2699       return;
2700     }
2701     if (is_class) {
2702        class_vsm()->deallocate(ptr, word_size);
2703     } else {
2704       vsm()->deallocate(ptr, word_size);
2705     }
2706   } else {
2707     MutexLocker ml(vsm()->lock());
2708 
2709     if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
2710       // Dark matter.  Too small for dictionary.
2711 #ifdef ASSERT
2712       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
2713 #endif
2714       return;
2715     }
2716     if (is_class) {
2717       class_vsm()->deallocate(ptr, word_size);
2718     } else {
2719       vsm()->deallocate(ptr, word_size);
2720     }
2721   }
2722 }
2723 
2724 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
2725                               bool read_only, MetadataType mdtype, TRAPS) {
2726   if (HAS_PENDING_EXCEPTION) {
2727     assert(false, "Should not allocate with exception pending");
2728     return NULL;  // caller does a CHECK_NULL too
2729   }
2730 
2731   // SSS: Should we align the allocations and make sure the sizes are aligned?
2732   MetaWord* result = NULL;
2733 
2734   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
2735         "ClassLoaderData::the_null_class_loader_data() should have been used.");
2736   // Allocate in metaspaces without taking out a lock, because it deadlocks
2737   // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
2738   // to revisit this for application class data sharing.
2739   if (DumpSharedSpaces) {
2740     if (read_only) {
2741       result = loader_data->ro_metaspace()->allocate(word_size, NonClassType);
2742     } else {
2743       result = loader_data->rw_metaspace()->allocate(word_size, NonClassType);
2744     }
2745     if (result == NULL) {
2746       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
2747     }
2748     return Metablock::initialize(result, word_size);
2749   }
2750 
2751   result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
2752 
2753   if (result == NULL) {
2754     // Try to clean out some memory and retry.
2755     result =
2756       Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
2757         loader_data, word_size, mdtype);
2758 
2759     // If result is still null, we are out of memory.
2760     if (result == NULL) {
2761       if (Verbose && TraceMetadataChunkAllocation) {
2762         gclog_or_tty->print_cr("Metaspace allocation failed for size "
2763           SIZE_FORMAT, word_size);
2764         if (loader_data->metaspace_or_null() != NULL) {
               loader_data->metaspace_or_null()->dump(gclog_or_tty);
             }
2765         MetaspaceAux::dump(gclog_or_tty);
2766       }
2767       // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
2768       report_java_out_of_memory("Metadata space");
2769 
2770       if (JvmtiExport::should_post_resource_exhausted()) {
2771         JvmtiExport::post_resource_exhausted(
2772             JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
2773             "Metadata space");
2774       }
2775       THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
2776     }
2777   }
2778   return Metablock::initialize(result, word_size);
2779 }
2780 
2781 void Metaspace::print_on(outputStream* out) const {
2782   // Print both class virtual space counts and metaspace.
2783   if (Verbose) {
2784       vsm()->print_on(out);
2785       class_vsm()->print_on(out);
2786   }
2787 }
2788 
2789 bool Metaspace::contains(const void * ptr) {
2790   if (MetaspaceShared::is_in_shared_space(ptr)) {
2791     return true;
2792   }
2793   // This is checked while unlocked.  As long as the virtualspaces are added
2794   // at the end, the pointer will be in one of them.  The virtual spaces
2795   // aren't deleted presently.  When they are, some sort of locking might
2796   // be needed.  Note, locking this can cause inversion problems with the
2797   // caller in MetaspaceObj::is_metadata() function.
2798   return space_list()->contains(ptr) || class_space_list()->contains(ptr);
2799 }
2800 
2801 void Metaspace::verify() {
2802   vsm()->verify();
2803   class_vsm()->verify();
2804 }
2805 
2806 void Metaspace::dump(outputStream* const out) const {
2807   if (UseMallocOnly) {
2808     // Just print usage for now
2809     out->print_cr("usage %d", used_words(Metaspace::NonClassType));
2810   }
2811   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
2812   vsm()->dump(out);
2813   out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
2814   class_vsm()->dump(out);
2815 }