src/share/vm/memory/metaspace.cpp
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "gc_interface/collectedHeap.hpp"
  26 #include "memory/allocation.hpp"
  27 #include "memory/binaryTreeDictionary.hpp"
  28 #include "memory/freeList.hpp"
  29 #include "memory/collectorPolicy.hpp"
  30 #include "memory/filemap.hpp"
  31 #include "memory/freeList.hpp"

  32 #include "memory/metablock.hpp"
  33 #include "memory/metachunk.hpp"
  34 #include "memory/metaspace.hpp"
  35 #include "memory/metaspaceShared.hpp"
  36 #include "memory/resourceArea.hpp"
  37 #include "memory/universe.hpp"

  38 #include "runtime/globals.hpp"

  39 #include "runtime/java.hpp"
  40 #include "runtime/mutex.hpp"
  41 #include "runtime/orderAccess.hpp"
  42 #include "services/memTracker.hpp"
  43 #include "utilities/copy.hpp"
  44 #include "utilities/debug.hpp"
  45 
  46 typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
  47 typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
  48 // Set this constant to true to enable slow integrity checking of
  49 // the free chunk lists.
  50 const bool metaspace_slow_verify = false;
  51 
  52 // Parameters for stress mode testing
  53 const uint metadata_deallocate_a_lot_block = 10;
  54 const uint metadata_deallocate_a_lock_chunk = 3;
  55 size_t const allocation_from_dictionary_limit = 4 * K;
  56 
  57 MetaWord* last_allocated = 0;
  58 


  67   HumongousIndex = MediumIndex + 1,
  68   NumberOfFreeLists = 3,
  69   NumberOfInUseLists = 4
  70 };
  71 
  72 enum ChunkSizes {    // in words.
  73   ClassSpecializedChunk = 128,
  74   SpecializedChunk = 128,
  75   ClassSmallChunk = 256,
  76   SmallChunk = 512,
  77   ClassMediumChunk = 4 * K,
  78   MediumChunk = 8 * K,
  79   HumongousChunkGranularity = 8
  80 };
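// For orientation (a sketch of the byte equivalents, assuming a 64-bit VM
// where BytesPerWord == 8): SpecializedChunk = 128 words = 1K bytes,
// SmallChunk = 512 words = 4K bytes, MediumChunk = 8K words = 64K bytes,
// and ClassMediumChunk = 4K words = 32K bytes.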
  81 
  82 static ChunkIndex next_chunk_index(ChunkIndex i) {
  83   assert(i < NumberOfInUseLists, "Out of bounds");
  84   return (ChunkIndex) (i+1);
  85 }
  86 
  87 // Originally _capacity_until_GC was set to MetaspaceSize here, but
  88 // the default MetaspaceSize before argument processing was being
  89 // used, which was not the desired value.  See the code
  90 // in should_expand() for how the initialization is handled
  91 // now.
  92 size_t MetaspaceGC::_capacity_until_GC = 0;
  93 bool MetaspaceGC::_expand_after_GC = false;
  94 uint MetaspaceGC::_shrink_factor = 0;
  95 bool MetaspaceGC::_should_concurrent_collect = false;
  96 
  97 // Blocks of space for metadata are allocated out of Metachunks.
  98 //
  99 // Metachunks are allocated out of MetadataVirtualSpaces and, once
 100 // allocated, there is no explicit link between a Metachunk and
 101 // the MetadataVirtualSpace from which it was allocated.
 102 //
 103 // Each SpaceManager maintains a
 104 // list of the chunks it is using and the current chunk.  The current
 105 // chunk is the chunk from which allocations are done.  Space freed in
 106 // a chunk is placed on the free list of blocks (BlockFreelist) and
 107 // reused from there.
 108 
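// Illustrative allocation path implied by the comment above (a sketch, not
// code introduced by this change):
//   SpaceManager::allocate(word_size)
//     -> reuse a freed block from the BlockFreelist (once it holds more than
//        allocation_from_dictionary_limit)
//     -> otherwise bump-allocate from current_chunk()
//     -> otherwise grow_and_allocate(): get a new chunk and allocate from it
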
 109 typedef class FreeList<Metachunk> ChunkList;
 110 
 111 // Manages the global free lists of chunks.
 112 // Has three lists of free chunks, and a total size and
 113 // count that includes all three


 276 
 277   // Convenience functions to access the _virtual_space
 278   char* low()  const { return virtual_space()->low(); }
 279   char* high() const { return virtual_space()->high(); }
 280 
 281   // The first Metachunk will be allocated at the bottom of the
 282   // VirtualSpace
 283   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 284 
 285  public:
 286 
 287   VirtualSpaceNode(size_t byte_size);
 288   VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
 289   ~VirtualSpaceNode();
 290 
 291   // Convenience functions for logical bottom and end
 292   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 293   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 294 
 295   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 296   size_t expanded_words() const  { return _virtual_space.committed_size() / BytesPerWord; }
 297   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 298 


 299   // address of next available space in _virtual_space;
 300   // Accessors
 301   VirtualSpaceNode* next() { return _next; }
 302   void set_next(VirtualSpaceNode* v) { _next = v; }
 303 
 304   void set_reserved(MemRegion const v) { _reserved = v; }
 305   void set_top(MetaWord* v) { _top = v; }
 306 
 307   // Accessors
 308   MemRegion* reserved() { return &_reserved; }
 309   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 310 
 311   // Returns true if "word_size" is available in the VirtualSpace
 312   bool is_available(size_t word_size) { return _top + word_size <= end(); }
 313 
 314   MetaWord* top() const { return _top; }
 315   void inc_top(size_t word_size) { _top += word_size; }
 316 
 317   uintx container_count() { return _container_count; }
 318   void inc_container_count();


 320 #ifdef ASSERT
 321   uint container_count_slow();
 322   void verify_container_count();
 323 #endif
 324 
 325   // used and capacity in this single entry in the list
 326   size_t used_words_in_vs() const;
 327   size_t capacity_words_in_vs() const;
 328   size_t free_words_in_vs() const;
 329 
 330   bool initialize();
 331 
 332   // get space from the virtual space
 333   Metachunk* take_from_committed(size_t chunk_word_size);
 334 
 335   // Allocate a chunk from the virtual space and return it.
 336   Metachunk* get_chunk_vs(size_t chunk_word_size);
 337 
 338   // Expands/shrinks the committed space in a virtual space.  Delegates
 339   // to VirtualSpace.
 340   bool expand_by(size_t words, bool pre_touch = false);
 341 
 342   // In preparation for deleting this node, remove all the chunks
 343   // in the node from any freelist.
 344   void purge(ChunkManager* chunk_manager);
 345 
 346 #ifdef ASSERT
 347   // Debug support
 348   void mangle();
 349 #endif
 350 
 351   void print_on(outputStream* st) const;
 352 };
 353 
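// Minimal usage sketch of the VirtualSpaceNode API declared above (illustrative
// only; VirtualSpaceList below is the real caller, and it holds
// SpaceManager::expand_lock() around these calls):
//
//   VirtualSpaceNode* node = new VirtualSpaceNode(256 * K * BytesPerWord);  // reserves with mmap
//   if (node->initialize()) {                  // set up the VirtualSpace over the reservation
//     node->expand_by(64 * K);                 // commit 64K more words
//     Metachunk* chunk = node->get_chunk_vs(SmallChunk);  // carve a chunk off the top
//   }
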
 354 // byte_size is the size of the associated VirtualSpace.
 355 VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
 356   // align up to vm allocation granularity
 357   byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
 358 
 359   // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
 360   // at a configurable address, generally at the top of the Java heap, so other
 361   // memory addresses don't conflict.
 362   if (DumpSharedSpaces) {
 363     char* shared_base = (char*)SharedBaseAddress;
 364     _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);


 365     if (_rs.is_reserved()) {
 366       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
 367     } else {
 368       // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
 369       _rs = ReservedSpace(byte_size);
 370     }
 371     MetaspaceShared::set_shared_rs(&_rs);
 372   } else {
 373     _rs = ReservedSpace(byte_size);



 374   }
 375 






 376   MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);

 377 }
 378 
 379 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 380   Metachunk* chunk = first_chunk();
 381   Metachunk* invalid_chunk = (Metachunk*) top();
 382   while (chunk < invalid_chunk ) {
 383     assert(chunk->is_free(), "Should be marked free");
 384     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 385     chunk_manager->remove_chunk(chunk);
 386     assert(chunk->next() == NULL &&
 387            chunk->prev() == NULL,
 388            "Was not removed from its list");
 389     chunk = (Metachunk*) next;
 390   }
 391 }
 392 
 393 #ifdef ASSERT
 394 uint VirtualSpaceNode::container_count_slow() {
 395   uint count = 0;
 396   Metachunk* chunk = first_chunk();
 397   Metachunk* invalid_chunk = (Metachunk*) top();
 398   while (chunk < invalid_chunk ) {
 399     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 400     // Don't count the chunks on the free lists.  Those are
 401     // still part of the VirtualSpaceNode but not currently
 402     // counted.
 403     if (!chunk->is_free()) {
 404       count++;
 405     }
 406     chunk = (Metachunk*) next;
 407   }
 408   return count;
 409 }
 410 #endif
 411 
 412 // List of VirtualSpaces for metadata allocation.
 413 // It has a _next link for a singly linked list and a MemRegion
 414 // for total space in the VirtualSpace.
 415 class VirtualSpaceList : public CHeapObj<mtClass> {
 416   friend class VirtualSpaceNode;
 417 
 418   enum VirtualSpaceSizes {
 419     VirtualSpaceSize = 256 * K
 420   };
 421 
 422   // Global list of virtual spaces
 423   // Head of the list
 424   VirtualSpaceNode* _virtual_space_list;
 425   // virtual space currently being used for allocations
 426   VirtualSpaceNode* _current_virtual_space;
 427 
 428   // Can this virtual space list allocate more than one space?  Also used to
 429   // determine whether to allocate unlimited small chunks in this virtual space.
 430   bool _is_class;
 431   bool can_grow() const { return !is_class() || !UseCompressedClassPointers; }
 432 
 433   // Sum of reserved and committed memory in the virtual spaces
 434   size_t _reserved_words;
 435   size_t _committed_words;
 436 
 437   // Number of virtual spaces
 438   size_t _virtual_space_count;
 439 
 440   ~VirtualSpaceList();
 441 
 442   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
 443 
 444   void set_virtual_space_list(VirtualSpaceNode* v) {
 445     _virtual_space_list = v;
 446   }
 447   void set_current_virtual_space(VirtualSpaceNode* v) {
 448     _current_virtual_space = v;
 449   }
 450 
 451   void link_vs(VirtualSpaceNode* new_entry);
 452 
 453   // Get another virtual space and add it to the list.  This
 454   // is typically prompted by a failed attempt to allocate a chunk
 455   // and is typically followed by the allocation of a chunk.
 456   bool grow_vs(size_t vs_word_size);
 457 
 458  public:
 459   VirtualSpaceList(size_t word_size);
 460   VirtualSpaceList(ReservedSpace rs);
 461 
 462   size_t free_bytes();
 463 
 464   Metachunk* get_new_chunk(size_t word_size,
 465                            size_t grow_chunks_by_words,
 466                            size_t medium_chunk_bunch);
 467 
 468   bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false);





 469 
 470   // Get the first chunk for a Metaspace.  Used for
 471   // special cases such as the boot class loader, reflection
 472   // class loader and anonymous class loader.
 473   Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch);
 474 
 475   VirtualSpaceNode* current_virtual_space() {
 476     return _current_virtual_space;
 477   }
 478 
 479   bool is_class() const { return _is_class; }
 480 
 481   // Allocate the first virtualspace.
 482   void initialize(size_t word_size);
 483 
 484   size_t reserved_words()  { return _reserved_words; }
 485   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
 486   size_t committed_words() { return _committed_words; }
 487   size_t committed_bytes() { return committed_words() * BytesPerWord; }
 488 
 489   void inc_reserved_words(size_t v);
 490   void dec_reserved_words(size_t v);
 491   void inc_committed_words(size_t v);
 492   void dec_committed_words(size_t v);
 493   void inc_virtual_space_count();
 494   void dec_virtual_space_count();
 495 
 496   // Unlink empty VirtualSpaceNodes and free them.
 497   void purge(ChunkManager* chunk_manager);
 498 
 499   bool contains(const void *ptr);
 500 
 501   void print_on(outputStream* st) const;
 502 


 852   return pointer_delta(top(), bottom(), sizeof(MetaWord));
 853 }
 854 
 855 // Space committed in the VirtualSpace
 856 size_t VirtualSpaceNode::capacity_words_in_vs() const {
 857   return pointer_delta(end(), bottom(), sizeof(MetaWord));
 858 }
 859 
 860 size_t VirtualSpaceNode::free_words_in_vs() const {
 861   return pointer_delta(end(), top(), sizeof(MetaWord));
 862 }
 863 
 864 // Allocates the chunk from the virtual space only.
 865 // This interface is also used internally for debugging.  Not all
 866 // chunks removed here are necessarily used for allocation.
 867 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
 868   // Bottom of the new chunk
 869   MetaWord* chunk_limit = top();
 870   assert(chunk_limit != NULL, "Not safe to call this method");
 871 






 872   if (!is_available(chunk_word_size)) {
 873     if (TraceMetadataChunkAllocation) {
 874       gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
 875       // Dump some information about the virtual space that is nearly full
 876       print_on(gclog_or_tty);
 877     }
 878     return NULL;
 879   }
 880 
 881   // Take the space  (bump top on the current virtual space).
 882   inc_top(chunk_word_size);
 883 
 884   // Initialize the chunk
 885   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
 886   return result;
 887 }
 888 
 889 
 890 // Expand the virtual space (commit more of the reserved space)
 891 bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
 892   size_t bytes = words * BytesPerWord;
 893   bool result =  virtual_space()->expand_by(bytes, pre_touch);
 894   if (TraceMetavirtualspaceAllocation && !result) {
 895     gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
 896                            "for byte size " SIZE_FORMAT, bytes);
 897     virtual_space()->print_on(gclog_or_tty);

 898   }






 899   return result;
 900 }
 901 
 902 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
 903   assert_lock_strong(SpaceManager::expand_lock());
 904   Metachunk* result = take_from_committed(chunk_word_size);
 905   if (result != NULL) {
 906     inc_container_count();
 907   }
 908   return result;
 909 }
 910 
 911 bool VirtualSpaceNode::initialize() {
 912 
 913   if (!_rs.is_reserved()) {
 914     return false;
 915   }
 916 
 917   // An allocation out of this VirtualSpace that is larger
 918   // than the initial commit size can waste that initial committed
 919   // space.
 920   size_t committed_byte_size = 0;
 921   bool result = virtual_space()->initialize(_rs, committed_byte_size);








 922   if (result) {



 923     set_top((MetaWord*)virtual_space()->low());
 924     set_reserved(MemRegion((HeapWord*)_rs.base(),
 925                  (HeapWord*)(_rs.base() + _rs.size())));
 926 
 927     assert(reserved()->start() == (HeapWord*) _rs.base(),
 928       err_msg("Reserved start was not set properly " PTR_FORMAT
 929         " != " PTR_FORMAT, reserved()->start(), _rs.base()));
 930     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
 931       err_msg("Reserved size was not set properly " SIZE_FORMAT
 932         " != " SIZE_FORMAT, reserved()->word_size(),
 933         _rs.size() / BytesPerWord));
 934   }
 935 
 936   return result;
 937 }
 938 
 939 void VirtualSpaceNode::print_on(outputStream* st) const {
 940   size_t used = used_words_in_vs();
 941   size_t capacity = capacity_words_in_vs();
 942   VirtualSpace* vs = virtual_space();


 959 // VirtualSpaceList methods
 960 // Space allocated from the VirtualSpace
 961 
 962 VirtualSpaceList::~VirtualSpaceList() {
 963   VirtualSpaceListIterator iter(virtual_space_list());
 964   while (iter.repeat()) {
 965     VirtualSpaceNode* vsl = iter.get_next();
 966     delete vsl;
 967   }
 968 }
 969 
 970 void VirtualSpaceList::inc_reserved_words(size_t v) {
 971   assert_lock_strong(SpaceManager::expand_lock());
 972   _reserved_words = _reserved_words + v;
 973 }
 974 void VirtualSpaceList::dec_reserved_words(size_t v) {
 975   assert_lock_strong(SpaceManager::expand_lock());
 976   _reserved_words = _reserved_words - v;
 977 }
 978 






 979 void VirtualSpaceList::inc_committed_words(size_t v) {
 980   assert_lock_strong(SpaceManager::expand_lock());
 981   _committed_words = _committed_words + v;


 982 }
 983 void VirtualSpaceList::dec_committed_words(size_t v) {
 984   assert_lock_strong(SpaceManager::expand_lock());
 985   _committed_words = _committed_words - v;


 986 }
 987 
 988 void VirtualSpaceList::inc_virtual_space_count() {
 989   assert_lock_strong(SpaceManager::expand_lock());
 990   _virtual_space_count++;
 991 }
 992 void VirtualSpaceList::dec_virtual_space_count() {
 993   assert_lock_strong(SpaceManager::expand_lock());
 994   _virtual_space_count--;
 995 }
 996 
 997 void ChunkManager::remove_chunk(Metachunk* chunk) {
 998   size_t word_size = chunk->word_size();
 999   ChunkIndex index = list_index(word_size);
1000   if (index != HumongousIndex) {
1001     free_chunks(index)->remove_chunk(chunk);
1002   } else {
1003     humongous_dictionary()->remove_chunk(chunk);
1004   }
1005 


1008 }
1009 
1010 // Walk the list of VirtualSpaceNodes and delete
1011 // nodes with a 0 container_count.  Remove Metachunks in
1012 // the node from their respective freelists.
1013 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1014   assert_lock_strong(SpaceManager::expand_lock());
1015   // Don't use a VirtualSpaceListIterator because this
1016   // list is being changed and a straightforward use of an iterator is not safe.
1017   VirtualSpaceNode* purged_vsl = NULL;
1018   VirtualSpaceNode* prev_vsl = virtual_space_list();
1019   VirtualSpaceNode* next_vsl = prev_vsl;
1020   while (next_vsl != NULL) {
1021     VirtualSpaceNode* vsl = next_vsl;
1022     next_vsl = vsl->next();
1023     // Don't free the current virtual space since it will likely
1024     // be needed soon.
1025     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1026       // Unlink it from the list
1027       if (prev_vsl == vsl) {
1028         // This is the case of the current node being the first node.
1029         assert(vsl == virtual_space_list(), "Expected to be the first node");
1030         set_virtual_space_list(vsl->next());
1031       } else {
1032         prev_vsl->set_next(vsl->next());
1033       }
1034 
1035       vsl->purge(chunk_manager);
1036       dec_reserved_words(vsl->reserved_words());
1037       dec_committed_words(vsl->committed_words());
1038       dec_virtual_space_count();
1039       purged_vsl = vsl;
1040       delete vsl;
1041     } else {
1042       prev_vsl = vsl;
1043     }
1044   }
1045 #ifdef ASSERT
1046   if (purged_vsl != NULL) {
1047     // List should be stable enough to use an iterator here.
1048     VirtualSpaceListIterator iter(virtual_space_list());
1049     while (iter.repeat()) {
1050       VirtualSpaceNode* vsl = iter.get_next();
1051       assert(vsl != purged_vsl, "Purge of vsl failed");
1052     }
1053   }
1054 #endif
1055 }
1056 
1057 VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
1058                                    _is_class(false),
1059                                    _virtual_space_list(NULL),
1060                                    _current_virtual_space(NULL),
1061                                    _reserved_words(0),
1062                                    _committed_words(0),
1063                                    _virtual_space_count(0) {
1064   MutexLockerEx cl(SpaceManager::expand_lock(),
1065                    Mutex::_no_safepoint_check_flag);
1066   bool initialization_succeeded = grow_vs(word_size);
1067   assert(initialization_succeeded,
1068     " VirtualSpaceList initialization should not fail");
1069 }
1070 
1071 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
1072                                    _is_class(true),
1073                                    _virtual_space_list(NULL),
1074                                    _current_virtual_space(NULL),
1075                                    _reserved_words(0),
1076                                    _committed_words(0),
1077                                    _virtual_space_count(0) {
1078   MutexLockerEx cl(SpaceManager::expand_lock(),
1079                    Mutex::_no_safepoint_check_flag);
1080   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
1081   bool succeeded = class_entry->initialize();
1082   assert(succeeded, " VirtualSpaceList initialization should not fail");
1083   link_vs(class_entry);

1084 }
1085 
1086 size_t VirtualSpaceList::free_bytes() {
1087   return virtual_space_list()->free_words_in_vs() * BytesPerWord;
1088 }
1089 
1090 // Allocate another meta virtual space and add it to the list.
1091 bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
1092   assert_lock_strong(SpaceManager::expand_lock());








1093   if (vs_word_size == 0) {

1094     return false;
1095   }

1096   // Reserve the space
1097   size_t vs_byte_size = vs_word_size * BytesPerWord;
1098   assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned");
1099 
1100   // Allocate the meta virtual space and initialize it.
1101   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
1102   if (!new_entry->initialize()) {
1103     delete new_entry;
1104     return false;
1105   } else {
1106     assert(new_entry->reserved_words() == vs_word_size, "Must be");

1107     // ensure lock-free iteration sees fully initialized node
1108     OrderAccess::storestore();
1109     link_vs(new_entry);
1110     return true;
1111   }
1112 }
1113 
1114 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1115   if (virtual_space_list() == NULL) {
1116     set_virtual_space_list(new_entry);
1117   } else {
1118     current_virtual_space()->set_next(new_entry);
1119   }
1120   set_current_virtual_space(new_entry);
1121   inc_reserved_words(new_entry->reserved_words());
1122   inc_committed_words(new_entry->committed_words());
1123   inc_virtual_space_count();
1124 #ifdef ASSERT
1125   new_entry->mangle();
1126 #endif
1127   if (TraceMetavirtualspaceAllocation && Verbose) {
1128     VirtualSpaceNode* vsl = current_virtual_space();
1129     vsl->print_on(gclog_or_tty);
1130   }
1131 }
1132 
1133 bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) {


1134   size_t before = node->committed_words();
1135 
1136   bool result = node->expand_by(word_size, pre_touch);
1137 
1138   size_t after = node->committed_words();
1139 
1140   // after and before can be the same if the memory was pre-committed.
1141   assert(after >= before, "Must be");
1142   inc_committed_words(after - before);
1143 
1144   return result;
1145 }
1146 

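// Allocate a new chunk for a SpaceManager.  In outline (summarizing the code
// below): carve the chunk out of the current virtual space; if that space is
// full, commit more of it; if the commit fails and growing is allowed, reserve
// a whole new virtual space node and take the chunk from there; otherwise
// return NULL and let the caller fall back to a GC.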
1147 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
1148                                            size_t grow_chunks_by_words,
1149                                            size_t medium_chunk_bunch) {
1150 
1151   // Allocate a chunk out of the current virtual space.
1152   Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1153 
1154   if (next == NULL) {
1155     // Not enough room in current virtual space.  Try to commit
1156     // more space.
1157     size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
1158                                      grow_chunks_by_words);
1159     size_t page_size_words = os::vm_page_size() / BytesPerWord;
1160     size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
1161                                                         page_size_words);
1162     bool vs_expanded =
1163       expand_by(current_virtual_space(), aligned_expand_vs_by_words);
1164     if (!vs_expanded) {
1165       // Should the capacity of the metaspaces be expanded for
1166       // this allocation?  If it's the virtual space for classes and is
1167       // being used for compressed class pointers, don't allocate a new virtual space.
1168       if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
1169         // Get another virtual space.
1170         size_t allocation_aligned_expand_words =
1171             align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord);
1172         size_t grow_vs_words =
1173             MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words);
1174         if (grow_vs(grow_vs_words)) {
1175           // Got it.  It's on the list now.  Get a chunk from it.
1176           assert(current_virtual_space()->expanded_words() == 0,
1177               "New virtual space nodes should not have expanded");
1178 
1179           size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
1180                                                               page_size_words);
1181           // We probably want to expand by aligned_expand_vs_by_words here.
1182           expand_by(current_virtual_space(), grow_chunks_by_words_aligned);
1183           next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1184         }
1185       } else {
1186         // Allocation will fail and induce a GC
1187         if (TraceMetadataChunkAllocation && Verbose) {
1188           gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
1189             " Fail instead of expanding the metaspace");
1190         }









1191       }
1192     } else {
1193       // The virtual space expanded, get a new chunk

1194       next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1195       assert(next != NULL, "Just expanded, should succeed");
1196     }
1197   }
1198 
1199   assert(next == NULL || (next->next() == NULL && next->prev() == NULL),
1200          "New chunk is still on some list");
1201   return next;
1202 }
1203 
1204 Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
1205                                                       size_t chunk_bunch) {
1206   // Get a chunk from the chunk freelist
1207   Metachunk* new_chunk = get_new_chunk(chunk_word_size,
1208                                        chunk_word_size,
1209                                        chunk_bunch);
1210   return new_chunk;
1211 }
1212 
1213 void VirtualSpaceList::print_on(outputStream* st) const {
1214   if (TraceMetadataChunkAllocation && Verbose) {
1215     VirtualSpaceListIterator iter(virtual_space_list());
1216     while (iter.repeat()) {
1217       VirtualSpaceNode* node = iter.get_next();
1218       node->print_on(st);
1219     }
1220   }


1239 // Within the VM operation after the GC the attempt to allocate the metadata
1240 // should succeed.  If the GC did not free enough space for the metaspace
1241 // allocation, the HWM is increased so that another virtualspace will be
1242 // allocated for the metadata.  With perm gen the increase in the perm
1243 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
1244 // metaspace policy uses those as the small and large steps for the HWM.
1245 //
1246 // After the GC the compute_new_size() for MetaspaceGC is called to
1247 // resize the capacity of the metaspaces.  The current implementation
1248 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
1250 // to resize the Java heap by some GCs.  New flags can be implemented
1250 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
1251 // free space is desirable in the metaspace capacity to decide how much
1252 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
1253 // free space is desirable in the metaspace capacity before decreasing
1254 // the HWM.
1255 
1256 // Calculate the amount to increase the high water mark (HWM).
1257 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1258 // another expansion is not requested too soon.  If that is not
1259 // enough to satisfy the allocation (i.e. big enough for a word_size
1260 // allocation), increase by MaxMetaspaceExpansion.  If that is still
1261 // not enough, expand by the size of the allocation (word_size) plus
1262 // some.
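// A sketch of the resulting behavior (derived from the arithmetic below):
//   - word_size rounds up to at most min_delta_words  -> grow by min_delta_words
//   - rounds up to between the min and max deltas     -> grow by max_delta_words
//   - rounds up to more than max_delta_words          -> grow by the rounded
//     word_size plus min_delta_words, so the very next allocation does not
//     immediately hit the new HWM again.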
1263 size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
1264   size_t before_inc = MetaspaceGC::capacity_until_GC();
1265   size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
1266   size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
1267   size_t page_size_words = os::vm_page_size() / BytesPerWord;
1268   size_t size_delta_words = align_size_up(word_size, page_size_words);
1269   size_t delta_words = MAX2(size_delta_words, min_delta_words);
1270   if (delta_words > min_delta_words) {
1271     // Don't want to hit the high water mark on the next
1272     // allocation so make the delta greater than just enough
1273     // for this allocation.
1274     delta_words = MAX2(delta_words, max_delta_words);
1275     if (delta_words > max_delta_words) {
1276       // This allocation is large but the next ones are probably not
1277       // so increase by the minimum.
1278       delta_words = delta_words + min_delta_words;
1279     }
1280   }
1281   return delta_words;









1282 }
1283 
1284 bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {




1285 
1286   // If the user wants a limit, impose one.
1287   // The reason for someone using this flag is to limit reserved space.  So
1288   // for non-class virtual space, compare against virtual spaces that are reserved.
1289   // For class virtual space, we only compare against the committed space, not
1290   // reserved space, because this is a larger space prereserved for compressed
1291   // class pointers.
1292   if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
1293     size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
1294     size_t class_allocated    = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
1295     size_t real_allocated     = nonclass_allocated + class_allocated;
1296     if (real_allocated >= MaxMetaspaceSize) {
1297       return false;
1298     }
1299   }
1300 
1301   // Class virtual space should always be expanded.  Call GC for the other
1302   // metadata virtual space.
1303   if (Metaspace::using_class_space() &&
1304       (vsl == Metaspace::class_space_list())) return true;
1305 
1306   // If this is part of an allocation after a GC, expand
1307   // unconditionally.
1308   if (MetaspaceGC::expand_after_GC()) {
1309     return true;
1310   }
1311 
1312 
1313   // If the capacity is below the minimum capacity, allow the
1314   // expansion.  Also set the high-water-mark (capacity_until_GC)
1315   // to that minimum capacity so that a GC will not be induced
1316   // until that minimum capacity is exceeded.
1317   size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
1318   size_t metaspace_size_bytes = MetaspaceSize;
1319   if (committed_capacity_bytes < metaspace_size_bytes ||
1320       capacity_until_GC() == 0) {
1321     set_capacity_until_GC(metaspace_size_bytes);
1322     return true;
1323   } else {
1324     if (committed_capacity_bytes < capacity_until_GC()) {
1325       return true;
1326     } else {
1327       if (TraceMetadataChunkAllocation && Verbose) {
1328         gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
1329                         "  capacity_until_GC " SIZE_FORMAT
1330                         "  allocated_capacity_bytes " SIZE_FORMAT,
1331                         word_size,
1332                         capacity_until_GC(),
1333                         MetaspaceAux::allocated_capacity_bytes());
1334       }
1335       return false;
1336     }





1337   }
1338 }
1339 


1340 


1341 
1342 void MetaspaceGC::compute_new_size() {
1343   assert(_shrink_factor <= 100, "invalid shrink factor");
1344   uint current_shrink_factor = _shrink_factor;
1345   _shrink_factor = 0;
1346 
1347   // Until a faster way of calculating the "used" quantity is implemented,
1348   // use "capacity".
1349   const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
1350   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1351 
1352   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1353   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1354 
1355   const double min_tmp = used_after_gc / maximum_used_percentage;
1356   size_t minimum_desired_capacity =
1357     (size_t)MIN2(min_tmp, double(max_uintx));
1358   // Don't shrink below the initial metaspace size (MetaspaceSize).
1359   minimum_desired_capacity = MAX2(minimum_desired_capacity,
1360                                   MetaspaceSize);
1361 
1362   if (PrintGCDetails && Verbose) {
1363     gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
1364     gclog_or_tty->print_cr("  "
1365                   "  minimum_free_percentage: %6.2f"
1366                   "  maximum_used_percentage: %6.2f",
1367                   minimum_free_percentage,
1368                   maximum_used_percentage);
1369     gclog_or_tty->print_cr("  "
1370                   "   used_after_gc       : %6.1fKB",
1371                   used_after_gc / (double) K);
1372   }
1373 
1374 
1375   size_t shrink_bytes = 0;
1376   if (capacity_until_GC < minimum_desired_capacity) {
1377     // If we have less capacity below the metaspace HWM, then
1378     // increment the HWM.
1379     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;

1380     // Don't expand unless it's significant
1381     if (expand_bytes >= MinMetaspaceExpansion) {
1382       MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes);
1383     }
1384     if (PrintGCDetails && Verbose) {
1385       size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC();  // re-read after the possible expansion above
1386       gclog_or_tty->print_cr("    expanding:"
1387                     "  minimum_desired_capacity: %6.1fKB"
1388                     "  expand_bytes: %6.1fKB"
1389                     "  MinMetaspaceExpansion: %6.1fKB"
1390                     "  new metaspace HWM:  %6.1fKB",
1391                     minimum_desired_capacity / (double) K,
1392                     expand_bytes / (double) K,
1393                     MinMetaspaceExpansion / (double) K,
1394                     new_capacity_until_GC / (double) K);
1395     }
1396     return;
1397   }
1398 
1399   // No expansion, now see if we want to shrink
1400   // We would never want to shrink more than this
1401   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1402   assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,


1419       gclog_or_tty->print_cr("  "
1420                              "  minimum_desired_capacity: %6.1fKB"
1421                              "  maximum_desired_capacity: %6.1fKB",
1422                              minimum_desired_capacity / (double) K,
1423                              maximum_desired_capacity / (double) K);
1424     }
1425 
1426     assert(minimum_desired_capacity <= maximum_desired_capacity,
1427            "sanity check");
1428 
1429     if (capacity_until_GC > maximum_desired_capacity) {
1430       // Capacity too large, compute shrinking size
1431       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1432       // We don't want to shrink all the way back to initSize if people call
1433       // System.gc(), because some programs do that between "phases" and then
1434       // we'd just have to grow the heap up again for the next phase.  So we
1435       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1436       // on the third call, and 100% by the fourth call.  But if we recompute
1437       // size without shrinking, it goes back to 0%.
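      // For example (illustrative numbers): with a 100M excess over
      // maximum_desired_capacity, successive shrink opportunities release
      // 0M, 10M, 40M and then the full 100M, unless an expansion in between
      // resets the factor to 0.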
1438       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;



1439       assert(shrink_bytes <= max_shrink_bytes,
1440         err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1441           shrink_bytes, max_shrink_bytes));
1442       if (current_shrink_factor == 0) {
1443         _shrink_factor = 10;
1444       } else {
1445         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1446       }
1447       if (PrintGCDetails && Verbose) {
1448         gclog_or_tty->print_cr("  "
1449                       "  shrinking:"
1450                       "  initSize: %.1fK"
1451                       "  maximum_desired_capacity: %.1fK",
1452                       MetaspaceSize / (double) K,
1453                       maximum_desired_capacity / (double) K);
1454         gclog_or_tty->print_cr("  "
1455                       "  shrink_bytes: %.1fK"
1456                       "  current_shrink_factor: %d"
1457                       "  new shrink factor: %d"
1458                       "  MinMetaspaceExpansion: %.1fK",
1459                       shrink_bytes / (double) K,
1460                       current_shrink_factor,
1461                       _shrink_factor,
1462                       MinMetaspaceExpansion / (double) K);
1463       }
1464     }
1465   }
1466 
1467   // Don't shrink unless it's significant
1468   if (shrink_bytes >= MinMetaspaceExpansion &&
1469       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1470     MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes);
1471   }
1472 }
1473 
1474 // Metadebug methods
1475 
1476 void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
1477                                        size_t chunk_word_size){
1478 #ifdef ASSERT
1479   VirtualSpaceList* vsl = sm->vs_list();
1480   if (MetaDataDeallocateALot &&
1481       Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
1482     Metadebug::reset_deallocate_chunk_a_lot_count();
1483     for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) {
1484       Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
1485       if (dummy_chunk == NULL) {
1486         break;
1487       }
1488       sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
1489 
1490       if (TraceMetadataChunkAllocation && Verbose) {


1683   slow_locked_verify();
1684   if (TraceMetadataChunkAllocation) {
1685     gclog_or_tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
1686                            PTR_FORMAT "  size " SIZE_FORMAT,
1687                            chunk, chunk->word_size());
1688   }
1689   free_chunks_put(chunk);
1690 }
1691 
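// Take a free chunk for the requested word_size off the matching free list
// (or, for humongous requests, a chunk of at least word_size out of the
// humongous dictionary), update the free-chunk accounting, and charge it to
// its VirtualSpaceNode's container count.  Returns NULL if nothing suitable
// is free.  (Summary of the code below.)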
1692 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1693   assert_lock_strong(SpaceManager::expand_lock());
1694 
1695   slow_locked_verify();
1696 
1697   Metachunk* chunk = NULL;
1698   if (list_index(word_size) != HumongousIndex) {
1699     ChunkList* free_list = find_free_chunks_list(word_size);
1700     assert(free_list != NULL, "Sanity check");
1701 
1702     chunk = free_list->head();
1703     debug_only(Metachunk* debug_head = chunk;)
1704 
1705     if (chunk == NULL) {
1706       return NULL;
1707     }
1708 
1709     // Remove the chunk as the head of the list.
1710     free_list->remove_chunk(chunk);
1711 
1712     // Chunk is being removed from the chunks free list.
1713     dec_free_chunks_total(chunk->capacity_word_size());
1714 
1715     if (TraceMetadataChunkAllocation && Verbose) {
1716       gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1717                              PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1718                              free_list, chunk, chunk->word_size());
1719     }
1720   } else {
1721     chunk = humongous_dictionary()->get_chunk(
1722       word_size,
1723       FreeBlockDictionary<Metachunk>::atLeast);
1724 
1725     if (chunk != NULL) {



1726       if (TraceMetadataHumongousAllocation) {
1727         size_t waste = chunk->word_size() - word_size;
1728         gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1729                                SIZE_FORMAT " for requested size " SIZE_FORMAT
1730                                " waste " SIZE_FORMAT,
1731                                chunk->word_size(), word_size, waste);
1732       }


1733       // Chunk is being removed from the chunks free list.
1734       dec_free_chunks_total(chunk->capacity_word_size());
1735     } else {
1736       return NULL;
1737     }
1738   }
1739 
1740   // Remove it from the links to this freelist
1741   chunk->set_next(NULL);
1742   chunk->set_prev(NULL);
1743 #ifdef ASSERT
1744   // Chunk is no longer on any freelist. Setting is_free to false makes
1745   // container_count_slow() work.
1746   chunk->set_is_free(false);
1747 #endif
1748   chunk->container()->inc_container_count();
1749 
1750   slow_locked_verify();
1751   return chunk;
1752 }
1753 
1754 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1755   assert_lock_strong(SpaceManager::expand_lock());
1756   slow_locked_verify();
1757 
1758   // Take from the beginning of the list


1985          "Don't need to expand");
1986   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
1987 
1988   if (TraceMetadataChunkAllocation && Verbose) {
1989     size_t words_left = 0;
1990     size_t words_used = 0;
1991     if (current_chunk() != NULL) {
1992       words_left = current_chunk()->free_word_size();
1993       words_used = current_chunk()->used_word_size();
1994     }
1995     gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
1996                            " words " SIZE_FORMAT " words used " SIZE_FORMAT
1997                            " words left",
1998                             word_size, words_used, words_left);
1999   }
2000 
2001   // Get another chunk out of the virtual space
2002   size_t grow_chunks_by_words = calc_chunk_size(word_size);
2003   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2004 






2005   // If a chunk was available, add it to the in-use chunk list
2006   // and do an allocation from it.
2007   if (next != NULL) {
2008     Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
2009     // Add to this manager's list of chunks in use.
2010     add_chunk(next, false);
2011     return next->allocate(word_size);
2012   }
2013   return NULL;

2014 }
2015 
2016 void SpaceManager::print_on(outputStream* st) const {
2017 
2018   for (ChunkIndex i = ZeroIndex;
2019        i < NumberOfInUseLists ;
2020        i = next_chunk_index(i) ) {
2021     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
2022                  chunks_in_use(i),
2023                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2024   }
2025   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2026                " Humongous " SIZE_FORMAT,
2027                sum_waste_in_chunks_in_use(SmallIndex),
2028                sum_waste_in_chunks_in_use(MediumIndex),
2029                sum_waste_in_chunks_in_use(HumongousIndex));
2030   // block free lists
2031   if (block_freelists() != NULL) {
2032     st->print_cr("total in block free lists " SIZE_FORMAT,
2033       block_freelists()->total_size());


2349 // Returns the address of space allocated for "word_size".
2350 // This method does not know about blocks (Metablocks).
2351 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2352   assert_lock_strong(_lock);
2353 #ifdef ASSERT
2354   if (Metadebug::test_metadata_failure()) {
2355     return NULL;
2356   }
2357 #endif
2358   // Is there space in the current chunk?
2359   MetaWord* result = NULL;
2360 
2361   // For DumpSharedSpaces, only allocate out of the current chunk which is
2362   // never null because we gave it the size we wanted.   Caller reports out
2363   // of memory if this returns null.
2364   if (DumpSharedSpaces) {
2365     assert(current_chunk() != NULL, "should never happen");
2366     inc_used_metrics(word_size);
2367     return current_chunk()->allocate(word_size); // caller handles null result
2368   }

2369   if (current_chunk() != NULL) {
2370     result = current_chunk()->allocate(word_size);
2371   }
2372 
2373   if (result == NULL) {
2374     result = grow_and_allocate(word_size);
2375   }
2376   if (result != 0) {

2377     inc_used_metrics(word_size);
2378     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2379            "Head of the list is being allocated");
2380   }
2381 
2382   return result;
2383 }
2384 
2385 void SpaceManager::verify() {
2386   // If there are blocks in the dictionary, then
2387   // verfication of chunks does not work since
2388   // being in the dictionary alters a chunk.
2389   if (block_freelists()->total_size() == 0) {
2390     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2391       Metachunk* curr = chunks_in_use(i);
2392       while (curr != NULL) {
2393         curr->verify();
2394         verify_chunk_size(curr);
2395         curr = curr->next();
2396       }


2622                         "("  SIZE_FORMAT ")",
2623                         prev_metadata_used,
2624                         allocated_used_bytes(),
2625                         reserved_bytes());
2626   } else {
2627     gclog_or_tty->print(" "  SIZE_FORMAT "K"
2628                         "->" SIZE_FORMAT "K"
2629                         "("  SIZE_FORMAT "K)",
2630                         prev_metadata_used/K,
2631                         allocated_used_bytes()/K,
2632                         reserved_bytes()/K);
2633   }
2634 
2635   gclog_or_tty->print("]");
2636 }
2637 
2638 // This is printed when PrintGCDetails is enabled.
2639 void MetaspaceAux::print_on(outputStream* out) {
2640   Metaspace::MetadataType nct = Metaspace::NonClassType;
2641 
2642   out->print_cr(" Metaspace total "
2643                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2644                 " reserved " SIZE_FORMAT "K",
2645                 allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K);
2646 
2647   out->print_cr("  data space     "
2648                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2649                 " reserved " SIZE_FORMAT "K",
2650                 allocated_capacity_bytes(nct)/K,
2651                 allocated_used_bytes(nct)/K,
2652                 reserved_bytes(nct)/K);
2653   if (Metaspace::using_class_space()) {
2654     Metaspace::MetadataType ct = Metaspace::ClassType;
2655     out->print_cr("  class space    "
2656                   SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2657                   " reserved " SIZE_FORMAT "K",
2658                   allocated_capacity_bytes(ct)/K,

2659                   allocated_used_bytes(ct)/K,


2660                   reserved_bytes(ct)/K);
2661   }
2662 }
2663 
2664 // Print information for class space and data space separately.
2665 // This is almost the same as above.
2666 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2667   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2668   size_t capacity_bytes = capacity_bytes_slow(mdtype);
2669   size_t used_bytes = used_bytes_slow(mdtype);
2670   size_t free_bytes = free_bytes_slow(mdtype);
2671   size_t used_and_free = used_bytes + free_bytes +
2672                            free_chunks_capacity_bytes;
2673   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
2674              "K + unused in chunks " SIZE_FORMAT "K  + "
2675              " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2676              "K  capacity in allocated chunks " SIZE_FORMAT "K",
2677              used_bytes / K,
2678              free_bytes / K,
2679              free_chunks_capacity_bytes / K,


2791     size_t used_in_use_bytes = used_bytes_slow(i);
2792     assert(allocated_used_bytes(i) == used_in_use_bytes,
2793       err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
2794               " used_bytes_slow(%u)" SIZE_FORMAT,
2795               i, allocated_used_bytes(i), i, used_in_use_bytes));
2796   }
2797 #endif
2798 }
2799 
2800 void MetaspaceAux::verify_metrics() {
2801   verify_capacity();
2802   verify_used();
2803 }
2804 
2805 
2806 // Metaspace methods
2807 
2808 size_t Metaspace::_first_chunk_word_size = 0;
2809 size_t Metaspace::_first_class_chunk_word_size = 0;
2810 



2811 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2812   initialize(lock, type);
2813 }
2814 
2815 Metaspace::~Metaspace() {
2816   delete _vsm;
2817   if (using_class_space()) {
2818     delete _class_vsm;
2819   }
2820 }
2821 
2822 VirtualSpaceList* Metaspace::_space_list = NULL;
2823 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2824 
2825 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
2826 ChunkManager* Metaspace::_chunk_manager_class = NULL;
2827 
2828 #define VIRTUALSPACEMULTIPLIER 2
2829 
2830 #ifdef _LP64


2852   }
2853 }
2854 
2855 // Return TRUE if the specified metaspace_base and cds_base are close enough
2856 // to work with compressed klass pointers.
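// Put differently (a sketch of the check below): with a narrow-klass shift of
// zero, a compressed class pointer is a 32-bit offset, so the CDS region and
// the class metaspace together must fit inside one 4G window starting at the
// lower of the two base addresses.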
2857 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
2858   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
2859   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2860   address lower_base = MIN2((address)metaspace_base, cds_base);
2861   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2862                                 (address)(metaspace_base + class_metaspace_size()));
2863   return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint);
2864 }
2865 
2866 // Try to allocate the metaspace at the requested addr.
2867 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
2868   assert(using_class_space(), "called improperly");
2869   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2870   assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
2871          "Metaspace size is too big");






2872 
2873   ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
2874                                              os::vm_allocation_granularity(),
2875                                              false, requested_addr, 0);

2876   if (!metaspace_rs.is_reserved()) {
2877     if (UseSharedSpaces) {


2878       // Keep trying to allocate the metaspace, increasing the requested_addr
2879       // by 1GB each time, until we reach an address that will no longer allow
2880       // use of CDS with compressed klass pointers.
2881       char *addr = requested_addr;
2882       while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) &&
2883              can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) {
2884         addr = addr + 1*G;
2885         metaspace_rs = ReservedSpace(class_metaspace_size(),
2886                                      os::vm_allocation_granularity(), false, addr, 0);
2887       }
2888     }
2889 
2890     // If no successful allocation then try to allocate the space anywhere.  If
2891     // that fails then OOM doom.  At this point we cannot try allocating the
2892     // metaspace as if UseCompressedClassPointers is off because too much
2893     // initialization has happened that depends on UseCompressedClassPointers.
2894     // So, UseCompressedClassPointers cannot be turned off at this point.
2895     if (!metaspace_rs.is_reserved()) {
2896       metaspace_rs = ReservedSpace(class_metaspace_size(),
2897                                    os::vm_allocation_granularity(), false);
2898       if (!metaspace_rs.is_reserved()) {
2899         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
2900                                               class_metaspace_size()));
2901       }
2902     }
2903   }
2904 
2905   // If we got here then the metaspace got allocated.
2906   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
2907 
2908   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
2909   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
2910     FileMapInfo::stop_sharing_and_unmap(
2911         "Could not allocate metaspace at a compatible address");
2912   }
2913 
2914   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
2915                                   UseSharedSpaces ? (address)cds_base : 0);
2916 
2917   initialize_class_space(metaspace_rs);
2918 
2919   if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
2920     gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
2921                             Universe::narrow_klass_base(), Universe::narrow_klass_shift());
2922     gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
2923                            class_metaspace_size(), metaspace_rs.base(), requested_addr);
2924   }
2925 }
2926 
2927 // For UseCompressedClassPointers the class space is reserved above the top of
2928 // the Java heap.  The argument passed in is at the base of the compressed space.
2929 void Metaspace::initialize_class_space(ReservedSpace rs) {
2930   // The reserved space size may be bigger because of alignment, especially with UseLargePages.
2931   assert(rs.size() >= CompressedClassSpaceSize,
2932          err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
2933   assert(using_class_space(), "Must be using class space");
2934   _class_space_list = new VirtualSpaceList(rs);
2935   _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);




2936 }
2937 
2938 #endif
2939 

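// One-time VM start-up initialization of the metaspaces.  In outline (see the
// code below): size the compressed class space; when dumping a CDS archive,
// build the metadata space list from the shared-space sizes and pin the narrow
// klass base and shift for the archive; otherwise map an existing archive (if
// any), reserve the compressed class space above the heap and CDS region, and
// create the initial metadata VirtualSpaceList.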
2940 void Metaspace::global_initialize() {
2941   // Initialize the alignment for shared spaces.
2942   int max_alignment = os::vm_page_size();
2943   size_t cds_total = 0;
2944 
2945   set_class_metaspace_size(align_size_up(CompressedClassSpaceSize,
2946                                          os::vm_allocation_granularity()));
2947 
2948   MetaspaceShared::set_max_alignment(max_alignment);
2949 
2950   if (DumpSharedSpaces) {
2951     SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
2952     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
2953     SharedMiscDataSize  = align_size_up(SharedMiscDataSize, max_alignment);
2954     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize, max_alignment);
2955 
2956     // Initialize with the sum of the shared space sizes.  The read-only
2957     // and read write metaspace chunks will be allocated out of this and the
2958     // remainder is the misc code and data chunks.
2959     cds_total = FileMapInfo::shared_spaces_size();

2960     _space_list = new VirtualSpaceList(cds_total/wordSize);
2961     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
2962 




2963 #ifdef _LP64








2964     // Set the compressed klass pointer base so that decoding of these pointers works
2965     // properly when creating the shared archive.
2966     assert(UseCompressedOops && UseCompressedClassPointers,
2967       "UseCompressedOops and UseCompressedClassPointers must be set");
2968     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
2969     if (TraceMetavirtualspaceAllocation && Verbose) {
2970       gclog_or_tty->print_cr("Setting narrow_klass_base to Address: " PTR_FORMAT,
2971                              _space_list->current_virtual_space()->bottom());
2972     }
2973 
2974     // Set the shift to zero.
2975     assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
2976            "CDS region is too large");
2977     Universe::set_narrow_klass_shift(0);
2978 #endif
2979 
2980   } else {
2981     // If using shared space, open the file that contains the shared space
2982     // and map in the memory before initializing the rest of metaspace (so
2983     // the addresses don't conflict)
2984     address cds_address = NULL;
2985     if (UseSharedSpaces) {
2986       FileMapInfo* mapinfo = new FileMapInfo();
2987       memset(mapinfo, 0, sizeof(FileMapInfo));
2988 
2989       // Open the shared archive file, read and validate the header. If
2990       // initialization fails, shared spaces [UseSharedSpaces] are
2991       // disabled and the file is closed.
2992       // Map in spaces now also
2993       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
2994         FileMapInfo::set_current_info(mapinfo);


2995       } else {
2996         assert(!mapinfo->is_open() && !UseSharedSpaces,
2997                "archive file not closed or shared spaces not disabled.");
2998       }
2999       cds_total = FileMapInfo::shared_spaces_size();
3000       cds_address = (address)mapinfo->region_base(0);
3001     }
3002 
3003 #ifdef _LP64
3004     // If UseCompressedClassPointers is set then allocate the metaspace area
3005     // above the heap and above the CDS area (if it exists).
3006     if (using_class_space()) {
3007       if (UseSharedSpaces) {
3008         allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address);


3009       } else {
3010         allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
3011       }
3012     }
3013 #endif
3014 
3015     // Initialize these before initializing the VirtualSpaceList
3016     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3017     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3018     // Make the first class chunk bigger than a medium chunk so it's not put
3019     // on the medium chunk list.   The next chunk will be small and progress
3020     // from there.  This size was calculated by running -version.
3021     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3022                                        (CompressedClassSpaceSize/BytesPerWord)*2);
3023     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3024     // Arbitrarily set the initial virtual space to a multiple
3025     // of the boot class loader size.
3026     size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();


3027     // Initialize the list of virtual spaces.
3028     _space_list = new VirtualSpaceList(word_size);
3029     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);



3030   }



3031 }
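
// Editor's note: an illustrative sketch, not part of this change, of how the
// narrow_klass_base/narrow_klass_shift values set up in global_initialize()
// are consumed when a compressed class pointer is decoded.  The helper name
// below is hypothetical; only Universe::narrow_klass_base()/shift() come from
// the existing code.
//
//   static Klass* decode_klass_example(narrowKlass nk) {
//     address base  = Universe::narrow_klass_base();
//     int     shift = Universe::narrow_klass_shift();
//     return (Klass*)(base + ((uintptr_t)nk << shift));
//   }
//
// With the shift forced to zero for CDS dumping, every Klass must lie within
// 4G of the base, which is why the code above asserts that the class metaspace
// plus the CDS region stays below max_juint.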
3032 
3033 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
3034                                                size_t chunk_word_size,
3035                                                size_t chunk_bunch) {
3036   // Get a chunk from the chunk freelist
3037   Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3038   if (chunk != NULL) {
3039     return chunk;
3040   }
3041 
3042   return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch);
3043 }
3044 
3045 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3046 
3047   assert(space_list() != NULL,
3048     "Metadata VirtualSpaceList has not been initialized");
3049   assert(chunk_manager_metadata() != NULL,
3050     "Metadata ChunkManager has not been initialized");


3095   _alloc_record_head = NULL;
3096   _alloc_record_tail = NULL;
3097 }
3098 
3099 size_t Metaspace::align_word_size_up(size_t word_size) {
3100   size_t byte_size = word_size * wordSize;
3101   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3102 }
3103 
3104 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3105   // DumpSharedSpaces doesn't use class metadata area (yet)
3106   // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3107   if (is_class_space_allocation(mdtype)) {
3108     return  class_vsm()->allocate(word_size);
3109   } else {
3110     return  vsm()->allocate(word_size);
3111   }
3112 }
3113 
3114 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3115   MetaWord* result;
3116   MetaspaceGC::set_expand_after_GC(true);
3117   size_t before_inc = MetaspaceGC::capacity_until_GC();
3118   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
3119   MetaspaceGC::inc_capacity_until_GC(delta_bytes);

3120   if (PrintGCDetails && Verbose) {
3121     gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
3122       " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
3123   }
3124 
3125   result = allocate(word_size, mdtype);
3126 
3127   return result;
3128 }
3129 
3130 // Space allocated in the Metaspace.  This may
3131 // be across several metadata virtual spaces.
3132 char* Metaspace::bottom() const {
3133   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3134   return (char*)vsm()->current_chunk()->bottom();
3135 }
3136 
3137 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3138   if (mdtype == ClassType) {
3139     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3140   } else {
3141     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3142   }
3143 }
3144 
3145 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3146   if (mdtype == ClassType) {
3147     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;


3189       vsm()->deallocate(ptr, word_size);
3190     }
3191   } else {
3192     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3193 
3194     if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
3195       // Dark matter.  Too small for dictionary.
3196 #ifdef ASSERT
3197       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3198 #endif
3199       return;
3200     }
3201     if (is_class && using_class_space()) {
3202       class_vsm()->deallocate(ptr, word_size);
3203     } else {
3204       vsm()->deallocate(ptr, word_size);
3205     }
3206   }
3207 }
3208 

3209 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3210                               bool read_only, MetaspaceObj::Type type, TRAPS) {
3211   if (HAS_PENDING_EXCEPTION) {
3212     assert(false, "Should not allocate with exception pending");
3213     return NULL;  // caller does a CHECK_NULL too
3214   }
3215 
3216   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3217 
3218   // SSS: Should we align the allocations and make sure the sizes are aligned.
3219   MetaWord* result = NULL;
3220 
3221   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3222         "ClassLoaderData::the_null_class_loader_data() should have been used.");

3223   // Allocate in metaspaces without taking out a lock, because it deadlocks
3224   // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
3225   // to revisit this for application class data sharing.
3226   if (DumpSharedSpaces) {
3227     assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3228     Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3229     result = space->allocate(word_size, NonClassType);
3230     if (result == NULL) {
3231       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3232     } else {
3233       space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3234     }
3235     return Metablock::initialize(result, word_size);
3236   }
3237 
3238   result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);



3239 
3240   if (result == NULL) {




3241     // Try to clean out some memory and retry.
3242     result =
3243       Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3244         loader_data, word_size, mdtype);


3245 
3246     // If result is still null, we are out of memory.
3247     if (result == NULL) {
3248       if (Verbose && TraceMetadataChunkAllocation) {
3249         gclog_or_tty->print_cr("Metaspace allocation failed for size "
3250           SIZE_FORMAT, word_size);
3251         if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty);


3252         MetaspaceAux::dump(gclog_or_tty);
3253       }

3254       // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3255       const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" :
3256                                                                      "Metadata space";
3257       report_java_out_of_memory(space_string);
3258 
3259       if (JvmtiExport::should_post_resource_exhausted()) {
3260         JvmtiExport::post_resource_exhausted(
3261             JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3262             space_string);
3263       }





3264       if (is_class_space_allocation(mdtype)) {
3265         THROW_OOP_0(Universe::out_of_memory_error_class_metaspace());
3266       } else {
3267         THROW_OOP_0(Universe::out_of_memory_error_metaspace());
3268       }
3269     }
3270   }
3271   return Metablock::initialize(result, word_size);
3272 }
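
// Editor's note: a minimal sketch, not part of this change, of the typical
// call path into the allocate() above.  Metadata objects reach it through a
// MetaspaceObj placement new, passing TRAPS so a pending OutOfMemoryError
// unwinds at the call site (the exact type and word size below are only for
// illustration):
//
//   Metablock* block = Metaspace::allocate(loader_data, word_size,
//                                          /*read_only*/ false,
//                                          MetaspaceObj::SymbolType, CHECK_NULL);
//
// If the GC retry fails, allocate() reports the OOM, posts the JVMTI resource
// exhausted event, and returns with the exception pending, which CHECK_NULL
// then propagates to the caller.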
3273 
3274 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3275   assert(DumpSharedSpaces, "sanity");
3276 
3277   AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
3278   if (_alloc_record_head == NULL) {
3279     _alloc_record_head = _alloc_record_tail = rec;
3280   } else {
3281     _alloc_record_tail->_next = rec;
3282     _alloc_record_tail = rec;
3283   }
3284 }
3285 
3286 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3287   assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3288 
3289   address last_addr = (address)bottom();
3290 
3291   for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {




  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "gc_interface/collectedHeap.hpp"
  26 #include "memory/allocation.hpp"
  27 #include "memory/binaryTreeDictionary.hpp"
  28 #include "memory/freeList.hpp"
  29 #include "memory/collectorPolicy.hpp"
  30 #include "memory/filemap.hpp"
  31 #include "memory/freeList.hpp"
  32 #include "memory/gcLocker.hpp"
  33 #include "memory/metablock.hpp"
  34 #include "memory/metachunk.hpp"
  35 #include "memory/metaspace.hpp"
  36 #include "memory/metaspaceShared.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "memory/universe.hpp"
  39 #include "runtime/atomic.inline.hpp"
  40 #include "runtime/globals.hpp"
  41 #include "runtime/init.hpp"
  42 #include "runtime/java.hpp"
  43 #include "runtime/mutex.hpp"
  44 #include "runtime/orderAccess.hpp"
  45 #include "services/memTracker.hpp"
  46 #include "utilities/copy.hpp"
  47 #include "utilities/debug.hpp"
  48 
  49 typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
  50 typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
  51 // Define this macro to enable slow integrity checking of
  52 // the free chunk lists
  53 const bool metaspace_slow_verify = false;
  54 
  55 // Parameters for stress mode testing
  56 const uint metadata_deallocate_a_lot_block = 10;
  57 const uint metadata_deallocate_a_lock_chunk = 3;
  58 size_t const allocation_from_dictionary_limit = 4 * K;
  59 
  60 MetaWord* last_allocated = 0;
  61 


  70   HumongousIndex = MediumIndex + 1,
  71   NumberOfFreeLists = 3,
  72   NumberOfInUseLists = 4
  73 };
  74 
  75 enum ChunkSizes {    // in words.
  76   ClassSpecializedChunk = 128,
  77   SpecializedChunk = 128,
  78   ClassSmallChunk = 256,
  79   SmallChunk = 512,
  80   ClassMediumChunk = 4 * K,
  81   MediumChunk = 8 * K,
  82   HumongousChunkGranularity = 8
  83 };
  84 
  85 static ChunkIndex next_chunk_index(ChunkIndex i) {
  86   assert(i < NumberOfInUseLists, "Out of bound");
  87   return (ChunkIndex) (i+1);
  88 }
  89 
  90 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;






  91 uint MetaspaceGC::_shrink_factor = 0;
  92 bool MetaspaceGC::_should_concurrent_collect = false;
  93 
  94 // Blocks of space for metadata are allocated out of Metachunks.
  95 //
  96 // Metachunk are allocated out of MetadataVirtualspaces and once
  97 // allocated there is no explicit link between a Metachunk and
  98 // the MetadataVirtualspaces from which it was allocated.
  99 //
 100 // Each SpaceManager maintains a
 101 // list of the chunks it is using and the current chunk.  The current
 102 // chunk is the chunk from which allocations are done.  Space freed in
 103 // a chunk is placed on the free list of blocks (BlockFreelist) and
 104 // reused from there.
 105 
 106 typedef class FreeList<Metachunk> ChunkList;
 107 
 108 // Manages the global free lists of chunks.
 109 // Has three lists of free chunks, and a total size and
 110 // count that includes all three


 273 
 274   // Convenience functions to access the _virtual_space
 275   char* low()  const { return virtual_space()->low(); }
 276   char* high() const { return virtual_space()->high(); }
 277 
 278   // The first Metachunk will be allocated at the bottom of the
 279   // VirtualSpace
 280   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 281 
 282  public:
 283 
 284   VirtualSpaceNode(size_t byte_size);
 285   VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
 286   ~VirtualSpaceNode();
 287 
 288   // Convenience functions for logical bottom and end
 289   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 290   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 291 
 292   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }

 293   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 294 
 295   bool is_pre_committed() const { return _virtual_space.special(); }
 296 
 297   // address of next available space in _virtual_space;
 298   // Accessors
 299   VirtualSpaceNode* next() { return _next; }
 300   void set_next(VirtualSpaceNode* v) { _next = v; }
 301 
 302   void set_reserved(MemRegion const v) { _reserved = v; }
 303   void set_top(MetaWord* v) { _top = v; }
 304 
 305   // Accessors
 306   MemRegion* reserved() { return &_reserved; }
 307   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 308 
 309   // Returns true if "word_size" is available in the VirtualSpace
 310   bool is_available(size_t word_size) { return _top + word_size <= end(); }
 311 
 312   MetaWord* top() const { return _top; }
 313   void inc_top(size_t word_size) { _top += word_size; }
 314 
 315   uintx container_count() { return _container_count; }
 316   void inc_container_count();


 318 #ifdef ASSERT
 319   uint container_count_slow();
 320   void verify_container_count();
 321 #endif
 322 
 323   // used and capacity in this single entry in the list
 324   size_t used_words_in_vs() const;
 325   size_t capacity_words_in_vs() const;
 326   size_t free_words_in_vs() const;
 327 
 328   bool initialize();
 329 
 330   // get space from the virtual space
 331   Metachunk* take_from_committed(size_t chunk_word_size);
 332 
 333   // Allocate a chunk from the virtual space and return it.
 334   Metachunk* get_chunk_vs(size_t chunk_word_size);
 335 
 336   // Expands/shrinks the committed space in a virtual space.  Delegates
 337   // to VirtualSpace
 338   bool expand_by(size_t min_words, size_t preferred_words);
 339 
 340   // In preparation for deleting this node, remove all the chunks
 341   // in the node from any freelist.
 342   void purge(ChunkManager* chunk_manager);
 343 
 344 #ifdef ASSERT
 345   // Debug support
 346   void mangle();
 347 #endif
 348 
 349   void print_on(outputStream* st) const;
 350 };
 351 
 352 #define assert_is_ptr_aligned(ptr, alignment) \
 353   assert(is_ptr_aligned(ptr, alignment),      \
 354     err_msg(PTR_FORMAT " is not aligned to "  \
 355       SIZE_FORMAT, ptr, alignment))
 356 
 357 #define assert_is_size_aligned(size, alignment) \
 358   assert(is_size_aligned(size, alignment),      \
 359     err_msg(SIZE_FORMAT " is not aligned to "   \
 360        SIZE_FORMAT, size, alignment))
 361 
 362 
 363 static bool should_reserve_large_pages(size_t bytes) {
 364   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 365     size_t words = bytes / BytesPerWord;
 366     bool is_class = false; // We never reserve large pages for the class space.
 367     if (MetaspaceGC::can_expand(words, is_class) &&
 368         MetaspaceGC::allowed_expansion() >= words) {
 369       return true;
 370     }
 371   }
 372 
 373   return false;
 374 }
 375 
 376 // bytes is the size of the associated virtualspace.
 377 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
 378   assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

 379 
 380   // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
 381   // a configurable address, generally at the top of the Java heap so other
 382   // memory addresses don't conflict.
 383   if (DumpSharedSpaces) {
 384     bool large_pages = false; // No large pages when dumping the CDS archive.
 385     char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
 386 
 387     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
 388     if (_rs.is_reserved()) {
 389       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
 390     } else {
 391       // Get a mmap region anywhere if the SharedBaseAddress fails.
 392       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 393     }
 394     MetaspaceShared::set_shared_rs(&_rs);
 395   } else {
 396     // Decide if large pages should be committed when the memory is reserved.
 397     bool large_pages = should_reserve_large_pages(bytes);
 398 
 399     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 400   }
 401 
 402   if (_rs.is_reserved()) {
 403     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 404     assert(_rs.size() != 0, "Catch if we get a 0 size");
 405     assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
 406     assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
 407 
 408     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 409   }
 410 }
 411 
 412 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 413   Metachunk* chunk = first_chunk();
 414   Metachunk* invalid_chunk = (Metachunk*) top();
 415   while (chunk < invalid_chunk ) {
 416     assert(chunk->is_free(), "Should be marked free");
 417     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 418     chunk_manager->remove_chunk(chunk);
 419     assert(chunk->next() == NULL &&
 420            chunk->prev() == NULL,
 421            "Was not removed from its list");
 422     chunk = (Metachunk*) next;
 423   }
 424 }
 425 
 426 #ifdef ASSERT
 427 uint VirtualSpaceNode::container_count_slow() {
 428   uint count = 0;
 429   Metachunk* chunk = first_chunk();
 430   Metachunk* invalid_chunk = (Metachunk*) top();
 431   while (chunk < invalid_chunk ) {
 432     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 433     // Don't count the chunks on the free lists.  Those are
 434     // still part of the VirtualSpaceNode but not currently
 435     // counted.
 436     if (!chunk->is_free()) {
 437       count++;
 438     }
 439     chunk = (Metachunk*) next;
 440   }
 441   return count;
 442 }
 443 #endif
 444 
 445 // List of VirtualSpaces for metadata allocation.


 446 class VirtualSpaceList : public CHeapObj<mtClass> {
 447   friend class VirtualSpaceNode;
 448 
 449   enum VirtualSpaceSizes {
 450     VirtualSpaceSize = 256 * K
 451   };
 452 

 453   // Head of the list
 454   VirtualSpaceNode* _virtual_space_list;
 455   // virtual space currently being used for allocations
 456   VirtualSpaceNode* _current_virtual_space;
 457 
 458   // Is this VirtualSpaceList used for the compressed class space

 459   bool _is_class;

 460 
 461   // Sum of reserved and committed memory in the virtual spaces
 462   size_t _reserved_words;
 463   size_t _committed_words;
 464 
 465   // Number of virtual spaces
 466   size_t _virtual_space_count;
 467 
 468   ~VirtualSpaceList();
 469 
 470   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
 471 
 472   void set_virtual_space_list(VirtualSpaceNode* v) {
 473     _virtual_space_list = v;
 474   }
 475   void set_current_virtual_space(VirtualSpaceNode* v) {
 476     _current_virtual_space = v;
 477   }
 478 
 479   void link_vs(VirtualSpaceNode* new_entry);
 480 
 481   // Get another virtual space and add it to the list.  This
 482   // is typically prompted by a failed attempt to allocate a chunk
 483   // and is typically followed by the allocation of a chunk.
 484   bool create_new_virtual_space(size_t vs_word_size);
 485 
 486  public:
 487   VirtualSpaceList(size_t word_size);
 488   VirtualSpaceList(ReservedSpace rs);
 489 
 490   size_t free_bytes();
 491 
 492   Metachunk* get_new_chunk(size_t word_size,
 493                            size_t grow_chunks_by_words,
 494                            size_t medium_chunk_bunch);
 495 
 496   bool expand_node_by(VirtualSpaceNode* node,
 497                       size_t min_words,
 498                       size_t preferred_words);
 499 
 500   bool expand_by(size_t min_words,
 501                  size_t preferred_words);
 502 
 503   // Get the first chunk for a Metaspace.  Used for
 504   // special cases such as the boot class loader, reflection
 505   // class loader and anonymous class loader.
 506   Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch);
 507 
 508   VirtualSpaceNode* current_virtual_space() {
 509     return _current_virtual_space;
 510   }
 511 
 512   bool is_class() const { return _is_class; }
 513 
 514   bool initialization_succeeded() { return _virtual_space_list != NULL; }

 515 
 516   size_t reserved_words()  { return _reserved_words; }
 517   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
 518   size_t committed_words() { return _committed_words; }
 519   size_t committed_bytes() { return committed_words() * BytesPerWord; }
 520 
 521   void inc_reserved_words(size_t v);
 522   void dec_reserved_words(size_t v);
 523   void inc_committed_words(size_t v);
 524   void dec_committed_words(size_t v);
 525   void inc_virtual_space_count();
 526   void dec_virtual_space_count();
 527 
 528   // Unlink empty VirtualSpaceNodes and free them.
 529   void purge(ChunkManager* chunk_manager);
 530 
 531   bool contains(const void *ptr);
 532 
 533   void print_on(outputStream* st) const;
 534 


 884   return pointer_delta(top(), bottom(), sizeof(MetaWord));
 885 }
 886 
 887 // Space committed in the VirtualSpace
 888 size_t VirtualSpaceNode::capacity_words_in_vs() const {
 889   return pointer_delta(end(), bottom(), sizeof(MetaWord));
 890 }
 891 
 892 size_t VirtualSpaceNode::free_words_in_vs() const {
 893   return pointer_delta(end(), top(), sizeof(MetaWord));
 894 }
 895 
 896 // Allocates the chunk from the virtual space only.
 897 // This interface is also used internally for debugging.  Not all
 898 // chunks removed here are necessarily used for allocation.
 899 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
 900   // Bottom of the new chunk
 901   MetaWord* chunk_limit = top();
 902   assert(chunk_limit != NULL, "Not safe to call this method");
 903 
 904   // The virtual spaces are always expanded by the
 905   // commit granularity to enforce the following condition.
 906   // Without this the is_available check will not work correctly.
 907   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
 908       "The committed memory doesn't match the expanded memory.");
 909 
 910   if (!is_available(chunk_word_size)) {
 911     if (TraceMetadataChunkAllocation) {
 912       gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
 913       // Dump some information about the virtual space that is nearly full
 914       print_on(gclog_or_tty);
 915     }
 916     return NULL;
 917   }
 918 
 919   // Take the space  (bump top on the current virtual space).
 920   inc_top(chunk_word_size);
 921 
 922   // Initialize the chunk
 923   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
 924   return result;
 925 }
 926 
 927 
 928 // Expand the virtual space (commit more of the reserved space)
 929 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
 930   size_t min_bytes = min_words * BytesPerWord;
 931   size_t preferred_bytes = preferred_words * BytesPerWord;
 932 
 933   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
 934 
 935   if (uncommitted < min_bytes) {
 936     return false;
 937   }  
 938 
 939   size_t commit = MIN2(preferred_bytes, uncommitted);
 940   bool result = virtual_space()->expand_by(commit, false);
 941   
 942   assert(result, "Failed to commit memory");
 943 
 944   return result;
 945 }
 946 
 947 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
 948   assert_lock_strong(SpaceManager::expand_lock());
 949   Metachunk* result = take_from_committed(chunk_word_size);
 950   if (result != NULL) {
 951     inc_container_count();
 952   }
 953   return result;
 954 }
 955 
 956 bool VirtualSpaceNode::initialize() {
 957 
 958   if (!_rs.is_reserved()) {
 959     return false;
 960   }
 961 
 962   // These are necessary restrictions to make sure that the virtual space always
 963   // grows in steps of Metaspace::commit_alignment(). If both base and size are
 964   // aligned only the middle alignment of the VirtualSpace is used.
 965   assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
 966   assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
 967 
 968   // ReservedSpaces marked as special will have the entire memory
 969   // pre-committed. Setting a committed size will make sure that
 970   // committed_size and actual_committed_size agree.
 971   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
 972 
 973   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
 974                                             Metaspace::commit_alignment());
 975   if (result) {
 976     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
 977         "Checking that the pre-committed memory was registered by the VirtualSpace");
 978   
 979     set_top((MetaWord*)virtual_space()->low());
 980     set_reserved(MemRegion((HeapWord*)_rs.base(),
 981                  (HeapWord*)(_rs.base() + _rs.size())));
 982 
 983     assert(reserved()->start() == (HeapWord*) _rs.base(),
 984       err_msg("Reserved start was not set properly " PTR_FORMAT
 985         " != " PTR_FORMAT, reserved()->start(), _rs.base()));
 986     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
 987       err_msg("Reserved size was not set properly " SIZE_FORMAT
 988         " != " SIZE_FORMAT, reserved()->word_size(),
 989         _rs.size() / BytesPerWord));
 990   }
 991 
 992   return result;
 993 }
 994 
 995 void VirtualSpaceNode::print_on(outputStream* st) const {
 996   size_t used = used_words_in_vs();
 997   size_t capacity = capacity_words_in_vs();
 998   VirtualSpace* vs = virtual_space();


1015 // VirtualSpaceList methods
1016 // Space allocated from the VirtualSpace
1017 
1018 VirtualSpaceList::~VirtualSpaceList() {
1019   VirtualSpaceListIterator iter(virtual_space_list());
1020   while (iter.repeat()) {
1021     VirtualSpaceNode* vsl = iter.get_next();
1022     delete vsl;
1023   }
1024 }
1025 
1026 void VirtualSpaceList::inc_reserved_words(size_t v) {
1027   assert_lock_strong(SpaceManager::expand_lock());
1028   _reserved_words = _reserved_words + v;
1029 }
1030 void VirtualSpaceList::dec_reserved_words(size_t v) {
1031   assert_lock_strong(SpaceManager::expand_lock());
1032   _reserved_words = _reserved_words - v;
1033 }
1034 
1035 #define assert_committed_below_limit()                             \
1036   assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,      \
1037       err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
1038               " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1039           MetaspaceAux::committed_bytes(), MaxMetaspaceSize));
1040 
1041 void VirtualSpaceList::inc_committed_words(size_t v) {
1042   assert_lock_strong(SpaceManager::expand_lock());
1043   _committed_words = _committed_words + v;
1044 
1045   assert_committed_below_limit();
1046 }
1047 void VirtualSpaceList::dec_committed_words(size_t v) {
1048   assert_lock_strong(SpaceManager::expand_lock());
1049   _committed_words = _committed_words - v;
1050 
1051   assert_committed_below_limit();
1052 }
1053 
1054 void VirtualSpaceList::inc_virtual_space_count() {
1055   assert_lock_strong(SpaceManager::expand_lock());
1056   _virtual_space_count++;
1057 }
1058 void VirtualSpaceList::dec_virtual_space_count() {
1059   assert_lock_strong(SpaceManager::expand_lock());
1060   _virtual_space_count--;
1061 }
1062 
1063 void ChunkManager::remove_chunk(Metachunk* chunk) {
1064   size_t word_size = chunk->word_size();
1065   ChunkIndex index = list_index(word_size);
1066   if (index != HumongousIndex) {
1067     free_chunks(index)->remove_chunk(chunk);
1068   } else {
1069     humongous_dictionary()->remove_chunk(chunk);
1070   }
1071 


1074 }
1075 
1076 // Walk the list of VirtualSpaceNodes and delete
1077 // nodes with a 0 container_count.  Remove Metachunks in
1078 // the node from their respective freelists.
1079 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1080   assert_lock_strong(SpaceManager::expand_lock());
1081   // Don't use a VirtualSpaceListIterator because this
1082   // list is being changed and a straightforward use of an iterator is not safe.
1083   VirtualSpaceNode* purged_vsl = NULL;
1084   VirtualSpaceNode* prev_vsl = virtual_space_list();
1085   VirtualSpaceNode* next_vsl = prev_vsl;
1086   while (next_vsl != NULL) {
1087     VirtualSpaceNode* vsl = next_vsl;
1088     next_vsl = vsl->next();
1089     // Don't free the current virtual space since it will likely
1090     // be needed soon.
1091     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1092       // Unlink it from the list
1093       if (prev_vsl == vsl) {
1094         // This is the case of the current node being the first node.
1095         assert(vsl == virtual_space_list(), "Expected to be the first node");
1096         set_virtual_space_list(vsl->next());
1097       } else {
1098         prev_vsl->set_next(vsl->next());
1099       }
1100 
1101       vsl->purge(chunk_manager);
1102       dec_reserved_words(vsl->reserved_words());
1103       dec_committed_words(vsl->committed_words());
1104       dec_virtual_space_count();
1105       purged_vsl = vsl;
1106       delete vsl;
1107     } else {
1108       prev_vsl = vsl;
1109     }
1110   }
1111 #ifdef ASSERT
1112   if (purged_vsl != NULL) {
1113     // List should be stable enough to use an iterator here.
1114     VirtualSpaceListIterator iter(virtual_space_list());
1115     while (iter.repeat()) {
1116       VirtualSpaceNode* vsl = iter.get_next();
1117       assert(vsl != purged_vsl, "Purge of vsl failed");
1118     }
1119   }
1120 #endif
1121 }
1122 
1123 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
1124                                    _is_class(false),
1125                                    _virtual_space_list(NULL),
1126                                    _current_virtual_space(NULL),
1127                                    _reserved_words(0),
1128                                    _committed_words(0),
1129                                    _virtual_space_count(0) {
1130   MutexLockerEx cl(SpaceManager::expand_lock(),
1131                    Mutex::_no_safepoint_check_flag);
1132   create_new_virtual_space(word_size);


1133 }
1134 
1135 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
1136                                    _is_class(true),
1137                                    _virtual_space_list(NULL),
1138                                    _current_virtual_space(NULL),
1139                                    _reserved_words(0),
1140                                    _committed_words(0),
1141                                    _virtual_space_count(0) {
1142   MutexLockerEx cl(SpaceManager::expand_lock(),
1143                    Mutex::_no_safepoint_check_flag);
1144   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
1145   bool succeeded = class_entry->initialize();
1146   if (succeeded) {
1147     link_vs(class_entry);
1148   }
1149 }
1150 
1151 size_t VirtualSpaceList::free_bytes() {
1152   return virtual_space_list()->free_words_in_vs() * BytesPerWord;
1153 }
1154 
1155 // Allocate another meta virtual space and add it to the list.
1156 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
1157   assert_lock_strong(SpaceManager::expand_lock());
1158 
1159   if (is_class()) {
1160     assert(false, "We currently don't support more than one VirtualSpace for"
1161                   " the compressed class space. The initialization of the"
1162                   " CCS uses another code path and should not hit this path.");
1163     return false;
1164   }
1165 
1166   if (vs_word_size == 0) {
1167     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
1168     return false;
1169   }
1170 
1171   // Reserve the space
1172   size_t vs_byte_size = vs_word_size * BytesPerWord;
1173   assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());
1174 
1175   // Allocate the meta virtual space and initialize it.
1176   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
1177   if (!new_entry->initialize()) {
1178     delete new_entry;
1179     return false;
1180   } else {
1181     assert(new_entry->reserved_words() == vs_word_size,
1182         "Reserved memory size differs from requested memory size");
1183     // ensure lock-free iteration sees fully initialized node
1184     OrderAccess::storestore();
1185     link_vs(new_entry);
1186     return true;
1187   }
1188 }
1189 
1190 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1191   if (virtual_space_list() == NULL) {
1192       set_virtual_space_list(new_entry);
1193   } else {
1194     current_virtual_space()->set_next(new_entry);
1195   }
1196   set_current_virtual_space(new_entry);
1197   inc_reserved_words(new_entry->reserved_words());
1198   inc_committed_words(new_entry->committed_words());
1199   inc_virtual_space_count();
1200 #ifdef ASSERT
1201   new_entry->mangle();
1202 #endif
1203   if (TraceMetavirtualspaceAllocation && Verbose) {
1204     VirtualSpaceNode* vsl = current_virtual_space();
1205     vsl->print_on(gclog_or_tty);
1206   }
1207 }
1208 
1209 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1210                                       size_t min_words,
1211                                       size_t preferred_words) {
1212   size_t before = node->committed_words();
1213 
1214   bool result = node->expand_by(min_words, preferred_words);
1215 
1216   size_t after = node->committed_words();
1217 
1218   // after and before can be the same if the memory was pre-committed.
1219   assert(after >= before, "Inconsistency");
1220   inc_committed_words(after - before);
1221 
1222   return result;
1223 }
1224 
1225 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
1226   assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
1227   assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
1228   assert(min_words <= preferred_words, "Invalid arguments");
1229 
1230   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
1231     return  false;
1232   }
1233 
1234   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
1235   if (allowed_expansion_words < min_words) {
1236     return false;
1237   }
1238 
1239   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
1240 
1241   // Commit more memory from the current virtual space.
1242   bool vs_expanded = expand_node_by(current_virtual_space(),
1243                                     min_words,
1244                                     max_expansion_words);
1245   if (vs_expanded) {
1246     return true;
1247   }
1248 
1249   // Get another virtual space.
1250   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
1251   grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
1252 
1253   if (create_new_virtual_space(grow_vs_words)) {
1254     if (current_virtual_space()->is_pre_committed()) {
1255       // The memory was pre-committed, so we are done here.
1256       assert(min_words <= current_virtual_space()->committed_words(),
1257           "The new VirtualSpace was pre-committed, so it"
1258           "should be large enough to fit the alloc request.");
1259       return true;
1260     }
1261 
1262     return expand_node_by(current_virtual_space(),
1263                           min_words,
1264                           max_expansion_words);
1265   }
1266 
1267   return false;
1268 }
1269 
1270 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
1271                                            size_t grow_chunks_by_words,
1272                                            size_t medium_chunk_bunch) {
1273 
1274   // Allocate a chunk out of the current virtual space.
1275   Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1276 
1277   if (next != NULL) {
1278     return next;
1279   }
1280 
1281   // The expand amount is currently only determined by the requested sizes
1282   // and not how much committed memory is left in the current virtual space.
1283 
1284   size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
1285   size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
1286   if (min_word_size >= preferred_word_size) {
1287     // Can happen when humongous chunks are allocated.
1288     preferred_word_size = min_word_size;
1289   }
1290 
1291   bool expanded = expand_by(min_word_size, preferred_word_size);
1292   if (expanded) {
1293     next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1294     assert(next != NULL, "The allocation was expected to succeed after the expansion");

1295   }
1296 


1297   return next;
1298 }
1299 
1300 Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
1301                                                       size_t chunk_bunch) {
1302   // Get a chunk from the chunk freelist
1303   Metachunk* new_chunk = get_new_chunk(chunk_word_size,
1304                                        chunk_word_size,
1305                                        chunk_bunch);
1306   return new_chunk;
1307 }
1308 
1309 void VirtualSpaceList::print_on(outputStream* st) const {
1310   if (TraceMetadataChunkAllocation && Verbose) {
1311     VirtualSpaceListIterator iter(virtual_space_list());
1312     while (iter.repeat()) {
1313       VirtualSpaceNode* node = iter.get_next();
1314       node->print_on(st);
1315     }
1316   }


1335 // Within the VM operation after the GC the attempt to allocate the metadata
1336 // should succeed.  If the GC did not free enough space for the metaspace
1337 // allocation, the HWM is increased so that another virtualspace will be
1338 // allocated for the metadata.  With perm gen the increase in the perm
1339 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
1340 // metaspace policy uses those as the small and large steps for the HWM.
1341 //
1342 // After the GC the compute_new_size() for MetaspaceGC is called to
1343 // resize the capacity of the metaspaces.  The current implementation
1344 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
1345 // to resize the Java heap by some GC's.  New flags can be implemented
1346 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
1347 // free space is desirable in the metaspace capacity to decide how much
1348 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
1349 // free space is desirable in the metaspace capacity before decreasing
1350 // the HWM.
1351 
1352 // Calculate the amount to increase the high water mark (HWM).
1353 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1354 // another expansion is not requested too soon.  If that is not
1355 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
1356 // If that is still not enough, expand by the size of the allocation
1357 // plus some.
1358 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
1359   size_t min_delta = MinMetaspaceExpansion;
1360   size_t max_delta = MaxMetaspaceExpansion;
1361   size_t delta = align_size_up(bytes, Metaspace::commit_alignment());
1362 
1363   if (delta <= min_delta) {
1364     delta = min_delta;
1365   } else if (delta <= max_delta) {

1366     // Don't want to hit the high water mark on the next
1367     // allocation so make the delta greater than just enough
1368     // for this allocation.
1369     delta = max_delta;
1370   } else {
1371     // This allocation is large but the next ones are probably not
1372     // so increase by the minimum.
1373     delta = delta + min_delta;

1374   }
1375 
1376   assert_is_size_aligned(delta, Metaspace::commit_alignment());
1377 
1378   return delta;
1379 }
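
// Editor's note: a worked example of the step policy above, assuming for
// illustration MinMetaspaceExpansion = 256K and MaxMetaspaceExpansion = 4M
// (the actual values are flag-dependent):
//
//   delta_capacity_until_GC(100 * K) -> 256K      (at or below the min step)
//   delta_capacity_until_GC(  1 * M) -> 4M        (between the two steps)
//   delta_capacity_until_GC(  8 * M) -> 8M + 256K (large request: the request
//                                                  itself plus the min step)
//
// The requested size is first aligned up to Metaspace::commit_alignment(), so
// the HWM only ever moves in commit-granularity steps.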
1380 
1381 size_t MetaspaceGC::capacity_until_GC() {
1382   size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
1383   assert(value >= MetaspaceSize, "Not initialized properly?");
1384   return value;
1385 }
1386 
1387 size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
1388   assert_is_size_aligned(v, Metaspace::commit_alignment());
1389 
1390   return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
1391 }
1392 
1393 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
1394   assert_is_size_aligned(v, Metaspace::commit_alignment());
1395 
1396   return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
1397 }
1398 
1399 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
1400   // Check if the compressed class space is full.
1401   if (is_class && Metaspace::using_class_space()) {
1402     size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
1403     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
1404       return false;
1405     }
1406   }
1407 
1408   // Check if the user has imposed a limit on the metaspace memory.
1409   size_t committed_bytes = MetaspaceAux::committed_bytes();
1410   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
1411     return false;





1412   }
1413 
1414   return true;
1415 }
1416 
1417 size_t MetaspaceGC::allowed_expansion() {
1418   size_t committed_bytes = MetaspaceAux::committed_bytes();
1419 
1420   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
1421 
1422   // Always grant expansion if we are initializing the JVM,
1423   // or if the GC_locker is preventing GCs.
1424   if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) {
1425     return left_until_max / BytesPerWord;


1426   }
1427 
1428   size_t capacity_until_gc = capacity_until_GC();
1429 
1430   if (capacity_until_gc <= committed_bytes) {
1431     return 0;
1432   }

1433 
1434   size_t left_until_GC = capacity_until_gc - committed_bytes;
1435   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
1436 
1437   return left_to_commit / BytesPerWord;
1438 }
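
// Editor's note: a small worked example for allowed_expansion(), with
// illustrative numbers only:
//
//   committed_bytes     = 60M
//   MaxMetaspaceSize    = 100M  -> left_until_max = 40M
//   capacity_until_GC() = 64M   -> left_until_GC  = 4M
//
//   allowed_expansion() = MIN2(4M, 40M) / BytesPerWord, i.e. 4M worth of words
//
// During VM initialization, or while the GC_locker blocks a needed GC, the
// full 40M of headroom below MaxMetaspaceSize is granted instead, since
// triggering a metaspace GC is not an option at that point.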
1439 
1440 void MetaspaceGC::compute_new_size() {
1441   assert(_shrink_factor <= 100, "invalid shrink factor");
1442   uint current_shrink_factor = _shrink_factor;
1443   _shrink_factor = 0;
1444 


1445   const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
1446   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1447 
1448   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1449   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1450 
1451   const double min_tmp = used_after_gc / maximum_used_percentage;
1452   size_t minimum_desired_capacity =
1453     (size_t)MIN2(min_tmp, double(max_uintx));
1454   // Don't shrink below the initial metaspace size
1455   minimum_desired_capacity = MAX2(minimum_desired_capacity,
1456                                   MetaspaceSize);
1457 
1458   if (PrintGCDetails && Verbose) {
1459     gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
1460     gclog_or_tty->print_cr("  "
1461                   "  minimum_free_percentage: %6.2f"
1462                   "  maximum_used_percentage: %6.2f",
1463                   minimum_free_percentage,
1464                   maximum_used_percentage);
1465     gclog_or_tty->print_cr("  "
1466                   "   used_after_gc       : %6.1fKB",
1467                   used_after_gc / (double) K);
1468   }
1469 
1470 
1471   size_t shrink_bytes = 0;
1472   if (capacity_until_GC < minimum_desired_capacity) {
1473     // If the current capacity below the metaspace HWM is less than the
1474     // minimum desired, increase the HWM.
1475     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1476     expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
1477     // Don't expand unless it's significant
1478     if (expand_bytes >= MinMetaspaceExpansion) {
1479       MetaspaceGC::inc_capacity_until_GC(expand_bytes);
1480     }
1481     if (PrintGCDetails && Verbose) {
1482       size_t new_capacity_until_GC = capacity_until_GC;
1483       gclog_or_tty->print_cr("    expanding:"
1484                     "  minimum_desired_capacity: %6.1fKB"
1485                     "  expand_bytes: %6.1fKB"
1486                     "  MinMetaspaceExpansion: %6.1fKB"
1487                     "  new metaspace HWM:  %6.1fKB",
1488                     minimum_desired_capacity / (double) K,
1489                     expand_bytes / (double) K,
1490                     MinMetaspaceExpansion / (double) K,
1491                     new_capacity_until_GC / (double) K);
1492     }
1493     return;
1494   }
1495 
1496   // No expansion, now see if we want to shrink
1497   // We would never want to shrink more than this
1498   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1499   assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,


1516       gclog_or_tty->print_cr("  "
1517                              "  minimum_desired_capacity: %6.1fKB"
1518                              "  maximum_desired_capacity: %6.1fKB",
1519                              minimum_desired_capacity / (double) K,
1520                              maximum_desired_capacity / (double) K);
1521     }
1522 
1523     assert(minimum_desired_capacity <= maximum_desired_capacity,
1524            "sanity check");
1525 
1526     if (capacity_until_GC > maximum_desired_capacity) {
1527       // Capacity too large, compute shrinking size
1528       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1529       // We don't want shrink all the way back to initSize if people call
1530       // System.gc(), because some programs do that between "phases" and then
1531       // we'd just have to grow the heap up again for the next phase.  So we
1532       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1533       // on the third call, and 100% by the fourth call.  But if we recompute
1534       // size without shrinking, it goes back to 0%.
1535       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1536 
1537       shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1538 
1539       assert(shrink_bytes <= max_shrink_bytes,
1540         err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1541           shrink_bytes, max_shrink_bytes));
1542       if (current_shrink_factor == 0) {
1543         _shrink_factor = 10;
1544       } else {
1545         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1546       }
1547       if (PrintGCDetails && Verbose) {
1548         gclog_or_tty->print_cr("  "
1549                       "  shrinking:"
1550                       "  initSize: %.1fK"
1551                       "  maximum_desired_capacity: %.1fK",
1552                       MetaspaceSize / (double) K,
1553                       maximum_desired_capacity / (double) K);
1554         gclog_or_tty->print_cr("  "
1555                       "  shrink_bytes: %.1fK"
1556                       "  current_shrink_factor: %d"
1557                       "  new shrink factor: %d"
1558                       "  MinMetaspaceExpansion: %.1fK",
1559                       shrink_bytes / (double) K,
1560                       current_shrink_factor,
1561                       _shrink_factor,
1562                       MinMetaspaceExpansion / (double) K);
1563       }
1564     }
1565   }
1566 
1567   // Don't shrink unless it's significant
1568   if (shrink_bytes >= MinMetaspaceExpansion &&
1569       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1570     MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1571   }
1572 }
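
// Editor's note: an illustrative trace of the shrink damping described above.
// Successive compute_new_size() calls that each decide to shrink apply an
// increasing fraction of the computed excess:
//
//   call 1: current_shrink_factor =   0 -> shrink   0%, _shrink_factor becomes 10
//   call 2: current_shrink_factor =  10 -> shrink  10%, _shrink_factor becomes 40
//   call 3: current_shrink_factor =  40 -> shrink  40%, _shrink_factor becomes 100
//   call 4: current_shrink_factor = 100 -> shrink 100%, _shrink_factor stays 100
//
// Any call that expands the HWM, or decides not to shrink, leaves
// _shrink_factor at 0 and the sequence starts over.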
1573 
1574 // Metadebug methods
1575 
1576 void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
1577                                        size_t chunk_word_size) {
1578 #ifdef ASSERT
1579   VirtualSpaceList* vsl = sm->vs_list();
1580   if (MetaDataDeallocateALot &&
1581       Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
1582     Metadebug::reset_deallocate_chunk_a_lot_count();
1583     for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) {
1584       Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
1585       if (dummy_chunk == NULL) {
1586         break;
1587       }
1588       sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
1589 
1590       if (TraceMetadataChunkAllocation && Verbose) {


1783   slow_locked_verify();
1784   if (TraceMetadataChunkAllocation) {
1785     gclog_or_tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
1786                            PTR_FORMAT "  size " SIZE_FORMAT,
1787                            chunk, chunk->word_size());
1788   }
1789   free_chunks_put(chunk);
1790 }
1791 
1792 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1793   assert_lock_strong(SpaceManager::expand_lock());
1794 
1795   slow_locked_verify();
1796 
1797   Metachunk* chunk = NULL;
1798   if (list_index(word_size) != HumongousIndex) {
1799     ChunkList* free_list = find_free_chunks_list(word_size);
1800     assert(free_list != NULL, "Sanity check");
1801 
1802     chunk = free_list->head();

1803 
1804     if (chunk == NULL) {
1805       return NULL;
1806     }
1807 
1808     // Remove the chunk as the head of the list.
1809     free_list->remove_chunk(chunk);
1810 



1811     if (TraceMetadataChunkAllocation && Verbose) {
1812       gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1813                              PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1814                              free_list, chunk, chunk->word_size());
1815     }
1816   } else {
1817     chunk = humongous_dictionary()->get_chunk(
1818       word_size,
1819       FreeBlockDictionary<Metachunk>::atLeast);
1820 
1821     if (chunk == NULL) {
1822       return NULL;
1823     }
1824 
1825     if (TraceMetadataHumongousAllocation) {
1826       size_t waste = chunk->word_size() - word_size;
1827       gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1828                              SIZE_FORMAT " for requested size " SIZE_FORMAT
1829                              " waste " SIZE_FORMAT,
1830                              chunk->word_size(), word_size, waste);
1831     }
1832   }
1833 
1834   // Chunk is being removed from the chunks free list.
1835   dec_free_chunks_total(chunk->capacity_word_size());




1836 
1837   // Remove it from the links to this freelist
1838   chunk->set_next(NULL);
1839   chunk->set_prev(NULL);
1840 #ifdef ASSERT
1841   // Chunk is no longer on any freelist. Setting it to false makes container_count_slow()
1842   // work.
1843   chunk->set_is_free(false);
1844 #endif
1845   chunk->container()->inc_container_count();
1846 
1847   slow_locked_verify();
1848   return chunk;
1849 }
1850 
1851 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1852   assert_lock_strong(SpaceManager::expand_lock());
1853   slow_locked_verify();
1854 
1855   // Take from the beginning of the list


2082          "Don't need to expand");
2083   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2084 
2085   if (TraceMetadataChunkAllocation && Verbose) {
2086     size_t words_left = 0;
2087     size_t words_used = 0;
2088     if (current_chunk() != NULL) {
2089       words_left = current_chunk()->free_word_size();
2090       words_used = current_chunk()->used_word_size();
2091     }
2092     gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
2093                            " words " SIZE_FORMAT " words used " SIZE_FORMAT
2094                            " words left",
2095                             word_size, words_used, words_left);
2096   }
2097 
2098   // Get another chunk out of the virtual space
2099   size_t grow_chunks_by_words = calc_chunk_size(word_size);
2100   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2101 
2102   if (next != NULL) {
2103     Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
2104   }
2105 
2106   MetaWord* mem = NULL;
2107 
2108   // If a chunk was available, add it to the in-use chunk list
2109   // and do an allocation from it.
2110   if (next != NULL) {

2111     // Add to this manager's list of chunks in use.
2112     add_chunk(next, false);
2113     mem = next->allocate(word_size);
2114   }
2115 
2116   return mem;
2117 }
2118 
2119 void SpaceManager::print_on(outputStream* st) const {
2120 
2121   for (ChunkIndex i = ZeroIndex;
2122        i < NumberOfInUseLists ;
2123        i = next_chunk_index(i) ) {
2124     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
2125                  chunks_in_use(i),
2126                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2127   }
2128   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2129                " Humongous " SIZE_FORMAT,
2130                sum_waste_in_chunks_in_use(SmallIndex),
2131                sum_waste_in_chunks_in_use(MediumIndex),
2132                sum_waste_in_chunks_in_use(HumongousIndex));
2133   // block free lists
2134   if (block_freelists() != NULL) {
2135     st->print_cr("total in block free lists " SIZE_FORMAT,
2136       block_freelists()->total_size());


2452 // Returns the address of spaced allocated for "word_size".
2453 // This methods does not know about blocks (Metablocks)
2454 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2455   assert_lock_strong(_lock);
2456 #ifdef ASSERT
2457   if (Metadebug::test_metadata_failure()) {
2458     return NULL;
2459   }
2460 #endif
2461   // Is there space in the current chunk?
2462   MetaWord* result = NULL;
2463 
2464   // For DumpSharedSpaces, only allocate out of the current chunk which is
2465   // never null because we gave it the size we wanted.   Caller reports out
2466   // of memory if this returns null.
2467   if (DumpSharedSpaces) {
2468     assert(current_chunk() != NULL, "should never happen");
2469     inc_used_metrics(word_size);
2470     return current_chunk()->allocate(word_size); // caller handles null result
2471   }
2472 
2473   if (current_chunk() != NULL) {
2474     result = current_chunk()->allocate(word_size);
2475   }
2476 
2477   if (result == NULL) {
2478     result = grow_and_allocate(word_size);
2479   }
2480 
2481   if (result != NULL) {
2482     inc_used_metrics(word_size);
2483     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2484            "Head of the list is being allocated");
2485   }
2486 
2487   return result;
2488 }
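
The path above is a classic fast/slow allocation split: bump-pointer allocate from the current chunk, and fall back to grow_and_allocate() when the chunk is exhausted. A minimal standalone sketch of that pattern in plain C++ (Chunk and Arena here are illustrative types, not HotSpot's Metachunk/SpaceManager):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Illustrative bump-pointer chunk: allocation fails once the chunk is full.
    struct Chunk {
      std::vector<uintptr_t> words;
      size_t top = 0;
      explicit Chunk(size_t capacity) : words(capacity) {}
      uintptr_t* allocate(size_t word_size) {
        if (top + word_size > words.size()) return nullptr;
        uintptr_t* p = &words[top];
        top += word_size;
        return p;
      }
    };

    // Illustrative space manager: fast path from the current (last) chunk,
    // slow path "grows" by appending a new, larger chunk and retrying.
    struct Arena {
      std::vector<Chunk> chunks;
      uintptr_t* allocate(size_t word_size) {
        if (!chunks.empty()) {
          if (uintptr_t* p = chunks.back().allocate(word_size)) return p;  // fast path
        }
        chunks.emplace_back(word_size * 8);        // analogue of grow_and_allocate()
        return chunks.back().allocate(word_size);  // slow path
      }
    };
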
2489 
2490 void SpaceManager::verify() {
2491   // If there are blocks in the dictionary, then
2492   // verification of chunks does not work since
2493   // being in the dictionary alters a chunk.
2494   if (block_freelists()->total_size() == 0) {
2495     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2496       Metachunk* curr = chunks_in_use(i);
2497       while (curr != NULL) {
2498         curr->verify();
2499         verify_chunk_size(curr);
2500         curr = curr->next();
2501       }


2727                         "("  SIZE_FORMAT ")",
2728                         prev_metadata_used,
2729                         allocated_used_bytes(),
2730                         reserved_bytes());
2731   } else {
2732     gclog_or_tty->print(" "  SIZE_FORMAT "K"
2733                         "->" SIZE_FORMAT "K"
2734                         "("  SIZE_FORMAT "K)",
2735                         prev_metadata_used/K,
2736                         allocated_used_bytes()/K,
2737                         reserved_bytes()/K);
2738   }
2739 
2740   gclog_or_tty->print("]");
2741 }
2742 
2743 // This is printed when PrintGCDetails is enabled.
2744 void MetaspaceAux::print_on(outputStream* out) {
2745   Metaspace::MetadataType nct = Metaspace::NonClassType;
2746 
2747   out->print_cr(" Metaspace       "
2748                 "used "      SIZE_FORMAT "K, "
2749                 "capacity "  SIZE_FORMAT "K, "
2750                 "committed " SIZE_FORMAT "K, "
2751                 "reserved "  SIZE_FORMAT "K",
2752                 allocated_used_bytes()/K,
2753                 allocated_capacity_bytes()/K,
2754                 committed_bytes()/K,
2755                 reserved_bytes()/K);
2756 

2757   if (Metaspace::using_class_space()) {
2758     Metaspace::MetadataType ct = Metaspace::ClassType;
2759     out->print_cr("  class space    "
2760                   "used "      SIZE_FORMAT "K, "
2761                   "capacity "  SIZE_FORMAT "K, "
2762                   "committed " SIZE_FORMAT "K, "
2763                   "reserved "  SIZE_FORMAT "K",
2764                   allocated_used_bytes(ct)/K,
2765                   allocated_capacity_bytes(ct)/K,
2766                   committed_bytes(ct)/K,
2767                   reserved_bytes(ct)/K);
2768   }
2769 }
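
For reference, the two gclog format strings above expand to one summary line and, when compressed class pointers are in use, one class-space line. A small illustration of the resulting shape (the values are placeholders only, and %zu merely stands in for SIZE_FORMAT, which is platform dependent):

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Placeholder numbers, in KB, purely to show the line layout.
      size_t used = 2560, capacity = 4096, committed = 4480, reserved = 104448;
      std::printf(" Metaspace       used %zuK, capacity %zuK, committed %zuK, reserved %zuK\n",
                  used, capacity, committed, reserved);
      return 0;
    }
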
2770 
2771 // Print information for class space and data space separately.
2772 // This is almost the same as above.
2773 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2774   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2775   size_t capacity_bytes = capacity_bytes_slow(mdtype);
2776   size_t used_bytes = used_bytes_slow(mdtype);
2777   size_t free_bytes = free_bytes_slow(mdtype);
2778   size_t used_and_free = used_bytes + free_bytes +
2779                            free_chunks_capacity_bytes;
2780   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
2781              "K + unused in chunks " SIZE_FORMAT "K  + "
2782              " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2783              "K  capacity in allocated chunks " SIZE_FORMAT "K",
2784              used_bytes / K,
2785              free_bytes / K,
2786              free_chunks_capacity_bytes / K,


2898     size_t used_in_use_bytes = used_bytes_slow(i);
2899     assert(allocated_used_bytes(i) == used_in_use_bytes,
2900       err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
2901               " used_bytes_slow(%u)" SIZE_FORMAT,
2902               i, allocated_used_bytes(i), i, used_in_use_bytes));
2903   }
2904 #endif
2905 }
2906 
2907 void MetaspaceAux::verify_metrics() {
2908   verify_capacity();
2909   verify_used();
2910 }
2911 
2912 
2913 // Metaspace methods
2914 
2915 size_t Metaspace::_first_chunk_word_size = 0;
2916 size_t Metaspace::_first_class_chunk_word_size = 0;
2917 
2918 size_t Metaspace::_commit_alignment = 0;
2919 size_t Metaspace::_reserve_alignment = 0;
2920 
2921 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2922   initialize(lock, type);
2923 }
2924 
2925 Metaspace::~Metaspace() {
2926   delete _vsm;
2927   if (using_class_space()) {
2928     delete _class_vsm;
2929   }
2930 }
2931 
2932 VirtualSpaceList* Metaspace::_space_list = NULL;
2933 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2934 
2935 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
2936 ChunkManager* Metaspace::_chunk_manager_class = NULL;
2937 
2938 #define VIRTUALSPACEMULTIPLIER 2
2939 
2940 #ifdef _LP64


2962   }
2963 }
2964 
2965 // Return TRUE if the specified metaspace_base and cds_base are close enough
2966 // to work with compressed klass pointers.
2967 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
2968   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
2969   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2970   address lower_base = MIN2((address)metaspace_base, cds_base);
2971   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2972                                 (address)(metaspace_base + class_metaspace_size()));
2973   return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint);
2974 }
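
The predicate above only asks whether the CDS archive and the class space fit inside a single span addressable by a 32-bit offset from the lower of the two bases. A self-contained sketch of the same range test (plain C++; the function name is illustrative):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    // True if both regions fall within one window smaller than 4GB, so a 32-bit
    // unsigned offset from the lower base can reach the end of either region.
    bool fits_in_narrow_klass_window(uintptr_t metaspace_base, size_t metaspace_size,
                                     uintptr_t cds_base,       size_t cds_size) {
      uintptr_t lower_base     = std::min(metaspace_base, cds_base);
      uintptr_t higher_address = std::max(metaspace_base + metaspace_size,
                                          cds_base + cds_size);
      return (uint64_t)(higher_address - lower_base) < (uint64_t)UINT32_MAX;
    }
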
2975 
2976 // Try to allocate the metaspace at the requested addr.
2977 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
2978   assert(using_class_space(), "called improperly");
2979   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2980   assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
2981          "Metaspace size is too big");
2982   assert_is_ptr_aligned(requested_addr,          _reserve_alignment);
2983   assert_is_ptr_aligned(cds_base,                _reserve_alignment);
2984   assert_is_size_aligned(class_metaspace_size(), _reserve_alignment);
2985 
2986   // Don't use large pages for the class space.
2987   bool large_pages = false;
2988 
2989   ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
2990                                              _reserve_alignment,
2991                                              large_pages, 
2992                                              requested_addr, 0);
2993   if (!metaspace_rs.is_reserved()) {
2994     if (UseSharedSpaces) {
2995       size_t increment = align_size_up(1*G, _reserve_alignment);
2996 
2997       // Keep trying to allocate the metaspace, increasing the requested_addr
2998       // by 1GB each time, until we reach an address that will no longer allow
2999       // use of CDS with compressed klass pointers.
3000       char *addr = requested_addr;
3001       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3002              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3003         addr = addr + increment;
3004         metaspace_rs = ReservedSpace(class_metaspace_size(),
3005                                      _reserve_alignment, large_pages, addr, 0);
3006       }
3007     }
3008 
3009     // If no reservation has succeeded, try to allocate the space anywhere.  If
3010     // that also fails, the VM exits with an out-of-memory error.  At this point
3011     // we cannot fall back to running as if UseCompressedClassPointers is off,
3012     // because too much initialization that depends on UseCompressedClassPointers
3013     // has already happened, so it cannot be turned off now.
3014     if (!metaspace_rs.is_reserved()) {
3015       metaspace_rs = ReservedSpace(class_metaspace_size(),
3016                                    _reserve_alignment, large_pages);
3017       if (!metaspace_rs.is_reserved()) {
3018         vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
3019                                               class_metaspace_size()));
3020       }
3021     }
3022   }
3023 
3024   // If we got here then the metaspace got allocated.
3025   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3026 
3027   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3028   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3029     FileMapInfo::stop_sharing_and_unmap(
3030         "Could not allocate metaspace at a compatible address");
3031   }
3032 
3033   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3034                                   UseSharedSpaces ? (address)cds_base : 0);
3035 
3036   initialize_class_space(metaspace_rs);
3037 
3038   if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
3039     gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
3040                             Universe::narrow_klass_base(), Universe::narrow_klass_shift());
3041     gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
3042                            class_metaspace_size(), metaspace_rs.base(), requested_addr);
3043   }
3044 }
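
The retry logic above probes upward in fixed 1GB steps until either a reservation succeeds or the candidate address can no longer satisfy the compatibility check. A simplified sketch of that probing loop, with the reservation and validity checks passed in as callbacks (hypothetical hooks, not the HotSpot ReservedSpace API):

    #include <cstddef>

    // try_reserve_at returns the mapped base or nullptr; still_valid is the
    // caller's compatibility test (e.g. the 4GB-window check sketched earlier).
    char* reserve_with_probing(char* requested_addr, size_t bytes,
                               char* (*try_reserve_at)(char*, size_t),
                               bool  (*still_valid)(char*)) {
      const size_t increment = size_t(1) << 30;  // 1GB steps, as above
      char* addr = requested_addr;
      char* base = try_reserve_at(addr, bytes);
      while (base == nullptr && addr + increment > addr && still_valid(addr + increment)) {
        addr += increment;
        base = try_reserve_at(addr, bytes);
      }
      return base;  // may still be nullptr; the caller then tries "anywhere"
    }
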
3045 
3046 // For UseCompressedClassPointers the class space is reserved above the top of
3047 // the Java heap.  The argument passed in is at the base of the compressed space.
3048 void Metaspace::initialize_class_space(ReservedSpace rs) {
3049   // The reserved space size may be bigger because of alignment, especially with UseLargePages.
3050   assert(rs.size() >= CompressedClassSpaceSize,
3051          err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
3052   assert(using_class_space(), "Must be using class space");
3053   _class_space_list = new VirtualSpaceList(rs);
3054   _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3055 
3056   if (!_class_space_list->initialization_succeeded()) {
3057     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3058   }
3059 }
3060 
3061 #endif
3062 
3063 // Align down. If aligning results in 0, return 'alignment'.
3064 static size_t restricted_align_down(size_t size, size_t alignment) {
3065   return MAX2(alignment, align_size_down_(size, alignment));
3066 }
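
A quick standalone check of the helper's behavior, assuming align_size_down_ rounds down to a multiple of a power-of-two alignment (align_down below is an illustrative stand-in, not the HotSpot macro):

    #include <cassert>
    #include <cstddef>

    static size_t align_down(size_t size, size_t alignment) {  // alignment: power of two
      return size & ~(alignment - 1);
    }
    static size_t restricted_align_down(size_t size, size_t alignment) {
      size_t aligned = align_down(size, alignment);
      return aligned > alignment ? aligned : alignment;        // MAX2(alignment, aligned)
    }

    int main() {
      const size_t K = 1024;
      assert(restricted_align_down(10 * K + 100, 4 * K) == 8 * K);  // rounds down
      assert(restricted_align_down( 3 * K,       4 * K) == 4 * K);  // would be 0, clamped up
      return 0;
    }
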
3067 
3068 void Metaspace::ergo_initialize() {
3069   if (DumpSharedSpaces) {
3070     // Using large pages when dumping the shared archive is currently not implemented.
3071     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3072   }
3073 
3074   size_t page_size = os::vm_page_size();
3075   if (UseLargePages && UseLargePagesInMetaspace) {
3076     page_size = os::large_page_size();
3077   }
3078 
3079   _commit_alignment  = page_size;
3080   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
3081 
3082   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since that would
3083   // clobber the record of whether MaxMetaspaceSize was set on the command line.
3084   // This information is needed later to conform to the specification of the
3085   // java.lang.management.MemoryUsage API.
3086   //
3087   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3088   // globals.hpp to the aligned value, but this is not possible, since the
3089   // alignment depends on other flags being parsed.
3090   MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, _reserve_alignment);
3091 
3092   if (MetaspaceSize > MaxMetaspaceSize) {
3093     MetaspaceSize = MaxMetaspaceSize;
3094   }
3095 
3096   MetaspaceSize = restricted_align_down(MetaspaceSize, _commit_alignment);
3097 
3098   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3099 
3100   if (MetaspaceSize < 256*K) {
3101     vm_exit_during_initialization("Too small initial Metaspace size");
3102   }
3103 
3104   MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, _commit_alignment);
3105   MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _commit_alignment);
3106 
3107   CompressedClassSpaceSize = restricted_align_down(CompressedClassSpaceSize, _reserve_alignment);
3108   set_class_metaspace_size(CompressedClassSpaceSize);
3109 }
3110 
3111 void Metaspace::global_initialize() {
3112   // Initialize the alignment for shared spaces.
3113   int max_alignment = os::vm_page_size();
3114   size_t cds_total = 0;
3115 



3116   MetaspaceShared::set_max_alignment(max_alignment);
3117 
3118   if (DumpSharedSpaces) {
3119     SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
3120     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
3121     SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
3122     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
3123 
3124     // Initialize with the sum of the shared space sizes.  The read-only
3125     // and read-write metaspace chunks will be allocated out of this, and the
3126     // remainder is used for the misc code and data chunks.
3127     cds_total = FileMapInfo::shared_spaces_size();
3128     cds_total = align_size_up(cds_total, _reserve_alignment);
3129     _space_list = new VirtualSpaceList(cds_total/wordSize);
3130     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3131 
3132     if (!_space_list->initialization_succeeded()) {
3133       vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3134     }
3135 
3136 #ifdef _LP64
3137     if (cds_total + class_metaspace_size() > (uint64_t)max_juint) {
3138       vm_exit_during_initialization("Unable to dump shared archive.",
3139           err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3140                   SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3141                   "klass limit: " SIZE_FORMAT, cds_total, class_metaspace_size(),
3142                   cds_total + class_metaspace_size(), (size_t)max_juint));
3143     }
3144 
3145     // Set the compressed klass pointer base so that decoding of these pointers works
3146     // properly when creating the shared archive.
3147     assert(UseCompressedOops && UseCompressedClassPointers,
3148       "UseCompressedOops and UseCompressedClassPointers must be set");
3149     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3150     if (TraceMetavirtualspaceAllocation && Verbose) {
3151       gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
3152                              _space_list->current_virtual_space()->bottom());
3153     }
3154 



3155     Universe::set_narrow_klass_shift(0);
3156 #endif
3157 
3158   } else {
3159     // If using shared space, open the file that contains the shared space
3160     // and map in the memory before initializing the rest of metaspace (so
3161     // the addresses don't conflict)
3162     address cds_address = NULL;
3163     if (UseSharedSpaces) {
3164       FileMapInfo* mapinfo = new FileMapInfo();
3165       memset(mapinfo, 0, sizeof(FileMapInfo));
3166 
3167       // Open the shared archive file, read and validate the header.  If
3168       // initialization fails, shared spaces [UseSharedSpaces] are
3169       // disabled and the file is closed.
3170       // The spaces are also mapped in now.
3171       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3172         FileMapInfo::set_current_info(mapinfo);
3173         cds_total = FileMapInfo::shared_spaces_size();
3174         cds_address = (address)mapinfo->region_base(0);
3175       } else {
3176         assert(!mapinfo->is_open() && !UseSharedSpaces,
3177                "archive file not closed or shared spaces not disabled.");
3178       }


3179     }
3180 
3181 #ifdef _LP64
3182     // If UseCompressedClassPointers is set then allocate the metaspace area
3183     // above the heap and above the CDS area (if it exists).
3184     if (using_class_space()) {
3185       if (UseSharedSpaces) {
3186         char* cds_end = (char*)(cds_address + cds_total);
3187         cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3188         allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3189       } else {
3190         allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
3191       }
3192     }
3193 #endif
3194 
3195     // Initialize these before initializing the VirtualSpaceList
3196     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3197     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3198     // Make the first class chunk bigger than a medium chunk so it's not put
3199     // on the medium chunk list.  The next chunk will be small and progress
3200     // from there.  This size was calculated by running with -version.
3201     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3202                                        (CompressedClassSpaceSize/BytesPerWord)*2);
3203     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3204     // Arbitrarily set the initial virtual space to a multiple
3205     // of the boot class loader size.
3206     size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3207     word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3208 
3209     // Initialize the list of virtual spaces.
3210     _space_list = new VirtualSpaceList(word_size);
3211     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3212 
3213     if (!_space_list->initialization_succeeded()) {
3214       vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3215     }
3216   }
3217 
3218   MetaspaceGC::initialize();
3219 }
3220 
3221 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
3222                                                size_t chunk_word_size,
3223                                                size_t chunk_bunch) {
3224   // Get a chunk from the chunk freelist
3225   Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3226   if (chunk != NULL) {
3227     return chunk;
3228   }
3229 
3230   return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch);
3231 }
3232 
3233 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3234 
3235   assert(space_list() != NULL,
3236     "Metadata VirtualSpaceList has not been initialized");
3237   assert(chunk_manager_metadata() != NULL,
3238     "Metadata ChunkManager has not been initialized");


3283   _alloc_record_head = NULL;
3284   _alloc_record_tail = NULL;
3285 }
3286 
3287 size_t Metaspace::align_word_size_up(size_t word_size) {
3288   size_t byte_size = word_size * wordSize;
3289   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3290 }
3291 
3292 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3293   // DumpSharedSpaces doesn't use class metadata area (yet)
3294   // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3295   if (is_class_space_allocation(mdtype)) {
3296     return  class_vsm()->allocate(word_size);
3297   } else {
3298     return  vsm()->allocate(word_size);
3299   }
3300 }
3301 
3302 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3303   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3304   assert(delta_bytes > 0, "Must be");
3305 
3306   size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes);
3307   size_t before_inc = after_inc - delta_bytes;
3308 
3309   if (PrintGCDetails && Verbose) {
3310     gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
3311         " to " SIZE_FORMAT, before_inc, after_inc);
3312   }
3313 
3314   return allocate(word_size, mdtype);


3315 }
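
The before/after values logged above come from bumping the GC-trigger threshold by delta_bytes and then retrying the allocation. A minimal sketch of that increment (std::atomic stands in for HotSpot's internal counter; the names are illustrative):

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> capacity_until_gc{0};  // threshold at which the next GC triggers

    // Returns the new threshold; the previous one is simply new - delta_bytes,
    // which is how before_inc is recovered in the code above.
    size_t inc_capacity_until_gc(size_t delta_bytes) {
      return capacity_until_gc.fetch_add(delta_bytes) + delta_bytes;
    }
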
3316 
3317 // Space allocated in the Metaspace.  This may
3318 // span several metadata virtual spaces.
3319 char* Metaspace::bottom() const {
3320   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3321   return (char*)vsm()->current_chunk()->bottom();
3322 }
3323 
3324 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3325   if (mdtype == ClassType) {
3326     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3327   } else {
3328     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3329   }
3330 }
3331 
3332 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3333   if (mdtype == ClassType) {
3334     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;


3376       vsm()->deallocate(ptr, word_size);
3377     }
3378   } else {
3379     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3380 
3381     if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
3382       // Dark matter.  Too small for dictionary.
3383 #ifdef ASSERT
3384       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3385 #endif
3386       return;
3387     }
3388     if (is_class && using_class_space()) {
3389       class_vsm()->deallocate(ptr, word_size);
3390     } else {
3391       vsm()->deallocate(ptr, word_size);
3392     }
3393   }
3394 }
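
The deallocation path above discards blocks smaller than the dictionary's minimum tree-chunk size as "dark matter" (overwritten with a fill pattern only in debug builds) instead of returning them to a free list. A standalone sketch of that size gate (the constant and callback are illustrative, not HotSpot's TreeChunk limit or block free lists):

    #include <cstddef>
    #include <cstring>

    const size_t kMinDictionaryWords = 4;  // illustrative stand-in for TreeChunk min_size()

    // Returns true if the block was recycled, false if it was abandoned because
    // it is too small for the dictionary to track.
    bool deallocate_words(void* ptr, size_t word_size,
                          void (*add_to_free_list)(void*, size_t)) {
      if (word_size < kMinDictionaryWords) {
    #ifndef NDEBUG
        std::memset(ptr, 0xf5, word_size * sizeof(void*));  // debug-only fill pattern
    #endif
        return false;
      }
      add_to_free_list(ptr, word_size);
      return true;
    }
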
3395 
3396 
3397 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3398                               bool read_only, MetaspaceObj::Type type, TRAPS) {
3399   if (HAS_PENDING_EXCEPTION) {
3400     assert(false, "Should not allocate with exception pending");
3401     return NULL;  // caller does a CHECK_NULL too
3402   }
3403 





3404   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3405         "ClassLoaderData::the_null_class_loader_data() should have been used.");
3406 
3407   // Allocate in metaspaces without taking out a lock, because it deadlocks
3408   // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
3409   // to revisit this for application class data sharing.
3410   if (DumpSharedSpaces) {
3411     assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3412     Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3413     MetaWord* result = space->allocate(word_size, NonClassType);
3414     if (result == NULL) {
3415       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3416     } else {
3417       space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3418     }
3419     return Metablock::initialize(result, word_size);
3420   }
3421 
3422   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3423 
3424   // Try to allocate metadata.
3425   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3426 
3427   if (result == NULL) {
3428     // Allocation failed.
3429     if (is_init_completed()) {
3430       // Only start a GC if the bootstrapping has completed.
3431 
3432       // Try to clean out some memory and retry.
3433       result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(

3434           loader_data, word_size, mdtype);
3435     }
3436   }
3437 

3438   if (result == NULL) {
3439     report_metadata_oome(loader_data, word_size, mdtype, THREAD);
3440     // Will not reach here.
3441     return NULL;
3442   }
3443 
3444   return Metablock::initialize(result, word_size);
3445 }
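
Metaspace::allocate above follows an attempt / GC-assisted retry / report pattern: try the allocation, let the collector reclaim metadata and retry only once bootstrapping is complete, and report out-of-memory only after that retry also fails. A compact sketch of the control flow with the collaborators passed in as callbacks (hypothetical hooks, not HotSpot APIs):

    #include <cstddef>

    void* allocate_with_gc_retry(size_t word_size,
                                 void* (*try_allocate)(size_t),
                                 void* (*collect_and_retry)(size_t),
                                 void  (*report_oome)(size_t),
                                 bool  bootstrapping_done) {
      void* result = try_allocate(word_size);
      if (result == nullptr && bootstrapping_done) {
        // Only trigger a collection once startup is far enough along.
        result = collect_and_retry(word_size);
      }
      if (result == nullptr) {
        report_oome(word_size);  // in the code above this throws and does not return
        return nullptr;
      }
      return result;
    }
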
3446 
3447 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) {
3448   // If result is still null, we are out of memory.
3449   if (Verbose && TraceMetadataChunkAllocation) {
3450     gclog_or_tty->print_cr("Metaspace allocation failed for size "
3451         SIZE_FORMAT, word_size);
3452     if (loader_data->metaspace_or_null() != NULL) {
3453       loader_data->dump(gclog_or_tty);
3454     }
3455     MetaspaceAux::dump(gclog_or_tty);
3456   }
3457 
3458   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3459   const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" :
3460                                                                  "Metadata space";
3461   report_java_out_of_memory(space_string);
3462 
3463   if (JvmtiExport::should_post_resource_exhausted()) {
3464     JvmtiExport::post_resource_exhausted(
3465         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3466         space_string);
3467   }
3468 
3469   if (!is_init_completed()) {
3470     vm_exit_during_initialization("OutOfMemoryError", space_string);
3471   }
3472 
3473   if (is_class_space_allocation(mdtype)) {
3474     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3475   } else {
3476     THROW_OOP(Universe::out_of_memory_error_metaspace());
3477   }



3478 }
3479 
3480 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3481   assert(DumpSharedSpaces, "sanity");
3482 
3483   AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
3484   if (_alloc_record_head == NULL) {
3485     _alloc_record_head = _alloc_record_tail = rec;
3486   } else {
3487     _alloc_record_tail->_next = rec;
3488     _alloc_record_tail = rec;
3489   }
3490 }
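
record_allocation above is a plain tail-pointer append to a singly linked list, which keeps the records in allocation order with O(1) inserts for the later iterate() pass. A minimal sketch of the same structure (Record and RecordList are illustrative, not HotSpot's AllocRecord):

    #include <cstddef>

    struct Record {
      void*   addr;
      size_t  bytes;
      Record* next = nullptr;
      Record(void* a, size_t b) : addr(a), bytes(b) {}
    };

    struct RecordList {
      Record* head = nullptr;
      Record* tail = nullptr;
      void append(Record* rec) {  // O(1): link at the tail, preserving insertion order
        if (head == nullptr) {
          head = tail = rec;
        } else {
          tail->next = rec;
          tail = rec;
        }
      }
    };
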
3491 
3492 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3493   assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3494 
3495   address last_addr = (address)bottom();
3496 
3497   for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {

