
src/hotspot/share/memory/metaspace.cpp

rev 49926 : [mq]: 8201572-improve-metaspace-reporting


  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "logging/log.hpp"
  29 #include "logging/logStream.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/binaryTreeDictionary.inline.hpp"
  32 #include "memory/filemap.hpp"
  33 #include "memory/freeList.inline.hpp"
  34 #include "memory/metachunk.hpp"
  35 #include "memory/metaspace.hpp"


  36 #include "memory/metaspaceGCThresholdUpdater.hpp"
  37 #include "memory/metaspaceShared.hpp"
  38 #include "memory/metaspaceTracer.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "memory/universe.hpp"
  41 #include "runtime/atomic.hpp"
  42 #include "runtime/globals.hpp"
  43 #include "runtime/init.hpp"
  44 #include "runtime/java.hpp"
  45 #include "runtime/mutex.hpp"

  46 #include "runtime/orderAccess.inline.hpp"
  47 #include "services/memTracker.hpp"
  48 #include "services/memoryService.hpp"
  49 #include "utilities/align.hpp"
  50 #include "utilities/copy.hpp"
  51 #include "utilities/debug.hpp"
  52 #include "utilities/macros.hpp"
  53 


  54 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  55 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
  56 
  57 // Helper function that does a bunch of checks for a chunk.
  58 DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
  59 
  60 // Given a Metachunk, update its in-use information (both in the
  61 // chunk and the occupancy map).
  62 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
  63 
  64 size_t const allocation_from_dictionary_limit = 4 * K;
  65 
  66 MetaWord* last_allocated = 0;
  67 
  68 size_t Metaspace::_compressed_class_space_size;
  69 const MetaspaceTracer* Metaspace::_tracer = NULL;
  70 
  71 DEBUG_ONLY(bool Metaspace::_frozen = false;)
  72 
  73 enum ChunkSizes {    // in words.
  74   ClassSpecializedChunk = 128,
  75   SpecializedChunk = 128,
  76   ClassSmallChunk = 256,
  77   SmallChunk = 512,
  78   ClassMediumChunk = 4 * K,
  79   MediumChunk = 8 * K
  80 };
  81 
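[Editor's note, illustrative only] The enum values above are word counts; on a
64-bit VM (BytesPerWord == 8) the byte sizes work out as:

    (Class)SpecializedChunk = 128 words  =  1 KB
    ClassSmallChunk         = 256 words  =  2 KB
    SmallChunk              = 512 words  =  4 KB
    ClassMediumChunk        = 4 K words  = 32 KB
    MediumChunk             = 8 K words  = 64 KB
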
  82 // Returns size of this chunk type.
  83 size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
  84   assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
  85   size_t size = 0;
  86   if (is_class) {
  87     switch(chunktype) {
  88       case SpecializedIndex: size = ClassSpecializedChunk; break;
  89       case SmallIndex: size = ClassSmallChunk; break;
  90       case MediumIndex: size = ClassMediumChunk; break;
  91       default:
  92         ShouldNotReachHere();


 116       assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
 117       return HumongousIndex;
 118     }
 119   } else {
 120     if (size == SpecializedChunk) {
 121       return SpecializedIndex;
 122     } else if (size == SmallChunk) {
 123       return SmallIndex;
 124     } else if (size == MediumChunk) {
 125       return MediumIndex;
 126     } else if (size > MediumChunk) {
 127       // A valid humongous chunk size is a multiple of the smallest chunk size.
 128       assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
 129       return HumongousIndex;
 130     }
 131   }
 132   ShouldNotReachHere();
 133   return (ChunkIndex)-1;
 134 }
 135 
 136 
 137 static ChunkIndex next_chunk_index(ChunkIndex i) {
 138   assert(i < NumberOfInUseLists, "Out of bound");
 139   return (ChunkIndex) (i+1);
 140 }
 141 
 142 static ChunkIndex prev_chunk_index(ChunkIndex i) {
 143   assert(i > ZeroIndex, "Out of bound");
 144   return (ChunkIndex) (i-1);
 145 }
 146 
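A minimal debug-only sketch (editor's illustration, not part of this change) of
how the chunk type/size helpers are expected to relate, assuming
get_chunk_type_by_size() from earlier in this file:

#ifdef ASSERT
// Illustrative check: type -> size -> type should round-trip for
// non-humongous chunks, in both the class and non-class case.
static void verify_chunk_size_mapping_roundtrips(bool is_class) {
  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
    const size_t size = get_size_for_nonhumongous_chunktype(i, is_class);
    assert(get_chunk_type_by_size(size, is_class) == i, "mapping should round-trip");
  }
}
#endif
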
 147 static const char* scale_unit(size_t scale) {
 148   switch(scale) {
 149     case 1: return "BYTES";
 150     case K: return "KB";
 151     case M: return "MB";
 152     case G: return "GB";
 153     default:
 154       ShouldNotReachHere();
 155       return NULL;
 156   }


 157 }
 158 
 159 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 160 uint MetaspaceGC::_shrink_factor = 0;
 161 bool MetaspaceGC::_should_concurrent_collect = false;
 162 

 163 typedef class FreeList<Metachunk> ChunkList;
 164 
 165 // Manages the global free lists of chunks.
 166 class ChunkManager : public CHeapObj<mtInternal> {
 167   friend class TestVirtualSpaceNodeTest;
 168 
 169   // Free list of chunks of different sizes.
 170   //   SpecializedChunk
 171   //   SmallChunk
 172   //   MediumChunk
 173   ChunkList _free_chunks[NumberOfFreeLists];
 174 
 175   // Whether or not this is the class chunkmanager.
 176   const bool _is_class;
 177 
 178   // Return non-humongous chunk list by its index.
 179   ChunkList* free_chunks(ChunkIndex index);
 180 
 181   // Returns non-humongous chunk list for the given chunk word size.
 182   ChunkList* find_free_chunks_list(size_t word_size);


 223 
 224   // Helper for chunk merging:
 225   //  Given an address range with 1-n chunks which are all supposed to be
 226   //  free and hence currently managed by this ChunkManager, remove them
 227   //  from this ChunkManager and mark them as invalid.
 228   // - This does not correct the occupancy map.
 229   // - This does not adjust the counters in ChunkManager.
 230   // - Does not adjust container count counter in containing VirtualSpaceNode.
 231   // Returns number of chunks removed.
 232   int remove_chunks_in_area(MetaWord* p, size_t word_size);
 233 
 234   // Helper for chunk splitting: given a target chunk size and a larger free chunk,
 235   // split up the larger chunk into n smaller chunks, at least one of which should be
 236   //  the target chunk of the target chunk size. The smaller chunks, including the target
 237   // chunk, are returned to the freelist. The pointer to the target chunk is returned.
 238   // Note that this chunk is supposed to be removed from the freelist right away.
 239   Metachunk* split_chunk(size_t target_chunk_word_size, Metachunk* chunk);
 240 
 241  public:
 242 
 243   struct ChunkManagerStatistics {
 244     size_t num_by_type[NumberOfFreeLists];
 245     size_t single_size_by_type[NumberOfFreeLists];
 246     size_t total_size_by_type[NumberOfFreeLists];
 247     size_t num_humongous_chunks;
 248     size_t total_size_humongous_chunks;
 249   };
 250 
 251   void locked_get_statistics(ChunkManagerStatistics* stat) const;
 252   void get_statistics(ChunkManagerStatistics* stat) const;
 253   static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale);
 254 
 255 
 256   ChunkManager(bool is_class)
 257       : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
 258     _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
 259     _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
 260     _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
 261   }
 262 
 263   // Allocate a chunk from, or return a chunk to, the global freelist.
 264   Metachunk* chunk_freelist_allocate(size_t word_size);
 265 
 266   // Map a size to a list index assuming that there are lists
 267   // for special, small, medium, and humongous chunks.
 268   ChunkIndex list_index(size_t size);
 269 
 270   // Map a given index to the chunk size.
 271   size_t size_by_index(ChunkIndex index) const;
 272 
 273   bool is_class() const { return _is_class; }
 274 
 275   // Convenience accessors.


 341   // Debug support
 342   void verify();
 343   void slow_verify() {
 344     if (VerifyMetaspace) {
 345       verify();
 346     }
 347   }
 348   void locked_verify();
 349   void slow_locked_verify() {
 350     if (VerifyMetaspace) {
 351       locked_verify();
 352     }
 353   }
 354   void verify_free_chunks_total();
 355 
 356   void locked_print_free_chunks(outputStream* st);
 357   void locked_print_sum_free_chunks(outputStream* st);
 358 
 359   void print_on(outputStream* st) const;
 360 
 361   // Prints composition for both non-class and (if available)
 362   // class chunk manager.
 363   static void print_all_chunkmanagers(outputStream* out, size_t scale = 1);
 364 };
 365 
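A usage sketch (editor's illustration, assuming a ChunkManager* cm and an
outputStream* out): statistics are snapshotted under the lock and printed
afterwards, the same pattern print_all_chunkmanagers() uses below:

  ChunkManager::ChunkManagerStatistics stat;
  cm->get_statistics(&stat);                     // locks MetaspaceExpand_lock internally
  ChunkManager::print_statistics(&stat, out, K); // totals scaled to KB
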
 366 class SmallBlocks : public CHeapObj<mtClass> {
 367   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 368   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 369 
 370  private:
 371   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 372 
 373   FreeList<Metablock>& list_at(size_t word_size) {
 374     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 375     return _small_lists[word_size - _small_block_min_size];
 376   }
 377 
 378  public:
 379   SmallBlocks() {
 380     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 381       uint k = i - _small_block_min_size;
 382       _small_lists[k].set_size(i);
 383     }
 384   }
 385 

 386   size_t total_size() const {
 387     size_t result = 0;
 388     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 389       uint k = i - _small_block_min_size;
 390       result = result + _small_lists[k].count() * _small_lists[k].size();
 391     }
 392     return result;
 393   }
 394 
 395   static uint small_block_max_size() { return _small_block_max_size; }
 396   static uint small_block_min_size() { return _small_block_min_size; }
 397 
 398   MetaWord* get_block(size_t word_size) {
 399     if (list_at(word_size).count() > 0) {
 400       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 401       return new_block;
 402     } else {
 403       return NULL;
 404     }
 405   }
 406   void return_block(Metablock* free_chunk, size_t word_size) {
 407     list_at(word_size).return_chunk_at_head(free_chunk, false);
 408     assert(list_at(word_size).count() > 0, "Should have a chunk");
 409   }
 410 
 411   void print_on(outputStream* st) const {
 412     st->print_cr("SmallBlocks:");
 413     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 414       uint k = i - _small_block_min_size;


 427   // is at least 1/4th the size of the available block.
 428   const static int WasteMultiplier = 4;
 429 
 430   // Accessors
 431   BlockTreeDictionary* dictionary() const { return _dictionary; }
 432   SmallBlocks* small_blocks() {
 433     if (_small_blocks == NULL) {
 434       _small_blocks = new SmallBlocks();
 435     }
 436     return _small_blocks;
 437   }
 438 
 439  public:
 440   BlockFreelist();
 441   ~BlockFreelist();
 442 
 443   // Get a block from, or return a block to, the free list
 444   MetaWord* get_block(size_t word_size);
 445   void return_block(MetaWord* p, size_t word_size);
 446 

 447   size_t total_size() const  {
 448     size_t result = dictionary()->total_size();
 449     if (_small_blocks != NULL) {
 450       result = result + _small_blocks->total_size();
 451     }
 452     return result;
 453   }
 454 
 455   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 456   void print_on(outputStream* st) const;
 457 };
 458 
 459 // Helper for Occupancy Bitmap. A type trait to give an all-bits-are-one-unsigned constant.
 460 template <typename T> struct all_ones  { static const T value; };
 461 template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
 462 template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
 463 
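Editor's sketch: the trait yields an all-bits-set constant for the
instantiated type; a compile-time check one could add (assuming HotSpot's
STATIC_ASSERT macro):

  STATIC_ASSERT(all_ones<uint32_t>::value == (uint32_t)~(uint32_t)0);
  STATIC_ASSERT(all_ones<uint64_t>::value == (uint64_t)~(uint64_t)0);
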
 464 // The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
 465 // keeps information about
 466 // - where a chunk starts
 467 // - whether a chunk is in-use or free
 468 // A bit in this bitmap represents one range of memory in the smallest
 469 // chunk size (SpecializedChunk or ClassSpecializedChunk), i.e. 128 words per bit.
 470 class OccupancyMap : public CHeapObj<mtInternal> {
 471 
 472   // The address range this map covers.
 473   const MetaWord* const _reference_address;
 474   const size_t _word_size;


 840 
 841   // Allocate a chunk from the virtual space and return it.
 842   Metachunk* get_chunk_vs(size_t chunk_word_size);
 843 
 844   // Expands/shrinks the committed space in a virtual space.  Delegates
 845   // to VirtualSpace.
 846   bool expand_by(size_t min_words, size_t preferred_words);
 847 
 848   // In preparation for deleting this node, remove all the chunks
 849   // in the node from any freelist.
 850   void purge(ChunkManager* chunk_manager);
 851 
 852   // If an allocation doesn't fit in the current node a new node is created.
 853   // Allocate chunks out of the remaining committed space in this node
 854   // to avoid wasting that memory.
 855   // This always works out evenly because all the chunk sizes are multiples of
 856   // the smallest chunk size.
 857   void retire(ChunkManager* chunk_manager);
 858 
 859 
 860   void print_on(outputStream* st) const;

 861   void print_map(outputStream* st, bool is_class) const;
 862 
 863   // Debug support
 864   DEBUG_ONLY(void mangle();)
 865   // Verify counters, all chunks in this list node and the occupancy map.
 866   DEBUG_ONLY(void verify();)
 867   // Verify that all free chunks in this node are ideally merged
 868   // (there should not be multiple small chunks where a large chunk could exist).
 869   DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
 870 
 871 };
 872 
 873 #define assert_is_aligned(value, alignment)                  \
 874   assert(is_aligned((value), (alignment)),                   \
 875          SIZE_FORMAT_HEX " is not aligned to "               \
 876          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 877 
 878 // Decide if large pages should be committed when the memory is reserved.
 879 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 880   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 881     size_t words = bytes / BytesPerWord;
 882     bool is_class = false; // We never reserve large pages for the class space.
 883     if (MetaspaceGC::can_expand(words, is_class) &&
 884         MetaspaceGC::allowed_expansion() >= words) {
 885       return true;
 886     }
 887   }
 888 
 889   return false;
 890 }
 891 
 892 // 'bytes' is the size of the associated virtual space.
 893 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
 894   _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
 895   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 896   bool large_pages = should_commit_large_pages_when_reserving(bytes);
 897   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);


1164 
1165   bool initialization_succeeded() { return _virtual_space_list != NULL; }
1166 
1167   size_t reserved_words()  { return _reserved_words; }
1168   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
1169   size_t committed_words() { return _committed_words; }
1170   size_t committed_bytes() { return committed_words() * BytesPerWord; }
1171 
1172   void inc_reserved_words(size_t v);
1173   void dec_reserved_words(size_t v);
1174   void inc_committed_words(size_t v);
1175   void dec_committed_words(size_t v);
1176   void inc_virtual_space_count();
1177   void dec_virtual_space_count();
1178 
1179   bool contains(const void* ptr);
1180 
1181   // Unlink empty VirtualSpaceNodes and free them.
1182   void purge(ChunkManager* chunk_manager);
1183 
1184   void print_on(outputStream* st) const;

1185   void print_map(outputStream* st) const;
1186 
1187   class VirtualSpaceListIterator : public StackObj {
1188     VirtualSpaceNode* _virtual_spaces;
1189    public:
1190     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
1191       _virtual_spaces(virtual_spaces) {}
1192 
1193     bool repeat() {
1194       return _virtual_spaces != NULL;
1195     }
1196 
1197     VirtualSpaceNode* get_next() {
1198       VirtualSpaceNode* result = _virtual_spaces;
1199       if (_virtual_spaces != NULL) {
1200         _virtual_spaces = _virtual_spaces->next();
1201       }
1202       return result;
1203     }
1204   };
1205 };
1206 
1207 class Metadebug : AllStatic {
1208   // Debugging support for Metaspaces
1209   static int _allocation_fail_alot_count;
1210 
1211  public:
1212 
1213   static void init_allocation_fail_alot_count();
1214 #ifdef ASSERT
1215   static bool test_metadata_failure();
1216 #endif
1217 };
1218 
1219 int Metadebug::_allocation_fail_alot_count = 0;
1220 

1221 //  SpaceManager - used by Metaspace to handle allocations
1222 class SpaceManager : public CHeapObj<mtClass> {
1223   friend class ClassLoaderMetaspace;
1224   friend class Metadebug;
1225 
1226  private:
1227 
1228   // protects allocations
1229   Mutex* const _lock;
1230 
1231   // Type of metadata allocated.
1232   const Metaspace::MetadataType   _mdtype;
1233 
1234   // Type of metaspace
1235   const Metaspace::MetaspaceType  _space_type;
1236 
1237   // List of chunks in use by this SpaceManager.  Allocations
1238   // are done from the current chunk.  The list is used for deallocating
1239   // chunks when the SpaceManager is freed.
1240   Metachunk* _chunks_in_use[NumberOfInUseLists];
1241   Metachunk* _current_chunk;
1242 
1243   // Maximum number of small chunks to allocate to a SpaceManager
1244   static uint const _small_chunk_limit;
1245 
1246   // Maximum number of specialized chunks to allocate for anonymous and delegating
1247   // metadata space to a SpaceManager
1248   static uint const _anon_and_delegating_metadata_specialize_chunk_limit;
1249 
1250   // Sum of all space in allocated chunks
1251   size_t _allocated_blocks_words;
1252 
1253   // Sum of all allocated chunks
1254   size_t _allocated_chunks_words;
1255   size_t _allocated_chunks_count;

1256 
1257   // Free lists of blocks are per SpaceManager since they
1258   // are assumed to be in chunks in use by the SpaceManager
1259   // and all chunks in use by a SpaceManager are freed when
1260   // the class loader using the SpaceManager is collected.
1261   BlockFreelist* _block_freelists;
1262 
1263  private:
1264   // Accessors
1265   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
1266   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
1267     _chunks_in_use[index] = v;
1268   }
1269 
1270   BlockFreelist* block_freelists() const { return _block_freelists; }
1271 
1272   Metaspace::MetadataType mdtype() { return _mdtype; }
1273 
1274   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
1275   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
1276 
1277   Metachunk* current_chunk() const { return _current_chunk; }
1278   void set_current_chunk(Metachunk* v) {
1279     _current_chunk = v;
1280   }
1281 
1282   Metachunk* find_current_chunk(size_t word_size);
1283 
1284   // Add chunk to the list of chunks in use
1285   void add_chunk(Metachunk* v, bool make_current);
1286   void retire_current_chunk();
1287 
1288   Mutex* lock() const { return _lock; }
1289 
1290  protected:
1291   void initialize();
1292 
1293  public:
1294   SpaceManager(Metaspace::MetadataType mdtype,
1295                Metaspace::MetaspaceType space_type,
1296                Mutex* lock);
1297   ~SpaceManager();
1298 
1299   enum ChunkMultiples {
1300     MediumChunkMultiple = 4
1301   };
1302 
1303   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
1304   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
1305   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
1306 
1307   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
1308 
1309   // Accessors
1310   bool is_class() const { return _mdtype == Metaspace::ClassType; }
1311 
1312   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
1313   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
1314   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
1315 
1316   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
1317 
1318   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
1319 
1320   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
1321   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
1322   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
1323   size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
1324   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
1325 
1326   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
1327 
1328   // Increment the per Metaspace and global running sums for Metachunks
1329   // by the given size.  This is used when a Metachunk is added to
1330   // the in-use list.
1331   void inc_size_metrics(size_t words);
1332   // Increment the per Metaspace and global running sums for Metablocks by the given
1333   // size.  This is used when a Metablock is allocated.
1334   void inc_used_metrics(size_t words);
1335   // Delete the portion of the running sums for this SpaceManager. That is,
1336   // the global running sums for the Metachunks and Metablocks are
1337   // decremented for all the Metachunks in-use by this SpaceManager.
1338   void dec_total_from_size_metrics();


1339 
1340   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
1341   // or return the unadjusted size if the requested size is humongous.
1342   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
1343   size_t adjust_initial_chunk_size(size_t requested) const;
1344 
1345   // Get the initial chunk size for this metaspace type.
1346   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
1347 
1348   size_t sum_capacity_in_chunks_in_use() const;
1349   size_t sum_used_in_chunks_in_use() const;
1350   size_t sum_free_in_chunks_in_use() const;
1351   size_t sum_waste_in_chunks_in_use() const;
1352   size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
1353 
1354   size_t sum_count_in_chunks_in_use();
1355   size_t sum_count_in_chunks_in_use(ChunkIndex i);
1356 
1357   Metachunk* get_new_chunk(size_t chunk_word_size);
1358 
1359   // Block allocation and deallocation.
1360   // Allocates a block from the current chunk
1361   MetaWord* allocate(size_t word_size);
1362 
1363   // Helper for allocations
1364   MetaWord* allocate_work(size_t word_size);
1365 
1366   // Returns a block to the per manager freelist
1367   void deallocate(MetaWord* p, size_t word_size);
1368 
1369   // Based on the allocation size and a minimum chunk size,
1370   // returns the chunk size to use (for expanding space for chunk allocation).
1371   size_t calc_chunk_size(size_t allocation_word_size);
1372 
1373   // Called when an allocation from the current chunk fails.
1374   // Gets a new chunk (may require getting a new virtual space),
1375   // and allocates from that chunk.
1376   MetaWord* grow_and_allocate(size_t word_size);
1377 
1378   // Report memory usage to MemoryService.
1379   void track_metaspace_memory_usage();
1380 
1381   // debugging support.
1382 
1383   void dump(outputStream* const out) const;
1384   void print_on(outputStream* st) const;
1385   void locked_print_chunks_in_use_on(outputStream* st) const;
1386 
1387   void verify();
1388   void verify_chunk_size(Metachunk* chunk);
1389 #ifdef ASSERT
1390   void verify_allocated_blocks_words();
1391 #endif
1392 
1393   // This adjusts the given size to be at least the minimum allocation size in
1394   // words for data in metaspace.  Essentially the minimum size is currently 3 words.
1395   size_t get_allocation_word_size(size_t word_size) {
1396     size_t byte_size = word_size * BytesPerWord;
1397 
1398     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
1399     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
1400 
1401     size_t raw_word_size = raw_bytes_size / BytesPerWord;
1402     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
1403 
1404     return raw_word_size;
1405   }
1406 };
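A worked example for get_allocation_word_size() above (editor's note,
assuming a 64-bit VM with BytesPerWord == 8 and sizeof(Metablock) == 24):

  // word_size = 1  ->  byte_size = 8
  // raw_bytes_size = MAX2(8, 24) = 24, already aligned to object_alignment()
  // raw_word_size  = 24 / 8 = 3
  // => every metaspace allocation is at least 3 words, as the comment says.
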
1407 
1408 uint const SpaceManager::_small_chunk_limit = 4;
1409 uint const SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit = 4;
1410 
1411 void VirtualSpaceNode::inc_container_count() {
1412   assert_lock_strong(MetaspaceExpand_lock);
1413   _container_count++;
1414 }
1415 
1416 void VirtualSpaceNode::dec_container_count() {
1417   assert_lock_strong(MetaspaceExpand_lock);
1418   _container_count--;
1419 }
1420 
1421 #ifdef ASSERT
1422 void VirtualSpaceNode::verify_container_count() {
1423   assert(_container_count == container_count_slow(),
1424          "Inconsistency in container_count _container_count " UINTX_FORMAT
1425          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());


1640   // Now, top should be aligned correctly.
1641   assert_is_aligned(top(), required_chunk_alignment);
1642 
1643   // Bottom of the new chunk
1644   MetaWord* chunk_limit = top();
1645   assert(chunk_limit != NULL, "Not safe to call this method");
1646 
1647   // The virtual spaces are always expanded by the
1648   // commit granularity to enforce the following condition.
1649   // Without this the is_available check will not work correctly.
1650   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1651       "The committed memory doesn't match the expanded memory.");
1652 
1653   if (!is_available(chunk_word_size)) {
1654     LogTarget(Debug, gc, metaspace, freelist) lt;
1655     if (lt.is_enabled()) {
1656       LogStream ls(lt);
1657       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1658       // Dump some information about the virtual space that is nearly full
1659       print_on(&ls);

1660     }
1661     return NULL;
1662   }
1663 
1664   // Take the space (bump top on the current virtual space).
1665   inc_top(chunk_word_size);
1666 
1667   // Initialize the chunk
1668   ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
1669   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
1670   assert(result == (Metachunk*)chunk_limit, "Sanity");
1671   occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
1672   do_update_in_use_info_for_chunk(result, true);
1673 
1674   inc_container_count();
1675 
1676   if (VerifyMetaspace) {
1677     DEBUG_ONLY(chunk_manager->locked_verify());
1678     DEBUG_ONLY(this->verify());
1679   }


1686 }
1687 
1688 
1689 // Expand the virtual space (commit more of the reserved space)
1690 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1691   size_t min_bytes = min_words * BytesPerWord;
1692   size_t preferred_bytes = preferred_words * BytesPerWord;
1693 
1694   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1695 
1696   if (uncommitted < min_bytes) {
1697     return false;
1698   }
1699 
1700   size_t commit = MIN2(preferred_bytes, uncommitted);
1701   bool result = virtual_space()->expand_by(commit, false);
1702 
1703   if (result) {
1704     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
1705               (is_class() ? "class" : "non-class"), commit);

1706   } else {
1707     log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
1708               (is_class() ? "class" : "non-class"), commit);
1709   }
1710 
1711   assert(result, "Failed to commit memory");
1712 
1713   return result;
1714 }
1715 
1716 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1717   assert_lock_strong(MetaspaceExpand_lock);
1718   Metachunk* result = take_from_committed(chunk_word_size);
1719   return result;
1720 }
1721 
1722 bool VirtualSpaceNode::initialize() {
1723 
1724   if (!_rs.is_reserved()) {
1725     return false;


1745     set_top((MetaWord*)virtual_space()->low());
1746     set_reserved(MemRegion((HeapWord*)_rs.base(),
1747                  (HeapWord*)(_rs.base() + _rs.size())));
1748 
1749     assert(reserved()->start() == (HeapWord*) _rs.base(),
1750            "Reserved start was not set properly " PTR_FORMAT
1751            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1752     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1753            "Reserved size was not set properly " SIZE_FORMAT
1754            " != " SIZE_FORMAT, reserved()->word_size(),
1755            _rs.size() / BytesPerWord);
1756   }
1757 
1758   // Initialize Occupancy Map.
1759   const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
1760   _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
1761 
1762   return result;
1763 }
1764 
1765 void VirtualSpaceNode::print_on(outputStream* st) const {
1766   size_t used = used_words_in_vs();
1767   size_t capacity = capacity_words_in_vs();

1768   VirtualSpace* vs = virtual_space();
1769   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
1770            "[" PTR_FORMAT ", " PTR_FORMAT ", "
1771            PTR_FORMAT ", " PTR_FORMAT ")",
1772            p2i(vs), capacity / K,
1773            capacity == 0 ? 0 : used * 100 / capacity,
1774            p2i(bottom()), p2i(top()), p2i(end()),
1775            p2i(vs->high_boundary()));
1776 }
1777 
1778 #ifdef ASSERT
1779 void VirtualSpaceNode::mangle() {
1780   size_t word_size = capacity_words_in_vs();
1781   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1782 }
1783 #endif // ASSERT
1784 
1785 // VirtualSpaceList methods
1786 // Space allocated from the VirtualSpace
1787 
1788 VirtualSpaceList::~VirtualSpaceList() {
1789   VirtualSpaceListIterator iter(virtual_space_list());
1790   while (iter.repeat()) {
1791     VirtualSpaceNode* vsl = iter.get_next();
1792     delete vsl;
1793   }


1975 // Walk the list of VirtualSpaceNodes and delete
1976 // nodes with a 0 container_count.  Remove Metachunks in
1977 // the node from their respective freelists.
1978 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1979   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
1980   assert_lock_strong(MetaspaceExpand_lock);
1981   // Don't use a VirtualSpaceListIterator because this
1982   // list is being changed and a straightforward use of an iterator is not safe.
1983   VirtualSpaceNode* purged_vsl = NULL;
1984   VirtualSpaceNode* prev_vsl = virtual_space_list();
1985   VirtualSpaceNode* next_vsl = prev_vsl;
1986   while (next_vsl != NULL) {
1987     VirtualSpaceNode* vsl = next_vsl;
1988     DEBUG_ONLY(vsl->verify_container_count();)
1989     next_vsl = vsl->next();
1990     // Don't free the current virtual space since it will likely
1991     // be needed soon.
1992     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1993       log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
1994                                          ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());

1995       // Unlink it from the list
1996       if (prev_vsl == vsl) {
1997         // This is the case of the current node being the first node.
1998         assert(vsl == virtual_space_list(), "Expected to be the first node");
1999         set_virtual_space_list(vsl->next());
2000       } else {
2001         prev_vsl->set_next(vsl->next());
2002       }
2003 
2004       vsl->purge(chunk_manager);
2005       dec_reserved_words(vsl->reserved_words());
2006       dec_committed_words(vsl->committed_words());
2007       dec_virtual_space_count();
2008       purged_vsl = vsl;
2009       delete vsl;
2010     } else {
2011       prev_vsl = vsl;
2012     }
2013   }
2014 #ifdef ASSERT


2122   if (vs_word_size == 0) {
2123     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
2124     return false;
2125   }
2126 
2127   // Reserve the space
2128   size_t vs_byte_size = vs_word_size * BytesPerWord;
2129   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
2130 
2131   // Allocate the meta virtual space and initialize it.
2132   VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
2133   if (!new_entry->initialize()) {
2134     delete new_entry;
2135     return false;
2136   } else {
2137     assert(new_entry->reserved_words() == vs_word_size,
2138         "Reserved memory size differs from requested memory size");
2139     // ensure lock-free iteration sees fully initialized node
2140     OrderAccess::storestore();
2141     link_vs(new_entry);

2142     return true;
2143   }
2144 }
2145 
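Editor's note on the OrderAccess::storestore() above: readers may iterate the
virtual space list without taking MetaspaceExpand_lock, so the node's fields
must be made visible before link_vs() publishes the node through the list's
next pointer.
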
2146 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
2147   if (virtual_space_list() == NULL) {
2148       set_virtual_space_list(new_entry);
2149   } else {
2150     current_virtual_space()->set_next(new_entry);
2151   }
2152   set_current_virtual_space(new_entry);
2153   inc_reserved_words(new_entry->reserved_words());
2154   inc_committed_words(new_entry->committed_words());
2155   inc_virtual_space_count();
2156 #ifdef ASSERT
2157   new_entry->mangle();
2158 #endif
2159   LogTarget(Trace, gc, metaspace) lt;
2160   if (lt.is_enabled()) {
2161     LogStream ls(lt);
2162     VirtualSpaceNode* vsl = current_virtual_space();
2163     ResourceMark rm;
2164     vsl->print_on(&ls);

2165   }
2166 }
2167 
2168 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
2169                                       size_t min_words,
2170                                       size_t preferred_words) {
2171   size_t before = node->committed_words();
2172 
2173   bool result = node->expand_by(min_words, preferred_words);
2174 
2175   size_t after = node->committed_words();
2176 
2177   // after and before can be the same if the memory was pre-committed.
2178   assert(after >= before, "Inconsistency");
2179   inc_committed_words(after - before);
2180 
2181   return result;
2182 }
2183 
2184 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {


2270   // We must have enough space for the requested size and any
2271   // additional required padding chunks.
2272   const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
2273 
2274   size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
2275   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
2276   if (min_word_size >= preferred_word_size) {
2277     // Can happen when humongous chunks are allocated.
2278     preferred_word_size = min_word_size;
2279   }
2280 
2281   bool expanded = expand_by(min_word_size, preferred_word_size);
2282   if (expanded) {
2283     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2284     assert(next != NULL, "The allocation was expected to succeed after the expansion");
2285   }
2286 
2287   return next;
2288 }
2289 
2290 void VirtualSpaceList::print_on(outputStream* st) const {


2291   VirtualSpaceListIterator iter(virtual_space_list());
2292   while (iter.repeat()) {

2293     VirtualSpaceNode* node = iter.get_next();
2294     node->print_on(st);
2295   }
2296 }
2297 
2298 void VirtualSpaceList::print_map(outputStream* st) const {
2299   VirtualSpaceNode* list = virtual_space_list();
2300   VirtualSpaceListIterator iter(list);
2301   unsigned i = 0;
2302   while (iter.repeat()) {
2303     st->print_cr("Node %u:", i);
2304     VirtualSpaceNode* node = iter.get_next();
2305     node->print_map(st, this->is_class());
2306     i ++;
2307   }
2308 }
2309 
2310 // MetaspaceGC methods
2311 
2312 // VM_CollectForMetadataAllocation is the vm operation used to GC.
2313 // Within the VM operation after the GC the attempt to allocate the metadata
2314 // should succeed.  If the GC did not free enough space for the metaspace


2961     return NULL;
2962   }
2963 
2964   assert((word_size <= chunk->word_size()) ||
2965          (list_index(chunk->word_size()) == HumongousIndex),
2966          "Non-humongous variable sized chunk");
2967   LogTarget(Debug, gc, metaspace, freelist) lt;
2968   if (lt.is_enabled()) {
2969     size_t list_count;
2970     if (list_index(word_size) < HumongousIndex) {
2971       ChunkList* list = find_free_chunks_list(word_size);
2972       list_count = list->count();
2973     } else {
2974       list_count = humongous_dictionary()->total_count();
2975     }
2976     LogStream ls(lt);
2977     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
2978              p2i(this), p2i(chunk), chunk->word_size(), list_count);
2979     ResourceMark rm;
2980     locked_print_free_chunks(&ls);

2981   }
2982 
2983   return chunk;
2984 }
2985 
2986 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
2987   assert_lock_strong(MetaspaceExpand_lock);
2988   DEBUG_ONLY(do_verify_chunk(chunk);)
2989   assert(chunk->get_chunk_type() == index, "Chunk does not match expected index.");
2990   assert(chunk != NULL, "Expected chunk.");
2991   assert(chunk->container() != NULL, "Container should have been set.");
2992   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
2993   index_bounds_check(index);
2994 
2995   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
2996   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
2997   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
2998   DEBUG_ONLY(chunk->mangle(badMetaWordVal);)
2999 
3000   if (index != HumongousIndex) {


3055       size_chunks_returned += cur->word_size();
3056     }
3057     return_single_chunk(index, cur);
3058     cur = next;
3059   }
3060   if (log.is_enabled()) { // tracing
3061     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
3062         num_chunks_returned, chunk_size_name(index), size_chunks_returned);
3063     if (index != HumongousIndex) {
3064       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
3065     } else {
3066       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
3067     }
3068   }
3069 }
3070 
3071 void ChunkManager::print_on(outputStream* out) const {
3072   _humongous_dictionary.report_statistics(out);
3073 }
3074 
3075 void ChunkManager::locked_get_statistics(ChunkManagerStatistics* stat) const {
3076   assert_lock_strong(MetaspaceExpand_lock);
3077   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3078     stat->num_by_type[i] = num_free_chunks(i);
3079     stat->single_size_by_type[i] = size_by_index(i);
3080     stat->total_size_by_type[i] = size_free_chunks_in_bytes(i);
3081   }
3082   stat->num_humongous_chunks = num_free_chunks(HumongousIndex);
3083   stat->total_size_humongous_chunks = size_free_chunks_in_bytes(HumongousIndex);
3084 }
3085 
3086 void ChunkManager::get_statistics(ChunkManagerStatistics* stat) const {
3087   MutexLockerEx cl(MetaspaceExpand_lock,
3088                    Mutex::_no_safepoint_check_flag);
3089   locked_get_statistics(stat);
3090 }
3091 
3092 void ChunkManager::print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale) {
3093   size_t total = 0;
3094   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3095 
3096   const char* unit = scale_unit(scale);
3097   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3098     out->print("  " SIZE_FORMAT " %s (" SIZE_FORMAT " bytes) chunks, total ",
3099                    stat->num_by_type[i], chunk_size_name(i),
3100                    stat->single_size_by_type[i]);
3101     if (scale == 1) {
3102       out->print_cr(SIZE_FORMAT " bytes", stat->total_size_by_type[i]);
3103     } else {
3104       out->print_cr("%.2f%s", (float)stat->total_size_by_type[i] / scale, unit);
3105     }
3106 
3107     total += stat->total_size_by_type[i];
3108   }
3109 
3110 
3111   total += stat->total_size_humongous_chunks;
3112 
3113   if (scale == 1) {
3114     out->print_cr("  " SIZE_FORMAT " humongous chunks, total " SIZE_FORMAT " bytes",
3115     stat->num_humongous_chunks, stat->total_size_humongous_chunks);
3116 
3117     out->print_cr("  total size: " SIZE_FORMAT " bytes.", total);
3118   } else {
3119     out->print_cr("  " SIZE_FORMAT " humongous chunks, total %.2f%s",
3120     stat->num_humongous_chunks,
3121     (float)stat->total_size_humongous_chunks / scale, unit);
3122 
3123     out->print_cr("  total size: %.2f%s.", (float)total / scale, unit);
3124   }
3125 
3126 }
3127 
3128 void ChunkManager::print_all_chunkmanagers(outputStream* out, size_t scale) {
3129   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3130 
3131   // Note: hold the lock only while retrieving the statistics; do the
3132   // printing outside of lock protection.
3133   ChunkManagerStatistics stat;
3134   out->print_cr("Chunkmanager (non-class):");
3135   const ChunkManager* const non_class_cm = Metaspace::chunk_manager_metadata();
3136   if (non_class_cm != NULL) {
3137     non_class_cm->get_statistics(&stat);
3138     ChunkManager::print_statistics(&stat, out, scale);
3139   } else {
3140     out->print_cr("unavailable.");
3141   }
3142   out->print_cr("Chunkmanager (class):");
3143   const ChunkManager* const class_cm = Metaspace::chunk_manager_class();
3144   if (class_cm != NULL) {
3145     class_cm->get_statistics(&stat);
3146     ChunkManager::print_statistics(&stat, out, scale);
3147   } else {
3148     out->print_cr("unavailable.");
3149   }
3150 }
3151 
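A minimal invocation sketch (editor's illustration): print_all_chunkmanagers()
is the convenience entry point for reporting, with scale selecting the unit:

  ChunkManager::print_all_chunkmanagers(tty, K);  // both chunk managers, in KB
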
3152 // SpaceManager methods
3153 
3154 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
3155   size_t chunk_sizes[] = {
3156       specialized_chunk_size(is_class_space),
3157       small_chunk_size(is_class_space),
3158       medium_chunk_size(is_class_space)
3159   };
3160 
3161   // Adjust up to one of the fixed chunk sizes ...
3162   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
3163     if (requested <= chunk_sizes[i]) {
3164       return chunk_sizes[i];
3165     }
3166   }
3167 
3168   // ... or return the size as a humongous chunk.


3184     default:                                 requested = ClassSmallChunk; break;
3185     }
3186   } else {
3187     switch (type) {
3188     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
3189     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
3190     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
3191     default:                                 requested = SmallChunk; break;
3192     }
3193   }
3194 
3195   // Adjust to one of the fixed chunk sizes (unless humongous)
3196   const size_t adjusted = adjust_initial_chunk_size(requested);
3197 
3198   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
3199          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
3200 
3201   return adjusted;
3202 }
3203 
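For illustration (editor's note), the rounding adjust_initial_chunk_size()
performs for the non-class space, where SpecializedChunk = 128, SmallChunk = 512
and MediumChunk = 8 * K words:

  // requested = 100     ->  128    (SpecializedChunk)
  // requested = 300     ->  512    (SmallChunk)
  // requested = 8 * K   ->  8 * K  (MediumChunk)
  // requested = 10 * K  ->  10 * K (humongous; returned unadjusted)
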
3204 size_t SpaceManager::sum_free_in_chunks_in_use() const {
3205   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3206   size_t free = 0;
3207   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3208     Metachunk* chunk = chunks_in_use(i);
3209     while (chunk != NULL) {
3210       free += chunk->free_word_size();
3211       chunk = chunk->next();
3212     }
3213   }
3214   return free;
3215 }
3216 
3217 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
3218   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3219   size_t result = 0;
3220   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3221    result += sum_waste_in_chunks_in_use(i);
3222   }
3223 
3224   return result;
3225 }
3226 
3227 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
3228   size_t result = 0;
3229   Metachunk* chunk = chunks_in_use(index);
3230   // Count the free space in all the chunks except the
3231   // current chunk, from which allocations are still being done.
3232   while (chunk != NULL) {
3233     if (chunk != current_chunk()) {
3234       result += chunk->free_word_size();
3235     }
3236     chunk = chunk->next();
3237   }
3238   return result;
3239 }
3240 
3241 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
3242   // For CMS use "allocated_chunks_words()" which does not need the
3243   // Metaspace lock.  For the other collectors sum over the
3244   // lists.  Use both methods as a check that "allocated_chunks_words()"
3245   // is correct.  That is, sum_capacity_in_chunks_in_use() is too expensive
3246   // to use in the product, so allocated_chunks_words() should be used,
3247   // but this allows checking that allocated_chunks_words() returns the same
3248   // value as sum_capacity_in_chunks_in_use(), which is the definitive
3249   // answer.
3250   if (UseConcMarkSweepGC) {
3251     return allocated_chunks_words();
3252   } else {
3253     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3254     size_t sum = 0;
3255     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3256       Metachunk* chunk = chunks_in_use(i);
3257       while (chunk != NULL) {
3258         sum += chunk->word_size();
3259         chunk = chunk->next();
3260       }
3261     }
3262     return sum;
3263   }
3264 }
3265 
3266 size_t SpaceManager::sum_count_in_chunks_in_use() {
3267   size_t count = 0;
3268   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3269     count = count + sum_count_in_chunks_in_use(i);
3270   }
3271 
3272   return count;
3273 }
3274 
3275 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
3276   size_t count = 0;
3277   Metachunk* chunk = chunks_in_use(i);
3278   while (chunk != NULL) {
3279     count++;
3280     chunk = chunk->next();
3281   }
3282   return count;
3283 }
3284 
3285 
3286 size_t SpaceManager::sum_used_in_chunks_in_use() const {
3287   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3288   size_t used = 0;
3289   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3290     Metachunk* chunk = chunks_in_use(i);
3291     while (chunk != NULL) {
3292       used += chunk->used_word_size();
3293       chunk = chunk->next();
3294     }
3295   }
3296   return used;
3297 }
3298 
3299 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
3300 
3301   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3302     Metachunk* chunk = chunks_in_use(i);
3303     st->print("SpaceManager: %s " PTR_FORMAT,
3304                  chunk_size_name(i), p2i(chunk));
3305     if (chunk != NULL) {
3306       st->print_cr(" free " SIZE_FORMAT,
3307                    chunk->free_word_size());
3308     } else {
3309       st->cr();
3310     }
3311   }
3312 
3313   chunk_manager()->locked_print_free_chunks(st);
3314   chunk_manager()->locked_print_sum_free_chunks(st);
3315 }
3316 
3317 size_t SpaceManager::calc_chunk_size(size_t word_size) {
3318 


3410     // If the new chunk is humongous, it was created to serve a single large allocation. In that
3411     // case it usually makes no sense to make it the current chunk, since the next allocation would
3412     // need to allocate a new chunk anyway, while we would now prematurely retire a perfectly
3413     // good chunk which could be used for more normal allocations.
3414     bool make_current = true;
3415     if (next->get_chunk_type() == HumongousIndex &&
3416         current_chunk() != NULL) {
3417       make_current = false;
3418     }
3419     add_chunk(next, make_current);
3420     mem = next->allocate(word_size);
3421   }
3422 
3423   // Track metaspace memory usage statistic.
3424   track_metaspace_memory_usage();
3425 
3426   return mem;
3427 }
3428 
3429 void SpaceManager::print_on(outputStream* st) const {
3430 
3431   for (ChunkIndex i = ZeroIndex;
3432        i < NumberOfInUseLists ;
3433        i = next_chunk_index(i) ) {
3434     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
3435                  p2i(chunks_in_use(i)),
3436                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
3437   }
3438   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
3439                " Humongous " SIZE_FORMAT,
3440                sum_waste_in_chunks_in_use(SmallIndex),
3441                sum_waste_in_chunks_in_use(MediumIndex),
3442                sum_waste_in_chunks_in_use(HumongousIndex));
3443   // block free lists
3444   if (block_freelists() != NULL) {
3445     st->print_cr("total in block free lists " SIZE_FORMAT,
3446       block_freelists()->total_size());
3447   }
3448 }
3449 
3450 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
3451                            Metaspace::MetaspaceType space_type,
3452                            Mutex* lock) :
3453   _mdtype(mdtype),
3454   _space_type(space_type),
3455   _allocated_blocks_words(0),
3456   _allocated_chunks_words(0),
3457   _allocated_chunks_count(0),
3458   _block_freelists(NULL),
3459   _lock(lock)
3460 {
3461   initialize();
3462 }
3463 
3464 void SpaceManager::inc_size_metrics(size_t words) {

3465   assert_lock_strong(MetaspaceExpand_lock);
3466   // Add to the total words in allocated Metachunks and to the allocated
3467   // Metachunk count for this SpaceManager.
3468   _allocated_chunks_words = _allocated_chunks_words + words;
3469   _allocated_chunks_count++;
3470   // Global total of capacity in allocated Metachunks
3471   MetaspaceUtils::inc_capacity(mdtype(), words);
3472   // Global total of allocated Metablocks.
3473   // used_words_slow() includes the overhead in each
3474   // Metachunk so include it in the used when the
3475   // Metachunk is first added (so only added once per
3476   // Metachunk).
3477   MetaspaceUtils::inc_used(mdtype(), Metachunk::overhead());
3478 }
3479 
3480 void SpaceManager::inc_used_metrics(size_t words) {
3481   // Add to the per SpaceManager total
3482   Atomic::add(words, &_allocated_blocks_words);
3483   // Add to the global total
3484   MetaspaceUtils::inc_used(mdtype(), words);
3485 }
3486 
3487 void SpaceManager::dec_total_from_size_metrics() {
3488   MetaspaceUtils::dec_capacity(mdtype(), allocated_chunks_words());
3489   MetaspaceUtils::dec_used(mdtype(), allocated_blocks_words());
3490   // Also deduct the overhead per Metachunk
3491   MetaspaceUtils::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());


3492 }
3493 
3494 void SpaceManager::initialize() {
3495   Metadebug::init_allocation_fail_alot_count();
3496   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3497     _chunks_in_use[i] = NULL;
3498   }
3499   _current_chunk = NULL;
3500   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
3501 }
3502 
3503 SpaceManager::~SpaceManager() {

3504   // This takes this->_lock, which cannot be done while holding MetaspaceExpand_lock.
3505   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
3506          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
3507          " allocated_chunks_words() " SIZE_FORMAT,
3508          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
3509 
3510   MutexLockerEx fcl(MetaspaceExpand_lock,
3511                     Mutex::_no_safepoint_check_flag);
3512 
3513   assert(sum_count_in_chunks_in_use() == allocated_chunks_count(),
3514          "sum_count_in_chunks_in_use() " SIZE_FORMAT
3515          " allocated_chunks_count() " SIZE_FORMAT,
3516          sum_count_in_chunks_in_use(), allocated_chunks_count());
3517 
3518   chunk_manager()->slow_locked_verify();
3519 
3520   dec_total_from_size_metrics();
3521 
3522   Log(gc, metaspace, freelist) log;
3523   if (log.is_trace()) {
3524     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
3525     ResourceMark rm;
3526     LogStream ls(log.trace());
3527     locked_print_chunks_in_use_on(&ls);
3528     if (block_freelists() != NULL) {
3529       block_freelists()->print_on(&ls);
3530     }

3531   }
3532 
3533   // Add all the chunks in use by this space manager
3534   // to the global list of free chunks.
3535 
3536   // Follow each list of chunks-in-use and add them to the
3537   // free lists.  Each list is NULL terminated.
3538 
3539   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
3540     Metachunk* chunks = chunks_in_use(i);
3541     chunk_manager()->return_chunk_list(i, chunks);
3542     set_chunks_in_use(i, NULL);
3543   }
3544 
3545   chunk_manager()->slow_locked_verify();
3546 
3547   if (_block_freelists != NULL) {
3548     delete _block_freelists;
3549   }
3550 }
3551 
3552 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
3553   assert_lock_strong(_lock);
3554   // Allocations and deallocations are in raw_word_size
3555   size_t raw_word_size = get_allocation_word_size(word_size);
3556   // Lazily create a block_freelist
3557   if (block_freelists() == NULL) {
3558     _block_freelists = new BlockFreelist();
3559   }
3560   block_freelists()->return_block(p, raw_word_size);

3561 }
3562 
3563 // Adds a chunk to the list of chunks in use.
3564 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
3565 
3566   assert_lock_strong(_lock);
3567   assert(new_chunk != NULL, "Should not be NULL");
3568   assert(new_chunk->next() == NULL, "Should not be on a list");
3569 
3570   new_chunk->reset_empty();
3571 
3572   // Find the correct list and set the current
3573   // chunk for that list.
3574   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
3575 
3576   if (make_current) {
3577     // If we are to make the chunk current, retire the old current chunk and replace
3578     // it with the new chunk.
3579     retire_current_chunk();
3580     set_current_chunk(new_chunk);
3581   }
3582 
3583   // Add the new chunk at the head of its respective chunk list.
3584   new_chunk->set_next(chunks_in_use(index));
3585   set_chunks_in_use(index, new_chunk);
3586 
3587   // Add to the running sum of capacity
3588   inc_size_metrics(new_chunk->word_size());
3589 
3590   assert(new_chunk->is_empty(), "Not ready for reuse");
3591   Log(gc, metaspace, freelist) log;
3592   if (log.is_trace()) {
3593     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT, sum_count_in_chunks_in_use());
3594     ResourceMark rm;
3595     LogStream ls(log.trace());
3596     new_chunk->print_on(&ls);
3597     chunk_manager()->locked_print_free_chunks(&ls);

3598   }
3599 }
3600 
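// Editor's note: retire_current_chunk() below hands any remainder of the
// current chunk that is at least BlockFreelist::min_dictionary_size() words
// to the block freelist via deallocate(), so the tail of a retired chunk can
// still satisfy later small allocations.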
3601 void SpaceManager::retire_current_chunk() {
3602   if (current_chunk() != NULL) {
3603     size_t remaining_words = current_chunk()->free_word_size();
3604     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
3605       MetaWord* ptr = current_chunk()->allocate(remaining_words);
3606       deallocate(ptr, remaining_words);
3607       inc_used_metrics(remaining_words);
3608     }
3609   }
3610 }
3611 
3612 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
3613   // Get a chunk from the chunk freelist
3614   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
3615 
3616   if (next == NULL) {
3617     next = vs_list()->get_new_chunk(chunk_word_size,
3618                                     medium_chunk_bunch());
3619   }
3620 
3621   Log(gc, metaspace, alloc) log;
3622   if (log.is_debug() && next != NULL &&
3623       SpaceManager::is_humongous(next->word_size())) {
3624     log.debug("  new humongous chunk word size " SIZE_FORMAT, next->word_size());
3625   }
3626 
3627   return next;
3628 }
3629 
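// Satisfy an allocation request: first try the freelist of previously
// deallocated blocks (consulted only once it has grown past
// allocation_from_dictionary_limit, see below), then fall back to carving
// from the current chunk via allocate_work().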
3630 MetaWord* SpaceManager::allocate(size_t word_size) {
3631   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3632   size_t raw_word_size = get_allocation_word_size(word_size);
3633   BlockFreelist* fl = block_freelists();
3634   MetaWord* p = NULL;
3635   // Allocation from the dictionary is expensive in the sense that
3636   // the dictionary has to be searched for a size.  Don't allocate
3637   // from the dictionary until it starts to get fat.  Is this
3638   // a reasonable policy?  Maybe a skinny dictionary is fast enough
3639   // for allocations.  Do some profiling.  JJJ
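  // (allocation_from_dictionary_limit is defined near the top of this file;
  // in this version it is 4 * K words.)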
3640   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
3641     p = fl->get_block(raw_word_size);
3642   }
3643   if (p == NULL) {
3644     p = allocate_work(raw_word_size);
3645   }
3646 
3647   return p;
3648 }
3649 
3650 // Returns the address of space allocated for "word_size".
3651 // This method does not know about blocks (Metablocks).
3652 MetaWord* SpaceManager::allocate_work(size_t word_size) {
3653   assert_lock_strong(_lock);
3654 #ifdef ASSERT
3655   if (Metadebug::test_metadata_failure()) {
3656     return NULL;
3657   }
3658 #endif
3659   // Is there space in the current chunk?
3660   MetaWord* result = NULL;
3661 
3662   if (current_chunk() != NULL) {
3663     result = current_chunk()->allocate(word_size);
3664   }
3665 
3666   if (result == NULL) {
3667     result = grow_and_allocate(word_size);
3668   }
3669 
3670   if (result != NULL) {
3671     inc_used_metrics(word_size);
3672     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
3673            "Head of the list is being allocated");
3674   }
3675 
3676   return result;
3677 }
3678 
3679 void SpaceManager::verify() {
3680   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3681     Metachunk* curr = chunks_in_use(i);
3682     while (curr != NULL) {
3683       DEBUG_ONLY(do_verify_chunk(curr);)
3684       assert(!curr->is_tagged_free(), "Chunk should be tagged as in use.");
3685       curr = curr->next();
3686     }
3687   }
3688 }
3689 
3690 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
3691   assert(is_humongous(chunk->word_size()) ||
3692          chunk->word_size() == medium_chunk_size() ||
3693          chunk->word_size() == small_chunk_size() ||
3694          chunk->word_size() == specialized_chunk_size(),
3695          "Chunk size is wrong");
3697 }
3698 
3699 #ifdef ASSERT
3700 void SpaceManager::verify_allocated_blocks_words() {
3701   // Verification is only guaranteed at a safepoint.
3702   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
3703     "Verification can fail if the applications is running");
3704   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
3705          "allocation total is not consistent " SIZE_FORMAT
3706          " vs " SIZE_FORMAT,
3707          allocated_blocks_words(), sum_used_in_chunks_in_use());
3708 }
3709 
3710 #endif
3711 
3712 void SpaceManager::dump(outputStream* const out) const {
3713   size_t curr_total = 0;
3714   size_t waste = 0;
3715   uint i = 0;
3716   size_t used = 0;
3717   size_t capacity = 0;
3718 
3719   // Add up statistics for all chunks in this SpaceManager.
3720   for (ChunkIndex index = ZeroIndex;
3721        index < NumberOfInUseLists;
3722        index = next_chunk_index(index)) {
3723     for (Metachunk* curr = chunks_in_use(index);
3724          curr != NULL;
3725          curr = curr->next()) {
3726       out->print("%d) ", i++);
3727       curr->print_on(out);
3728       curr_total += curr->word_size();
3729       used += curr->used_word_size();
3730       capacity += curr->word_size();
3731       waste += curr->free_word_size() + curr->overhead();
3732     }

3733   }
3734 
3735   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3736     if (block_freelists() != NULL) block_freelists()->print_on(out);
3737   }
3738 
3739   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
3740   // Free space isn't wasted.
3741   waste -= free;
3742 
3743   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
3744                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
3745                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
3746 }
3747 
3748 // MetaspaceUtils
3749 
3750 
3751 size_t MetaspaceUtils::_capacity_words[] = {0, 0};
3752 volatile size_t MetaspaceUtils::_used_words[] = {0, 0};
3753 
3754 size_t MetaspaceUtils::free_bytes(Metaspace::MetadataType mdtype) {
3755   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3756   return list == NULL ? 0 : list->free_bytes();
3757 }
3758 
3759 size_t MetaspaceUtils::free_bytes() {
3760   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
3761 }
3762 
3763 void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
3764   assert_lock_strong(MetaspaceExpand_lock);
3765   assert(words <= capacity_words(mdtype),
3766          "About to decrement below 0: words " SIZE_FORMAT
3767          " is greater than _capacity_words[%u] " SIZE_FORMAT,
3768          words, mdtype, capacity_words(mdtype));
3769   _capacity_words[mdtype] -= words;
3770 }
3771 
3772 void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
3773   assert_lock_strong(MetaspaceExpand_lock);
3774   // Needs to be atomic
3775   _capacity_words[mdtype] += words;
3776 }

3777 
3778 void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
3779   assert(words <= used_words(mdtype),
3780          "About to decrement below 0: words " SIZE_FORMAT
3781          " is greater than _used_words[%u] " SIZE_FORMAT,
3782          words, mdtype, used_words(mdtype));
3783   // For CMS, deallocation of the Metaspaces occurs during the
3784   // sweep, which is a concurrent phase.  Protection by the MetaspaceExpand_lock
3785   // is not enough, since allocation is done on a per-Metaspace basis
3786   // and is protected by the Metaspace lock.
3787   Atomic::sub(words, &_used_words[mdtype]);
3788 }
3789 
3790 void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
3791   // _used_words tracks allocations for
3792   // each piece of metadata.  Those allocations are
3793   // generally done concurrently by different application
3794   // threads so must be done atomically.
3795   Atomic::add(words, &_used_words[mdtype]);
3796 }
3797 
3798 size_t MetaspaceUtils::used_bytes_slow(Metaspace::MetadataType mdtype) {
3799   size_t used = 0;
3800   ClassLoaderDataGraphMetaspaceIterator iter;
3801   while (iter.repeat()) {
3802     ClassLoaderMetaspace* msp = iter.get_next();
3803     // Sum allocated_blocks_words for each metaspace
3804     if (msp != NULL) {
3805       used += msp->used_words_slow(mdtype);
3806     }
3807   }
3808   return used * BytesPerWord;
3809 }
3810 
3811 size_t MetaspaceUtils::free_bytes_slow(Metaspace::MetadataType mdtype) {
3812   size_t free = 0;
3813   ClassLoaderDataGraphMetaspaceIterator iter;
3814   while (iter.repeat()) {
3815     ClassLoaderMetaspace* msp = iter.get_next();
3816     if (msp != NULL) {
3817       free += msp->free_words_slow(mdtype);
3818     }
3819   }
3820   return free * BytesPerWord;
3821 }
3822 
3823 size_t MetaspaceUtils::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
3824   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
3825     return 0;
3826   }
3827   // Don't count the space in the freelists.  That space will be
3828   // added to the capacity calculation as needed.
3829   size_t capacity = 0;
3830   ClassLoaderDataGraphMetaspaceIterator iter;
3831   while (iter.repeat()) {
3832     ClassLoaderMetaspace* msp = iter.get_next();
3833     if (msp != NULL) {
3834       capacity += msp->capacity_words_slow(mdtype);
3835     }
3836   }
3837   return capacity * BytesPerWord;
3838 }
3839 
3840 size_t MetaspaceUtils::capacity_bytes_slow() {
3841 #ifdef PRODUCT
3842   // Use capacity_bytes() in PRODUCT instead of this function.
3843   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
3844 #endif
3845   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
3846   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
3847   assert(capacity_bytes() == class_capacity + non_class_capacity,
3848          "bad accounting: capacity_bytes() " SIZE_FORMAT
3849          " class_capacity + non_class_capacity " SIZE_FORMAT
3850          " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
3851          capacity_bytes(), class_capacity + non_class_capacity,
3852          class_capacity, non_class_capacity);
3853 
3854   return class_capacity + non_class_capacity;
3855 }
3856 
3857 size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
3858   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3859   return list == NULL ? 0 : list->reserved_bytes();
3860 }
3861 
3862 size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
3863   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3864   return list == NULL ? 0 : list->committed_bytes();
3865 }
3866 
3867 size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
3868 
3869 size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
3870   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
3871   if (chunk_manager == NULL) {
3872     return 0;
3873   }
3874   chunk_manager->slow_verify();


3916                 "reserved "  SIZE_FORMAT "K",
3917                 used_bytes()/K,
3918                 capacity_bytes()/K,
3919                 committed_bytes()/K,
3920                 reserved_bytes()/K);
3921 
3922   if (Metaspace::using_class_space()) {
3923     Metaspace::MetadataType ct = Metaspace::ClassType;
3924     out->print_cr("  class space    "
3925                   "used "      SIZE_FORMAT "K, "
3926                   "capacity "  SIZE_FORMAT "K, "
3927                   "committed " SIZE_FORMAT "K, "
3928                   "reserved "  SIZE_FORMAT "K",
3929                   used_bytes(ct)/K,
3930                   capacity_bytes(ct)/K,
3931                   committed_bytes(ct)/K,
3932                   reserved_bytes(ct)/K);
3933   }
3934 }
3935 
3936 // Print information for class space and data space separately.
3937 // This is almost the same as above.
3938 void MetaspaceUtils::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
3939   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
3940   size_t capacity_bytes = capacity_bytes_slow(mdtype);
3941   size_t used_bytes = used_bytes_slow(mdtype);
3942   size_t free_bytes = free_bytes_slow(mdtype);
3943   size_t used_and_free = used_bytes + free_bytes +
3944                            free_chunks_capacity_bytes;
3945   out->print_cr("  Chunk accounting: (used in chunks " SIZE_FORMAT
3946              "K + unused in chunks " SIZE_FORMAT "K + "
3947              "capacity in free chunks " SIZE_FORMAT "K) = " SIZE_FORMAT
3948              "K; capacity in allocated chunks " SIZE_FORMAT "K",
3949              used_bytes / K,
3950              free_bytes / K,
3951              free_chunks_capacity_bytes / K,
3952              used_and_free / K,
3953              capacity_bytes / K);
3954   // Accounting can only be correct if we got the values during a safepoint
3955   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
3956 }
3957 
3958 // Print total fragmentation for class metaspaces
3959 void MetaspaceUtils::print_class_waste(outputStream* out) {
3960   assert(Metaspace::using_class_space(), "class metaspace not used");
3961   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
3962   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
3963   ClassLoaderDataGraphMetaspaceIterator iter;
3964   while (iter.repeat()) {
3965     ClassLoaderMetaspace* msp = iter.get_next();
3966     if (msp != NULL) {
3967       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
3968       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
3969       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
3970       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
3971       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
3972       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
3973       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
3974     }
3975   }
3976   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
3977                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
3978                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
3979                 "large count " SIZE_FORMAT,
3980                 cls_specialized_count, cls_specialized_waste,
3981                 cls_small_count, cls_small_waste,
3982                 cls_medium_count, cls_medium_waste, cls_humongous_count);
3983 }
3984 
3985 // Print total fragmentation for data and class metaspaces separately
3986 void MetaspaceUtils::print_waste(outputStream* out) {
3987   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
3988   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
3989 
3990   ClassLoaderDataGraphMetaspaceIterator iter;
3991   while (iter.repeat()) {
3992     ClassLoaderMetaspace* msp = iter.get_next();
3993     if (msp != NULL) {
3994       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
3995       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
3996       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
3997       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
3998       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
3999       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
4000       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
4001     }
4002   }
4003   out->print_cr("Total fragmentation waste (words) doesn't count free space");
4004   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
4005                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
4006                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
4007                         "large count " SIZE_FORMAT,
4008              specialized_count, specialized_waste, small_count,
4009              small_waste, medium_count, medium_waste, humongous_count);
4010   if (Metaspace::using_class_space()) {
4011     print_class_waste(out);
4012   }
4013 }
4014 
4015 class MetadataStats {
4016 private:
4017   size_t _capacity;
4018   size_t _used;
4019   size_t _free;
4020   size_t _waste;
4021 
4022 public:
4023   MetadataStats() : _capacity(0), _used(0), _free(0), _waste(0) { }
4024   MetadataStats(size_t capacity, size_t used, size_t free, size_t waste)
4025   : _capacity(capacity), _used(used), _free(free), _waste(waste) { }
4026 
4027   void add(const MetadataStats& stats) {
4028     _capacity += stats.capacity();
4029     _used += stats.used();
4030     _free += stats.free();
4031     _waste += stats.waste();
4032   }
4033 
4034   size_t capacity() const { return _capacity; }
4035   size_t used() const     { return _used; }
4036   size_t free() const     { return _free; }
4037   size_t waste() const    { return _waste; }
4038 
4039   void print_on(outputStream* out, size_t scale) const;
4040 };
4041 
4042 
4043 void MetadataStats::print_on(outputStream* out, size_t scale) const {
4044   const char* unit = scale_unit(scale);
4045   out->print_cr("capacity=%10.2f%s used=%10.2f%s free=%10.2f%s waste=%10.2f%s",
4046     (float)capacity() / scale, unit,
4047     (float)used() / scale, unit,
4048     (float)free() / scale, unit,
4049     (float)waste() / scale, unit);
4050 }
4051 
4052 class PrintCLDMetaspaceInfoClosure : public CLDClosure {
4053 private:
4054   outputStream*  _out;
4055   size_t         _scale;
4056 
4057   size_t         _total_count;
4058   MetadataStats  _total_metadata;
4059   MetadataStats  _total_class;
4060 
4061   size_t         _total_anon_count;
4062   MetadataStats  _total_anon_metadata;
4063   MetadataStats  _total_anon_class;
4064 
4065 public:
4066   PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale = K)
4067   : _out(out), _scale(scale), _total_count(0), _total_anon_count(0) { }
4068 
4069   ~PrintCLDMetaspaceInfoClosure() {
4070     print_summary();
4071   }
4072 
4073   void do_cld(ClassLoaderData* cld) {

4074     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
4075 
4076     if (cld->is_unloading()) return;
4077     ClassLoaderMetaspace* msp = cld->metaspace_or_null();
4078     if (msp == NULL) {
4079       return;
4080     }
4081 
4082     bool anonymous = false;
4083     if (cld->is_anonymous()) {
4084       _out->print_cr("ClassLoader: for anonymous class");
4085       anonymous = true;
4086     } else {
4087       ResourceMark rm;
4088       _out->print_cr("ClassLoader: %s", cld->loader_name());
4089     }
4090 
4091     print_metaspace(msp, anonymous);
4092     _out->cr();
4093   }
4094 
4095 private:
4096   void print_metaspace(ClassLoaderMetaspace* msp, bool anonymous);
4097   void print_summary() const;
4098 };
4099 
4100 void PrintCLDMetaspaceInfoClosure::print_metaspace(ClassLoaderMetaspace* msp, bool anonymous){
4101   assert(msp != NULL, "Sanity");
4102   SpaceManager* vsm = msp->vsm();
4103   const char* unit = scale_unit(_scale);
4104 
4105   size_t capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4106   size_t used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4107   size_t free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4108   size_t waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4109 
4110   _total_count ++;
4111   MetadataStats metadata_stats(capacity, used, free, waste);
4112   _total_metadata.add(metadata_stats);
4113 
4114   if (anonymous) {
4115     _total_anon_count ++;
4116     _total_anon_metadata.add(metadata_stats);
4117   }
4118 
4119   _out->print("  Metadata   ");
4120   metadata_stats.print_on(_out, _scale);
4121 
4122   if (Metaspace::using_class_space()) {
4123     vsm = msp->class_vsm();
4124 
4125     capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4126     used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4127     free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4128     waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4129 
4130     MetadataStats class_stats(capacity, used, free, waste);
4131     _total_class.add(class_stats);
4132 
4133     if (anonymous) {
4134       _total_anon_class.add(class_stats);
4135     }
4136 
4137     _out->print("  Class data ");
4138     class_stats.print_on(_out, _scale);
4139   }
4140 }
4141 
4142 void PrintCLDMetaspaceInfoClosure::print_summary() const {
4143   const char* unit = scale_unit(_scale);
4144   _out->cr();
4145   _out->print_cr("Summary:");



































4146 
4147   MetadataStats total;
4148   total.add(_total_metadata);
4149   total.add(_total_class);
4150 
4151   _out->print("  Total class loaders=" SIZE_FORMAT_W(6) " ", _total_count);
4152   total.print_on(_out, _scale);
4153 
4154   _out->print("                    Metadata ");
4155   _total_metadata.print_on(_out, _scale);
4156 
4157   if (Metaspace::using_class_space()) {
4158     _out->print("                  Class data ");
4159     _total_class.print_on(_out, _scale);
4160   }
4161   _out->cr();
4162 
4163   MetadataStats total_anon;
4164   total_anon.add(_total_anon_metadata);
4165   total_anon.add(_total_anon_class);

4166 
4167   _out->print("For anonymous classes=" SIZE_FORMAT_W(6) " ", _total_anon_count);
4168   total_anon.print_on(_out, _scale);
4169 
4170   _out->print("                    Metadata ");
4171   _total_anon_metadata.print_on(_out, _scale);
4172 
4173   if (Metaspace::using_class_space()) {
4174     _out->print("                  Class data ");
4175     _total_anon_class.print_on(_out, _scale);
4176   }
4177 }
4178 
4179 void MetaspaceUtils::print_metadata_for_nmt(outputStream* out, size_t scale) {
4180   const char* unit = scale_unit(scale);
4181   out->print_cr("Metaspaces:");
4182   out->print_cr("  Metadata space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
4183     reserved_bytes(Metaspace::NonClassType) / scale, unit,
4184     committed_bytes(Metaspace::NonClassType) / scale, unit);
4185   if (Metaspace::using_class_space()) {
4186     out->print_cr("  Class    space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
4187     reserved_bytes(Metaspace::ClassType) / scale, unit,
4188     committed_bytes(Metaspace::ClassType) / scale, unit);

4189   }
4190 

4191   out->cr();
4192   ChunkManager::print_all_chunkmanagers(out, scale);
4193 
4194   out->cr();
4195   out->print_cr("Per-classloader metadata:");

4196   out->cr();
4197 
4198   PrintCLDMetaspaceInfoClosure cl(out, scale);
4199   ClassLoaderDataGraph::cld_do(&cl);
4200 }
4201 
4202 
4203 // Dump global metaspace things from the end of ClassLoaderDataGraph
4204 void MetaspaceUtils::dump(outputStream* out) {
4205   out->print_cr("All Metaspace:");
4206   out->print("data space: "); print_on(out, Metaspace::NonClassType);
4207   out->print("class space: "); print_on(out, Metaspace::ClassType);
4208   print_waste(out);
4209 }
4210 
4211 // Prints an ASCII representation of the given space.
4212 void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
4213   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
4214   const bool for_class = (mdtype == Metaspace::ClassType);
4215   VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
4216   if (vsl != NULL) {
4217     if (for_class) {
4218       if (!Metaspace::using_class_space()) {
4219         out->print_cr("No Class Space.");
4220         return;
4221       }
4222       out->print_raw("---- Metaspace Map (Class Space) ----");
4223     } else {
4224       out->print_raw("---- Metaspace Map (Non-Class Space) ----");
4225     }
4226     // Print legend:
4227     out->cr();
4228     out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
4229     out->cr();
4231     vsl->print_map(out);
4232     out->cr();
4233   }
4234 }
4235 
4236 void MetaspaceUtils::verify_free_chunks() {
4237   Metaspace::chunk_manager_metadata()->verify();
4238   if (Metaspace::using_class_space()) {
4239     Metaspace::chunk_manager_class()->verify();
4240   }
4241 }
4242 
4243 void MetaspaceUtils::verify_capacity() {
4244 #ifdef ASSERT
4245   size_t running_sum_capacity_bytes = capacity_bytes();
4246   // Verify the running sum of capacity against the capacity computed the slow way.
4247   size_t capacity_in_use_bytes = capacity_bytes_slow();
4248   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
4249          "capacity_words() * BytesPerWord " SIZE_FORMAT
4250          " capacity_bytes_slow()" SIZE_FORMAT,
4251          running_sum_capacity_bytes, capacity_in_use_bytes);
4252   for (Metaspace::MetadataType i = Metaspace::ClassType;
4253        i < Metaspace::MetadataTypeCount;
4254        i = (Metaspace::MetadataType)(i + 1)) {
4255     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
4256     assert(capacity_bytes(i) == capacity_in_use_bytes,
4257            "capacity_bytes(%u) " SIZE_FORMAT
4258            " capacity_bytes_slow(%u)" SIZE_FORMAT,
4259            i, capacity_bytes(i), i, capacity_in_use_bytes);
4260   }
4261 #endif
4262 }
4263 
4264 void MetaspaceUtils::verify_used() {
4265 #ifdef ASSERT
4266   size_t running_sum_used_bytes = used_bytes();
4267   // Verify the running sum of used bytes against the value computed the slow way.
4268   size_t used_in_use_bytes = used_bytes_slow();
4269   assert(used_bytes() == used_in_use_bytes,
4270          "used_bytes() " SIZE_FORMAT
4271          " used_bytes_slow()" SIZE_FORMAT,
4272          used_bytes(), used_in_use_bytes);
4273   for (Metaspace::MetadataType i = Metaspace::ClassType;
4274        i < Metaspace::MetadataTypeCount;
4275        i = (Metaspace::MetadataType)(i + 1)) {
4276     size_t used_in_use_bytes = used_bytes_slow(i);
4277     assert(used_bytes(i) == used_in_use_bytes,
4278            "used_bytes(%u) " SIZE_FORMAT
4279            " used_bytes_slow(%u)" SIZE_FORMAT,
4280            i, used_bytes(i), i, used_in_use_bytes);
4281   }

4282 #endif
4283 }
4284 
4285 void MetaspaceUtils::verify_metrics() {
4286   verify_capacity();
4287   verify_used();
4288 }
4289 
4290 
4291 // Metaspace methods
4292 
4293 size_t Metaspace::_first_chunk_word_size = 0;
4294 size_t Metaspace::_first_class_chunk_word_size = 0;
4295 
4296 size_t Metaspace::_commit_alignment = 0;
4297 size_t Metaspace::_reserve_alignment = 0;
4298 
4299 VirtualSpaceList* Metaspace::_space_list = NULL;
4300 VirtualSpaceList* Metaspace::_class_space_list = NULL;
4301 
4302 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
4303 ChunkManager* Metaspace::_chunk_manager_class = NULL;
4304 
4305 #define VIRTUALSPACEMULTIPLIER 2
4306 
4307 #ifdef _LP64
4308 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
4309 


4468   // If we got here then the metaspace got allocated.
4469   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
4470 
4471 #if INCLUDE_CDS
4472   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
4473   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
4474     FileMapInfo::stop_sharing_and_unmap(
4475         "Could not allocate metaspace at a compatible address");
4476   }
4477 #endif
4478   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
4479                                   UseSharedSpaces ? (address)cds_base : 0);
4480 
4481   initialize_class_space(metaspace_rs);
4482 
4483   LogTarget(Trace, gc, metaspace) lt;
4484   if (lt.is_enabled()) {
4485     ResourceMark rm;
4486     LogStream ls(lt);
4487     print_compressed_class_space(&ls, requested_addr);

4488   }
4489 }
4490 
4491 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
4492   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
4493                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
4494   if (_class_space_list != NULL) {
4495     address base = (address)_class_space_list->current_virtual_space()->bottom();
4496     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
4497                  compressed_class_space_size(), p2i(base));
4498     if (requested_addr != 0) {
4499       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
4500     }
4501     st->cr();
4502   }
4503 }
4504 
4505 // For UseCompressedClassPointers the class space is reserved above the top of
4506 // the Java heap.  The argument passed in is at the base of the compressed space.
4507 void Metaspace::initialize_class_space(ReservedSpace rs) {


4689 
4690   // Zero initialize.
4691   Copy::fill_to_words((HeapWord*)result, word_size, 0);
4692 
4693   return result;
4694 }
4695 
4696 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
4697   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
4698 
4699   // If result is still null, we are out of memory.
4700   Log(gc, metaspace, freelist) log;
4701   if (log.is_info()) {
4702     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
4703              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
4704     ResourceMark rm;
4705     if (log.is_debug()) {
4706       if (loader_data->metaspace_or_null() != NULL) {
4707         LogStream ls(log.debug());
4708         loader_data->print_value_on(&ls);

4709       }
4710     }
4711     LogStream ls(log.info());
4712     MetaspaceUtils::dump(&ls);
4713     MetaspaceUtils::print_metaspace_map(&ls, mdtype);
4714     ChunkManager::print_all_chunkmanagers(&ls);
4715   }
4716 
4717   bool out_of_compressed_class_space = false;
4718   if (is_class_space_allocation(mdtype)) {
4719     ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
4720     out_of_compressed_class_space =
4721       MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
4722       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
4723       CompressedClassSpaceSize;
4724   }
4725 
4726   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
4727   const char* space_string = out_of_compressed_class_space ?
4728     "Compressed class space" : "Metaspace";
4729 
4730   report_java_out_of_memory(space_string);
4731 
4732   if (JvmtiExport::should_post_resource_exhausted()) {
4733     JvmtiExport::post_resource_exhausted(
4734         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,


4769   }
4770 }
4771 
4772 bool Metaspace::contains(const void* ptr) {
4773   if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
4774     return true;
4775   }
4776   return contains_non_shared(ptr);
4777 }
4778 
4779 bool Metaspace::contains_non_shared(const void* ptr) {
4780   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
4781      return true;
4782   }
4783 
4784   return get_space_list(NonClassType)->contains(ptr);
4785 }
4786 
4787 // ClassLoaderMetaspace
4788 
4789 ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type) {
4790   initialize(lock, type);
4791 }
4792 
4793 ClassLoaderMetaspace::~ClassLoaderMetaspace() {

4794   delete _vsm;
4795   if (Metaspace::using_class_space()) {
4796     delete _class_vsm;
4797   }
4798 }

4799 void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4800   Metachunk* chunk = get_initialization_chunk(type, mdtype);
4801   if (chunk != NULL) {
4802     // Add to this manager's list of chunks in use and make it the current_chunk().
4803     get_space_manager(mdtype)->add_chunk(chunk, true);
4804   }
4805 }
4806 
4807 Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4808   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
4809 
4810   // Get a chunk from the chunk freelist
4811   Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
4812 
4813   if (chunk == NULL) {
4814     chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
4815                                                   get_space_manager(mdtype)->medium_chunk_bunch());
4816   }
4817 
4818   return chunk;
4819 }
4820 
4821 void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
4822   Metaspace::verify_global_initialization();
4823 


4824   // Allocate SpaceManager for metadata objects.
4825   _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);
4826 
4827   if (Metaspace::using_class_space()) {
4828     // Allocate SpaceManager for classes.
4829     _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
4830   }
4831 
4832   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
4833 
4834   // Allocate chunk for metadata objects
4835   initialize_first_chunk(type, Metaspace::NonClassType);
4836 
4837   // Allocate chunk for class metadata objects
4838   if (Metaspace::using_class_space()) {
4839     initialize_first_chunk(type, Metaspace::ClassType);
4840   }
4841 }
4842 
4843 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4844   Metaspace::assert_not_frozen();
4845   // Don't use class_vsm() unless UseCompressedClassPointers is true.
4846   if (Metaspace::is_class_space_allocation(mdtype)) {
4847     return class_vsm()->allocate(word_size);
4848   } else {
4849     return vsm()->allocate(word_size);
4850   }
4851 }
4852 
4853 MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4854   Metaspace::assert_not_frozen();
4855   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
4856   assert(delta_bytes > 0, "Must be");
4857 
4858   size_t before = 0;
4859   size_t after = 0;
4860   MetaWord* res;
4861   bool incremented;
4862 
4863   // Each thread increments the HWM at most once. Even if the thread fails to increment
4864   // the HWM, an allocation is still attempted. This is because another thread must then
4865   // have incremented the HWM and therefore the allocation might still succeed.
4866   do {
4867     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
4868     res = allocate(word_size, mdtype);
4869   } while (!incremented && res == NULL);
4870 
4871   if (incremented) {
4872     Metaspace::tracer()->report_gc_threshold(before, after,
4873                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
4874     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
4875   }
4876 
4877   return res;
4878 }
4879 
4880 size_t ClassLoaderMetaspace::used_words_slow(Metaspace::MetadataType mdtype) const {
4881   if (mdtype == Metaspace::ClassType) {
4882     return Metaspace::using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
4883   } else {
4884     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
4885   }
4886 }
4887 
4888 size_t ClassLoaderMetaspace::free_words_slow(Metaspace::MetadataType mdtype) const {
4889   Metaspace::assert_not_frozen();
4890   if (mdtype == Metaspace::ClassType) {
4891     return Metaspace::using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
4892   } else {
4893     return vsm()->sum_free_in_chunks_in_use();
4894   }
4895 }
4896 
4897 // Space capacity in the Metaspace.  It includes
4898 // space in the list of chunks from which allocations
4899 // have been made. It does not include space in the global freelist or
4900 // space available in the dictionary, since that
4901 // is already counted in some chunk.
4902 size_t ClassLoaderMetaspace::capacity_words_slow(Metaspace::MetadataType mdtype) const {
4903   if (mdtype == Metaspace::ClassType) {
4904     return Metaspace::using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
4905   } else {
4906     return vsm()->sum_capacity_in_chunks_in_use();
4907   }
4908 }
4909 
4910 size_t ClassLoaderMetaspace::used_bytes_slow(Metaspace::MetadataType mdtype) const {
4911   return used_words_slow(mdtype) * BytesPerWord;
4912 }
4913 
4914 size_t ClassLoaderMetaspace::capacity_bytes_slow(Metaspace::MetadataType mdtype) const {
4915   return capacity_words_slow(mdtype) * BytesPerWord;
4916 }
4917 
4918 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
4919   return vsm()->allocated_blocks_bytes() +
4920       (Metaspace::using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
4921 }
4922 
4923 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
4924   return vsm()->allocated_chunks_bytes() +
4925       (Metaspace::using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
4926 }
4927 
4928 void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
4929   Metaspace::assert_not_frozen();
4930   assert(!SafepointSynchronize::is_at_safepoint()
4931          || Thread::current()->is_VM_thread(), "should be the VM thread");
4932 


4933   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
4934 
4935   if (is_class && Metaspace::using_class_space()) {
4936     class_vsm()->deallocate(ptr, word_size);
4937   } else {
4938     vsm()->deallocate(ptr, word_size);
4939   }
4940 }
4941 
4942 size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
4943   assert(Metaspace::using_class_space(), "Has to use class space");
4944   return class_vsm()->calc_chunk_size(word_size);
4945 }
4946 
4947 void ClassLoaderMetaspace::print_on(outputStream* out) const {
4948   // Print both class virtual space counts and metaspace.
4949   if (Verbose) {
4950     vsm()->print_on(out);
4951     if (Metaspace::using_class_space()) {
4952       class_vsm()->print_on(out);
4953     }
4954   }
4955 }
4956 
4957 void ClassLoaderMetaspace::verify() {
4958   vsm()->verify();
4959   if (Metaspace::using_class_space()) {
4960     class_vsm()->verify();
4961   }
4962 }
4963 
4964 void ClassLoaderMetaspace::dump(outputStream* const out) const {
4965   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
4966   vsm()->dump(out);
4967   if (Metaspace::using_class_space()) {
4968     out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
4969     class_vsm()->dump(out);
4970   }
4971 }
4972 
4973 
4974 
4975 #ifdef ASSERT
4976 static void do_verify_chunk(Metachunk* chunk) {
4977   guarantee(chunk != NULL, "Sanity");
4978   // Verify chunk itself; then verify that it is consistent with the
4979   // occupancy map of its containing node.
4980   chunk->verify();
4981   VirtualSpaceNode* const vsn = chunk->container();
4982   OccupancyMap* const ocmap = vsn->occupancy_map();
4983   ocmap->verify_for_chunk(chunk);
4984 }
4985 #endif
4986 
4987 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
4988   chunk->set_is_tagged_free(!inuse);
4989   OccupancyMap* const ocmap = chunk->container()->occupancy_map();
4990   ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
4991 }
4992 
4993 /////////////// Unit tests ///////////////


5299     test_adjust_initial_chunk_size(false);
5300     test_adjust_initial_chunk_size(true);
5301   }
5302 };
5303 
5304 void SpaceManager_test_adjust_initial_chunk_size() {
5305   SpaceManagerTest::test_adjust_initial_chunk_size();
5306 }
5307 
5308 #endif // ASSERT
5309 
5310 struct chunkmanager_statistics_t {
5311   int num_specialized_chunks;
5312   int num_small_chunks;
5313   int num_medium_chunks;
5314   int num_humongous_chunks;
5315 };
5316 
5317 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
5318   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
5319   ChunkManager::ChunkManagerStatistics stat;
5320   chunk_manager->get_statistics(&stat);
5321   out->num_specialized_chunks = (int)stat.num_by_type[SpecializedIndex];
5322   out->num_small_chunks = (int)stat.num_by_type[SmallIndex];
5323   out->num_medium_chunks = (int)stat.num_by_type[MediumIndex];
5324   out->num_humongous_chunks = (int)stat.num_humongous_chunks;
5325 }
5326 
5327 struct chunk_geometry_t {
5328   size_t specialized_chunk_word_size;
5329   size_t small_chunk_word_size;
5330   size_t medium_chunk_word_size;
5331 };
5332 
5333 extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
5334   if (mdType == Metaspace::NonClassType) {
5335     out->specialized_chunk_word_size = SpecializedChunk;
5336     out->small_chunk_word_size = SmallChunk;
5337     out->medium_chunk_word_size = MediumChunk;
5338   } else {
5339     out->specialized_chunk_word_size = ClassSpecializedChunk;
5340     out->small_chunk_word_size = ClassSmallChunk;
5341     out->medium_chunk_word_size = ClassMediumChunk;
5342   }
5343 }


  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "logging/log.hpp"
  29 #include "logging/logStream.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/binaryTreeDictionary.inline.hpp"
  32 #include "memory/filemap.hpp"
  33 #include "memory/freeList.inline.hpp"
  34 #include "memory/metachunk.hpp"
  35 #include "memory/metaspace.hpp"
  36 #include "memory/metaspace/metaspaceCommon.hpp"
  37 #include "memory/metaspace/metaspaceStatistics.hpp"
  38 #include "memory/metaspaceGCThresholdUpdater.hpp"
  39 #include "memory/metaspaceShared.hpp"
  40 #include "memory/metaspaceTracer.hpp"
  41 #include "memory/resourceArea.hpp"
  42 #include "memory/universe.hpp"
  43 #include "runtime/atomic.hpp"
  44 #include "runtime/globals.hpp"
  45 #include "runtime/init.hpp"
  46 #include "runtime/java.hpp"
  47 #include "runtime/mutex.hpp"
  48 #include "runtime/mutexLocker.hpp"
  49 #include "runtime/orderAccess.inline.hpp"
  50 #include "services/memTracker.hpp"
  51 #include "services/memoryService.hpp"
  52 #include "utilities/align.hpp"
  53 #include "utilities/copy.hpp"
  54 #include "utilities/debug.hpp"
  55 #include "utilities/macros.hpp"
  56 
  57 using namespace metaspace::internals;
  58 
  59 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  60 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
  61 
  62 // Helper function that does a bunch of checks for a chunk.
  63 DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
  64 
  65 // Given a Metachunk, update its in-use information (both in the
  66 // chunk and the occupancy map).
  67 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
  68 
  69 size_t const allocation_from_dictionary_limit = 4 * K;
  70 
  71 MetaWord* last_allocated = 0;
  72 
  73 size_t Metaspace::_compressed_class_space_size;
  74 const MetaspaceTracer* Metaspace::_tracer = NULL;
  75 
  76 DEBUG_ONLY(bool Metaspace::_frozen = false;)
  77 
  78 // Internal statistics.
  79 #ifdef ASSERT
  80 static struct {
  81   // Number of allocations.
  82   uintx num_allocs;
  83   // Number of times a ClassLoaderMetaspace was born...
  84   uintx num_metaspace_births;
  85   // ... and died.
  86   uintx num_metaspace_deaths;
  87   // Number of times VirtualSpaceListNodes were created...
  88   uintx num_vsnodes_created;
  89   // ... and purged.
  90   uintx num_vsnodes_purged;
  91   // Number of times we expanded the committed section of the space.
  92   uintx num_committed_space_expanded;
  93   // Number of deallocations
  94   uintx num_deallocs;
  95   // Number of deallocations triggered from outside ("real" deallocations).
  96   uintx num_external_deallocs;
  97   // Number of times an allocation was satisfied from deallocated blocks.
  98   uintx num_allocs_from_deallocated_blocks;
  99 } g_internal_statistics;
 100 #endif
 101 
 102 enum ChunkSizes {    // in words.
 103   ClassSpecializedChunk = 128,
 104   SpecializedChunk = 128,
 105   ClassSmallChunk = 256,
 106   SmallChunk = 512,
 107   ClassMediumChunk = 4 * K,
 108   MediumChunk = 8 * K
 109 };
 110 
 111 // Returns size of this chunk type.
 112 size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
 113   assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
 114   size_t size = 0;
 115   if (is_class) {
 116     switch(chunktype) {
 117       case SpecializedIndex: size = ClassSpecializedChunk; break;
 118       case SmallIndex: size = ClassSmallChunk; break;
 119       case MediumIndex: size = ClassMediumChunk; break;
 120       default:
 121         ShouldNotReachHere();


 145       assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
 146       return HumongousIndex;
 147     }
 148   } else {
 149     if (size == SpecializedChunk) {
 150       return SpecializedIndex;
 151     } else if (size == SmallChunk) {
 152       return SmallIndex;
 153     } else if (size == MediumChunk) {
 154       return MediumIndex;
 155     } else if (size > MediumChunk) {
 156       // A valid humongous chunk size is a multiple of the smallest chunk size.
 157       assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
 158       return HumongousIndex;
 159     }
 160   }
 161   ShouldNotReachHere();
 162   return (ChunkIndex)-1;
 163 }
 164 
 165 ChunkIndex next_chunk_index(ChunkIndex i) {

 166   assert(i < NumberOfInUseLists, "Out of bound");
 167   return (ChunkIndex) (i+1);
 168 }
 169 
 170 ChunkIndex prev_chunk_index(ChunkIndex i) {
 171   assert(i > ZeroIndex, "Out of bound");
 172   return (ChunkIndex) (i-1);
 173 }
 174 
 175 static const char* space_type_name(Metaspace::MetaspaceType t) {
 176   const char* s = NULL;
 177   switch (t) {
 178   case Metaspace::StandardMetaspaceType: s = "Standard"; break;
 179   case Metaspace::BootMetaspaceType: s = "Boot"; break;
 180   case Metaspace::AnonymousMetaspaceType: s = "Anonymous"; break;
 181   case Metaspace::ReflectionMetaspaceType: s = "Reflection"; break;
 182   default: ShouldNotReachHere();

 183   }
 184   assert(s != NULL, "Invalid space type");
 185   return s;
 186 }
 187 
 188 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 189 uint MetaspaceGC::_shrink_factor = 0;
 190 bool MetaspaceGC::_should_concurrent_collect = false;
 191 
 192 
 193 typedef class FreeList<Metachunk> ChunkList;
 194 
 195 // Manages the global free lists of chunks.
 196 class ChunkManager : public CHeapObj<mtInternal> {
 197   friend class TestVirtualSpaceNodeTest;
 198 
 199   // Free list of chunks of different sizes.
 200   //   SpecializedChunk
 201   //   SmallChunk
 202   //   MediumChunk
 203   ChunkList _free_chunks[NumberOfFreeLists];
 204 
 205   // Whether or not this is the class chunkmanager.
 206   const bool _is_class;
 207 
 208   // Return non-humongous chunk list by its index.
 209   ChunkList* free_chunks(ChunkIndex index);
 210 
 211   // Returns non-humongous chunk list for the given chunk word size.
 212   ChunkList* find_free_chunks_list(size_t word_size);


 253 
 254   // Helper for chunk merging:
 255   //  Given an address range with 1-n chunks which are all supposed to be
 256   //  free and hence currently managed by this ChunkManager, remove them
 257   //  from this ChunkManager and mark them as invalid.
 258   // - This does not correct the occupancy map.
 259   // - This does not adjust the counters in ChunkManager.
 260   // - Does not adjust container count counter in containing VirtualSpaceNode.
 261   // Returns number of chunks removed.
 262   int remove_chunks_in_area(MetaWord* p, size_t word_size);
 263 
 264   // Helper for chunk splitting: given a target chunk size and a larger free chunk,
 265   // split up the larger chunk into n smaller chunks, at least one of which should be
 266   // the target chunk of target chunk size. The smaller chunks, including the target
 267   // chunk, are returned to the freelist. The pointer to the target chunk is returned.
 268   // Note that this chunk is supposed to be removed from the freelist right away.
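  //  Example (non-class chunk sizes): requesting a specialized chunk (128 words)
  //  when only a medium chunk (8K words) is free splits the medium chunk into
  //  the 128-word target plus free chunks covering the remaining space.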
 269   Metachunk* split_chunk(size_t target_chunk_word_size, Metachunk* chunk);
 270 
 271  public:
 272 
 273   ChunkManager(bool is_class)
 274       : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
 275     _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
 276     _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
 277     _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
 278   }
 279 
 280   // Allocate (remove) a chunk from the global freelist. Returns NULL if none is available.
 281   Metachunk* chunk_freelist_allocate(size_t word_size);
 282 
 283   // Map a size to a list index assuming that there are lists
 284   // for special, small, medium, and humongous chunks.
 285   ChunkIndex list_index(size_t size);
 286 
 287   // Map a given index to the chunk size.
 288   size_t size_by_index(ChunkIndex index) const;
 289 
 290   bool is_class() const { return _is_class; }
 291 
 292   // Convenience accessors.


 358   // Debug support
 359   void verify();
 360   void slow_verify() {
 361     if (VerifyMetaspace) {
 362       verify();
 363     }
 364   }
 365   void locked_verify();
 366   void slow_locked_verify() {
 367     if (VerifyMetaspace) {
 368       locked_verify();
 369     }
 370   }
 371   void verify_free_chunks_total();
 372 
 373   void locked_print_free_chunks(outputStream* st);
 374   void locked_print_sum_free_chunks(outputStream* st);
 375 
 376   void print_on(outputStream* st) const;
 377 
 378   // Fill in current statistic values to the given statistics object.
 379   void collect_statistics(ChunkManagerStatistics* out) const;
 380 
 381 };
 382 
 383 class SmallBlocks : public CHeapObj<mtClass> {
 384   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock, FreeList<Metablock> >)/HeapWordSize;
 385   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 386 
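  // One free list per block size in [_small_block_min_size, _small_block_max_size)
  // words; list_at(word_size) selects the list for a given size. Larger blocks
  // are handled by the BlockTreeDictionary instead (see BlockFreelist below).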
 387  private:
 388   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 389 
 390   FreeList<Metablock>& list_at(size_t word_size) {
 391     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 392     return _small_lists[word_size - _small_block_min_size];
 393   }
 394 
 395  public:
 396   SmallBlocks() {
 397     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 398       uint k = i - _small_block_min_size;
 399       _small_lists[k].set_size(i);
 400     }
 401   }
 402 
 403   // Returns the total size, in words, of all blocks, across all block sizes.
 404   size_t total_size() const {
 405     size_t result = 0;
 406     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 407       uint k = i - _small_block_min_size;
 408       result = result + _small_lists[k].count() * _small_lists[k].size();
 409     }
 410     return result;
 411   }
 412 
 413   // Returns the total number of all blocks across all block sizes.
 414   uintx total_num_blocks() const {
 415     uintx result = 0;
 416     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 417       uint k = i - _small_block_min_size;
 418       result = result + _small_lists[k].count();
 419     }
 420     return result;
 421   }
 422 
 423   static uint small_block_max_size() { return _small_block_max_size; }
 424   static uint small_block_min_size() { return _small_block_min_size; }
 425 
 426   MetaWord* get_block(size_t word_size) {
 427     if (list_at(word_size).count() > 0) {
 428       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 429       return new_block;
 430     } else {
 431       return NULL;
 432     }
 433   }
 434   void return_block(Metablock* free_chunk, size_t word_size) {
 435     list_at(word_size).return_chunk_at_head(free_chunk, false);
 436     assert(list_at(word_size).count() > 0, "Should have a chunk");
 437   }
 438 
 439   void print_on(outputStream* st) const {
 440     st->print_cr("SmallBlocks:");
 441     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 442       uint k = i - _small_block_min_size;


 455   // is at least 1/4th the size of the available block.
 456   const static int WasteMultiplier = 4;
 457 
 458   // Accessors
 459   BlockTreeDictionary* dictionary() const { return _dictionary; }
 460   SmallBlocks* small_blocks() {
 461     if (_small_blocks == NULL) {
 462       _small_blocks = new SmallBlocks();
 463     }
 464     return _small_blocks;
 465   }
 466 
 467  public:
 468   BlockFreelist();
 469   ~BlockFreelist();
 470 
 471   // Get a block from, or return a block to, the free list.
 472   MetaWord* get_block(size_t word_size);
 473   void return_block(MetaWord* p, size_t word_size);
 474 
 475   // Returns the total size, in words, of all blocks kept in this structure.
 476   size_t total_size() const  {
 477     size_t result = dictionary()->total_size();
 478     if (_small_blocks != NULL) {
 479       result = result + _small_blocks->total_size();
 480     }
 481     return result;
 482   }
 483 
 484   // Returns the number of all blocks kept in this structure.
 485   uintx num_blocks() const {
 486     uintx result = dictionary()->total_free_blocks();
 487     if (_small_blocks != NULL) {
 488       result = result + _small_blocks->total_num_blocks();
 489     }
 490     return result;
 491   }
 492 
 493   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 494   void print_on(outputStream* st) const;
 495 };
 496 
 497 // Helper for Occupancy Bitmap. A type trait to give an all-bits-are-one-unsigned constant.
 498 template <typename T> struct all_ones  { static const T value; };
 499 template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
 500 template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
 501 
 502 // The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
 503 // keeps information about
 504 // - where a chunk starts
 505 // - whether a chunk is in-use or free
 506 // A bit in this bitmap represents one range of memory in the smallest
 507 // chunk size (SpecializedChunk or ClassSpecializedChunk).
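// For example, with a smallest chunk size of 128 words, one bit covers a
// 128-word granule, so a 512-word small chunk in the non-class space is
// represented by four consecutive bits.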
 508 class OccupancyMap : public CHeapObj<mtInternal> {
 509 
 510   // The address range this map covers.
 511   const MetaWord* const _reference_address;
 512   const size_t _word_size;


 878 
 879   // Allocate a chunk from the virtual space and return it.
 880   Metachunk* get_chunk_vs(size_t chunk_word_size);
 881 
 882   // Expands/shrinks the committed space in a virtual space.  Delegates
 883   // to VirtualSpace.
 884   bool expand_by(size_t min_words, size_t preferred_words);
 885 
 886   // In preparation for deleting this node, remove all the chunks
 887   // in the node from any freelist.
 888   void purge(ChunkManager* chunk_manager);
 889 
 890   // If an allocation doesn't fit in the current node, a new node is created.
 891   // Before that happens, allocate chunks out of the remaining committed space
 892   // in this node to avoid wasting that memory.
 893   // This always works out evenly because all chunk sizes are multiples of
 894   // the smallest chunk size.
 895   void retire(ChunkManager* chunk_manager);
 896 
 897 
 898   void print_on(outputStream* st) const                 { print_on(st, K); }
 899   void print_on(outputStream* st, size_t scale) const;
 900   void print_map(outputStream* st, bool is_class) const;
 901 
 902   // Debug support
 903   DEBUG_ONLY(void mangle();)
 904   // Verify counters, all chunks in this list node and the occupancy map.
 905   DEBUG_ONLY(void verify();)
 906   // Verify that all free chunks in this node are ideally merged
 907   // (there should not be multiple small chunks where a large chunk could exist.)
 908   DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
 909 
 910 };
 911 
 912 #define assert_is_aligned(value, alignment)                  \
 913   assert(is_aligned((value), (alignment)),                   \
 914          SIZE_FORMAT_HEX " is not aligned to "               \
 915          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 916 
 917 #define assert_counter(expected_value, real_value, msg) \
 918   assert( (expected_value) == (real_value),             \
 919          "Counter mismatch (%s): expected " SIZE_FORMAT \
 920          ", but got: " SIZE_FORMAT ".", msg, expected_value, \
 921          real_value);
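// Example use (names hypothetical):
//   assert_counter(expected_chunk_count, counted_chunk_count, "chunk count");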
 922 
 923 // Decide if large pages should be committed when the memory is reserved.
 924 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 925   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 926     size_t words = bytes / BytesPerWord;
 927     bool is_class = false; // We never reserve large pages for the class space.
 928     if (MetaspaceGC::can_expand(words, is_class) &&
 929         MetaspaceGC::allowed_expansion() >= words) {
 930       return true;
 931     }
 932   }
 933 
 934   return false;
 935 }
 936 
 937 // byte_size is the size of the associated virtual space.
 938 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
 939   _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
 940   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 941   bool large_pages = should_commit_large_pages_when_reserving(bytes);
 942   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);


1209 
1210   bool initialization_succeeded() { return _virtual_space_list != NULL; }
1211 
1212   size_t reserved_words()  { return _reserved_words; }
1213   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
1214   size_t committed_words() { return _committed_words; }
1215   size_t committed_bytes() { return committed_words() * BytesPerWord; }
1216 
1217   void inc_reserved_words(size_t v);
1218   void dec_reserved_words(size_t v);
1219   void inc_committed_words(size_t v);
1220   void dec_committed_words(size_t v);
1221   void inc_virtual_space_count();
1222   void dec_virtual_space_count();
1223 
1224   bool contains(const void* ptr);
1225 
1226   // Unlink empty VirtualSpaceNodes and free them.
1227   void purge(ChunkManager* chunk_manager);
1228 
1229   void print_on(outputStream* st) const                 { print_on(st, K); }
1230   void print_on(outputStream* st, size_t scale) const;
1231   void print_map(outputStream* st) const;
1232 
1233   class VirtualSpaceListIterator : public StackObj {
1234     VirtualSpaceNode* _virtual_spaces;
1235    public:
1236     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
1237       _virtual_spaces(virtual_spaces) {}
1238 
1239     bool repeat() {
1240       return _virtual_spaces != NULL;
1241     }
1242 
1243     VirtualSpaceNode* get_next() {
1244       VirtualSpaceNode* result = _virtual_spaces;
1245       if (_virtual_spaces != NULL) {
1246         _virtual_spaces = _virtual_spaces->next();
1247       }
1248       return result;
1249     }
1250   };
1251 };
1252 
1253 class Metadebug : AllStatic {
1254   // Debugging support for Metaspaces
1255   static int _allocation_fail_alot_count;
1256 
1257  public:
1258 
1259   static void init_allocation_fail_alot_count();
1260 #ifdef ASSERT
1261   static bool test_metadata_failure();
1262 #endif
1263 };
1264 
1265 int Metadebug::_allocation_fail_alot_count = 0;
1266 
1267 
1268 //  SpaceManager - used by Metaspace to handle allocations
1269 class SpaceManager : public CHeapObj<mtClass> {
1270   friend class ClassLoaderMetaspace;
1271   friend class Metadebug;
1272 
1273  private:
1274 
1275   // protects allocations
1276   Mutex* const _lock;
1277 
1278   // Type of metadata allocated.
1279   const Metaspace::MetadataType   _mdtype;
1280 
1281   // Type of metaspace
1282   const Metaspace::MetaspaceType  _space_type;
1283 
1284   // List of chunks in use by this SpaceManager.  Allocations
1285   // are done from the current chunk.  The list is used for deallocating
1286   // chunks when the SpaceManager is freed.
1287   Metachunk* _chunks_in_use[NumberOfInUseLists];
1288   Metachunk* _current_chunk;
1289 
1290   // Maximum number of small chunks to allocate to a SpaceManager
1291   static uint const _small_chunk_limit;
1292 
1293   // Maximum number of specialized chunks to allocate for anonymous and delegating
1294   // metadata space to a SpaceManager
1295   static uint const _anon_and_delegating_metadata_specialize_chunk_limit;
1296 
1297   // Some running counters, but let's keep their number small so as not to add too much to
1298   // the per-classloader footprint.
1299   // Note: capacity = used + free + waste + overhead. We do not keep running counters for
1300   // free and waste. Their sum can be deduced from the three other values.
1301   size_t _overhead_words;
1302   size_t _capacity_words;
1303   size_t _used_words;
1304 
1305   // Free lists of blocks are per SpaceManager since they
1306   // are assumed to be in chunks in use by the SpaceManager
1307   // and all chunks in use by a SpaceManager are freed when
1308   // the class loader using the SpaceManager is collected.
1309   BlockFreelist* _block_freelists;
1310 
1311  private:
1312   // Accessors
1313   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
1314   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
1315     _chunks_in_use[index] = v;
1316   }
1317 
1318   BlockFreelist* block_freelists() const { return _block_freelists; }
1319 
1320   Metaspace::MetadataType mdtype() { return _mdtype; }
1321 
1322   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
1323   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
1324 
1325   Metachunk* current_chunk() const { return _current_chunk; }
1326   void set_current_chunk(Metachunk* v) {
1327     _current_chunk = v;
1328   }
1329 
1330   Metachunk* find_current_chunk(size_t word_size);
1331 
1332   // Add chunk to the list of chunks in use
1333   void add_chunk(Metachunk* v, bool make_current);
1334   void retire_current_chunk();
1335 
1336   Mutex* lock() const { return _lock; }
1337 
1338   // Adds to the given statistic object. Expects to be locked with lock().
1339   void add_to_statistics_locked(SpaceManagerStatistics* out) const;
1340 
1341   // Verify internal counters against the current state. Expects to be locked with lock().
1342   DEBUG_ONLY(void verify_metrics_locked() const;)
1343 
1344  protected:
1345   void initialize();
1346 
1347  public:
1348   SpaceManager(Metaspace::MetadataType mdtype,
1349                Metaspace::MetaspaceType space_type,
1350                Mutex* lock);
1351   ~SpaceManager();
1352 
1353   enum ChunkMultiples {
1354     MediumChunkMultiple = 4
1355   };
1356 
1357   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
1358   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
1359   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
1360 
1361   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
1362 
1363   // Accessors
1364   bool is_class() const { return _mdtype == Metaspace::ClassType; }
1365 
1366   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
1367   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
1368   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
1369 
1370   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
1371 
1372   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
1373 






1374   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
1375 
1376   size_t capacity_words() const     { return _capacity_words; }
1377   size_t used_words() const         { return _used_words; }
1378   size_t overhead_words() const     { return _overhead_words; }
1379 
1380   // Adjust local, global counters after a new chunk has been added.
1381   void account_for_new_chunk(const Metachunk* new_chunk);
1382 
1383   // Adjust local, global counters after space has been allocated from the current chunk.
1384   void account_for_allocation(size_t words);
1385 
1386   // Adjust global counters just before the SpaceManager dies, after all its chunks
1387   // have been returned to the freelist.
1388   void account_for_spacemanager_death();
1389 
1390   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
1391   // or return the unadjusted size if the requested size is humongous.
1392   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
1393   size_t adjust_initial_chunk_size(size_t requested) const;
1394 
1395   // Get the initial chunks size for this metaspace type.
1396   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
1397 
1398   // Todo: remove this once we have counters by chunk type.






1399   size_t sum_count_in_chunks_in_use(ChunkIndex i);
1400 
1401   Metachunk* get_new_chunk(size_t chunk_word_size);
1402 
1403   // Block allocation and deallocation.
1404   // Allocates a block from the current chunk
1405   MetaWord* allocate(size_t word_size);
1406 
1407   // Helper for allocations
1408   MetaWord* allocate_work(size_t word_size);
1409 
1410   // Returns a block to the per manager freelist
1411   void deallocate(MetaWord* p, size_t word_size);
1412 
1413   // Based on the allocation size and a minimum chunk size,
1414   // returns the chunk size to use (for expanding space for chunk allocation).
1415   size_t calc_chunk_size(size_t allocation_word_size);
1416 
1417   // Called when an allocation from the current chunk fails.
1418   // Gets a new chunk (may require getting a new virtual space),
1419   // and allocates from that chunk.
1420   MetaWord* grow_and_allocate(size_t word_size);
1421 
1422   // Notify memory usage to MemoryService.
1423   void track_metaspace_memory_usage();
1424 
1425   // debugging support.
1426 

1427   void print_on(outputStream* st) const;
1428   void locked_print_chunks_in_use_on(outputStream* st) const;
1429 
1430   void verify();
1431   void verify_chunk_size(Metachunk* chunk);



1432 
1433   // This adjusts the given size to be no smaller than the minimum allocation size in
1434   // words for data in metaspace.  Essentially, the minimum size is currently 3 words.
1435   size_t get_allocation_word_size(size_t word_size) {
1436     size_t byte_size = word_size * BytesPerWord;
1437 
1438     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
1439     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
1440 
1441     size_t raw_word_size = raw_bytes_size / BytesPerWord;
1442     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
1443 
1444     return raw_word_size;
1445   }
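
       // Worked example for the adjustment above (illustrative; the concrete
       // result depends on the platform word size, sizeof(Metablock) and
       // Metachunk::object_alignment()): on 64-bit, a request of word_size == 1
       // gives byte_size == 8, which is raised to sizeof(Metablock) and aligned
       // up, yielding the 3-word minimum mentioned in the comment above.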
1446 
1447   // Adds to the given statistic object.
1448   void add_to_statistics(SpaceManagerStatistics* out) const;
1449 
1450   // Verify internal counters against the current state.
1451   DEBUG_ONLY(void verify_metrics() const;)
1452 
1453 };
1454 
1455 uint const SpaceManager::_small_chunk_limit = 4;
1456 uint const SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit = 4;
1457 
1458 void VirtualSpaceNode::inc_container_count() {
1459   assert_lock_strong(MetaspaceExpand_lock);
1460   _container_count++;
1461 }
1462 
1463 void VirtualSpaceNode::dec_container_count() {
1464   assert_lock_strong(MetaspaceExpand_lock);
1465   _container_count--;
1466 }
1467 
1468 #ifdef ASSERT
1469 void VirtualSpaceNode::verify_container_count() {
1470   assert(_container_count == container_count_slow(),
1471          "Inconsistency in container_count _container_count " UINTX_FORMAT
1472          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());


1687   // Now, top should be aligned correctly.
1688   assert_is_aligned(top(), required_chunk_alignment);
1689 
1690   // Bottom of the new chunk
1691   MetaWord* chunk_limit = top();
1692   assert(chunk_limit != NULL, "Not safe to call this method");
1693 
1694   // The virtual spaces are always expanded by the
1695   // commit granularity to enforce the following condition.
1696   // Without this the is_available check will not work correctly.
1697   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1698       "The committed memory doesn't match the expanded memory.");
1699 
1700   if (!is_available(chunk_word_size)) {
1701     LogTarget(Debug, gc, metaspace, freelist) lt;
1702     if (lt.is_enabled()) {
1703       LogStream ls(lt);
1704       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1705       // Dump some information about the virtual space that is nearly full
1706       print_on(&ls);
1707       ls.cr(); // ~LogStream does not autoflush.
1708     }
1709     return NULL;
1710   }
1711 
1712   // Take the space (bump top on the current virtual space).
1713   inc_top(chunk_word_size);
1714 
1715   // Initialize the chunk
1716   ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
1717   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
1718   assert(result == (Metachunk*)chunk_limit, "Sanity");
1719   occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
1720   do_update_in_use_info_for_chunk(result, true);
1721 
1722   inc_container_count();
1723 
1724   if (VerifyMetaspace) {
1725     DEBUG_ONLY(chunk_manager->locked_verify());
1726     DEBUG_ONLY(this->verify());
1727   }


1734 }
1735 
1736 
1737 // Expand the virtual space (commit more of the reserved space)
1738 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1739   size_t min_bytes = min_words * BytesPerWord;
1740   size_t preferred_bytes = preferred_words * BytesPerWord;
1741 
1742   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1743 
1744   if (uncommitted < min_bytes) {
1745     return false;
1746   }
1747 
1748   size_t commit = MIN2(preferred_bytes, uncommitted);
1749   bool result = virtual_space()->expand_by(commit, false);
1750 
1751   if (result) {
1752     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
1753               (is_class() ? "class" : "non-class"), commit);
1754     DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_committed_space_expanded));
1755   } else {
1756     log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
1757               (is_class() ? "class" : "non-class"), commit);
1758   }
1759 
1760   assert(result, "Failed to commit memory");
1761 
1762   return result;
1763 }
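
     // A note on the contract above (observation, not new behavior): callers
     // pass the bare minimum needed for the pending chunk as min_words and a
     // larger commit granularity as preferred_words. If less than
     // preferred_bytes but at least min_bytes is still uncommitted, the
     // commit is clamped to what is left (the MIN2 above).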
1764 
1765 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1766   assert_lock_strong(MetaspaceExpand_lock);
1767   Metachunk* result = take_from_committed(chunk_word_size);
1768   return result;
1769 }
1770 
1771 bool VirtualSpaceNode::initialize() {
1772 
1773   if (!_rs.is_reserved()) {
1774     return false;


1794     set_top((MetaWord*)virtual_space()->low());
1795     set_reserved(MemRegion((HeapWord*)_rs.base(),
1796                  (HeapWord*)(_rs.base() + _rs.size())));
1797 
1798     assert(reserved()->start() == (HeapWord*) _rs.base(),
1799            "Reserved start was not set properly " PTR_FORMAT
1800            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1801     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1802            "Reserved size was not set properly " SIZE_FORMAT
1803            " != " SIZE_FORMAT, reserved()->word_size(),
1804            _rs.size() / BytesPerWord);
1805   }
1806 
1807   // Initialize Occupancy Map.
1808   const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
1809   _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
1810 
1811   return result;
1812 }
1813 
1814 void VirtualSpaceNode::print_on(outputStream* st, size_t scale) const {
1815   size_t used_words = used_words_in_vs();
1816   size_t commit_words = committed_words();
1817   size_t res_words = reserved_words();
1818   VirtualSpace* vs = virtual_space();
1819 
1820   st->print("node @" PTR_FORMAT ": ", p2i(this));
1821   st->print("reserved=");
1822   print_scaled_words(st, res_words, scale);
1823   st->print(", committed=");
1824   print_scaled_words_and_percentage(st, commit_words, res_words, scale);
1825   st->print(", used=");
1826   print_scaled_words_and_percentage(st, used_words, res_words, scale);
1827   st->cr();
1828   st->print("   [" PTR_FORMAT ", " PTR_FORMAT ", "
1829            PTR_FORMAT ", " PTR_FORMAT ")",


1830            p2i(bottom()), p2i(top()), p2i(end()),
1831            p2i(vs->high_boundary()));
1832 }
1833 
1834 #ifdef ASSERT
1835 void VirtualSpaceNode::mangle() {
1836   size_t word_size = capacity_words_in_vs();
1837   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1838 }
1839 #endif // ASSERT
1840 
1841 // VirtualSpaceList methods
1842 // Space allocated from the VirtualSpace
1843 
1844 VirtualSpaceList::~VirtualSpaceList() {
1845   VirtualSpaceListIterator iter(virtual_space_list());
1846   while (iter.repeat()) {
1847     VirtualSpaceNode* vsl = iter.get_next();
1848     delete vsl;
1849   }


2031 // Walk the list of VirtualSpaceNodes and delete
2032 // nodes with a 0 container_count.  Remove Metachunks in
2033 // the node from their respective freelists.
2034 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
2035   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
2036   assert_lock_strong(MetaspaceExpand_lock);
2037   // Don't use a VirtualSpaceListIterator because this
2038   // list is being changed and a straightforward use of an iterator is not safe.
2039   VirtualSpaceNode* purged_vsl = NULL;
2040   VirtualSpaceNode* prev_vsl = virtual_space_list();
2041   VirtualSpaceNode* next_vsl = prev_vsl;
2042   while (next_vsl != NULL) {
2043     VirtualSpaceNode* vsl = next_vsl;
2044     DEBUG_ONLY(vsl->verify_container_count();)
2045     next_vsl = vsl->next();
2046     // Don't free the current virtual space since it will likely
2047     // be needed soon.
2048     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
2049       log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
2050                                          ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
2051       DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_purged));
2052       // Unlink it from the list
2053       if (prev_vsl == vsl) {
2054         // This is the case of the current node being the first node.
2055         assert(vsl == virtual_space_list(), "Expected to be the first node");
2056         set_virtual_space_list(vsl->next());
2057       } else {
2058         prev_vsl->set_next(vsl->next());
2059       }
2060 
2061       vsl->purge(chunk_manager);
2062       dec_reserved_words(vsl->reserved_words());
2063       dec_committed_words(vsl->committed_words());
2064       dec_virtual_space_count();
2065       purged_vsl = vsl;
2066       delete vsl;
2067     } else {
2068       prev_vsl = vsl;
2069     }
2070   }
2071 #ifdef ASSERT


2179   if (vs_word_size == 0) {
2180     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
2181     return false;
2182   }
2183 
2184   // Reserve the space
2185   size_t vs_byte_size = vs_word_size * BytesPerWord;
2186   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
2187 
2188   // Allocate the meta virtual space and initialize it.
2189   VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
2190   if (!new_entry->initialize()) {
2191     delete new_entry;
2192     return false;
2193   } else {
2194     assert(new_entry->reserved_words() == vs_word_size,
2195         "Reserved memory size differs from requested memory size");
2196     // ensure lock-free iteration sees fully initialized node
2197     OrderAccess::storestore();
2198     link_vs(new_entry);
2199     DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_created));
2200     return true;
2201   }
2202 }
2203 
2204 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
2205   if (virtual_space_list() == NULL) {
2206       set_virtual_space_list(new_entry);
2207   } else {
2208     current_virtual_space()->set_next(new_entry);
2209   }
2210   set_current_virtual_space(new_entry);
2211   inc_reserved_words(new_entry->reserved_words());
2212   inc_committed_words(new_entry->committed_words());
2213   inc_virtual_space_count();
2214 #ifdef ASSERT
2215   new_entry->mangle();
2216 #endif
2217   LogTarget(Trace, gc, metaspace) lt;
2218   if (lt.is_enabled()) {
2219     LogStream ls(lt);
2220     VirtualSpaceNode* vsl = current_virtual_space();
2221     ResourceMark rm;
2222     vsl->print_on(&ls);
2223     ls.cr(); // ~LogStream does not autoflush.
2224   }
2225 }
2226 
2227 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
2228                                       size_t min_words,
2229                                       size_t preferred_words) {
2230   size_t before = node->committed_words();
2231 
2232   bool result = node->expand_by(min_words, preferred_words);
2233 
2234   size_t after = node->committed_words();
2235 
2236   // after and before can be the same if the memory was pre-committed.
2237   assert(after >= before, "Inconsistency");
2238   inc_committed_words(after - before);
2239 
2240   return result;
2241 }
2242 
2243 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {


2329   // We must have enough space for the requested size and any
2330   // additional required padding chunks.
2331   const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
2332 
2333   size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
2334   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
2335   if (min_word_size >= preferred_word_size) {
2336     // Can happen when humongous chunks are allocated.
2337     preferred_word_size = min_word_size;
2338   }
2339 
2340   bool expanded = expand_by(min_word_size, preferred_word_size);
2341   if (expanded) {
2342     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2343     assert(next != NULL, "The allocation was expected to succeed after the expansion");
2344   }
2345 
2346   return next;
2347 }
2348 
2349 void VirtualSpaceList::print_on(outputStream* st, size_t scale) const {
2350   st->print_cr(SIZE_FORMAT " nodes, current node: " PTR_FORMAT,
2351       _virtual_space_count, p2i(_current_virtual_space));
2352   VirtualSpaceListIterator iter(virtual_space_list());
2353   while (iter.repeat()) {
2354     st->cr();
2355     VirtualSpaceNode* node = iter.get_next();
2356     node->print_on(st, scale);
2357   }
2358 }
2359 
2360 void VirtualSpaceList::print_map(outputStream* st) const {
2361   VirtualSpaceNode* list = virtual_space_list();
2362   VirtualSpaceListIterator iter(list);
2363   unsigned i = 0;
2364   while (iter.repeat()) {
2365     st->print_cr("Node %u:", i);
2366     VirtualSpaceNode* node = iter.get_next();
2367     node->print_map(st, this->is_class());
2368     i ++;
2369   }
2370 }
2371 
2372 // MetaspaceGC methods
2373 
2374 // VM_CollectForMetadataAllocation is the vm operation used to GC.
2375 // Within the VM operation after the GC the attempt to allocate the metadata
2376 // should succeed.  If the GC did not free enough space for the metaspace


3023     return NULL;
3024   }
3025 
3026   assert((word_size <= chunk->word_size()) ||
3027          (list_index(chunk->word_size()) == HumongousIndex),
3028          "Non-humongous variable sized chunk");
3029   LogTarget(Debug, gc, metaspace, freelist) lt;
3030   if (lt.is_enabled()) {
3031     size_t list_count;
3032     if (list_index(word_size) < HumongousIndex) {
3033       ChunkList* list = find_free_chunks_list(word_size);
3034       list_count = list->count();
3035     } else {
3036       list_count = humongous_dictionary()->total_count();
3037     }
3038     LogStream ls(lt);
3039     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
3040              p2i(this), p2i(chunk), chunk->word_size(), list_count);
3041     ResourceMark rm;
3042     locked_print_free_chunks(&ls);
3043     ls.cr(); // ~LogStream does not autoflush.
3044   }
3045 
3046   return chunk;
3047 }
3048 
3049 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
3050   assert_lock_strong(MetaspaceExpand_lock);
3051   assert(chunk != NULL, "Expected chunk.");
3052   DEBUG_ONLY(do_verify_chunk(chunk);)
3053   assert(chunk->get_chunk_type() == index, "Chunk does not match expected index.");
3054   assert(chunk->container() != NULL, "Container should have been set.");
3055   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
3056   index_bounds_check(index);
3057 
3058   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
3059   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
3060   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
3061   DEBUG_ONLY(chunk->mangle(badMetaWordVal);)
3062 
3063   if (index != HumongousIndex) {


3118       size_chunks_returned += cur->word_size();
3119     }
3120     return_single_chunk(index, cur);
3121     cur = next;
3122   }
3123   if (log.is_enabled()) { // tracing
3124     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
3125         num_chunks_returned, chunk_size_name(index), size_chunks_returned);
3126     if (index != HumongousIndex) {
3127       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
3128     } else {
3129       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
3130     }
3131   }
3132 }
3133 
3134 void ChunkManager::print_on(outputStream* out) const {
3135   _humongous_dictionary.report_statistics(out);
3136 }
3137 
3138 void ChunkManager::collect_statistics(ChunkManagerStatistics* out) const {
3139   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
3140   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3141     out->chunk_stats(i).add(num_free_chunks(i), size_free_chunks_in_bytes(i) / sizeof(MetaWord));






































































3142   }
3143 }
3144 
3145 // SpaceManager methods
3146 
3147 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
3148   size_t chunk_sizes[] = {
3149       specialized_chunk_size(is_class_space),
3150       small_chunk_size(is_class_space),
3151       medium_chunk_size(is_class_space)
3152   };
3153 
3154   // Adjust up to one of the fixed chunk sizes ...
3155   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
3156     if (requested <= chunk_sizes[i]) {
3157       return chunk_sizes[i];
3158     }
3159   }
3160 
3161   // ... or return the size as a humongous chunk.


3177     default:                                 requested = ClassSmallChunk; break;
3178     }
3179   } else {
3180     switch (type) {
3181     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
3182     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
3183     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
3184     default:                                 requested = SmallChunk; break;
3185     }
3186   }
3187 
3188   // Adjust to one of the fixed chunk sizes (unless humongous)
3189   const size_t adjusted = adjust_initial_chunk_size(requested);
3190 
3191   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
3192          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
3193 
3194   return adjusted;
3195 }
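
     // Example for the mapping above (values follow the switch and the
     // ChunkSizes enum; illustrative only): a ReflectionMetaspaceType
     // non-class metaspace requests a SpecializedChunk (128 words), which
     // adjust_initial_chunk_size() leaves at the fixed 128-word chunk size.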
3196 







































































3197 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
3198   size_t count = 0;
3199   Metachunk* chunk = chunks_in_use(i);
3200   while (chunk != NULL) {
3201     count++;
3202     chunk = chunk->next();
3203   }
3204   return count;
3205 }
3206 














3207 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
3208 
3209   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3210     Metachunk* chunk = chunks_in_use(i);
3211     st->print("SpaceManager: %s " PTR_FORMAT,
3212                  chunk_size_name(i), p2i(chunk));
3213     if (chunk != NULL) {
3214       st->print_cr(" free " SIZE_FORMAT,
3215                    chunk->free_word_size());
3216     } else {
3217       st->cr();
3218     }
3219   }
3220 
3221   chunk_manager()->locked_print_free_chunks(st);
3222   chunk_manager()->locked_print_sum_free_chunks(st);
3223 }
3224 
3225 size_t SpaceManager::calc_chunk_size(size_t word_size) {
3226 


3318     // If the new chunk is humongous, it was created to serve a single large allocation. In that
3319     // case it usually makes no sense to make it the current chunk, since the next allocation would
3320     // need to allocate a new chunk anyway, while we would now prematurely retire a perfectly
3321     // good chunk which could be used for more normal allocations.
3322     bool make_current = true;
3323     if (next->get_chunk_type() == HumongousIndex &&
3324         current_chunk() != NULL) {
3325       make_current = false;
3326     }
3327     add_chunk(next, make_current);
3328     mem = next->allocate(word_size);
3329   }
3330 
3331   // Track metaspace memory usage statistic.
3332   track_metaspace_memory_usage();
3333 
3334   return mem;
3335 }
3336 
3337 void SpaceManager::print_on(outputStream* st) const {
3338   SpaceManagerStatistics stat;
3339   add_to_statistics(&stat); // will lock _lock.
3340   stat.print_on(st, 1*K, false);















3341 }
3342 
3343 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
3344                            Metaspace::MetaspaceType space_type,
3345                            Mutex* lock) :
3346   _mdtype(mdtype),
3347   _space_type(space_type),
3348   _capacity_words(0),
3349   _used_words(0),
3350   _overhead_words(0),
3351   _block_freelists(NULL),
3352   _lock(lock)
3353 {
3354   initialize();
3355 }
3356 
3357 void SpaceManager::account_for_new_chunk(const Metachunk* new_chunk) {
3358 
3359   assert_lock_strong(MetaspaceExpand_lock);
3360 
3361   _capacity_words += new_chunk->word_size();
3362   _overhead_words += Metachunk::overhead();
3363 
3364   // Adjust global counters:
3365   MetaspaceUtils::inc_capacity(mdtype(), new_chunk->word_size());
3366   MetaspaceUtils::inc_overhead(mdtype(), Metachunk::overhead());
3367 }
3368 
3369 void SpaceManager::account_for_allocation(size_t words) {
3370   // Note: we should be locked with the ClassLoaderData-specific metaspace lock.
3371   // We may or may not be locked with the global metaspace expansion lock.
3372   assert_lock_strong(lock());
3373 
3374   // Add to the per SpaceManager totals. This can be done non-atomically.
3375   _used_words += words;
3376 
3377   // Adjust global counters. This will be done atomically.
3378   MetaspaceUtils::inc_used(mdtype(), words);
3379 }
3380 
3381 void SpaceManager::account_for_spacemanager_death() {
3382 
3383   assert_lock_strong(MetaspaceExpand_lock);
3384 
3385   MetaspaceUtils::dec_capacity(mdtype(), _capacity_words);
3386   MetaspaceUtils::dec_overhead(mdtype(), _overhead_words);
3387   MetaspaceUtils::dec_used(mdtype(), _used_words);
3388 }
3389 
3390 void SpaceManager::initialize() {
3391   Metadebug::init_allocation_fail_alot_count();
3392   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3393     _chunks_in_use[i] = NULL;
3394   }
3395   _current_chunk = NULL;
3396   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
3397 }
3398 
3399 SpaceManager::~SpaceManager() {
3400 
3401   // This locks this->_lock, which can't be done while holding the MetaspaceExpand_lock.
3402   DEBUG_ONLY(verify_metrics());



3403 
3404   MutexLockerEx fcl(MetaspaceExpand_lock,
3405                     Mutex::_no_safepoint_check_flag);
3406 





3407   chunk_manager()->slow_locked_verify();
3408 
3409   account_for_spacemanager_death();
3410 
3411   Log(gc, metaspace, freelist) log;
3412   if (log.is_trace()) {
3413     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
3414     ResourceMark rm;
3415     LogStream ls(log.trace());
3416     locked_print_chunks_in_use_on(&ls);
3417     if (block_freelists() != NULL) {
3418       block_freelists()->print_on(&ls);
3419     }
3420     ls.cr(); // ~LogStream does not autoflush.
3421   }
3422 
3423   // Add all the chunks in use by this space manager
3424   // to the global list of free chunks.
3425 
3426   // Follow each list of chunks-in-use and add them to the
3427   // free lists.  Each list is NULL terminated.
3428 
3429   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
3430     Metachunk* chunks = chunks_in_use(i);
3431     chunk_manager()->return_chunk_list(i, chunks);
3432     set_chunks_in_use(i, NULL);
3433   }
3434 
3435   chunk_manager()->slow_locked_verify();
3436 
3437   if (_block_freelists != NULL) {
3438     delete _block_freelists;
3439   }
3440 }
3441 
3442 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
3443   assert_lock_strong(lock());
3444   // Allocations and deallocations are in raw_word_size
3445   size_t raw_word_size = get_allocation_word_size(word_size);
3446   // Lazily create a block_freelist
3447   if (block_freelists() == NULL) {
3448     _block_freelists = new BlockFreelist();
3449   }
3450   block_freelists()->return_block(p, raw_word_size);
3451   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_deallocs));
3452 }
3453 
3454 // Adds a chunk to the list of chunks in use.
3455 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
3456 
3457   assert_lock_strong(_lock);
3458   assert(new_chunk != NULL, "Should not be NULL");
3459   assert(new_chunk->next() == NULL, "Should not be on a list");
3460 
3461   new_chunk->reset_empty();
3462 
3463   // Find the correct list and set the current
3464   // chunk for that list.
3465   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
3466 
3467   if (make_current) {
3468     // If we are to make the chunk current, retire the old current chunk and replace
3469     // it with the new chunk.
3470     retire_current_chunk();
3471     set_current_chunk(new_chunk);
3472   }
3473 
3474   // Add the new chunk at the head of its respective chunk list.
3475   new_chunk->set_next(chunks_in_use(index));
3476   set_chunks_in_use(index, new_chunk);
3477 
3478   // Adjust counters.
3479   account_for_new_chunk(new_chunk);
3480 
3481   assert(new_chunk->is_empty(), "Not ready for reuse");
3482   Log(gc, metaspace, freelist) log;
3483   if (log.is_trace()) {
3484     log.trace("SpaceManager::added chunk: ");
3485     ResourceMark rm;
3486     LogStream ls(log.trace());
3487     new_chunk->print_on(&ls);
3488     chunk_manager()->locked_print_free_chunks(&ls);
3489     ls.cr(); // ~LogStream does not autoflush.
3490   }
3491 }
3492 
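     // Note on the retirement trick below (descriptive only): instead of
     // stranding the tail of the current chunk when a new current chunk is
     // installed, the remainder - if large enough for the block freelist - is
     // formally allocated and immediately deallocated, which hands it to the
     // per-SpaceManager block freelist for future small allocations.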
3493 void SpaceManager::retire_current_chunk() {
3494   if (current_chunk() != NULL) {
3495     size_t remaining_words = current_chunk()->free_word_size();
3496     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
3497       MetaWord* ptr = current_chunk()->allocate(remaining_words);
3498       deallocate(ptr, remaining_words);
3499       account_for_allocation(remaining_words);
3500     }
3501   }
3502 }
3503 
3504 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
3505   // Get a chunk from the chunk freelist
3506   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
3507 
3508   if (next == NULL) {
3509     next = vs_list()->get_new_chunk(chunk_word_size,
3510                                     medium_chunk_bunch());
3511   }
3512 
3513   Log(gc, metaspace, alloc) log;
3514   if (log.is_debug() && next != NULL &&
3515       SpaceManager::is_humongous(next->word_size())) {
3516     log.debug("  new humongous chunk word size " SIZE_FORMAT, next->word_size());
3517   }
3518 
3519   return next;
3520 }
3521 
3522 MetaWord* SpaceManager::allocate(size_t word_size) {
3523   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3524   size_t raw_word_size = get_allocation_word_size(word_size);
3525   BlockFreelist* fl =  block_freelists();
3526   MetaWord* p = NULL;
3527 
3528   DEBUG_ONLY(if (VerifyMetaspace) verify_metrics_locked());
3529 
3530   // Allocation from the dictionary is expensive in the sense that
3531   // the dictionary has to be searched for a size.  Don't allocate
3532   // from the dictionary until it starts to get fat.  Is this
3533   // a reasonable policy?  Maybe a skinny dictionary is fast enough
3534   // for allocations.  Do some profiling.  JJJ
3535   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
3536     p = fl->get_block(raw_word_size);
3537     if (p != NULL) {
3538       DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs_from_deallocated_blocks));
3539     }
3540   }
3541   if (p == NULL) {
3542     p = allocate_work(raw_word_size);
3543   }
3544 
3545   return p;
3546 }
3547 
3548 // Returns the address of space allocated for "word_size".
3549 // This method does not know about blocks (Metablocks).
3550 MetaWord* SpaceManager::allocate_work(size_t word_size) {
3551   assert_lock_strong(lock());
3552 #ifdef ASSERT
3553   if (Metadebug::test_metadata_failure()) {
3554     return NULL;
3555   }
3556 #endif
3557   // Is there space in the current chunk?
3558   MetaWord* result = NULL;
3559 
3560   if (current_chunk() != NULL) {
3561     result = current_chunk()->allocate(word_size);
3562   }
3563 
3564   if (result == NULL) {
3565     result = grow_and_allocate(word_size);
3566   }
3567 
3568   if (result != NULL) {
3569     account_for_allocation(word_size);
3570     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
3571            "Head of the list is being allocated");
3572   }
3573 
3574   return result;
3575 }
3576 
3577 void SpaceManager::verify() {
3578   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3579     Metachunk* curr = chunks_in_use(i);
3580     while (curr != NULL) {
3581       DEBUG_ONLY(do_verify_chunk(curr);)
3582       assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
3583       curr = curr->next();
3584     }
3585   }
3586 }
3587 
3588 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
3589   assert(is_humongous(chunk->word_size()) ||
3590          chunk->word_size() == medium_chunk_size() ||
3591          chunk->word_size() == small_chunk_size() ||
3592          chunk->word_size() == specialized_chunk_size(),
3593          "Chunk size is wrong");
3594   return;
3595 }
3596 
3597 void SpaceManager::add_to_statistics_locked(SpaceManagerStatistics* out) const {
3598   assert_lock_strong(lock());
3599   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3600     UsedChunksStatistics& chunk_stat = out->chunk_stats(i);
3601     Metachunk* chunk = chunks_in_use(i);
3602     while (chunk != NULL) {
3603       chunk_stat.add_num(1);
3604       chunk_stat.add_cap(chunk->word_size());
3605       chunk_stat.add_overhead(Metachunk::overhead());
3606       chunk_stat.add_used(chunk->used_word_size() - Metachunk::overhead());
3607       if (chunk != current_chunk()) {
3608         chunk_stat.add_waste(chunk->free_word_size());
3609       } else {
3610         chunk_stat.add_free(chunk->free_word_size());



















3611       }
3612       chunk = chunk->next();
3613     }



3614   }
3615   if (block_freelists() != NULL) {
3616     out->add_free_blocks_info(block_freelists()->num_blocks(), block_freelists()->total_size());
3617   }





3618 }
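
     // Note on the waste/free distinction above (descriptive only): only the
     // current chunk can still serve allocations, so its remaining space
     // counts as "free"; remaining space in any other in-use chunk can no
     // longer be allocated from and therefore counts as "waste".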
3619 
3620 void SpaceManager::add_to_statistics(SpaceManagerStatistics* out) const {
3621   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3622   add_to_statistics_locked(out);
3623 }
3624 
3625 #ifdef ASSERT
3626 void SpaceManager::verify_metrics_locked() const {
3627   assert_lock_strong(lock());
3628 
3629   SpaceManagerStatistics stat;
3630   add_to_statistics_locked(&stat);
3631 
3632   UsedChunksStatistics chunk_stats = stat.totals();



3633 
3634   DEBUG_ONLY(chunk_stats.check_sanity());


3635 
3636   assert_counter(_capacity_words, chunk_stats.cap(), "SpaceManager::_capacity_words");
3637   assert_counter(_used_words, chunk_stats.used(), "SpaceManager::_used_words");
3638   assert_counter(_overhead_words, chunk_stats.overhead(), "SpaceManager::_overhead_words");




3639 }
3640 
3641 void SpaceManager::verify_metrics() const {
3642   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3643   verify_metrics_locked();

3644 }
3645 #endif // ASSERT
3646 











3647 







3648 
3649 // MetaspaceUtils
3650 size_t MetaspaceUtils::_capacity_words [Metaspace:: MetadataTypeCount] = {0, 0};
3651 size_t MetaspaceUtils::_overhead_words [Metaspace:: MetadataTypeCount] = {0, 0};
3652 volatile size_t MetaspaceUtils::_used_words [Metaspace:: MetadataTypeCount] = {0, 0};
3653 
3654 // Collect used metaspace statistics. This involves walking the CLDG. The resulting
3655 // output will be the accumulated values for all live metaspaces.
3656 // Note: this method does not do any locking.
3657 void MetaspaceUtils::collect_statistics(ClassLoaderMetaspaceStatistics* out) {
3658   out->reset();
3659   ClassLoaderDataGraphMetaspaceIterator iter;
3660   while (iter.repeat()) {
3661     ClassLoaderMetaspace* msp = iter.get_next();
3662     if (msp != NULL) {
3663       msp->add_to_statistics(out);
3664     }
3665   }

3666 }
3667 
3668 size_t MetaspaceUtils::free_in_vs_bytes(Metaspace::MetadataType mdtype) {
3669   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3670   return list == NULL ? 0 : list->free_bytes();







3671 }
3672 
3673 size_t MetaspaceUtils::free_in_vs_bytes() {
3674   return free_in_vs_bytes(Metaspace::ClassType) + free_in_vs_bytes(Metaspace::NonClassType);













3675 }
3676 
3677 static void inc_stat_nonatomically(size_t* pstat, size_t words) {
3678   assert_lock_strong(MetaspaceExpand_lock);
3679   (*pstat) += words;
3680 }









3681 
3682 static void dec_stat_nonatomically(size_t* pstat, size_t words) {
3683   assert_lock_strong(MetaspaceExpand_lock);
3684   const size_t size_now = *pstat;
3685   assert(size_now >= words, "About to decrement counter below zero "
3686          "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ".",
3687          size_now, words);
3688   *pstat = size_now - words;
3689 }
3690 
3691 static void inc_stat_atomically(volatile size_t* pstat, size_t words) {
3692   Atomic::add(words, pstat);
3693 }
3694 
3695 static void dec_stat_atomically(volatile size_t* pstat, size_t words) {
3696   const size_t size_now = *pstat;
3697   assert(size_now >= words, "About to decrement counter below zero "
3698          "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ".",
3699          size_now, words);
3700   Atomic::sub(words, pstat);
3701 }
3702 
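     // Why two flavors of helpers above (observation): capacity and overhead
     // only change under the global MetaspaceExpand_lock, so plain updates
     // suffice; used_words is updated under the per-SpaceManager lock (see
     // SpaceManager::account_for_allocation), so its global counter must be
     // maintained atomically.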
3703 void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
3704   dec_stat_nonatomically(&_capacity_words[mdtype], words);
3705 }
3706 void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
3707   inc_stat_nonatomically(&_capacity_words[mdtype], words);
3708 }
3709 void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
3710   dec_stat_atomically(&_used_words[mdtype], words);
3711 }
3712 void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
3713   inc_stat_atomically(&_used_words[mdtype], words);
3714 }
3715 void MetaspaceUtils::dec_overhead(Metaspace::MetadataType mdtype, size_t words) {
3716   dec_stat_nonatomically(&_overhead_words[mdtype], words);
3717 }
3718 void MetaspaceUtils::inc_overhead(Metaspace::MetadataType mdtype, size_t words) {
3719   inc_stat_nonatomically(&_overhead_words[mdtype], words);
3720 }
3721 
3722 size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
3723   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3724   return list == NULL ? 0 : list->reserved_bytes();
3725 }
3726 
3727 size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
3728   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3729   return list == NULL ? 0 : list->committed_bytes();
3730 }
3731 
3732 size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
3733 
3734 size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
3735   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
3736   if (chunk_manager == NULL) {
3737     return 0;
3738   }
3739   chunk_manager->slow_verify();


3781                 "reserved "  SIZE_FORMAT "K",
3782                 used_bytes()/K,
3783                 capacity_bytes()/K,
3784                 committed_bytes()/K,
3785                 reserved_bytes()/K);
3786 
3787   if (Metaspace::using_class_space()) {
3788     Metaspace::MetadataType ct = Metaspace::ClassType;
3789     out->print_cr("  class space    "
3790                   "used "      SIZE_FORMAT "K, "
3791                   "capacity "  SIZE_FORMAT "K, "
3792                   "committed " SIZE_FORMAT "K, "
3793                   "reserved "  SIZE_FORMAT "K",
3794                   used_bytes(ct)/K,
3795                   capacity_bytes(ct)/K,
3796                   committed_bytes(ct)/K,
3797                   reserved_bytes(ct)/K);
3798   }
3799 }
3800 
3801 class PrintCLDMetaspaceInfoClosure : public CLDClosure {















































































3802 private:
3803   outputStream* const _out;
3804   const size_t        _scale;
3805   const bool          _do_print;
3806   const bool          _break_down_by_chunktype;
3807 
3808 public:















3809 
3810   uintx                           _num_loaders;
3811   ClassLoaderMetaspaceStatistics  _stats_total;
3812 
3813   uintx                           _num_loaders_by_spacetype [Metaspace::MetaspaceTypeCount];
3814   ClassLoaderMetaspaceStatistics  _stats_by_spacetype [Metaspace::MetaspaceTypeCount];




















3815 
3816 public:
3817   PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale, bool do_print, bool break_down_by_chunktype)
3818     : _out(out), _scale(scale), _do_print(do_print), _break_down_by_chunktype(break_down_by_chunktype)
3819     , _num_loaders(0)
3820   {
3821     memset(_num_loaders_by_spacetype, 0, sizeof(_num_loaders_by_spacetype));
3822   }
3823 
3824   void do_cld(ClassLoaderData* cld) {
3825 
3826     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
3827 

3828     ClassLoaderMetaspace* msp = cld->metaspace_or_null();
3829     if (msp == NULL) {
3830       return;
3831     }
3832 
3833     // Collect statistics for this class loader metaspace
3834     ClassLoaderMetaspaceStatistics this_cld_stat;
3835     msp->add_to_statistics(&this_cld_stat);
3836 
3837     // And add it to the running totals
3838     _stats_total.add(this_cld_stat);
3839     _num_loaders ++;
3840     _stats_by_spacetype[msp->space_type()].add(this_cld_stat);
3841     _num_loaders_by_spacetype[msp->space_type()] ++;
3842 
3843     // Optionally, print.
3844     if (_do_print) {
3845 
3846       _out->print(UINTX_FORMAT_W(4) ": ", _num_loaders);
3847 
3848       if (cld->is_anonymous()) {
3849         _out->print("ClassLoaderData " PTR_FORMAT " for anonymous class", p2i(cld));

3850       } else {
3851         ResourceMark rm;
3852         _out->print("ClassLoaderData " PTR_FORMAT " for %s", p2i(cld), cld->loader_name());
3853       }
3854 
3855       if (cld->is_unloading()) {
3856         _out->print(" (unloading)");
3857       }
3858 
3859       this_cld_stat.print_on(_out, _scale, _break_down_by_chunktype);
3860       _out->cr();
3861 
3862     }
3863 
3864   } // do_cld
3865 

3866 };
3867 
3868 void MetaspaceUtils::print_vs(outputStream* out, size_t scale) {
3869   const size_t reserved_nonclass_words = reserved_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
3870   const size_t committed_nonclass_words = committed_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
3871   {
3872     if (Metaspace::using_class_space()) {
3873       out->print("  Non-class space:  ");











3874     }
3875     print_scaled_words(out, reserved_nonclass_words, scale, 7);
3876     out->print(" reserved, ");
3877     print_scaled_words_and_percentage(out, committed_nonclass_words, reserved_nonclass_words, scale, 7);
3878     out->print_cr(" committed ");
3879 
3880     if (Metaspace::using_class_space()) {
3881       const size_t reserved_class_words = reserved_bytes(Metaspace::ClassType) / sizeof(MetaWord);
3882       const size_t committed_class_words = committed_bytes(Metaspace::ClassType) / sizeof(MetaWord);
3883       out->print("      Class space:  ");
3884       print_scaled_words(out, reserved_class_words, scale, 7);
3885       out->print(" reserved, ");
3886       print_scaled_words_and_percentage(out, committed_class_words, reserved_class_words, scale, 7);
3887       out->print_cr(" committed ");
3888 
3889       const size_t reserved_words = reserved_nonclass_words + reserved_class_words;
3890       const size_t committed_words = committed_nonclass_words + committed_class_words;
3891       out->print("             Both:  ");
3892       print_scaled_words(out, reserved_words, scale, 7);
3893       out->print(" reserved, ");
3894       print_scaled_words_and_percentage(out, committed_words, reserved_words, scale, 7);
3895       out->print_cr(" committed ");
3896     }
3897   }
3898 }
3899 
3900 // This will print out a basic metaspace usage report but,
3901 // unlike print_report(), is guaranteed not to lock or to walk the CLDG.
3902 void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {
3903 
3904   out->cr();
3905   out->print_cr("Usage:");
3906 
3907   if (Metaspace::using_class_space()) {
3908     out->print("  Non-class:  ");
3909   }
3910 
3911   // In its most basic form, we do not require walking the CLDG. Instead, just print the running totals from
3912   // MetaspaceUtils.
3913   const size_t cap_nc = MetaspaceUtils::capacity_words(Metaspace::NonClassType);
3914   const size_t overhead_nc = MetaspaceUtils::overhead_words(Metaspace::NonClassType);
3915   const size_t used_nc = MetaspaceUtils::used_words(Metaspace::NonClassType);
3916   const size_t free_and_waste_nc = cap_nc - overhead_nc - used_nc;
3917 
3918   print_scaled_words(out, cap_nc, scale, 5);
3919   out->print(" capacity, ");
3920   print_scaled_words_and_percentage(out, used_nc, cap_nc, scale, 5);
3921   out->print(" used, ");
3922   print_scaled_words_and_percentage(out, free_and_waste_nc, cap_nc, scale, 5);
3923   out->print(" free+waste, ");
3924   print_scaled_words_and_percentage(out, overhead_nc, cap_nc, scale, 5);
3925   out->print(" overhead. ");
3926   out->cr();
3927 
3928   if (Metaspace::using_class_space()) {
3929     const size_t cap_c = MetaspaceUtils::capacity_words(Metaspace::ClassType);
3930     const size_t overhead_c = MetaspaceUtils::overhead_words(Metaspace::ClassType);
3931     const size_t used_c = MetaspaceUtils::used_words(Metaspace::ClassType);
3932     const size_t free_and_waste_c = cap_c - overhead_c - used_c;
3933     out->print("      Class:  ");
3934     print_scaled_words(out, cap_c, scale, 5);
3935     out->print(" capacity, ");
3936     print_scaled_words_and_percentage(out, used_c, cap_c, scale, 5);
3937     out->print(" used, ");
3938     print_scaled_words_and_percentage(out, free_and_waste_c, cap_c, scale, 5);
3939     out->print(" free+waste, ");
3940     print_scaled_words_and_percentage(out, overhead_c, cap_c, scale, 5);
3941     out->print(" overhead. ");
3942     out->cr();
3943 
3944     out->print("       Both:  ");
3945     const size_t cap = cap_nc + cap_c;
3946 
3947     print_scaled_words(out, cap, scale, 5);
3948     out->print(" capacity, ");
3949     print_scaled_words_and_percentage(out, used_nc + used_c, cap, scale, 5);
3950     out->print(" used, ");
3951     print_scaled_words_and_percentage(out, free_and_waste_nc + free_and_waste_c, cap, scale, 5);
3952     out->print(" free+waste, ");
3953     print_scaled_words_and_percentage(out, overhead_nc + overhead_c, cap, scale, 5);
3954     out->print(" overhead. ");
3955     out->cr();
3956   }
3957 
3958   out->cr();
3959   out->print_cr("Virtual space:");
3960 
3961   print_vs(out, scale);
3962 
3963   out->cr();
3964   out->print_cr("Chunk freelists:");
3965 
3966   if (Metaspace::using_class_space()) {
3967     out->print("   Non-Class:  ");
3968   }
3969   out->print_human_readable_size(Metaspace::chunk_manager_metadata()->free_chunks_total_words(), scale);
3970   out->cr();
3971   if (Metaspace::using_class_space()) {
3972     out->print("       Class:  ");
3973     out->print_human_readable_size(Metaspace::chunk_manager_class()->free_chunks_total_words(), scale);
3974     out->cr();
3975     out->print("        Both:  ");
3976     out->print_human_readable_size(Metaspace::chunk_manager_class()->free_chunks_total_words() +
3977                        Metaspace::chunk_manager_metadata()->free_chunks_total_words(), scale);
3978     out->cr();
3979   }
3980   out->cr();
3981 
3982 }
3983 
3984 void MetaspaceUtils::print_report(outputStream* out, size_t scale, int flags) {
3985 
3986   const bool print_loaders = (flags & rf_show_loaders) > 0;
3987   const bool print_by_chunktype = (flags & rf_break_down_by_chunktype) > 0;
3988   const bool print_by_spacetype = (flags & rf_break_down_by_spacetype) > 0;
3989 
3990   // Some report options require walking the class loader data graph.
3991   PrintCLDMetaspaceInfoClosure cl(out, scale, print_loaders, print_by_chunktype);
3992   if (print_loaders) {
3993     out->cr();
3994     out->print_cr("Usage per loader:");
3995     out->cr();
3996   }
3997 
3998   ClassLoaderDataGraph::cld_do(&cl); // collect data and optionally print
3999 
4000   // Print totals, broken up by space type.
4001   if (print_by_spacetype) {
4002     out->cr();
4003     out->print_cr("Usage per space type:");
4004     out->cr();
4005     for (int space_type = (int)Metaspace::ZeroMetaspaceType;
4006          space_type < (int)Metaspace::MetaspaceTypeCount; space_type ++)
4007     {
4008       uintx num = cl._num_loaders_by_spacetype[space_type];
4009       out->print("%s (" UINTX_FORMAT " loader%s)%c",
4010         space_type_name((Metaspace::MetaspaceType)space_type),
4011         num, (num == 1 ? "" : "s"), (num > 0 ? ':' : '.'));
4012       if (num > 0) {
4013         cl._stats_by_spacetype[space_type].print_on(out, scale, print_by_chunktype);
4014       }
4015       out->cr();
4016     }
4017   }
4018 
4019   // Print totals for in-use data:
4020   out->cr();
4021   out->print_cr("Total Usage ( " UINTX_FORMAT " loader%s)%c",
4022       cl._num_loaders, (cl._num_loaders == 1 ? "" : "s"), (cl._num_loaders > 0 ? ':' : '.'));
4023 
4024   cl._stats_total.print_on(out, scale, print_by_chunktype);


4025 
4026   // -- Print Virtual space.
4027   out->cr();
4028   out->print_cr("Virtual space:");
4029 
4030   print_vs(out, scale);
4031 
4032   // -- Print VirtualSpaceList details.
4033   if ((flags & rf_show_vslist) > 0) {
4034     out->cr();
4035     out->print_cr("Virtual space list%s:", Metaspace::using_class_space() ? "s" : "");
4036 
4037     if (Metaspace::using_class_space()) {
4038       out->print_cr("   Non-Class:");

4039     }
4040     Metaspace::space_list()->print_on(out, scale);
4041     if (Metaspace::using_class_space()) {
4042       out->print_cr("       Class:");
4043       Metaspace::class_space_list()->print_on(out, scale);
4044     }
4045   }
4046   out->cr();
4047 
4048   // -- Print VirtualSpaceList map.
4049   if ((flags & rf_show_vsmap) > 0) {
4050     out->cr();
4051     out->print_cr("Virtual space map:");
4052 
4053     if (Metaspace::using_class_space()) {
4054       out->print_cr("   Non-Class:");
4055     }
4056     Metaspace::space_list()->print_map(out);
4057     if (Metaspace::using_class_space()) {
4058       out->print_cr("       Class:");
4059       Metaspace::class_space_list()->print_map(out);
4060     }
4061   }
4062   out->cr();
4063 
4064   // -- Print Freelists (ChunkManager) details
4065   out->cr();
4066   out->print_cr("Chunk freelist%s:", Metaspace::using_class_space() ? "s" : "");
4067 
4068   ChunkManagerStatistics non_class_cm_stat;
4069   Metaspace::chunk_manager_metadata()->collect_statistics(&non_class_cm_stat);
4070 
4071   if (Metaspace::using_class_space()) {
4072     out->print_cr("   Non-Class:");

4073   }
4074   non_class_cm_stat.print_on(out, scale);
4075 






4076   if (Metaspace::using_class_space()) {
4077     ChunkManagerStatistics class_cm_stat;
4078     Metaspace::chunk_manager_class()->collect_statistics(&class_cm_stat);
4079     out->print_cr("       Class:");
4080     class_cm_stat.print_on(out, scale);
4081   }
4082 
4083   // As a convenience, print a summary of common waste.
4084   out->cr();
4085   out->print("Waste: ");
4086   // For all wastages, print percentages from total. As total use the total size of memory committed for metaspace.
4087   const size_t committed_words = committed_bytes() / BytesPerWord;
4088 
4089   out->print("Percentage values refer to total committed size ");
4090   print_scaled_words(out, committed_words, scale);
4091   out->print_cr(").");
4092 
4093   // Print space committed but not yet used by any class loader
4094   const size_t unused_words_in_vs = MetaspaceUtils::free_in_vs_bytes() / BytesPerWord;
4095   out->print("              Committed unused: ");
4096   print_scaled_words_and_percentage(out, unused_words_in_vs, committed_words, scale, 6);
4097   out->cr();
4098 
4099   // Print waste for in-use chunks.
4100   UsedChunksStatistics ucs_nonclass = cl._stats_total.nonclass_sm_stats().totals();
4101   UsedChunksStatistics ucs_class = cl._stats_total.class_sm_stats().totals();
4102   UsedChunksStatistics ucs_all;
4103   ucs_all.add(ucs_nonclass);
4104   ucs_all.add(ucs_class);
4105 
4106   out->print("        Waste in chunks in use: ");
4107   print_scaled_words_and_percentage(out, ucs_all.waste(), committed_words, scale, 6);
4108   out->cr();
4109   out->print("         Free in chunks in use: ");
4110   print_scaled_words_and_percentage(out, ucs_all.free(), committed_words, scale, 6);
4111   out->cr();
4112   out->print("     Overhead in chunks in use: ");
4113   print_scaled_words_and_percentage(out, ucs_all.overhead(), committed_words, scale, 6);
4114   out->cr();
4115 
4116   // Print waste in free chunks.
4117   const size_t total_capacity_in_free_chunks =
4118       Metaspace::chunk_manager_metadata()->free_chunks_total_words() +
4119      (Metaspace::using_class_space() ? Metaspace::chunk_manager_class()->free_chunks_total_words() : 0);
4120   out->print("                In free chunks: ");
4121   print_scaled_words_and_percentage(out, total_capacity_in_free_chunks, committed_words, scale, 6);
4122   out->cr();
4123 
4124   // Print waste in deallocated blocks.
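       // (These are blocks that were handed back via deallocate() and now sit in the
       // space managers' block free lists, awaiting reuse.)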
4125   const uintx free_blocks_num =
4126       cl._stats_total.nonclass_sm_stats().free_blocks_num() +
4127       cl._stats_total.class_sm_stats().free_blocks_num();
4128   const size_t free_blocks_cap_words =
4129       cl._stats_total.nonclass_sm_stats().free_blocks_cap_words() +
4130       cl._stats_total.class_sm_stats().free_blocks_cap_words();
4131   out->print("Deallocated from chunks in use: ");
4132   print_scaled_words_and_percentage(out, free_blocks_cap_words, committed_words, scale, 6);
4133   out->print(" ("UINTX_FORMAT " blocks)", free_blocks_num);
4134   out->cr();
4135 
4136   // Print total waste.
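       // This is the sum of the individual components printed above: waste, free
       // space and overhead in in-use chunks, capacity parked in free chunks,
       // deallocated blocks, and committed-but-unused virtual space.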
4137   const size_t total_waste = ucs_all.waste() + ucs_all.free() + ucs_all.overhead() + total_capacity_in_free_chunks
4138       + free_blocks_cap_words + unused_words_in_vs;
4139   out->print("                       -total-: ");
4140   print_scaled_words_and_percentage(out, total_waste, committed_words, scale, 6);
4141   out->cr();
4142 
4143   // Print internal statistics
4144 #ifdef ASSERT
4145   out->cr();
4146   out->cr();
4147   out->print_cr("Internal statistics:");
4148   out->cr();
4149   out->print_cr("Number of allocations: " UINTX_FORMAT ".", g_internal_statistics.num_allocs);
4150   out->print_cr("Number of space births: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_births);
4151   out->print_cr("Number of space deaths: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_deaths);
4152   out->print_cr("Number of virtual space node births: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_created);
4153   out->print_cr("Number of virtual space node deaths: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_purged);
4154   out->print_cr("Number of times virtual space nodes were expanded: " UINTX_FORMAT ".", g_internal_statistics.num_committed_space_expanded);
4155   out->print_cr("Number of deallocations: " UINTX_FORMAT " (" UINTX_FORMAT " external).", g_internal_statistics.num_deallocs, g_internal_statistics.num_external_deallocs);
4156   out->print_cr("Allocations from deallocated blocks: " UINTX_FORMAT ".", g_internal_statistics.num_allocs_from_deallocated_blocks);
4157   out->cr();
4158 #endif
4159 
4160   // Print some interesting settings
4161   out->cr();
4162   out->cr();
4163   out->print("MaxMetaspaceSize: ");
4164   out->print_human_readable_size(MaxMetaspaceSize, scale);
4165   out->cr();
4166   out->print("InitialBootClassLoaderMetaspaceSize: ");
4167   out->print_human_readable_size(InitialBootClassLoaderMetaspaceSize, scale);
4168   out->cr();
4169 
4170   out->print("UseCompressedClassPointers: %s", UseCompressedClassPointers ? "true" : "false");
4171   out->cr();
4172   if (Metaspace::using_class_space()) {
4173     out->print("CompressedClassSpaceSize: ");
4174     out->print_human_readable_size(CompressedClassSpaceSize, scale);
4175   }
4176 
4177   out->cr();
4178   out->cr();
4179 
4180 } // MetaspaceUtils::print_report()
4181 
4182 // Prints an ASCII representation of the given space.
4183 void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
4184   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
4185   const bool for_class = (mdtype == Metaspace::ClassType);
4186   VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
4187   if (vsl != NULL) {
4188     if (for_class) {
4189       if (!Metaspace::using_class_space()) {
4190         out->print_cr("No Class Space.");
4191         return;
4192       }
4193       out->print_raw("---- Metaspace Map (Class Space) ----");
4194     } else {
4195       out->print_raw("---- Metaspace Map (Non-Class Space) ----");
4196     }
4197     // Print legend:
4198     out->cr();
4199     out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
4200     out->cr();
4202     vsl->print_map(out);
4203     out->cr();
4204   }
4205 }
4206 
4207 void MetaspaceUtils::verify_free_chunks() {
4208   Metaspace::chunk_manager_metadata()->verify();
4209   if (Metaspace::using_class_space()) {
4210     Metaspace::chunk_manager_class()->verify();
4211   }
4212 }
4213 
4214 void MetaspaceUtils::verify_metrics() {
4215 #ifdef ASSERT
4216   // Please note: there are time windows where the internal counters are out of sync with
4217   // reality. For example, when a newly created ClassLoaderMetaspace creates its first chunk -
4218   // the ClassLoaderMetaspace is not yet attached to its ClassLoaderData object and hence will
4219   // not be counted when iterating the CLDG. So be careful when you call this method.
4220   ClassLoaderMetaspaceStatistics total_stat;
4221   collect_statistics(&total_stat);
4222   UsedChunksStatistics nonclass_chunk_stat = total_stat.nonclass_sm_stats().totals();
4223   UsedChunksStatistics class_chunk_stat = total_stat.class_sm_stats().totals();
4224 
4225   bool mismatch = false;
4226   for (int i = 0; i < Metaspace::MetadataTypeCount; i ++) {
4227     Metaspace::MetadataType mdtype = (Metaspace::MetadataType)i;
4228     UsedChunksStatistics chunk_stat = total_stat.sm_stats(mdtype).totals();
4229     if (capacity_words(mdtype) != chunk_stat.cap() ||
4230         used_words(mdtype) != chunk_stat.used() ||
4231         overhead_words(mdtype) != chunk_stat.overhead()) {
4232       mismatch = true;
4233       tty->print_cr("MetaspaceUtils::verify_metrics: counter mismatch for mdtype=%u:", mdtype);
4234       tty->print_cr("Expected cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
4235                     capacity_words(mdtype), used_words(mdtype), overhead_words(mdtype));
4236       tty->print_cr("Got cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
4237                     chunk_stat.cap(), chunk_stat.used(), chunk_stat.overhead());
4238       tty->flush();
4239     }




















4240   }
4241   assert(mismatch == false, "MetaspaceUtils::verify_metrics: counter mismatch.");
4242 #endif
4243 }
4244 





4245 
4246 // Metaspace methods
4247 
4248 size_t Metaspace::_first_chunk_word_size = 0;
4249 size_t Metaspace::_first_class_chunk_word_size = 0;
4250 
4251 size_t Metaspace::_commit_alignment = 0;
4252 size_t Metaspace::_reserve_alignment = 0;
4253 
4254 VirtualSpaceList* Metaspace::_space_list = NULL;
4255 VirtualSpaceList* Metaspace::_class_space_list = NULL;
4256 
4257 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
4258 ChunkManager* Metaspace::_chunk_manager_class = NULL;
4259 
4260 #define VIRTUALSPACEMULTIPLIER 2
4261 
4262 #ifdef _LP64
4263 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
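     // With an unscaled narrow-klass encoding (base = 0, shift = 0) only the first
     // 4G of address space are reachable, hence this limit.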
4264 


4423   // If we got here then the metaspace got allocated.
4424   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
4425 
4426 #if INCLUDE_CDS
4427   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
4428   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
4429     FileMapInfo::stop_sharing_and_unmap(
4430         "Could not allocate metaspace at a compatible address");
4431   }
4432 #endif
4433   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
4434                                   UseSharedSpaces ? (address)cds_base : 0);
4435 
4436   initialize_class_space(metaspace_rs);
4437 
4438   LogTarget(Trace, gc, metaspace) lt;
4439   if (lt.is_enabled()) {
4440     ResourceMark rm;
4441     LogStream ls(lt);
4442     print_compressed_class_space(&ls, requested_addr);
4443     ls.cr(); // ~LogStream does not autoflush.
4444   }
4445 }
4446 
4447 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
4448   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
4449                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
4450   if (_class_space_list != NULL) {
4451     address base = (address)_class_space_list->current_virtual_space()->bottom();
4452     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
4453                  compressed_class_space_size(), p2i(base));
4454     if (requested_addr != 0) {
4455       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
4456     }
4457     st->cr();
4458   }
4459 }
4460 
4461 // For UseCompressedClassPointers the class space is reserved above the top of
4462 // the Java heap.  The argument passed in is at the base of the compressed space.
4463 void Metaspace::initialize_class_space(ReservedSpace rs) {


4645 
4646   // Zero initialize.
4647   Copy::fill_to_words((HeapWord*)result, word_size, 0);
4648 
4649   return result;
4650 }
4651 
4652 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
4653   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
4654 
4655   // If result is still null, we are out of memory.
4656   Log(gc, metaspace, freelist) log;
4657   if (log.is_info()) {
4658     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
4659              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
4660     ResourceMark rm;
4661     if (log.is_debug()) {
4662       if (loader_data->metaspace_or_null() != NULL) {
4663         LogStream ls(log.debug());
4664         loader_data->print_value_on(&ls);
4665         ls.cr(); // ~LogStream does not autoflush.
4666       }
4667     }
4668     LogStream ls(log.info());
4669     // In case of an OOM, log out a short but still useful report.
4670     MetaspaceUtils::print_basic_report(&ls, 0);
4671     ls.cr(); // ~LogStream does not autoflush.
4672   }
4673 
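       // Determine whether we actually ran out of compressed class space: that is the
       // case if this was a class space allocation and committing the required chunk
       // would push committed class space beyond CompressedClassSpaceSize.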
4674   bool out_of_compressed_class_space = false;
4675   if (is_class_space_allocation(mdtype)) {
4676     ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
4677     out_of_compressed_class_space =
4678       MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
4679       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
4680       CompressedClassSpaceSize;
4681   }
4682 
4683   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
4684   const char* space_string = out_of_compressed_class_space ?
4685     "Compressed class space" : "Metaspace";
4686 
4687   report_java_out_of_memory(space_string);
4688 
4689   if (JvmtiExport::should_post_resource_exhausted()) {
4690     JvmtiExport::post_resource_exhausted(
4691         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,


4726   }
4727 }
4728 
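     // Returns true if ptr points into the shared (CDS) metaspace or into one of the
     // regular metaspace virtual space lists.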
4729 bool Metaspace::contains(const void* ptr) {
4730   if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
4731     return true;
4732   }
4733   return contains_non_shared(ptr);
4734 }
4735 
4736 bool Metaspace::contains_non_shared(const void* ptr) {
4737   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
4738      return true;
4739   }
4740 
4741   return get_space_list(NonClassType)->contains(ptr);
4742 }
4743 
4744 // ClassLoaderMetaspace
4745 
4746 ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type)
4747   : _lock(lock)
4748   , _space_type(type)
4749   , _vsm(NULL)
4750   , _class_vsm(NULL)
4751 {
4752   initialize(lock, type);
4753 }
4754 
4755 ClassLoaderMetaspace::~ClassLoaderMetaspace() {
4756   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_deaths));
4757   delete _vsm;
4758   if (Metaspace::using_class_space()) {
4759     delete _class_vsm;
4760   }
4761 }
4762 
4763 void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4764   Metachunk* chunk = get_initialization_chunk(type, mdtype);
4765   if (chunk != NULL) {
4766     // Add to this manager's list of chunks in use and make it the current_chunk().
4767     get_space_manager(mdtype)->add_chunk(chunk, true);
4768   }
4769 }
4770 
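     // Returns a chunk suitable as the first chunk of a new metaspace of the given
     // type: taken from the chunk freelist if possible, otherwise carved from the
     // virtual space list. Returns NULL if neither source can satisfy the request.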
4771 Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4772   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
4773 
4774   // Get a chunk from the chunk freelist
4775   Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
4776 
4777   if (chunk == NULL) {
4778     chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
4779                                                   get_space_manager(mdtype)->medium_chunk_bunch());
4780   }
4781 
4782   return chunk;
4783 }
4784 
4785 void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
4786   Metaspace::verify_global_initialization();
4787 
4788   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_births));
4789 
4790   // Allocate SpaceManager for metadata objects.
4791   _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);
4792 
4793   if (Metaspace::using_class_space()) {
4794     // Allocate SpaceManager for classes.
4795     _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
4796   }
4797 
4798   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
4799 
4800   // Allocate chunk for metadata objects
4801   initialize_first_chunk(type, Metaspace::NonClassType);
4802 
4803   // Allocate chunk for class metadata objects
4804   if (Metaspace::using_class_space()) {
4805     initialize_first_chunk(type, Metaspace::ClassType);
4806   }
4807 }
4808 
4809 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4810   Metaspace::assert_not_frozen();
4811 
4812   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs));
4813 
4814   // Don't use class_vsm() unless UseCompressedClassPointers is true.
4815   if (Metaspace::is_class_space_allocation(mdtype)) {
4816     return class_vsm()->allocate(word_size);
4817   } else {
4818     return vsm()->allocate(word_size);
4819   }
4820 }
4821 
4822 MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4823   Metaspace::assert_not_frozen();
4824   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
4825   assert(delta_bytes > 0, "Must be");
4826 
4827   size_t before = 0;
4828   size_t after = 0;
4829   MetaWord* res;
4830   bool incremented;
4831 
4832   // Each thread increments the HWM at most once. Even if the thread fails to increment
4833   // the HWM, an allocation is still attempted. This is because another thread must then
4834   // have incremented the HWM and therefore the allocation might still succeed.
4835   do {
4836     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
4837     res = allocate(word_size, mdtype);
4838   } while (!incremented && res == NULL);
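       // We leave the loop as soon as an allocation succeeds, or once this thread
       // itself raised the HWM; in the latter case a still-failing allocation makes
       // us return NULL below.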
4839 
4840   if (incremented) {
4841     Metaspace::tracer()->report_gc_threshold(before, after,
4842                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
4843     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
4844   }
4845 
4846   return res;
4847 }
4848 






































4849 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
4850   return (vsm()->used_words() +
4851       (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
4852 }
4853 
4854 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
4855   return (vsm()->capacity_words() +
4856       (Metaspace::using_class_space() ? class_vsm()->capacity_words() : 0)) * BytesPerWord;
4857 }
4858 
4859 void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
4860   Metaspace::assert_not_frozen();
4861   assert(!SafepointSynchronize::is_at_safepoint()
4862          || Thread::current()->is_VM_thread(), "should be the VM thread");
4863 
4864   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_external_deallocs));
4865 
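       // Note: both space managers were created with the same lock (see initialize()),
       // so taking vsm()'s lock covers class space deallocation as well.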
4866   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
4867 
4868   if (is_class && Metaspace::using_class_space()) {
4869     class_vsm()->deallocate(ptr, word_size);
4870   } else {
4871     vsm()->deallocate(ptr, word_size);
4872   }
4873 }
4874 
4875 size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
4876   assert(Metaspace::using_class_space(), "Has to use class space");
4877   return class_vsm()->calc_chunk_size(word_size);
4878 }
4879 
4880 void ClassLoaderMetaspace::print_on(outputStream* out) const {
4881   // Print both class virtual space counts and metaspace.
4882   if (Verbose) {
4883     vsm()->print_on(out);
4884     if (Metaspace::using_class_space()) {
4885       class_vsm()->print_on(out);
4886     }
4887   }
4888 }
4889 
4890 void ClassLoaderMetaspace::verify() {
4891   vsm()->verify();
4892   if (Metaspace::using_class_space()) {
4893     class_vsm()->verify();
4894   }
4895 }
4896 
4897 void ClassLoaderMetaspace::add_to_statistics_locked(ClassLoaderMetaspaceStatistics* out) const {
4898   assert_lock_strong(lock());
4899   vsm()->add_to_statistics_locked(&out->nonclass_sm_stats());
4900   if (Metaspace::using_class_space()) {
4901     class_vsm()->add_to_statistics_locked(&out->class_sm_stats());

4902   }
4903 }
4904 
4905 void ClassLoaderMetaspace::add_to_statistics(ClassLoaderMetaspaceStatistics* out) const {
4906   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
4907   add_to_statistics_locked(out);
4908 }
4909 
4910 #ifdef ASSERT
4911 static void do_verify_chunk(Metachunk* chunk) {
4912   guarantee(chunk != NULL, "Sanity");
4913   // Verify chunk itself; then verify that it is consistent with the
4914   // occupancy map of its containing node.
4915   chunk->verify();
4916   VirtualSpaceNode* const vsn = chunk->container();
4917   OccupancyMap* const ocmap = vsn->occupancy_map();
4918   ocmap->verify_for_chunk(chunk);
4919 }
4920 #endif
4921 
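     // Marks the chunk as in-use or free, both in the chunk header itself and in the
     // occupancy map of its containing virtual space node.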
4922 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
4923   chunk->set_is_tagged_free(!inuse);
4924   OccupancyMap* const ocmap = chunk->container()->occupancy_map();
4925   ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
4926 }
4927 
4928 /////////////// Unit tests ///////////////


5234     test_adjust_initial_chunk_size(false);
5235     test_adjust_initial_chunk_size(true);
5236   }
5237 };
5238 
5239 void SpaceManager_test_adjust_initial_chunk_size() {
5240   SpaceManagerTest::test_adjust_initial_chunk_size();
5241 }
5242 
5243 #endif // ASSERT
5244 
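     // The structs and functions below expose chunk statistics to the metaspace
     // unit tests (hence the plain C-style out parameters); they are not used by
     // the VM itself.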
5245 struct chunkmanager_statistics_t {
5246   int num_specialized_chunks;
5247   int num_small_chunks;
5248   int num_medium_chunks;
5249   int num_humongous_chunks;
5250 };
5251 
5252 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
5253   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
5254   ChunkManagerStatistics stat;
5255   chunk_manager->collect_statistics(&stat);
5256   out->num_specialized_chunks = (int)stat.chunk_stats(SpecializedIndex).num();
5257   out->num_small_chunks = (int)stat.chunk_stats(SmallIndex).num();
5258   out->num_medium_chunks = (int)stat.chunk_stats(MediumIndex).num();
5259   out->num_humongous_chunks = (int)stat.chunk_stats(HumongousIndex).num();
5260 }
5261 
5262 struct chunk_geometry_t {
5263   size_t specialized_chunk_word_size;
5264   size_t small_chunk_word_size;
5265   size_t medium_chunk_word_size;
5266 };
5267 
5268 extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
5269   if (mdType == Metaspace::NonClassType) {
5270     out->specialized_chunk_word_size = SpecializedChunk;
5271     out->small_chunk_word_size = SmallChunk;
5272     out->medium_chunk_word_size = MediumChunk;
5273   } else {
5274     out->specialized_chunk_word_size = ClassSpecializedChunk;
5275     out->small_chunk_word_size = ClassSmallChunk;
5276     out->medium_chunk_word_size = ClassMediumChunk;
5277   }
5278 }