16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24 #include "precompiled.hpp"
25 #include "aot/aotLoader.hpp"
26 #include "gc/shared/collectedHeap.hpp"
27 #include "gc/shared/collectorPolicy.hpp"
28 #include "logging/log.hpp"
29 #include "logging/logStream.hpp"
30 #include "memory/allocation.hpp"
31 #include "memory/binaryTreeDictionary.inline.hpp"
32 #include "memory/filemap.hpp"
33 #include "memory/freeList.inline.hpp"
34 #include "memory/metachunk.hpp"
35 #include "memory/metaspace.hpp"
36 #include "memory/metaspaceGCThresholdUpdater.hpp"
37 #include "memory/metaspaceShared.hpp"
38 #include "memory/metaspaceTracer.hpp"
39 #include "memory/resourceArea.hpp"
40 #include "memory/universe.hpp"
41 #include "runtime/atomic.hpp"
42 #include "runtime/globals.hpp"
43 #include "runtime/init.hpp"
44 #include "runtime/java.hpp"
45 #include "runtime/mutex.hpp"
46 #include "runtime/orderAccess.inline.hpp"
47 #include "services/memTracker.hpp"
48 #include "services/memoryService.hpp"
49 #include "utilities/align.hpp"
50 #include "utilities/copy.hpp"
51 #include "utilities/debug.hpp"
52 #include "utilities/macros.hpp"
53
54 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
55 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
56
57 // Helper function that does a bunch of checks for a chunk.
58 DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
59
60 // Given a Metachunk, update its in-use information (both in the
61 // chunk and the occupancy map).
62 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
63
64 size_t const allocation_from_dictionary_limit = 4 * K;
65
66 MetaWord* last_allocated = 0;
67
68 size_t Metaspace::_compressed_class_space_size;
69 const MetaspaceTracer* Metaspace::_tracer = NULL;
70
71 DEBUG_ONLY(bool Metaspace::_frozen = false;)
72
// Fixed chunk sizes, in words. The "Class" variants are selected when
// allocating for the class metaspace (is_class == true in
// get_size_for_nonhumongous_chunktype()); the others serve the
// non-class metaspace.
enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};
81
82 // Returns size of this chunk type.
83 size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
84 assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
85 size_t size = 0;
86 if (is_class) {
87 switch(chunktype) {
88 case SpecializedIndex: size = ClassSpecializedChunk; break;
89 case SmallIndex: size = ClassSmallChunk; break;
90 case MediumIndex: size = ClassMediumChunk; break;
91 default:
92 ShouldNotReachHere();
116 assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
117 return HumongousIndex;
118 }
119 } else {
120 if (size == SpecializedChunk) {
121 return SpecializedIndex;
122 } else if (size == SmallChunk) {
123 return SmallIndex;
124 } else if (size == MediumChunk) {
125 return MediumIndex;
126 } else if (size > MediumChunk) {
127 // A valid humongous chunk size is a multiple of the smallest chunk size.
128 assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
129 return HumongousIndex;
130 }
131 }
132 ShouldNotReachHere();
133 return (ChunkIndex)-1;
134 }
135
136
137 static ChunkIndex next_chunk_index(ChunkIndex i) {
138 assert(i < NumberOfInUseLists, "Out of bound");
139 return (ChunkIndex) (i+1);
140 }
141
142 static ChunkIndex prev_chunk_index(ChunkIndex i) {
143 assert(i > ZeroIndex, "Out of bound");
144 return (ChunkIndex) (i-1);
145 }
146
147 static const char* scale_unit(size_t scale) {
148 switch(scale) {
149 case 1: return "BYTES";
150 case K: return "KB";
151 case M: return "MB";
152 case G: return "GB";
153 default:
154 ShouldNotReachHere();
155 return NULL;
156 }
157 }
158
159 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
160 uint MetaspaceGC::_shrink_factor = 0;
161 bool MetaspaceGC::_should_concurrent_collect = false;
162
163 typedef class FreeList<Metachunk> ChunkList;
164
165 // Manages the global free lists of chunks.
166 class ChunkManager : public CHeapObj<mtInternal> {
167 friend class TestVirtualSpaceNodeTest;
168
169 // Free list of chunks of different sizes.
170 // SpecializedChunk
171 // SmallChunk
172 // MediumChunk
173 ChunkList _free_chunks[NumberOfFreeLists];
174
175 // Whether or not this is the class chunkmanager.
176 const bool _is_class;
177
178 // Return non-humongous chunk list by its index.
179 ChunkList* free_chunks(ChunkIndex index);
180
181 // Returns non-humongous chunk list for the given chunk word size.
182 ChunkList* find_free_chunks_list(size_t word_size);
223
224 // Helper for chunk merging:
225 // Given an address range with 1-n chunks which are all supposed to be
226 // free and hence currently managed by this ChunkManager, remove them
227 // from this ChunkManager and mark them as invalid.
228 // - This does not correct the occupancy map.
229 // - This does not adjust the counters in ChunkManager.
230 // - Does not adjust container count counter in containing VirtualSpaceNode.
231 // Returns number of chunks removed.
232 int remove_chunks_in_area(MetaWord* p, size_t word_size);
233
234 // Helper for chunk splitting: given a target chunk size and a larger free chunk,
235 // split up the larger chunk into n smaller chunks, at least one of which should be
236 // the target chunk of target chunk size. The smaller chunks, including the target
237 // chunk, are returned to the freelist. The pointer to the target chunk is returned.
238 // Note that this chunk is supposed to be removed from the freelist right away.
239 Metachunk* split_chunk(size_t target_chunk_word_size, Metachunk* chunk);
240
241 public:
242
243 struct ChunkManagerStatistics {
244 size_t num_by_type[NumberOfFreeLists];
245 size_t single_size_by_type[NumberOfFreeLists];
246 size_t total_size_by_type[NumberOfFreeLists];
247 size_t num_humongous_chunks;
248 size_t total_size_humongous_chunks;
249 };
250
251 void locked_get_statistics(ChunkManagerStatistics* stat) const;
252 void get_statistics(ChunkManagerStatistics* stat) const;
253 static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale);
254
255
256 ChunkManager(bool is_class)
257 : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
258 _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
259 _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
260 _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
261 }
262
263 // Add or delete (return) a chunk to the global freelist.
264 Metachunk* chunk_freelist_allocate(size_t word_size);
265
266 // Map a size to a list index assuming that there are lists
267 // for special, small, medium, and humongous chunks.
268 ChunkIndex list_index(size_t size);
269
270 // Map a given index to the chunk size.
271 size_t size_by_index(ChunkIndex index) const;
272
273 bool is_class() const { return _is_class; }
274
275 // Convenience accessors.
341 // Debug support
342 void verify();
343 void slow_verify() {
344 if (VerifyMetaspace) {
345 verify();
346 }
347 }
348 void locked_verify();
349 void slow_locked_verify() {
350 if (VerifyMetaspace) {
351 locked_verify();
352 }
353 }
354 void verify_free_chunks_total();
355
356 void locked_print_free_chunks(outputStream* st);
357 void locked_print_sum_free_chunks(outputStream* st);
358
359 void print_on(outputStream* st) const;
360
361 // Prints composition for both non-class and (if available)
362 // class chunk manager.
363 static void print_all_chunkmanagers(outputStream* out, size_t scale = 1);
364 };
365
366 class SmallBlocks : public CHeapObj<mtClass> {
367 const static uint _small_block_max_size = sizeof(TreeChunk<Metablock, FreeList<Metablock> >)/HeapWordSize;
368 const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
369
370 private:
371 FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
372
373 FreeList<Metablock>& list_at(size_t word_size) {
374 assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
375 return _small_lists[word_size - _small_block_min_size];
376 }
377
378 public:
379 SmallBlocks() {
380 for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
381 uint k = i - _small_block_min_size;
382 _small_lists[k].set_size(i);
383 }
384 }
385
386 size_t total_size() const {
387 size_t result = 0;
388 for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
389 uint k = i - _small_block_min_size;
390 result = result + _small_lists[k].count() * _small_lists[k].size();
391 }
392 return result;
393 }
394
395 static uint small_block_max_size() { return _small_block_max_size; }
396 static uint small_block_min_size() { return _small_block_min_size; }
397
398 MetaWord* get_block(size_t word_size) {
399 if (list_at(word_size).count() > 0) {
400 MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
401 return new_block;
402 } else {
403 return NULL;
404 }
405 }
  // Returns free_chunk (a block of word_size words) to the freelist
  // matching its size.
  void return_block(Metablock* free_chunk, size_t word_size) {
    list_at(word_size).return_chunk_at_head(free_chunk, false);
    assert(list_at(word_size).count() > 0, "Should have a chunk");
  }
410
411 void print_on(outputStream* st) const {
412 st->print_cr("SmallBlocks:");
413 for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
414 uint k = i - _small_block_min_size;
427 // is at least 1/4th the size of the available block.
428 const static int WasteMultiplier = 4;
429
430 // Accessors
431 BlockTreeDictionary* dictionary() const { return _dictionary; }
432 SmallBlocks* small_blocks() {
433 if (_small_blocks == NULL) {
434 _small_blocks = new SmallBlocks();
435 }
436 return _small_blocks;
437 }
438
439 public:
440 BlockFreelist();
441 ~BlockFreelist();
442
443 // Get and return a block to the free list
444 MetaWord* get_block(size_t word_size);
445 void return_block(MetaWord* p, size_t word_size);
446
447 size_t total_size() const {
448 size_t result = dictionary()->total_size();
449 if (_small_blocks != NULL) {
450 result = result + _small_blocks->total_size();
451 }
452 return result;
453 }
454
455 static size_t min_dictionary_size() { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
456 void print_on(outputStream* st) const;
457 };
458
// Helper for Occupancy Bitmap. A type trait to give an all-bits-are-one-unsigned constant.
// Primary template is only declared; use one of the specializations below.
template <typename T> struct all_ones { static const T value; };
// 64-bit variant: all 64 bits set.
template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
// 32-bit variant: all 32 bits set.
template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
463
464 // The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
465 // keeps information about
466 // - where a chunk starts
467 // - whether a chunk is in-use or free
468 // A bit in this bitmap represents one range of memory in the smallest
469 // chunk size (SpecializedChunk or ClassSpecializedChunk).
470 class OccupancyMap : public CHeapObj<mtInternal> {
471
472 // The address range this map covers.
473 const MetaWord* const _reference_address;
474 const size_t _word_size;
840
841 // Allocate a chunk from the virtual space and return it.
842 Metachunk* get_chunk_vs(size_t chunk_word_size);
843
844 // Expands/shrinks the committed space in a virtual space. Delegates
845 // to Virtualspace
846 bool expand_by(size_t min_words, size_t preferred_words);
847
848 // In preparation for deleting this node, remove all the chunks
849 // in the node from any freelist.
850 void purge(ChunkManager* chunk_manager);
851
852 // If an allocation doesn't fit in the current node a new node is created.
853 // Allocate chunks out of the remaining committed space in this node
854 // to avoid wasting that memory.
855 // This always adds up because all the chunk sizes are multiples of
856 // the smallest chunk size.
857 void retire(ChunkManager* chunk_manager);
858
859
860 void print_on(outputStream* st) const;
861 void print_map(outputStream* st, bool is_class) const;
862
863 // Debug support
864 DEBUG_ONLY(void mangle();)
865 // Verify counters, all chunks in this list node and the occupancy map.
866 DEBUG_ONLY(void verify();)
867 // Verify that all free chunks in this node are ideally merged
868 // (there not should be multiple small chunks where a large chunk could exist.)
869 DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
870
871 };
872
873 #define assert_is_aligned(value, alignment) \
874 assert(is_aligned((value), (alignment)), \
875 SIZE_FORMAT_HEX " is not aligned to " \
876 SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
877
878 // Decide if large pages should be committed when the memory is reserved.
879 static bool should_commit_large_pages_when_reserving(size_t bytes) {
880 if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
881 size_t words = bytes / BytesPerWord;
882 bool is_class = false; // We never reserve large pages for the class space.
883 if (MetaspaceGC::can_expand(words, is_class) &&
884 MetaspaceGC::allowed_expansion() >= words) {
885 return true;
886 }
887 }
888
889 return false;
890 }
891
892 // byte_size is the size of the associated virtualspace.
893 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
894 _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
895 assert_is_aligned(bytes, Metaspace::reserve_alignment());
896 bool large_pages = should_commit_large_pages_when_reserving(bytes);
897 _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
1164
1165 bool initialization_succeeded() { return _virtual_space_list != NULL; }
1166
1167 size_t reserved_words() { return _reserved_words; }
1168 size_t reserved_bytes() { return reserved_words() * BytesPerWord; }
1169 size_t committed_words() { return _committed_words; }
1170 size_t committed_bytes() { return committed_words() * BytesPerWord; }
1171
1172 void inc_reserved_words(size_t v);
1173 void dec_reserved_words(size_t v);
1174 void inc_committed_words(size_t v);
1175 void dec_committed_words(size_t v);
1176 void inc_virtual_space_count();
1177 void dec_virtual_space_count();
1178
1179 bool contains(const void* ptr);
1180
1181 // Unlink empty VirtualSpaceNodes and free it.
1182 void purge(ChunkManager* chunk_manager);
1183
1184 void print_on(outputStream* st) const;
1185 void print_map(outputStream* st) const;
1186
1187 class VirtualSpaceListIterator : public StackObj {
1188 VirtualSpaceNode* _virtual_spaces;
1189 public:
1190 VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
1191 _virtual_spaces(virtual_spaces) {}
1192
1193 bool repeat() {
1194 return _virtual_spaces != NULL;
1195 }
1196
1197 VirtualSpaceNode* get_next() {
1198 VirtualSpaceNode* result = _virtual_spaces;
1199 if (_virtual_spaces != NULL) {
1200 _virtual_spaces = _virtual_spaces->next();
1201 }
1202 return result;
1203 }
1204 };
1205 };
1206
// Debugging support for Metaspaces.
class Metadebug : AllStatic {
  // Countdown state apparently used by test_metadata_failure() to inject
  // periodic simulated allocation failures — see implementation for details.
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};
1218
1219 int Metadebug::_allocation_fail_alot_count = 0;
1220
1221 // SpaceManager - used by Metaspace to handle allocations
// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class ClassLoaderMetaspace;
  friend class Metadebug;

 private:

  // Protects allocations.
  Mutex* const _lock;

  // Type of metadata allocated.
  const Metaspace::MetadataType _mdtype;

  // Type of metaspace.
  const Metaspace::MetaspaceType _space_type;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Maximum number of small chunks to allocate to a SpaceManager
  static uint const _small_chunk_limit;

  // Maximum number of specialize chunks to allocate for anonymous and delegating
  // metadata space to a SpaceManager
  static uint const _anon_and_delegating_metadata_specialize_chunk_limit;

  // Sum of all words handed out as blocks from this SpaceManager's chunks
  // (incremented via inc_used_metrics()).
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks (words and count; incremented via
  // inc_size_metrics()).
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist* _block_freelists;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const { return _block_freelists; }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  // The global virtual space list / chunk manager matching this manager's
  // metadata type.
  VirtualSpaceList* vs_list() const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use.
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Metaspace::MetaspaceType space_type,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  // Per-type chunk sizes (class vs. non-class metaspace), in words.
  static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
  static size_t small_chunk_size(bool is_class) { return is_class ? ClassSmallChunk : SmallChunk; }
  static size_t medium_chunk_size(bool is_class) { return is_class ? ClassMediumChunk : MediumChunk; }

  static size_t smallest_chunk_size(bool is_class) { return specialized_chunk_size(is_class); }

  // Accessors
  bool is_class() const { return _mdtype == Metaspace::ClassType; }

  size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
  size_t small_chunk_size() const { return small_chunk_size(is_class()); }
  size_t medium_chunk_size() const { return medium_chunk_size(is_class()); }

  size_t smallest_chunk_size() const { return smallest_chunk_size(is_class()); }

  size_t medium_chunk_bunch() const { return medium_chunk_size() * MediumChunkMultiple; }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  // A request larger than a medium chunk needs a humongous chunk.
  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks by the
  // given size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager. That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in-use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Adjust the initial chunk size to match one of the fixed chunk list sizes,
  // or return the unadjusted size if the requested size is humongous.
  static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
  size_t adjust_initial_chunk_size(size_t requested) const;

  // Get the initial chunk size for this metaspace type.
  size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;

  // Sums over all chunks in use, in words.
  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t chunk_word_size);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk.
  MetaWord* allocate(size_t word_size);

  // Helper for allocations.
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist.
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // returned chunk size (for expanding space for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // Debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  // This adjusts the size given to be greater than the minimum allocation size in
  // words for data in metaspace.  Essentially the minimum size is currently 3 words.
  size_t get_allocation_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    // Never allocate less than a whole Metablock, and round up to the
    // chunk's object alignment.
    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
};
1407
1408 uint const SpaceManager::_small_chunk_limit = 4;
1409 uint const SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit = 4;
1410
1411 void VirtualSpaceNode::inc_container_count() {
1412 assert_lock_strong(MetaspaceExpand_lock);
1413 _container_count++;
1414 }
1415
1416 void VirtualSpaceNode::dec_container_count() {
1417 assert_lock_strong(MetaspaceExpand_lock);
1418 _container_count--;
1419 }
1420
1421 #ifdef ASSERT
1422 void VirtualSpaceNode::verify_container_count() {
1423 assert(_container_count == container_count_slow(),
1424 "Inconsistency in container_count _container_count " UINTX_FORMAT
1425 " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
1686 }
1687
1688
1689 // Expand the virtual space (commit more of the reserved space)
1690 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1691 size_t min_bytes = min_words * BytesPerWord;
1692 size_t preferred_bytes = preferred_words * BytesPerWord;
1693
1694 size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1695
1696 if (uncommitted < min_bytes) {
1697 return false;
1698 }
1699
1700 size_t commit = MIN2(preferred_bytes, uncommitted);
1701 bool result = virtual_space()->expand_by(commit, false);
1702
1703 if (result) {
1704 log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
1705 (is_class() ? "class" : "non-class"), commit);
1706 } else {
1707 log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
1708 (is_class() ? "class" : "non-class"), commit);
1709 }
1710
1711 assert(result, "Failed to commit memory");
1712
1713 return result;
1714 }
1715
1716 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1717 assert_lock_strong(MetaspaceExpand_lock);
1718 Metachunk* result = take_from_committed(chunk_word_size);
1719 return result;
1720 }
1721
1722 bool VirtualSpaceNode::initialize() {
1723
1724 if (!_rs.is_reserved()) {
1725 return false;
1745 set_top((MetaWord*)virtual_space()->low());
1746 set_reserved(MemRegion((HeapWord*)_rs.base(),
1747 (HeapWord*)(_rs.base() + _rs.size())));
1748
1749 assert(reserved()->start() == (HeapWord*) _rs.base(),
1750 "Reserved start was not set properly " PTR_FORMAT
1751 " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1752 assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1753 "Reserved size was not set properly " SIZE_FORMAT
1754 " != " SIZE_FORMAT, reserved()->word_size(),
1755 _rs.size() / BytesPerWord);
1756 }
1757
1758 // Initialize Occupancy Map.
1759 const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
1760 _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
1761
1762 return result;
1763 }
1764
// Print a one-line summary of this node: address, capacity in K, percent
// used, and the [bottom, top, end, high_boundary) boundaries.
void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("  space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
           "[" PTR_FORMAT ", " PTR_FORMAT ", "
           PTR_FORMAT ", " PTR_FORMAT ")",
           p2i(vs), capacity / K,
           capacity == 0 ? 0 : used * 100 / capacity,  // avoid div-by-zero on empty node
           p2i(bottom()), p2i(top()), p2i(end()),
           p2i(vs->high_boundary()));
}
1777
#ifdef ASSERT
// Fill the node's whole capacity with a recognizable bit pattern
// (debug builds only).
void VirtualSpaceNode::mangle() {
  const size_t words_to_fill = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), words_to_fill, 0xf1f1f1f1);
}
#endif // ASSERT
1784
1785 // VirtualSpaceList methods
1786 // Space allocated from the VirtualSpace
1787
1788 VirtualSpaceList::~VirtualSpaceList() {
1789 VirtualSpaceListIterator iter(virtual_space_list());
1790 while (iter.repeat()) {
1791 VirtualSpaceNode* vsl = iter.get_next();
1792 delete vsl;
1793 }
1975 // Walk the list of VirtualSpaceNodes and delete
1976 // nodes with a 0 container_count. Remove Metachunks in
1977 // the node from their respective freelists.
1978 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1979 assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
1980 assert_lock_strong(MetaspaceExpand_lock);
1981 // Don't use a VirtualSpaceListIterator because this
1982 // list is being changed and a straightforward use of an iterator is not safe.
1983 VirtualSpaceNode* purged_vsl = NULL;
1984 VirtualSpaceNode* prev_vsl = virtual_space_list();
1985 VirtualSpaceNode* next_vsl = prev_vsl;
1986 while (next_vsl != NULL) {
1987 VirtualSpaceNode* vsl = next_vsl;
1988 DEBUG_ONLY(vsl->verify_container_count();)
1989 next_vsl = vsl->next();
1990 // Don't free the current virtual space since it will likely
1991 // be needed soon.
1992 if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1993 log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
1994 ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
1995 // Unlink it from the list
1996 if (prev_vsl == vsl) {
1997 // This is the case of the current node being the first node.
1998 assert(vsl == virtual_space_list(), "Expected to be the first node");
1999 set_virtual_space_list(vsl->next());
2000 } else {
2001 prev_vsl->set_next(vsl->next());
2002 }
2003
2004 vsl->purge(chunk_manager);
2005 dec_reserved_words(vsl->reserved_words());
2006 dec_committed_words(vsl->committed_words());
2007 dec_virtual_space_count();
2008 purged_vsl = vsl;
2009 delete vsl;
2010 } else {
2011 prev_vsl = vsl;
2012 }
2013 }
2014 #ifdef ASSERT
2122 if (vs_word_size == 0) {
2123 assert(false, "vs_word_size should always be at least _reserve_alignment large.");
2124 return false;
2125 }
2126
2127 // Reserve the space
2128 size_t vs_byte_size = vs_word_size * BytesPerWord;
2129 assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
2130
2131 // Allocate the meta virtual space and initialize it.
2132 VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
2133 if (!new_entry->initialize()) {
2134 delete new_entry;
2135 return false;
2136 } else {
2137 assert(new_entry->reserved_words() == vs_word_size,
2138 "Reserved memory size differs from requested memory size");
2139 // ensure lock-free iteration sees fully initialized node
2140 OrderAccess::storestore();
2141 link_vs(new_entry);
2142 return true;
2143 }
2144 }
2145
2146 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
2147 if (virtual_space_list() == NULL) {
2148 set_virtual_space_list(new_entry);
2149 } else {
2150 current_virtual_space()->set_next(new_entry);
2151 }
2152 set_current_virtual_space(new_entry);
2153 inc_reserved_words(new_entry->reserved_words());
2154 inc_committed_words(new_entry->committed_words());
2155 inc_virtual_space_count();
2156 #ifdef ASSERT
2157 new_entry->mangle();
2158 #endif
2159 LogTarget(Trace, gc, metaspace) lt;
2160 if (lt.is_enabled()) {
2161 LogStream ls(lt);
2270 // We must have enough space for the requested size and any
2271 // additional reqired padding chunks.
2272 const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
2273
2274 size_t min_word_size = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
2275 size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
2276 if (min_word_size >= preferred_word_size) {
2277 // Can happen when humongous chunks are allocated.
2278 preferred_word_size = min_word_size;
2279 }
2280
2281 bool expanded = expand_by(min_word_size, preferred_word_size);
2282 if (expanded) {
2283 next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2284 assert(next != NULL, "The allocation was expected to succeed after the expansion");
2285 }
2286
2287 return next;
2288 }
2289
2290 void VirtualSpaceList::print_on(outputStream* st) const {
2291 VirtualSpaceListIterator iter(virtual_space_list());
2292 while (iter.repeat()) {
2293 VirtualSpaceNode* node = iter.get_next();
2294 node->print_on(st);
2295 }
2296 }
2297
2298 void VirtualSpaceList::print_map(outputStream* st) const {
2299 VirtualSpaceNode* list = virtual_space_list();
2300 VirtualSpaceListIterator iter(list);
2301 unsigned i = 0;
2302 while (iter.repeat()) {
2303 st->print_cr("Node %u:", i);
2304 VirtualSpaceNode* node = iter.get_next();
2305 node->print_map(st, this->is_class());
2306 i ++;
2307 }
2308 }
2309
2310 // MetaspaceGC methods
2311
2312 // VM_CollectForMetadataAllocation is the vm operation used to GC.
2313 // Within the VM operation after the GC the attempt to allocate the metadata
2314 // should succeed. If the GC did not free enough space for the metaspace
3055 size_chunks_returned += cur->word_size();
3056 }
3057 return_single_chunk(index, cur);
3058 cur = next;
3059 }
3060 if (log.is_enabled()) { // tracing
3061 log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
3062 num_chunks_returned, chunk_size_name(index), size_chunks_returned);
3063 if (index != HumongousIndex) {
3064 log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
3065 } else {
3066 log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
3067 }
3068 }
3069 }
3070
// Print statistics of the humongous chunk dictionary only; the free
// lists for the fixed-size chunk types are not reported here.
void ChunkManager::print_on(outputStream* out) const {
  _humongous_dictionary.report_statistics(out);
}
3074
3075 void ChunkManager::locked_get_statistics(ChunkManagerStatistics* stat) const {
3076 assert_lock_strong(MetaspaceExpand_lock);
3077 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3078 stat->num_by_type[i] = num_free_chunks(i);
3079 stat->single_size_by_type[i] = size_by_index(i);
3080 stat->total_size_by_type[i] = size_free_chunks_in_bytes(i);
3081 }
3082 stat->num_humongous_chunks = num_free_chunks(HumongousIndex);
3083 stat->total_size_humongous_chunks = size_free_chunks_in_bytes(HumongousIndex);
3084 }
3085
// Like locked_get_statistics(), but acquires MetaspaceExpand_lock for
// the duration of the snapshot.
void ChunkManager::get_statistics(ChunkManagerStatistics* stat) const {
  MutexLockerEx cl(MetaspaceExpand_lock,
                   Mutex::_no_safepoint_check_flag);
  locked_get_statistics(stat);
}
3091
// Pretty-print a statistics snapshot previously captured via
// get_statistics(). With scale == 1 sizes are printed as raw byte
// counts; otherwise they are scaled to the given unit (K/M/G) and
// printed with two decimals.
void ChunkManager::print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale) {
  size_t total = 0;
  assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");

  const char* unit = scale_unit(scale);
  // One line per fixed-size chunk type.
  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
    out->print(" " SIZE_FORMAT " %s (" SIZE_FORMAT " bytes) chunks, total ",
                stat->num_by_type[i], chunk_size_name(i),
                stat->single_size_by_type[i]);
    if (scale == 1) {
      out->print_cr(SIZE_FORMAT " bytes", stat->total_size_by_type[i]);
    } else {
      out->print_cr("%.2f%s", (float)stat->total_size_by_type[i] / scale, unit);
    }

    total += stat->total_size_by_type[i];
  }


  // Humongous chunks are tracked in the dictionary and accounted
  // separately from the fixed-size free lists.
  total += stat->total_size_humongous_chunks;

  if (scale == 1) {
    out->print_cr(" " SIZE_FORMAT " humongous chunks, total " SIZE_FORMAT " bytes",
                  stat->num_humongous_chunks, stat->total_size_humongous_chunks);

    out->print_cr(" total size: " SIZE_FORMAT " bytes.", total);
  } else {
    out->print_cr(" " SIZE_FORMAT " humongous chunks, total %.2f%s",
                  stat->num_humongous_chunks,
                  (float)stat->total_size_humongous_chunks / scale, unit);

    out->print_cr(" total size: %.2f%s.", (float)total / scale, unit);
  }

}
3127
3128 void ChunkManager::print_all_chunkmanagers(outputStream* out, size_t scale) {
3129 assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3130
3131 // Note: keep lock protection only to retrieving statistics; keep printing
3132 // out of lock protection
3133 ChunkManagerStatistics stat;
3134 out->print_cr("Chunkmanager (non-class):");
3135 const ChunkManager* const non_class_cm = Metaspace::chunk_manager_metadata();
3136 if (non_class_cm != NULL) {
3137 non_class_cm->get_statistics(&stat);
3138 ChunkManager::print_statistics(&stat, out, scale);
3139 } else {
3140 out->print_cr("unavailable.");
3141 }
3142 out->print_cr("Chunkmanager (class):");
3143 const ChunkManager* const class_cm = Metaspace::chunk_manager_class();
3144 if (class_cm != NULL) {
3145 class_cm->get_statistics(&stat);
3146 ChunkManager::print_statistics(&stat, out, scale);
3147 } else {
3148 out->print_cr("unavailable.");
3149 }
3150 }
3151
3152 // SpaceManager methods
3153
3154 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
3155 size_t chunk_sizes[] = {
3156 specialized_chunk_size(is_class_space),
3157 small_chunk_size(is_class_space),
3158 medium_chunk_size(is_class_space)
3159 };
3160
3161 // Adjust up to one of the fixed chunk sizes ...
3162 for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
3163 if (requested <= chunk_sizes[i]) {
3164 return chunk_sizes[i];
3165 }
3166 }
3167
3168 // ... or return the size as a humongous chunk.
3184 default: requested = ClassSmallChunk; break;
3185 }
3186 } else {
3187 switch (type) {
3188 case Metaspace::BootMetaspaceType: requested = Metaspace::first_chunk_word_size(); break;
3189 case Metaspace::AnonymousMetaspaceType: requested = SpecializedChunk; break;
3190 case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
3191 default: requested = SmallChunk; break;
3192 }
3193 }
3194
3195 // Adjust to one of the fixed chunk sizes (unless humongous)
3196 const size_t adjusted = adjust_initial_chunk_size(requested);
3197
3198 assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
3199 SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
3200
3201 return adjusted;
3202 }
3203
3204 size_t SpaceManager::sum_free_in_chunks_in_use() const {
3205 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3206 size_t free = 0;
3207 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3208 Metachunk* chunk = chunks_in_use(i);
3209 while (chunk != NULL) {
3210 free += chunk->free_word_size();
3211 chunk = chunk->next();
3212 }
3213 }
3214 return free;
3215 }
3216
3217 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
3218 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3219 size_t result = 0;
3220 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3221 result += sum_waste_in_chunks_in_use(i);
3222 }
3223
3224 return result;
3225 }
3226
3227 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
3228 size_t result = 0;
3229 Metachunk* chunk = chunks_in_use(index);
3230 // Count the free space in all the chunk but not the
3231 // current chunk from which allocations are still being done.
3232 while (chunk != NULL) {
3233 if (chunk != current_chunk()) {
3234 result += chunk->free_word_size();
3235 }
3236 chunk = chunk->next();
3237 }
3238 return result;
3239 }
3240
// Total word size of all chunks owned by this SpaceManager.
size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
  // For CMS use "allocated_chunks_words()" which does not need the
  // Metaspace lock.  For the other collectors sum over the
  // lists.  Use both methods as a check that "allocated_chunks_words()"
  // is correct.  That is, sum_capacity_in_chunks() is too expensive
  // to use in the product and allocated_chunks_words() should be used
  // but allow for  checking that allocated_chunks_words() returns the same
  // value as sum_capacity_in_chunks_in_use() which is the definitive
  // answer.
  if (UseConcMarkSweepGC) {
    // Fast path: return the running counter without taking the lock.
    return allocated_chunks_words();
  } else {
    // Slow path: walk every in-use chunk list under the lock.
    MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
    size_t sum = 0;
    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
      Metachunk* chunk = chunks_in_use(i);
      while (chunk != NULL) {
        sum += chunk->word_size();
        chunk = chunk->next();
      }
    }
  return sum;
  }
}
3265
3266 size_t SpaceManager::sum_count_in_chunks_in_use() {
3267 size_t count = 0;
3268 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3269 count = count + sum_count_in_chunks_in_use(i);
3270 }
3271
3272 return count;
3273 }
3274
3275 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
3276 size_t count = 0;
3277 Metachunk* chunk = chunks_in_use(i);
3278 while (chunk != NULL) {
3279 count++;
3280 chunk = chunk->next();
3281 }
3282 return count;
3283 }
3284
3285
3286 size_t SpaceManager::sum_used_in_chunks_in_use() const {
3287 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3288 size_t used = 0;
3289 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3290 Metachunk* chunk = chunks_in_use(i);
3291 while (chunk != NULL) {
3292 used += chunk->used_word_size();
3293 chunk = chunk->next();
3294 }
3295 }
3296 return used;
3297 }
3298
// Print, for each chunk list, its head pointer and (when non-NULL) the
// head chunk's free space, followed by the chunk manager's free-chunk
// statistics. NOTE(review): the "locked_" prefix suggests the caller is
// expected to hold the relevant locks — confirm at call sites.
void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {

  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    Metachunk* chunk = chunks_in_use(i);
    st->print("SpaceManager: %s " PTR_FORMAT,
              chunk_size_name(i), p2i(chunk));
    if (chunk != NULL) {
      st->print_cr(" free " SIZE_FORMAT,
                   chunk->free_word_size());
    } else {
      st->cr();
    }
  }

  chunk_manager()->locked_print_free_chunks(st);
  chunk_manager()->locked_print_sum_free_chunks(st);
}
3316
3317 size_t SpaceManager::calc_chunk_size(size_t word_size) {
3318
3410 // If the new chunk is humongous, it was created to serve a single large allocation. In that
3411 // case it usually makes no sense to make it the current chunk, since the next allocation would
3412 // need to allocate a new chunk anyway, while we would now prematurely retire a perfectly
3413 // good chunk which could be used for more normal allocations.
3414 bool make_current = true;
3415 if (next->get_chunk_type() == HumongousIndex &&
3416 current_chunk() != NULL) {
3417 make_current = false;
3418 }
3419 add_chunk(next, make_current);
3420 mem = next->allocate(word_size);
3421 }
3422
3423 // Track metaspace memory usage statistic.
3424 track_metaspace_memory_usage();
3425
3426 return mem;
3427 }
3428
// Print the head of each in-use chunk list (pointer and word size),
// the per-category waste, and the block freelist total, if any.
void SpaceManager::print_on(outputStream* st) const {

  for (ChunkIndex i = ZeroIndex;
       i < NumberOfInUseLists ;
       i = next_chunk_index(i) ) {
    st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
                 p2i(chunks_in_use(i)),
                 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
  }
  st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT
               " Humongous " SIZE_FORMAT,
               sum_waste_in_chunks_in_use(SmallIndex),
               sum_waste_in_chunks_in_use(MediumIndex),
               sum_waste_in_chunks_in_use(HumongousIndex));
  // block free lists
  if (block_freelists() != NULL) {
    st->print_cr("total in block free lists " SIZE_FORMAT,
                 block_freelists()->total_size());
  }
}
3449
// Construct a SpaceManager for the given metadata type (class vs.
// non-class) and metaspace type. All counters start at zero; the block
// freelist is created lazily on the first deallocation (see
// SpaceManager::deallocate()).
SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
                           Metaspace::MetaspaceType space_type,
                           Mutex* lock) :
  _mdtype(mdtype),
  _space_type(space_type),
  _allocated_blocks_words(0),
  _allocated_chunks_words(0),
  _allocated_chunks_count(0),
  _block_freelists(NULL),
  _lock(lock)
{
  initialize();
}
3463
// Account for a newly added chunk of the given word size: bump this
// SpaceManager's chunk totals and the global capacity/used counters.
// Caller must hold MetaspaceExpand_lock.
void SpaceManager::inc_size_metrics(size_t words) {
  assert_lock_strong(MetaspaceExpand_lock);
  // Total of allocated Metachunks and allocated Metachunks count
  // for each SpaceManager
  _allocated_chunks_words = _allocated_chunks_words + words;
  _allocated_chunks_count++;
  // Global total of capacity in allocated Metachunks
  MetaspaceUtils::inc_capacity(mdtype(), words);
  // Global total of allocated Metablocks.
  // used_words_slow() includes the overhead in each
  // Metachunk so include it in the used when the
  // Metachunk is first added (so only added once per
  // Metachunk).
  MetaspaceUtils::inc_used(mdtype(), Metachunk::overhead());
}
3479
// Account for words handed out from a chunk: atomically adds to this
// SpaceManager's block total and to the global used counter.
void SpaceManager::inc_used_metrics(size_t words) {
  // Add to the per SpaceManager total
  Atomic::add(words, &_allocated_blocks_words);
  // Add to the global total
  MetaspaceUtils::inc_used(mdtype(), words);
}
3486
// Remove this SpaceManager's entire contribution from the global
// capacity and used totals (called from ~SpaceManager()).
void SpaceManager::dec_total_from_size_metrics() {
  MetaspaceUtils::dec_capacity(mdtype(), allocated_chunks_words());
  MetaspaceUtils::dec_used(mdtype(), allocated_blocks_words());
  // Also deduct the overhead per Metachunk
  MetaspaceUtils::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
}
3493
3494 void SpaceManager::initialize() {
3495 Metadebug::init_allocation_fail_alot_count();
3496 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3497 _chunks_in_use[i] = NULL;
3498 }
3499 _current_chunk = NULL;
3500 log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
3501 }
3502
// Tear down this SpaceManager: verify its accounting, remove its
// contribution from the global counters, and hand all its chunks back
// to the global free lists.
SpaceManager::~SpaceManager() {
  // sum_capacity_in_chunks_in_use() takes this->_lock, which must not be
  // acquired while holding MetaspaceExpand_lock, so do this check before
  // taking the expand lock below.
  assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
         "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
         " allocated_chunks_words() " SIZE_FORMAT,
         sum_capacity_in_chunks_in_use(), allocated_chunks_words());

  MutexLockerEx fcl(MetaspaceExpand_lock,
                    Mutex::_no_safepoint_check_flag);

  assert(sum_count_in_chunks_in_use() == allocated_chunks_count(),
         "sum_count_in_chunks_in_use() " SIZE_FORMAT
         " allocated_chunks_count() " SIZE_FORMAT,
         sum_count_in_chunks_in_use(), allocated_chunks_count());

  chunk_manager()->slow_locked_verify();

  // Undo this SpaceManager's contribution to the global counters.
  dec_total_from_size_metrics();

  Log(gc, metaspace, freelist) log;
  if (log.is_trace()) {
    log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
    ResourceMark rm;
    LogStream ls(log.trace());
    locked_print_chunks_in_use_on(&ls);
    if (block_freelists() != NULL) {
      block_freelists()->print_on(&ls);
    }
  }

  // Add all the chunks in use by this space manager
  // to the global list of free chunks.

  // Follow each list of chunks-in-use and add them to the
  // free lists.  Each list is NULL terminated.

  for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
    Metachunk* chunks = chunks_in_use(i);
    chunk_manager()->return_chunk_list(i, chunks);
    set_chunks_in_use(i, NULL);
  }

  chunk_manager()->slow_locked_verify();

  if (_block_freelists != NULL) {
    delete _block_freelists;
  }
}
3551
3552 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
3553 assert_lock_strong(_lock);
3554 // Allocations and deallocations are in raw_word_size
3555 size_t raw_word_size = get_allocation_word_size(word_size);
3556 // Lazily create a block_freelist
3557 if (block_freelists() == NULL) {
3558 _block_freelists = new BlockFreelist();
3559 }
3560 block_freelists()->return_block(p, raw_word_size);
3561 }
3562
// Adds a chunk to the list of chunks in use. If make_current is set,
// the previous current chunk is retired first (its leftover space is
// salvaged into the block freelist) and the new chunk takes its place.
void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {

  assert_lock_strong(_lock);
  assert(new_chunk != NULL, "Should not be NULL");
  assert(new_chunk->next() == NULL, "Should not be on a list");

  new_chunk->reset_empty();

  // Find the correct list and set the current
  // chunk for that list.
  ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());

  if (make_current) {
    // If we are to make the chunk current, retire the old current chunk and replace
    // it with the new chunk.
    retire_current_chunk();
    set_current_chunk(new_chunk);
  }

  // Add the new chunk at the head of its respective chunk list.
  new_chunk->set_next(chunks_in_use(index));
  set_chunks_in_use(index, new_chunk);

  // Add to the running sum of capacity
  inc_size_metrics(new_chunk->word_size());

  assert(new_chunk->is_empty(), "Not ready for reuse");
  Log(gc, metaspace, freelist) log;
  if (log.is_trace()) {
    log.trace("SpaceManager::add_chunk: " SIZE_FORMAT ") ", sum_count_in_chunks_in_use());
    ResourceMark rm;
    LogStream ls(log.trace());
    new_chunk->print_on(&ls);
    chunk_manager()->locked_print_free_chunks(&ls);
  }
}
3600
// Salvage the remaining free space of the current chunk before it is
// replaced: if large enough for the block dictionary, formally allocate
// the remainder and immediately hand it to deallocate(), which stores
// it in the block freelist for later reuse. Smaller remainders are
// simply left in the chunk.
void SpaceManager::retire_current_chunk() {
  if (current_chunk() != NULL) {
    size_t remaining_words = current_chunk()->free_word_size();
    if (remaining_words >= BlockFreelist::min_dictionary_size()) {
      MetaWord* ptr = current_chunk()->allocate(remaining_words);
      deallocate(ptr, remaining_words);
      // The salvaged words count as used; deallocate() will give them back.
      inc_used_metrics(remaining_words);
    }
  }
}
3611
3612 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
3613 // Get a chunk from the chunk freelist
3614 Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
3615
3616 if (next == NULL) {
3617 next = vs_list()->get_new_chunk(chunk_word_size,
3618 medium_chunk_bunch());
3619 }
3620
3621 Log(gc, metaspace, alloc) log;
3622 if (log.is_debug() && next != NULL &&
3623 SpaceManager::is_humongous(next->word_size())) {
3624 log.debug(" new humongous chunk word size " PTR_FORMAT, next->word_size());
3625 }
3626
3627 return next;
3628 }
3629
// Allocate word_size words of metadata. Prefers the block freelist once
// it has grown past allocation_from_dictionary_limit; otherwise (or on
// a freelist miss) carves the space out of the chunks via allocate_work().
// Returns NULL on failure.
MetaWord* SpaceManager::allocate(size_t word_size) {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  size_t raw_word_size = get_allocation_word_size(word_size);
  BlockFreelist* fl = block_freelists();
  MetaWord* p = NULL;
  // Allocation from the dictionary is expensive in the sense that
  // the dictionary has to be searched for a size.  Don't allocate
  // from the dictionary until it starts to get fat.  Is this
  // a reasonable policy?  Maybe a skinny dictionary is fast enough
  // for allocations.  Do some profiling.  JJJ
  if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
    p = fl->get_block(raw_word_size);
  }
  if (p == NULL) {
    p = allocate_work(raw_word_size);
  }

  return p;
}
3649
// Returns the address of space allocated for "word_size" words.
// This method does not know about blocks (Metablocks): it allocates
// directly from the current chunk, growing via grow_and_allocate()
// when the current chunk is exhausted or absent. Returns NULL on
// failure (including injected test failures in debug builds).
MetaWord* SpaceManager::allocate_work(size_t word_size) {
  assert_lock_strong(_lock);
#ifdef ASSERT
  // Optionally simulate an allocation failure for testing.
  if (Metadebug::test_metadata_failure()) {
    return NULL;
  }
#endif
  // Is there space in the current chunk?
  MetaWord* result = NULL;

  if (current_chunk() != NULL) {
    result = current_chunk()->allocate(word_size);
  }

  if (result == NULL) {
    result = grow_and_allocate(word_size);
  }

  if (result != NULL) {
    inc_used_metrics(word_size);
    assert(result != (MetaWord*) chunks_in_use(MediumIndex),
           "Head of the list is being allocated");
  }

  return result;
}
3678
3679 void SpaceManager::verify() {
3680 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3681 Metachunk* curr = chunks_in_use(i);
3682 while (curr != NULL) {
3683 DEBUG_ONLY(do_verify_chunk(curr);)
3684 assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
3685 curr = curr->next();
3686 }
3687 }
3688 }
3689
3690 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
3691 assert(is_humongous(chunk->word_size()) ||
3692 chunk->word_size() == medium_chunk_size() ||
3693 chunk->word_size() == small_chunk_size() ||
3694 chunk->word_size() == specialized_chunk_size(),
3695 "Chunk size is wrong");
3696 return;
3697 }
3698
#ifdef ASSERT
// Cross-check the atomically maintained _allocated_blocks_words counter
// against a full walk of the in-use chunks. Only reliable at a safepoint
// (or before the VM is fully initialized), since concurrent allocations
// would skew the comparison.
void SpaceManager::verify_allocated_blocks_words() {
  // Verification is only guaranteed at a safepoint.
  assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
         "Verification can fail if the applications is running");
  assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
         "allocation total is not consistent " SIZE_FORMAT
         " vs " SIZE_FORMAT,
         allocated_blocks_words(), sum_used_in_chunks_in_use());
}

#endif
3711
3712 void SpaceManager::dump(outputStream* const out) const {
3713 size_t curr_total = 0;
3714 size_t waste = 0;
3715 uint i = 0;
3716 size_t used = 0;
3717 size_t capacity = 0;
3718
3719 // Add up statistics for all chunks in this SpaceManager.
3720 for (ChunkIndex index = ZeroIndex;
3721 index < NumberOfInUseLists;
3722 index = next_chunk_index(index)) {
3723 for (Metachunk* curr = chunks_in_use(index);
3724 curr != NULL;
3725 curr = curr->next()) {
3726 out->print("%d) ", i++);
3727 curr->print_on(out);
3728 curr_total += curr->word_size();
3729 used += curr->used_word_size();
3730 capacity += curr->word_size();
3731 waste += curr->free_word_size() + curr->overhead();;
3732 }
3733 }
3734
3735 if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3736 if (block_freelists() != NULL) block_freelists()->print_on(out);
3737 }
3738
3739 size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
3740 // Free space isn't wasted.
3741 waste -= free;
3742
3743 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT
3744 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
3745 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
3746 }
3747
3748 // MetaspaceUtils
3749
3750
// Global running totals, indexed by Metaspace::MetadataType.
// _capacity_words is guarded by MetaspaceExpand_lock (see inc_capacity()/
// dec_capacity()); _used_words is volatile and updated with Atomic ops
// (see inc_used()/dec_used()).
size_t MetaspaceUtils::_capacity_words[] = {0, 0};
volatile size_t MetaspaceUtils::_used_words[] = {0, 0};
3753
3754 size_t MetaspaceUtils::free_bytes(Metaspace::MetadataType mdtype) {
3755 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3756 return list == NULL ? 0 : list->free_bytes();
3757 }
3758
// Total free bytes over both the class and non-class virtual space lists.
size_t MetaspaceUtils::free_bytes() {
  return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
}
3762
// Decrement the running capacity total for the given metadata type.
// Caller must hold MetaspaceExpand_lock; underflow is a bug.
void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
  assert_lock_strong(MetaspaceExpand_lock);
  assert(words <= capacity_words(mdtype),
         "About to decrement below 0: words " SIZE_FORMAT
         " is greater than _capacity_words[%u] " SIZE_FORMAT,
         words, mdtype, capacity_words(mdtype));
  _capacity_words[mdtype] -= words;
}
3771
// Increment the running capacity total for the given metadata type.
void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
  assert_lock_strong(MetaspaceExpand_lock);
  // Guarded by MetaspaceExpand_lock (asserted above), so a plain add
  // suffices here; _used_words, in contrast, is updated with Atomic ops.
  _capacity_words[mdtype] += words;
}
3777
// Decrement the used-words total for the given metadata type.
// Underflow is a bug.
void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
  assert(words <= used_words(mdtype),
         "About to decrement below 0: words " SIZE_FORMAT
         " is greater than _used_words[%u] " SIZE_FORMAT,
         words, mdtype, used_words(mdtype));
  // For CMS deallocation of the Metaspaces occurs during the
  // sweep which is a concurrent phase.  Protection by the MetaspaceExpand_lock
  // is not enough since allocation is on a per Metaspace basis
  // and protected by the Metaspace lock.
  Atomic::sub(words, &_used_words[mdtype]);
}
3789
// Increment the used-words total for the given metadata type.
void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
  // _used_words tracks allocations for
  // each piece of metadata.  Those allocations are
  // generally done concurrently by different application
  // threads so must be done atomically.
  Atomic::add(words, &_used_words[mdtype]);
}
3797
3798 size_t MetaspaceUtils::used_bytes_slow(Metaspace::MetadataType mdtype) {
3799 size_t used = 0;
3800 ClassLoaderDataGraphMetaspaceIterator iter;
3801 while (iter.repeat()) {
3802 ClassLoaderMetaspace* msp = iter.get_next();
3803 // Sum allocated_blocks_words for each metaspace
3804 if (msp != NULL) {
3805 used += msp->used_words_slow(mdtype);
3806 }
3807 }
3808 return used * BytesPerWord;
3809 }
3810
3811 size_t MetaspaceUtils::free_bytes_slow(Metaspace::MetadataType mdtype) {
3812 size_t free = 0;
3813 ClassLoaderDataGraphMetaspaceIterator iter;
3814 while (iter.repeat()) {
3815 ClassLoaderMetaspace* msp = iter.get_next();
3816 if (msp != NULL) {
3817 free += msp->free_words_slow(mdtype);
3818 }
3819 }
3820 return free * BytesPerWord;
3821 }
3822
3823 size_t MetaspaceUtils::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
3824 if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
3825 return 0;
3826 }
3827 // Don't count the space in the freelists. That space will be
3828 // added to the capacity calculation as needed.
3829 size_t capacity = 0;
3830 ClassLoaderDataGraphMetaspaceIterator iter;
3831 while (iter.repeat()) {
3832 ClassLoaderMetaspace* msp = iter.get_next();
3833 if (msp != NULL) {
3834 capacity += msp->capacity_words_slow(mdtype);
3835 }
3836 }
3837 return capacity * BytesPerWord;
3838 }
3839
// Sum class and non-class capacity via the slow CLD-graph walk and
// cross-check the result against the running counter. Not callable
// in PRODUCT builds; use capacity_bytes() there.
size_t MetaspaceUtils::capacity_bytes_slow() {
#ifdef PRODUCT
  // Use capacity_bytes() in PRODUCT instead of this function.
  guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
#endif
  size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
  size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
  assert(capacity_bytes() == class_capacity + non_class_capacity,
         "bad accounting: capacity_bytes() " SIZE_FORMAT
         " class_capacity + non_class_capacity " SIZE_FORMAT
         " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
         capacity_bytes(), class_capacity + non_class_capacity,
         class_capacity, non_class_capacity);

  return class_capacity + non_class_capacity;
}
3856
// Reserved bytes in the virtual space list of the given metadata type,
// or 0 if that list does not exist.
size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->reserved_bytes();
}
3861
// Committed bytes in the virtual space list of the given metadata type,
// or 0 if that list does not exist.
size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->committed_bytes();
}
3866
// Smallest chunk size in words; delegates to Metaspace::first_chunk_word_size().
size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
3868
3869 size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
3870 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
3871 if (chunk_manager == NULL) {
3872 return 0;
3873 }
3874 chunk_manager->slow_verify();
3916 "reserved " SIZE_FORMAT "K",
3917 used_bytes()/K,
3918 capacity_bytes()/K,
3919 committed_bytes()/K,
3920 reserved_bytes()/K);
3921
3922 if (Metaspace::using_class_space()) {
3923 Metaspace::MetadataType ct = Metaspace::ClassType;
3924 out->print_cr(" class space "
3925 "used " SIZE_FORMAT "K, "
3926 "capacity " SIZE_FORMAT "K, "
3927 "committed " SIZE_FORMAT "K, "
3928 "reserved " SIZE_FORMAT "K",
3929 used_bytes(ct)/K,
3930 capacity_bytes(ct)/K,
3931 committed_bytes(ct)/K,
3932 reserved_bytes(ct)/K);
3933 }
3934 }
3935
// Print information for class space and data space separately.
// This is almost the same as above. Uses the slow (CLD-graph walking)
// accessors, and cross-checks that used + unused + free-chunk capacity
// equals the allocated-chunk capacity — a check that only holds when
// the values were gathered at a safepoint.
void MetaspaceUtils::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
  size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
  size_t capacity_bytes = capacity_bytes_slow(mdtype);
  size_t used_bytes = used_bytes_slow(mdtype);
  size_t free_bytes = free_bytes_slow(mdtype);
  size_t used_and_free = used_bytes + free_bytes +
                           free_chunks_capacity_bytes;
  out->print_cr(" Chunk accounting: (used in chunks " SIZE_FORMAT
                "K + unused in chunks " SIZE_FORMAT "K + "
                " capacity in free chunks " SIZE_FORMAT "K) = " SIZE_FORMAT
                "K capacity in allocated chunks " SIZE_FORMAT "K",
                used_bytes / K,
                free_bytes / K,
                free_chunks_capacity_bytes / K,
                used_and_free / K,
                capacity_bytes / K);
  // Accounting can only be correct if we got the values during a safepoint
  assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
}
3957
// Print total fragmentation for class metaspaces: per chunk type, the
// number of chunks and their waste (words), summed over all class
// loaders via the CLD graph.
void MetaspaceUtils::print_class_waste(outputStream* out) {
  assert(Metaspace::using_class_space(), "class metaspace not used");
  size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
  size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    ClassLoaderMetaspace* msp = iter.get_next();
    if (msp != NULL) {
      cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
      cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
      cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
      cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
      cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
      cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
      cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
    }
  }
  out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
                SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
                SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
                "large count " SIZE_FORMAT,
                cls_specialized_count, cls_specialized_waste,
                cls_small_count, cls_small_waste,
                cls_medium_count, cls_medium_waste, cls_humongous_count);
}
3984
// Print total fragmentation for data and class metaspaces separately:
// per chunk type, the number of chunks and their waste (words), summed
// over all class loaders. Free space in current chunks is not counted
// as waste (see SpaceManager::sum_waste_in_chunks_in_use()).
void MetaspaceUtils::print_waste(outputStream* out) {
  size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
  size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;

  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    ClassLoaderMetaspace* msp = iter.get_next();
    if (msp != NULL) {
      specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
      specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
      small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
      small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
      medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
      medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
      humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
    }
  }
  out->print_cr("Total fragmentation waste (words) doesn't count free space");
  out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
                SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
                SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
                "large count " SIZE_FORMAT,
                specialized_count, specialized_waste, small_count,
                small_waste, medium_count, medium_waste, humongous_count);
  if (Metaspace::using_class_space()) {
    print_class_waste(out);
  }
}
4014
// Plain value class aggregating four basic metaspace statistics
// (capacity, used, free, waste). Units are whatever the caller supplies;
// the CLD printing code below passes byte counts.
class MetadataStats {
private:
  size_t _capacity;
  size_t _used;
  size_t _free;
  size_t _waste;

public:
  // Zero-initialized record, suitable for use as an accumulator.
  MetadataStats() : _capacity(0), _used(0), _free(0), _waste(0) { }
  MetadataStats(size_t capacity, size_t used, size_t free, size_t waste)
  : _capacity(capacity), _used(used), _free(free), _waste(waste) { }

  // Element-wise accumulation of another record into this one.
  void add(const MetadataStats& stats) {
    _capacity += stats.capacity();
    _used += stats.used();
    _free += stats.free();
    _waste += stats.waste();
  }

  size_t capacity() const { return _capacity; }
  size_t used() const { return _used; }
  size_t free() const { return _free; }
  size_t waste() const { return _waste; }

  // Defined out-of-line below; prints all four values scaled by "scale".
  void print_on(outputStream* out, size_t scale) const;
};
4041
4042
// Print the four statistics on one line, each scaled to the given unit
// and printed with two decimals.
void MetadataStats::print_on(outputStream* out, size_t scale) const {
  const char* unit = scale_unit(scale);
  out->print_cr("capacity=%10.2f%s used=%10.2f%s free=%10.2f%s waste=%10.2f%s",
                (float)capacity() / scale, unit,
                (float)used() / scale, unit,
                (float)free() / scale, unit,
                (float)waste() / scale, unit);
}
4051
// CLDClosure that prints per-class-loader metaspace statistics while
// visiting, and a summary over all visited loaders on destruction.
// Anonymous-class loaders are additionally totaled separately. Must run
// at a safepoint (asserted in do_cld()).
class PrintCLDMetaspaceInfoClosure : public CLDClosure {
private:
  outputStream* _out;
  size_t _scale;

  // Totals over all visited class loaders.
  size_t _total_count;
  MetadataStats _total_metadata;
  MetadataStats _total_class;

  // Totals over anonymous-class loaders only.
  size_t _total_anon_count;
  MetadataStats _total_anon_metadata;
  MetadataStats _total_anon_class;

public:
  PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale = K)
  : _out(out), _scale(scale), _total_count(0), _total_anon_count(0) { }

  // Print the summary when the closure goes out of scope.
  ~PrintCLDMetaspaceInfoClosure() {
    print_summary();
  }

  void do_cld(ClassLoaderData* cld) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    // Skip loaders that are going away or have no metaspace yet.
    if (cld->is_unloading()) return;
    ClassLoaderMetaspace* msp = cld->metaspace_or_null();
    if (msp == NULL) {
      return;
    }

    bool anonymous = false;
    if (cld->is_anonymous()) {
      _out->print_cr("ClassLoader: for anonymous class");
      anonymous = true;
    } else {
      ResourceMark rm;
      _out->print_cr("ClassLoader: %s", cld->loader_name());
    }

    print_metaspace(msp, anonymous);
    _out->cr();
  }

private:
  void print_metaspace(ClassLoaderMetaspace* msp, bool anonymous);
  void print_summary() const;
};
4099
4100 void PrintCLDMetaspaceInfoClosure::print_metaspace(ClassLoaderMetaspace* msp, bool anonymous){
4101 assert(msp != NULL, "Sanity");
4102 SpaceManager* vsm = msp->vsm();
4103 const char* unit = scale_unit(_scale);
4104
4105 size_t capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4106 size_t used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4107 size_t free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4108 size_t waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4109
4110 _total_count ++;
4111 MetadataStats metadata_stats(capacity, used, free, waste);
4112 _total_metadata.add(metadata_stats);
4113
4114 if (anonymous) {
4115 _total_anon_count ++;
4116 _total_anon_metadata.add(metadata_stats);
4117 }
4118
4119 _out->print(" Metadata ");
4120 metadata_stats.print_on(_out, _scale);
4121
4122 if (Metaspace::using_class_space()) {
4123 vsm = msp->class_vsm();
4124
4125 capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4126 used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4127 free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4128 waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4129
4130 MetadataStats class_stats(capacity, used, free, waste);
4131 _total_class.add(class_stats);
4132
4133 if (anonymous) {
4134 _total_anon_class.add(class_stats);
4135 }
4136
4137 _out->print(" Class data ");
4138 class_stats.print_on(_out, _scale);
4139 }
4140 }
4141
4142 void PrintCLDMetaspaceInfoClosure::print_summary() const {
4143 const char* unit = scale_unit(_scale);
4144 _out->cr();
4145 _out->print_cr("Summary:");
4146
4147 MetadataStats total;
4148 total.add(_total_metadata);
4149 total.add(_total_class);
4150
4151 _out->print(" Total class loaders=" SIZE_FORMAT_W(6) " ", _total_count);
4152 total.print_on(_out, _scale);
4153
4154 _out->print(" Metadata ");
4155 _total_metadata.print_on(_out, _scale);
4156
4157 if (Metaspace::using_class_space()) {
4158 _out->print(" Class data ");
4159 _total_class.print_on(_out, _scale);
4160 }
4161 _out->cr();
4162
4163 MetadataStats total_anon;
4164 total_anon.add(_total_anon_metadata);
4165 total_anon.add(_total_anon_class);
4166
4167 _out->print("For anonymous classes=" SIZE_FORMAT_W(6) " ", _total_anon_count);
4168 total_anon.print_on(_out, _scale);
4169
4170 _out->print(" Metadata ");
4171 _total_anon_metadata.print_on(_out, _scale);
4172
4173 if (Metaspace::using_class_space()) {
4174 _out->print(" Class data ");
4175 _total_anon_class.print_on(_out, _scale);
4176 }
4177 }
4178
// Print a metaspace overview for Native Memory Tracking: reserved and
// committed sizes per space, the contents of the global chunk managers,
// and per-classloader statistics.
void MetaspaceUtils::print_metadata_for_nmt(outputStream* out, size_t scale) {
  const char* unit = scale_unit(scale);
  out->print_cr("Metaspaces:");
  out->print_cr("  Metadata space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
                reserved_bytes(Metaspace::NonClassType) / scale, unit,
                committed_bytes(Metaspace::NonClassType) / scale, unit);
  if (Metaspace::using_class_space()) {
    out->print_cr("  Class space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
                  reserved_bytes(Metaspace::ClassType) / scale, unit,
                  committed_bytes(Metaspace::ClassType) / scale, unit);
  }

  out->cr();
  // Free-chunk statistics of both chunk managers.
  ChunkManager::print_all_chunkmanagers(out, scale);

  out->cr();
  out->print_cr("Per-classloader metadata:");
  out->cr();

  // The closure prints each CLD as the graph is walked and emits a
  // summary from its destructor when it goes out of scope.
  PrintCLDMetaspaceInfoClosure cl(out, scale);
  ClassLoaderDataGraph::cld_do(&cl);
}
4201
4202
// Dump global metaspace things from the end of ClassLoaderDataGraph
// (non-class space, class space, and waste statistics).
void MetaspaceUtils::dump(outputStream* out) {
  out->print_cr("All Metaspace:");
  out->print("data space: "); print_on(out, Metaspace::NonClassType);
  out->print("class space: "); print_on(out, Metaspace::ClassType);
  print_waste(out);
}
4210
4211 // Prints an ASCII representation of the given space.
4212 void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
4213 MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
4214 const bool for_class = mdtype == Metaspace::ClassType ? true : false;
4215 VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
4216 if (vsl != NULL) {
4217 if (for_class) {
4218 if (!Metaspace::using_class_space()) {
4219 out->print_cr("No Class Space.");
4220 return;
4221 }
4222 out->print_raw("---- Metaspace Map (Class Space) ----");
4223 } else {
4224 out->print_raw("---- Metaspace Map (Non-Class Space) ----");
4225 }
4226 // Print legend:
4227 out->cr();
4228 out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
4229 out->cr();
4230 VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
4231 vsl->print_map(out);
4232 out->cr();
4233 }
4234 }
4235
4236 void MetaspaceUtils::verify_free_chunks() {
4237 Metaspace::chunk_manager_metadata()->verify();
4238 if (Metaspace::using_class_space()) {
4239 Metaspace::chunk_manager_class()->verify();
4240 }
4241 }
4242
4243 void MetaspaceUtils::verify_capacity() {
4244 #ifdef ASSERT
4245 size_t running_sum_capacity_bytes = capacity_bytes();
4246 // For purposes of the running sum of capacity, verify against capacity
4247 size_t capacity_in_use_bytes = capacity_bytes_slow();
4248 assert(running_sum_capacity_bytes == capacity_in_use_bytes,
4249 "capacity_words() * BytesPerWord " SIZE_FORMAT
4250 " capacity_bytes_slow()" SIZE_FORMAT,
4251 running_sum_capacity_bytes, capacity_in_use_bytes);
4252 for (Metaspace::MetadataType i = Metaspace::ClassType;
4253 i < Metaspace:: MetadataTypeCount;
4254 i = (Metaspace::MetadataType)(i + 1)) {
4255 size_t capacity_in_use_bytes = capacity_bytes_slow(i);
4256 assert(capacity_bytes(i) == capacity_in_use_bytes,
4257 "capacity_bytes(%u) " SIZE_FORMAT
4258 " capacity_bytes_slow(%u)" SIZE_FORMAT,
4259 i, capacity_bytes(i), i, capacity_in_use_bytes);
4260 }
4261 #endif
4262 }
4263
4264 void MetaspaceUtils::verify_used() {
4265 #ifdef ASSERT
4266 size_t running_sum_used_bytes = used_bytes();
4267 // For purposes of the running sum of used, verify against used
4268 size_t used_in_use_bytes = used_bytes_slow();
4269 assert(used_bytes() == used_in_use_bytes,
4270 "used_bytes() " SIZE_FORMAT
4271 " used_bytes_slow()" SIZE_FORMAT,
4272 used_bytes(), used_in_use_bytes);
4273 for (Metaspace::MetadataType i = Metaspace::ClassType;
4274 i < Metaspace:: MetadataTypeCount;
4275 i = (Metaspace::MetadataType)(i + 1)) {
4276 size_t used_in_use_bytes = used_bytes_slow(i);
4277 assert(used_bytes(i) == used_in_use_bytes,
4278 "used_bytes(%u) " SIZE_FORMAT
4279 " used_bytes_slow(%u)" SIZE_FORMAT,
4280 i, used_bytes(i), i, used_in_use_bytes);
4281 }
4282 #endif
4283 }
4284
// Verify that the running counters match the slowly recomputed values,
// for both capacity and used sizes (both are debug-build no-ops in
// product builds).
void MetaspaceUtils::verify_metrics() {
  verify_capacity();
  verify_used();
}
4289
4290
// Metaspace methods

// Initial chunk sizes, in words. Zero until set up during initialization.
size_t Metaspace::_first_chunk_word_size = 0;
size_t Metaspace::_first_class_chunk_word_size = 0;

// Alignment granularities for committing and reserving metaspace memory.
size_t Metaspace::_commit_alignment = 0;
size_t Metaspace::_reserve_alignment = 0;

// Global virtual space lists: one for non-class metadata, one for the
// (optional) compressed class space.
VirtualSpaceList* Metaspace::_space_list = NULL;
VirtualSpaceList* Metaspace::_class_space_list = NULL;

// Global chunk managers holding the free chunk lists.
ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
ChunkManager* Metaspace::_chunk_manager_class = NULL;

#define VIRTUALSPACEMULTIPLIER 2
4306
4307 #ifdef _LP64
4308 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
4309
4692
4693 return result;
4694 }
4695
4696 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
4697 tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
4698
4699 // If result is still null, we are out of memory.
4700 Log(gc, metaspace, freelist) log;
4701 if (log.is_info()) {
4702 log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
4703 is_class_space_allocation(mdtype) ? "class" : "data", word_size);
4704 ResourceMark rm;
4705 if (log.is_debug()) {
4706 if (loader_data->metaspace_or_null() != NULL) {
4707 LogStream ls(log.debug());
4708 loader_data->print_value_on(&ls);
4709 }
4710 }
4711 LogStream ls(log.info());
4712 MetaspaceUtils::dump(&ls);
4713 MetaspaceUtils::print_metaspace_map(&ls, mdtype);
4714 ChunkManager::print_all_chunkmanagers(&ls);
4715 }
4716
4717 bool out_of_compressed_class_space = false;
4718 if (is_class_space_allocation(mdtype)) {
4719 ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
4720 out_of_compressed_class_space =
4721 MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
4722 (metaspace->class_chunk_size(word_size) * BytesPerWord) >
4723 CompressedClassSpaceSize;
4724 }
4725
4726 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
4727 const char* space_string = out_of_compressed_class_space ?
4728 "Compressed class space" : "Metaspace";
4729
4730 report_java_out_of_memory(space_string);
4731
4732 if (JvmtiExport::should_post_resource_exhausted()) {
4733 JvmtiExport::post_resource_exhausted(
4734 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
4769 }
4770 }
4771
4772 bool Metaspace::contains(const void* ptr) {
4773 if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
4774 return true;
4775 }
4776 return contains_non_shared(ptr);
4777 }
4778
4779 bool Metaspace::contains_non_shared(const void* ptr) {
4780 if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
4781 return true;
4782 }
4783
4784 return get_space_list(NonClassType)->contains(ptr);
4785 }
4786
4787 // ClassLoaderMetaspace
4788
// Construct a ClassLoaderMetaspace of the given type; all setup work
// (space managers, first chunks) happens in initialize().
ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type) {
  initialize(lock, type);
}
4792
// Release the space manager(s) owned by this metaspace.
ClassLoaderMetaspace::~ClassLoaderMetaspace() {
  delete _vsm;
  if (Metaspace::using_class_space()) {
    // _class_vsm is only created when a compressed class space exists.
    delete _class_vsm;
  }
}
// Hand the space manager for mdtype its initial chunk. If no chunk
// could be obtained, the manager simply starts without one.
void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
  Metachunk* chunk = get_initialization_chunk(type, mdtype);
  if (chunk != NULL) {
    // Add to this manager's list of chunks in use and make it the current_chunk().
    get_space_manager(mdtype)->add_chunk(chunk, true);
  }
}
4806
// Obtain the initial chunk for the space manager of mdtype: first try
// the global chunk freelist, then fall back to carving a new chunk out
// of the virtual space list. May return NULL.
Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
  size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);

  // Get a chunk from the chunk freelist
  Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);

  if (chunk == NULL) {
    // Freelist was empty (or had no fitting chunk): allocate from the
    // virtual space list instead.
    chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
                                                  get_space_manager(mdtype)->medium_chunk_bunch());
  }

  return chunk;
}
4820
// Set up this metaspace: create the space manager(s), then hand each
// its first chunk while holding the expand lock.
void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
  Metaspace::verify_global_initialization();

  // Allocate SpaceManager for metadata objects.
  _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);

  if (Metaspace::using_class_space()) {
    // Allocate SpaceManager for classes.
    _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
  }

  // Chunk allocation below must happen under the expand lock.
  MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);

  // Allocate chunk for metadata objects
  initialize_first_chunk(type, Metaspace::NonClassType);

  // Allocate chunk for class metadata objects
  if (Metaspace::using_class_space()) {
    initialize_first_chunk(type, Metaspace::ClassType);
  }
}
4842
4843 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4844 Metaspace::assert_not_frozen();
4845 // Don't use class_vsm() unless UseCompressedClassPointers is true.
4846 if (Metaspace::is_class_space_allocation(mdtype)) {
4847 return class_vsm()->allocate(word_size);
4848 } else {
4849 return vsm()->allocate(word_size);
4850 }
4851 }
4852
// Attempt to satisfy an allocation by raising the metaspace GC
// threshold (capacity_until_GC) and then allocating. Returns NULL if
// the allocation still fails after this thread managed to raise the
// threshold.
MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
  Metaspace::assert_not_frozen();
  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
  assert(delta_bytes > 0, "Must be");

  size_t before = 0;
  size_t after = 0;
  MetaWord* res;
  bool incremented;

  // Each thread increments the HWM at most once. Even if the thread fails to increment
  // the HWM, an allocation is still attempted. This is because another thread must then
  // have incremented the HWM and therefore the allocation might still succeed.
  do {
    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
    res = allocate(word_size, mdtype);
  } while (!incremented && res == NULL);

  if (incremented) {
    // Only the thread that actually raised the threshold reports it.
    Metaspace::tracer()->report_gc_threshold(before, after,
                                  MetaspaceGCThresholdUpdater::ExpandAndAllocate);
    log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
  }

  return res;
}
4879
4880 size_t ClassLoaderMetaspace::used_words_slow(Metaspace::MetadataType mdtype) const {
4881 if (mdtype == Metaspace::ClassType) {
4882 return Metaspace::using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
4883 } else {
4884 return vsm()->sum_used_in_chunks_in_use(); // includes overhead!
4885 }
4886 }
4887
4888 size_t ClassLoaderMetaspace::free_words_slow(Metaspace::MetadataType mdtype) const {
4889 Metaspace::assert_not_frozen();
4890 if (mdtype == Metaspace::ClassType) {
4891 return Metaspace::using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
4892 } else {
4893 return vsm()->sum_free_in_chunks_in_use();
4894 }
4895 }
4896
4897 // Space capacity in the Metaspace. It includes
4898 // space in the list of chunks from which allocations
4899 // have been made. Don't include space in the global freelist and
4900 // in the space available in the dictionary which
4901 // is already counted in some chunk.
4902 size_t ClassLoaderMetaspace::capacity_words_slow(Metaspace::MetadataType mdtype) const {
4903 if (mdtype == Metaspace::ClassType) {
4904 return Metaspace::using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
4905 } else {
4906 return vsm()->sum_capacity_in_chunks_in_use();
4907 }
4908 }
4909
// Byte-unit convenience wrapper around used_words_slow().
size_t ClassLoaderMetaspace::used_bytes_slow(Metaspace::MetadataType mdtype) const {
  return used_words_slow(mdtype) * BytesPerWord;
}
4913
// Byte-unit convenience wrapper around capacity_words_slow().
size_t ClassLoaderMetaspace::capacity_bytes_slow(Metaspace::MetadataType mdtype) const {
  return capacity_words_slow(mdtype) * BytesPerWord;
}
4917
4918 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
4919 return vsm()->allocated_blocks_bytes() +
4920 (Metaspace::using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
4921 }
4922
4923 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
4924 return vsm()->allocated_chunks_bytes() +
4925 (Metaspace::using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
4926 }
4927
// Return a block of metadata memory to this metaspace for later reuse.
void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  Metaspace::assert_not_frozen();
  assert(!SafepointSynchronize::is_at_safepoint()
         || Thread::current()->is_VM_thread(), "should be the VM thread");

  // Note: the non-class space manager's lock guards deallocation for
  // both space managers.
  MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);

  if (is_class && Metaspace::using_class_space()) {
    class_vsm()->deallocate(ptr, word_size);
  } else {
    vsm()->deallocate(ptr, word_size);
  }
}
4941
// Returns the chunk size the class-space manager would pick for an
// allocation of word_size words. Only valid with a class space.
size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
  assert(Metaspace::using_class_space(), "Has to use class space");
  return class_vsm()->calc_chunk_size(word_size);
}
4946
4947 void ClassLoaderMetaspace::print_on(outputStream* out) const {
4948 // Print both class virtual space counts and metaspace.
4949 if (Verbose) {
4950 vsm()->print_on(out);
4951 if (Metaspace::using_class_space()) {
4952 class_vsm()->print_on(out);
4953 }
4954 }
4955 }
4956
4957 void ClassLoaderMetaspace::verify() {
4958 vsm()->verify();
4959 if (Metaspace::using_class_space()) {
4960 class_vsm()->verify();
4961 }
4962 }
4963
// Dump both space managers of this metaspace, each preceded by its
// identity.
void ClassLoaderMetaspace::dump(outputStream* const out) const {
  out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
  vsm()->dump(out);
  if (Metaspace::using_class_space()) {
    out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
    class_vsm()->dump(out);
  }
}
4972
4973
4974
4975 #ifdef ASSERT
// Helper: verify a single chunk (see forward declaration at the top of
// the file). Debug builds only.
static void do_verify_chunk(Metachunk* chunk) {
  guarantee(chunk != NULL, "Sanity");
  // Verify chunk itself; then verify that it is consistent with the
  // occupancy map of its containing node.
  chunk->verify();
  VirtualSpaceNode* const vsn = chunk->container();
  OccupancyMap* const ocmap = vsn->occupancy_map();
  ocmap->verify_for_chunk(chunk);
}
4985 #endif
4986
// Mark a chunk as in-use or free, both in the chunk itself and in the
// occupancy map of its containing VirtualSpaceNode.
static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
  chunk->set_is_tagged_free(!inuse);
  OccupancyMap* const ocmap = chunk->container()->occupancy_map();
  ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
}
4992
4993 /////////////// Unit tests ///////////////
5299 test_adjust_initial_chunk_size(false);
5300 test_adjust_initial_chunk_size(true);
5301 }
5302 };
5303
// External test entry point; forwards to SpaceManagerTest. Available in
// debug builds only (enclosed in #ifdef ASSERT).
void SpaceManager_test_adjust_initial_chunk_size() {
  SpaceManagerTest::test_adjust_initial_chunk_size();
}
5307
5308 #endif // ASSERT
5309
// Plain struct used to hand chunk counts out to external test code.
struct chunkmanager_statistics_t {
  int num_specialized_chunks;
  int num_small_chunks;
  int num_medium_chunks;
  int num_humongous_chunks;
};
5316
// Test support: fill *out with the current free-chunk counts of the
// chunk manager responsible for mdType.
extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
  ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
  ChunkManager::ChunkManagerStatistics stat;
  chunk_manager->get_statistics(&stat);
  out->num_specialized_chunks = (int)stat.num_by_type[SpecializedIndex];
  out->num_small_chunks = (int)stat.num_by_type[SmallIndex];
  out->num_medium_chunks = (int)stat.num_by_type[MediumIndex];
  out->num_humongous_chunks = (int)stat.num_humongous_chunks;
}
5326
// Plain struct used to hand chunk word sizes out to external test code.
struct chunk_geometry_t {
  size_t specialized_chunk_word_size;
  size_t small_chunk_word_size;
  size_t medium_chunk_word_size;
};
5332
5333 extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
5334 if (mdType == Metaspace::NonClassType) {
5335 out->specialized_chunk_word_size = SpecializedChunk;
5336 out->small_chunk_word_size = SmallChunk;
5337 out->medium_chunk_word_size = MediumChunk;
5338 } else {
5339 out->specialized_chunk_word_size = ClassSpecializedChunk;
5340 out->small_chunk_word_size = ClassSmallChunk;
5341 out->medium_chunk_word_size = ClassMediumChunk;
5342 }
5343 }
|
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24 #include "precompiled.hpp"
25 #include "aot/aotLoader.hpp"
26 #include "gc/shared/collectedHeap.hpp"
27 #include "gc/shared/collectorPolicy.hpp"
28 #include "logging/log.hpp"
29 #include "logging/logStream.hpp"
30 #include "memory/allocation.hpp"
31 #include "memory/binaryTreeDictionary.inline.hpp"
32 #include "memory/filemap.hpp"
33 #include "memory/freeList.inline.hpp"
34 #include "memory/metachunk.hpp"
35 #include "memory/metaspace.hpp"
36 #include "memory/metaspace/metaspaceCommon.hpp"
37 #include "memory/metaspace/metaspaceStatistics.hpp"
38 #include "memory/metaspaceGCThresholdUpdater.hpp"
39 #include "memory/metaspaceShared.hpp"
40 #include "memory/metaspaceTracer.hpp"
41 #include "memory/resourceArea.hpp"
42 #include "memory/universe.hpp"
43 #include "runtime/atomic.hpp"
44 #include "runtime/globals.hpp"
45 #include "runtime/init.hpp"
46 #include "runtime/java.hpp"
47 #include "runtime/mutex.hpp"
48 #include "runtime/mutexLocker.hpp"
49 #include "runtime/orderAccess.inline.hpp"
50 #include "services/memTracker.hpp"
51 #include "services/memoryService.hpp"
52 #include "utilities/align.hpp"
53 #include "utilities/copy.hpp"
54 #include "utilities/debug.hpp"
55 #include "utilities/macros.hpp"
56
57 using namespace metaspace::internals;
58
59 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
60 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
61
62 // Helper function that does a bunch of checks for a chunk.
63 DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
64
65 // Given a Metachunk, update its in-use information (both in the
66 // chunk and the occupancy map).
67 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
68
69 size_t const allocation_from_dictionary_limit = 4 * K;
70
71 MetaWord* last_allocated = 0;
72
73 size_t Metaspace::_compressed_class_space_size;
74 const MetaspaceTracer* Metaspace::_tracer = NULL;
75
76 DEBUG_ONLY(bool Metaspace::_frozen = false;)
77
// Internal statistics, maintained in debug builds only.
#ifdef ASSERT
static struct {
  // Number of allocations.
  uintx num_allocs;
  // Number of times a ClassLoaderMetaspace was born...
  uintx num_metaspace_births;
  // ... and died.
  uintx num_metaspace_deaths;
  // Number of times VirtualSpaceListNodes were created...
  uintx num_vsnodes_created;
  // ... and purged.
  uintx num_vsnodes_purged;
  // Number of times we expanded the committed section of the space.
  uintx num_committed_space_expanded;
  // Number of deallocations
  uintx num_deallocs;
  // Number of deallocations triggered from outside ("real" deallocations).
  uintx num_external_deallocs;
  // Number of times an allocation was satisfied from deallocated blocks.
  uintx num_allocs_from_deallocated_blocks;
} g_internal_statistics;
#endif
101
// Chunk sizes, in words. Class-space chunks use sizes less than or
// equal to their non-class counterparts of the same category.
enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};
110
111 // Returns size of this chunk type.
112 size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
113 assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
114 size_t size = 0;
115 if (is_class) {
116 switch(chunktype) {
117 case SpecializedIndex: size = ClassSpecializedChunk; break;
118 case SmallIndex: size = ClassSmallChunk; break;
119 case MediumIndex: size = ClassMediumChunk; break;
120 default:
121 ShouldNotReachHere();
145 assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
146 return HumongousIndex;
147 }
148 } else {
149 if (size == SpecializedChunk) {
150 return SpecializedIndex;
151 } else if (size == SmallChunk) {
152 return SmallIndex;
153 } else if (size == MediumChunk) {
154 return MediumIndex;
155 } else if (size > MediumChunk) {
156 // A valid humongous chunk size is a multiple of the smallest chunk size.
157 assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
158 return HumongousIndex;
159 }
160 }
161 ShouldNotReachHere();
162 return (ChunkIndex)-1;
163 }
164
165 ChunkIndex next_chunk_index(ChunkIndex i) {
166 assert(i < NumberOfInUseLists, "Out of bound");
167 return (ChunkIndex) (i+1);
168 }
169
170 ChunkIndex prev_chunk_index(ChunkIndex i) {
171 assert(i > ZeroIndex, "Out of bound");
172 return (ChunkIndex) (i-1);
173 }
174
175 static const char* space_type_name(Metaspace::MetaspaceType t) {
176 const char* s = NULL;
177 switch (t) {
178 case Metaspace::StandardMetaspaceType: s = "Standard"; break;
179 case Metaspace::BootMetaspaceType: s = "Boot"; break;
180 case Metaspace::AnonymousMetaspaceType: s = "Anonymous"; break;
181 case Metaspace::ReflectionMetaspaceType: s = "Reflection"; break;
182 }
183 assert(s != NULL, "Invalid space type");
184 return s;
185 }
186
// Committed-capacity threshold at which a metaspace-induced GC is
// triggered. NOTE(review): exact semantics assumed from the name —
// confirm against the MetaspaceGC declaration.
volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;
190
191
192 typedef class FreeList<Metachunk> ChunkList;
193
194 // Manages the global free lists of chunks.
195 class ChunkManager : public CHeapObj<mtInternal> {
196 friend class TestVirtualSpaceNodeTest;
197
198 // Free list of chunks of different sizes.
199 // SpecializedChunk
200 // SmallChunk
201 // MediumChunk
202 ChunkList _free_chunks[NumberOfFreeLists];
203
204 // Whether or not this is the class chunkmanager.
205 const bool _is_class;
206
207 // Return non-humongous chunk list by its index.
208 ChunkList* free_chunks(ChunkIndex index);
209
210 // Returns non-humongous chunk list for the given chunk word size.
211 ChunkList* find_free_chunks_list(size_t word_size);
252
253 // Helper for chunk merging:
254 // Given an address range with 1-n chunks which are all supposed to be
255 // free and hence currently managed by this ChunkManager, remove them
256 // from this ChunkManager and mark them as invalid.
257 // - This does not correct the occupancy map.
258 // - This does not adjust the counters in ChunkManager.
259 // - Does not adjust container count counter in containing VirtualSpaceNode.
260 // Returns number of chunks removed.
261 int remove_chunks_in_area(MetaWord* p, size_t word_size);
262
263 // Helper for chunk splitting: given a target chunk size and a larger free chunk,
264 // split up the larger chunk into n smaller chunks, at least one of which should be
265 // the target chunk of target chunk size. The smaller chunks, including the target
266 // chunk, are returned to the freelist. The pointer to the target chunk is returned.
267 // Note that this chunk is supposed to be removed from the freelist right away.
268 Metachunk* split_chunk(size_t target_chunk_word_size, Metachunk* chunk);
269
270 public:
271
272 ChunkManager(bool is_class)
273 : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
274 _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
275 _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
276 _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
277 }
278
279 // Add or delete (return) a chunk to the global freelist.
280 Metachunk* chunk_freelist_allocate(size_t word_size);
281
282 // Map a size to a list index assuming that there are lists
283 // for special, small, medium, and humongous chunks.
284 ChunkIndex list_index(size_t size);
285
286 // Map a given index to the chunk size.
287 size_t size_by_index(ChunkIndex index) const;
288
289 bool is_class() const { return _is_class; }
290
291 // Convenience accessors.
357 // Debug support
358 void verify();
359 void slow_verify() {
360 if (VerifyMetaspace) {
361 verify();
362 }
363 }
364 void locked_verify();
365 void slow_locked_verify() {
366 if (VerifyMetaspace) {
367 locked_verify();
368 }
369 }
370 void verify_free_chunks_total();
371
372 void locked_print_free_chunks(outputStream* st);
373 void locked_print_sum_free_chunks(outputStream* st);
374
375 void print_on(outputStream* st) const;
376
377 // Fill in current statistic values to the given statistics object.
378 void collect_statistics(ChunkManagerStatistics* out) const;
379
380 };
381
382 class SmallBlocks : public CHeapObj<mtClass> {
383 const static uint _small_block_max_size = sizeof(TreeChunk<Metablock, FreeList<Metablock> >)/HeapWordSize;
384 const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
385
386 private:
387 FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
388
389 FreeList<Metablock>& list_at(size_t word_size) {
390 assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
391 return _small_lists[word_size - _small_block_min_size];
392 }
393
394 public:
395 SmallBlocks() {
396 for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
397 uint k = i - _small_block_min_size;
398 _small_lists[k].set_size(i);
399 }
400 }
401
402 // Returns the total size, in words, of all blocks, across all block sizes.
403 size_t total_size() const {
404 size_t result = 0;
405 for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
406 uint k = i - _small_block_min_size;
407 result = result + _small_lists[k].count() * _small_lists[k].size();
408 }
409 return result;
410 }
411
412 // Returns the total number of all blocks across all block sizes.
413 uintx total_num_blocks() const {
414 uintx result = 0;
415 for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
416 uint k = i - _small_block_min_size;
417 result = result + _small_lists[k].count();
418 }
419 return result;
420 }
421
422 static uint small_block_max_size() { return _small_block_max_size; }
423 static uint small_block_min_size() { return _small_block_min_size; }
424
425 MetaWord* get_block(size_t word_size) {
426 if (list_at(word_size).count() > 0) {
427 MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
428 return new_block;
429 } else {
430 return NULL;
431 }
432 }
433 void return_block(Metablock* free_chunk, size_t word_size) {
434 list_at(word_size).return_chunk_at_head(free_chunk, false);
435 assert(list_at(word_size).count() > 0, "Should have a chunk");
436 }
437
438 void print_on(outputStream* st) const {
439 st->print_cr("SmallBlocks:");
440 for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
441 uint k = i - _small_block_min_size;
454 // is at least 1/4th the size of the available block.
455 const static int WasteMultiplier = 4;
456
457 // Accessors
458 BlockTreeDictionary* dictionary() const { return _dictionary; }
459 SmallBlocks* small_blocks() {
460 if (_small_blocks == NULL) {
461 _small_blocks = new SmallBlocks();
462 }
463 return _small_blocks;
464 }
465
466 public:
467 BlockFreelist();
468 ~BlockFreelist();
469
470 // Get and return a block to the free list
471 MetaWord* get_block(size_t word_size);
472 void return_block(MetaWord* p, size_t word_size);
473
474 // Returns the total size, in words, of all blocks kept in this structure.
475 size_t total_size() const {
476 size_t result = dictionary()->total_size();
477 if (_small_blocks != NULL) {
478 result = result + _small_blocks->total_size();
479 }
480 return result;
481 }
482
483 // Returns the number of all blocks kept in this structure.
484 uintx num_blocks() const {
485 uintx result = dictionary()->total_free_blocks();
486 if (_small_blocks != NULL) {
487 result = result + _small_blocks->total_num_blocks();
488 }
489 return result;
490 }
491
492 static size_t min_dictionary_size() { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
493 void print_on(outputStream* st) const;
494 };
495
// Helper for Occupancy Bitmap. A type trait yielding an unsigned
// constant with every bit set, for the supported map word types.
template <typename T> struct all_ones { static const T value; };
template <> struct all_ones <uint64_t> { static const uint64_t value = ~(uint64_t)0; };
template <> struct all_ones <uint32_t> { static const uint32_t value = ~(uint32_t)0; };
500
501 // The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
502 // keeps information about
503 // - where a chunk starts
504 // - whether a chunk is in-use or free
505 // A bit in this bitmap represents one range of memory in the smallest
506 // chunk size (SpecializedChunk or ClassSpecializedChunk).
507 class OccupancyMap : public CHeapObj<mtInternal> {
508
509 // The address range this map covers.
510 const MetaWord* const _reference_address;
511 const size_t _word_size;
877
878 // Allocate a chunk from the virtual space and return it.
879 Metachunk* get_chunk_vs(size_t chunk_word_size);
880
881 // Expands/shrinks the committed space in a virtual space. Delegates
882 // to Virtualspace
883 bool expand_by(size_t min_words, size_t preferred_words);
884
885 // In preparation for deleting this node, remove all the chunks
886 // in the node from any freelist.
887 void purge(ChunkManager* chunk_manager);
888
889 // If an allocation doesn't fit in the current node a new node is created.
890 // Allocate chunks out of the remaining committed space in this node
891 // to avoid wasting that memory.
892 // This always adds up because all the chunk sizes are multiples of
893 // the smallest chunk size.
894 void retire(ChunkManager* chunk_manager);
895
896
897 void print_on(outputStream* st) const { print_on(st, K); }
898 void print_on(outputStream* st, size_t scale) const;
899 void print_map(outputStream* st, bool is_class) const;
900
901 // Debug support
902 DEBUG_ONLY(void mangle();)
903 // Verify counters, all chunks in this list node and the occupancy map.
904 DEBUG_ONLY(void verify();)
905 // Verify that all free chunks in this node are ideally merged
906 // (there not should be multiple small chunks where a large chunk could exist.)
907 DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
908
909 };
910
// Asserts that 'value' is aligned to 'alignment'; on failure prints the
// offending value (hex) and the required alignment.
#define assert_is_aligned(value, alignment)                  \
  assert(is_aligned((value), (alignment)),                   \
         SIZE_FORMAT_HEX " is not aligned to "               \
         SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
915
// Asserts that a counter matches its expected value, printing both on mismatch.
// Note: no trailing semicolon in the expansion - call sites supply their own,
// which keeps the macro safe in unbraced if/else branches and avoids the
// empty-statement ";;" the old trailing semicolon produced.
#define assert_counter(expected_value, real_value, msg) \
  assert( (expected_value) == (real_value),             \
         "Counter mismatch (%s): expected " SIZE_FORMAT \
         ", but got: " SIZE_FORMAT ".", msg, expected_value, \
         real_value)
921
922 // Decide if large pages should be committed when the memory is reserved.
923 static bool should_commit_large_pages_when_reserving(size_t bytes) {
924 if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
925 size_t words = bytes / BytesPerWord;
926 bool is_class = false; // We never reserve large pages for the class space.
927 if (MetaspaceGC::can_expand(words, is_class) &&
928 MetaspaceGC::allowed_expansion() >= words) {
929 return true;
930 }
931 }
932
933 return false;
934 }
935
936 // byte_size is the size of the associated virtualspace.
937 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
938 _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
939 assert_is_aligned(bytes, Metaspace::reserve_alignment());
940 bool large_pages = should_commit_large_pages_when_reserving(bytes);
941 _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
1208
1209 bool initialization_succeeded() { return _virtual_space_list != NULL; }
1210
1211 size_t reserved_words() { return _reserved_words; }
1212 size_t reserved_bytes() { return reserved_words() * BytesPerWord; }
1213 size_t committed_words() { return _committed_words; }
1214 size_t committed_bytes() { return committed_words() * BytesPerWord; }
1215
1216 void inc_reserved_words(size_t v);
1217 void dec_reserved_words(size_t v);
1218 void inc_committed_words(size_t v);
1219 void dec_committed_words(size_t v);
1220 void inc_virtual_space_count();
1221 void dec_virtual_space_count();
1222
1223 bool contains(const void* ptr);
1224
1225 // Unlink empty VirtualSpaceNodes and free it.
1226 void purge(ChunkManager* chunk_manager);
1227
1228 void print_on(outputStream* st) const { print_on(st, K); }
1229 void print_on(outputStream* st, size_t scale) const;
1230 void print_map(outputStream* st) const;
1231
1232 class VirtualSpaceListIterator : public StackObj {
1233 VirtualSpaceNode* _virtual_spaces;
1234 public:
1235 VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
1236 _virtual_spaces(virtual_spaces) {}
1237
1238 bool repeat() {
1239 return _virtual_spaces != NULL;
1240 }
1241
1242 VirtualSpaceNode* get_next() {
1243 VirtualSpaceNode* result = _virtual_spaces;
1244 if (_virtual_spaces != NULL) {
1245 _virtual_spaces = _virtual_spaces->next();
1246 }
1247 return result;
1248 }
1249 };
1250 };
1251
// Debugging support for Metaspaces: in debug builds, test_metadata_failure()
// lets allocation paths deliberately fail (see its use in
// SpaceManager::allocate_work).
class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  // Countdown state for the injected-failure mechanism.
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};
1263
1264 int Metadebug::_allocation_fail_alot_count = 0;
1265
1266
1267 // SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class ClassLoaderMetaspace;
  friend class Metadebug;

 private:

  // protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  const Metaspace::MetadataType _mdtype;

  // Type of metaspace
  const Metaspace::MetaspaceType _space_type;

  // List of chunks in use by this SpaceManager. Allocations
  // are done from the current chunk. The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  // The chunk currently allocated from; it is also linked into _chunks_in_use
  // (see add_chunk).
  Metachunk* _current_chunk;

  // Maximum number of small chunks to allocate to a SpaceManager
  static uint const _small_chunk_limit;

  // Maximum number of specialize chunks to allocate for anonymous and delegating
  // metadata space to a SpaceManager
  static uint const _anon_and_delegating_metadata_specialize_chunk_limit;

  // Some running counters, but let's keep their number small to not add too much to
  // the per-classloader footprint.
  // Note: capacity = used + free + waste + overhead. We do not keep running counters for
  // free and waste. Their sum can be deduced from the three other values.
  size_t _overhead_words;
  size_t _capacity_words;
  size_t _used_words;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist* _block_freelists;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const { return _block_freelists; }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  // The global virtual space list / chunk manager for this metadata type.
  VirtualSpaceList* vs_list() const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

  // Adds to the given statistic object. Expects to be locked with lock().
  void add_to_statistics_locked(SpaceManagerStatistics* out) const;

  // Verify internal counters against the current state. Expects to be locked with lock().
  DEBUG_ONLY(void verify_metrics_locked() const;)

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Metaspace::MetaspaceType space_type,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  // Chunk sizes for the given space kind (class vs non-class metaspace).
  static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
  static size_t small_chunk_size(bool is_class) { return is_class ? ClassSmallChunk : SmallChunk; }
  static size_t medium_chunk_size(bool is_class) { return is_class ? ClassMediumChunk : MediumChunk; }

  static size_t smallest_chunk_size(bool is_class) { return specialized_chunk_size(is_class); }

  // Accessors
  bool is_class() const { return _mdtype == Metaspace::ClassType; }

  size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
  size_t small_chunk_size() const { return small_chunk_size(is_class()); }
  size_t medium_chunk_size() const { return medium_chunk_size(is_class()); }

  size_t smallest_chunk_size() const { return smallest_chunk_size(is_class()); }

  size_t medium_chunk_bunch() const { return medium_chunk_size() * MediumChunkMultiple; }

  // A request larger than a medium chunk needs a humongous chunk.
  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  size_t capacity_words() const { return _capacity_words; }
  size_t used_words() const { return _used_words; }
  size_t overhead_words() const { return _overhead_words; }

  // Adjust local, global counters after a new chunk has been added.
  void account_for_new_chunk(const Metachunk* new_chunk);

  // Adjust local, global counters after space has been allocated from the current chunk.
  void account_for_allocation(size_t words);

  // Adjust global counters just before the SpaceManager dies, after all its chunks
  // have been returned to the freelist.
  void account_for_spacemanager_death();

  // Adjust the initial chunk size to match one of the fixed chunk list sizes,
  // or return the unadjusted size if the requested size is humongous.
  static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
  size_t adjust_initial_chunk_size(size_t requested) const;

  // Get the initial chunks size for this metaspace type.
  size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;

  // Todo: remove this once we have counters by chunk type.
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t chunk_word_size);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // returned chunk size (for expanding space for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // debugging support.

  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);

  // This adjusts the size given to be greater than the minimum allocation size in
  // words for data in metaspace. Essentially the minimum size is currently 3 words.
  size_t get_allocation_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    // Never hand out less than a full Metablock, and keep object alignment.
    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }

  // Adds to the given statistic object.
  void add_to_statistics(SpaceManagerStatistics* out) const;

  // Verify internal counters against the current state.
  DEBUG_ONLY(void verify_metrics() const;)

};
1453
1454 uint const SpaceManager::_small_chunk_limit = 4;
1455 uint const SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit = 4;
1456
1457 void VirtualSpaceNode::inc_container_count() {
1458 assert_lock_strong(MetaspaceExpand_lock);
1459 _container_count++;
1460 }
1461
1462 void VirtualSpaceNode::dec_container_count() {
1463 assert_lock_strong(MetaspaceExpand_lock);
1464 _container_count--;
1465 }
1466
1467 #ifdef ASSERT
1468 void VirtualSpaceNode::verify_container_count() {
1469 assert(_container_count == container_count_slow(),
1470 "Inconsistency in container_count _container_count " UINTX_FORMAT
1471 " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
1732 }
1733
1734
1735 // Expand the virtual space (commit more of the reserved space)
1736 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1737 size_t min_bytes = min_words * BytesPerWord;
1738 size_t preferred_bytes = preferred_words * BytesPerWord;
1739
1740 size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1741
1742 if (uncommitted < min_bytes) {
1743 return false;
1744 }
1745
1746 size_t commit = MIN2(preferred_bytes, uncommitted);
1747 bool result = virtual_space()->expand_by(commit, false);
1748
1749 if (result) {
1750 log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
1751 (is_class() ? "class" : "non-class"), commit);
1752 DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_committed_space_expanded));
1753 } else {
1754 log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
1755 (is_class() ? "class" : "non-class"), commit);
1756 }
1757
1758 assert(result, "Failed to commit memory");
1759
1760 return result;
1761 }
1762
1763 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1764 assert_lock_strong(MetaspaceExpand_lock);
1765 Metachunk* result = take_from_committed(chunk_word_size);
1766 return result;
1767 }
1768
1769 bool VirtualSpaceNode::initialize() {
1770
1771 if (!_rs.is_reserved()) {
1772 return false;
1792 set_top((MetaWord*)virtual_space()->low());
1793 set_reserved(MemRegion((HeapWord*)_rs.base(),
1794 (HeapWord*)(_rs.base() + _rs.size())));
1795
1796 assert(reserved()->start() == (HeapWord*) _rs.base(),
1797 "Reserved start was not set properly " PTR_FORMAT
1798 " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1799 assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1800 "Reserved size was not set properly " SIZE_FORMAT
1801 " != " SIZE_FORMAT, reserved()->word_size(),
1802 _rs.size() / BytesPerWord);
1803 }
1804
1805 // Initialize Occupancy Map.
1806 const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
1807 _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
1808
1809 return result;
1810 }
1811
// Prints a one-node summary: reserved/committed/used sizes (scaled by 'scale')
// followed by the [bottom, top, end, high_boundary) address range.
void VirtualSpaceNode::print_on(outputStream* st, size_t scale) const {
  size_t used_words = used_words_in_vs();
  size_t commit_words = committed_words();
  size_t res_words = reserved_words();
  VirtualSpace* vs = virtual_space();

  st->print("node @" PTR_FORMAT ": ", p2i(this));
  st->print("reserved=");
  print_scaled_words(st, res_words, scale);
  st->print(", committed=");
  // Committed and used are shown as a percentage of the reserved size.
  print_scaled_words_and_percentage(st, commit_words, res_words, scale);
  st->print(", used=");
  print_scaled_words_and_percentage(st, used_words, res_words, scale);
  st->cr();
  st->print(" [" PTR_FORMAT ", " PTR_FORMAT ", "
            PTR_FORMAT ", " PTR_FORMAT ")",
            p2i(bottom()), p2i(top()), p2i(end()),
            p2i(vs->high_boundary()));
}
1831
1832 #ifdef ASSERT
// Fills this node's entire capacity with the 0xf1f1f1f1 pattern so that
// reads of uninitialized metaspace memory are easy to recognize (debug only).
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
1837 #endif // ASSERT
1838
1839 // VirtualSpaceList methods
1840 // Space allocated from the VirtualSpace
1841
1842 VirtualSpaceList::~VirtualSpaceList() {
1843 VirtualSpaceListIterator iter(virtual_space_list());
1844 while (iter.repeat()) {
1845 VirtualSpaceNode* vsl = iter.get_next();
1846 delete vsl;
1847 }
2029 // Walk the list of VirtualSpaceNodes and delete
2030 // nodes with a 0 container_count. Remove Metachunks in
2031 // the node from their respective freelists.
2032 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
2033 assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
2034 assert_lock_strong(MetaspaceExpand_lock);
2035 // Don't use a VirtualSpaceListIterator because this
2036 // list is being changed and a straightforward use of an iterator is not safe.
2037 VirtualSpaceNode* purged_vsl = NULL;
2038 VirtualSpaceNode* prev_vsl = virtual_space_list();
2039 VirtualSpaceNode* next_vsl = prev_vsl;
2040 while (next_vsl != NULL) {
2041 VirtualSpaceNode* vsl = next_vsl;
2042 DEBUG_ONLY(vsl->verify_container_count();)
2043 next_vsl = vsl->next();
2044 // Don't free the current virtual space since it will likely
2045 // be needed soon.
2046 if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
2047 log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
2048 ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
2049 DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_purged));
2050 // Unlink it from the list
2051 if (prev_vsl == vsl) {
2052 // This is the case of the current node being the first node.
2053 assert(vsl == virtual_space_list(), "Expected to be the first node");
2054 set_virtual_space_list(vsl->next());
2055 } else {
2056 prev_vsl->set_next(vsl->next());
2057 }
2058
2059 vsl->purge(chunk_manager);
2060 dec_reserved_words(vsl->reserved_words());
2061 dec_committed_words(vsl->committed_words());
2062 dec_virtual_space_count();
2063 purged_vsl = vsl;
2064 delete vsl;
2065 } else {
2066 prev_vsl = vsl;
2067 }
2068 }
2069 #ifdef ASSERT
2177 if (vs_word_size == 0) {
2178 assert(false, "vs_word_size should always be at least _reserve_alignment large.");
2179 return false;
2180 }
2181
2182 // Reserve the space
2183 size_t vs_byte_size = vs_word_size * BytesPerWord;
2184 assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
2185
2186 // Allocate the meta virtual space and initialize it.
2187 VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
2188 if (!new_entry->initialize()) {
2189 delete new_entry;
2190 return false;
2191 } else {
2192 assert(new_entry->reserved_words() == vs_word_size,
2193 "Reserved memory size differs from requested memory size");
2194 // ensure lock-free iteration sees fully initialized node
2195 OrderAccess::storestore();
2196 link_vs(new_entry);
2197 DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_created));
2198 return true;
2199 }
2200 }
2201
2202 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
2203 if (virtual_space_list() == NULL) {
2204 set_virtual_space_list(new_entry);
2205 } else {
2206 current_virtual_space()->set_next(new_entry);
2207 }
2208 set_current_virtual_space(new_entry);
2209 inc_reserved_words(new_entry->reserved_words());
2210 inc_committed_words(new_entry->committed_words());
2211 inc_virtual_space_count();
2212 #ifdef ASSERT
2213 new_entry->mangle();
2214 #endif
2215 LogTarget(Trace, gc, metaspace) lt;
2216 if (lt.is_enabled()) {
2217 LogStream ls(lt);
2326 // We must have enough space for the requested size and any
2327 // additional reqired padding chunks.
2328 const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
2329
2330 size_t min_word_size = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
2331 size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
2332 if (min_word_size >= preferred_word_size) {
2333 // Can happen when humongous chunks are allocated.
2334 preferred_word_size = min_word_size;
2335 }
2336
2337 bool expanded = expand_by(min_word_size, preferred_word_size);
2338 if (expanded) {
2339 next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2340 assert(next != NULL, "The allocation was expected to succeed after the expansion");
2341 }
2342
2343 return next;
2344 }
2345
// Prints the node count and current node, then each node in list order.
void VirtualSpaceList::print_on(outputStream* st, size_t scale) const {
  st->print_cr(SIZE_FORMAT " nodes, current node: " PTR_FORMAT,
               _virtual_space_count, p2i(_current_virtual_space));
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    st->cr();
    VirtualSpaceNode* node = iter.get_next();
    node->print_on(st, scale);
  }
}
2356
2357 void VirtualSpaceList::print_map(outputStream* st) const {
2358 VirtualSpaceNode* list = virtual_space_list();
2359 VirtualSpaceListIterator iter(list);
2360 unsigned i = 0;
2361 while (iter.repeat()) {
2362 st->print_cr("Node %u:", i);
2363 VirtualSpaceNode* node = iter.get_next();
2364 node->print_map(st, this->is_class());
2365 i ++;
2366 }
2367 }
2368
2369 // MetaspaceGC methods
2370
2371 // VM_CollectForMetadataAllocation is the vm operation used to GC.
2372 // Within the VM operation after the GC the attempt to allocate the metadata
2373 // should succeed. If the GC did not free enough space for the metaspace
3114 size_chunks_returned += cur->word_size();
3115 }
3116 return_single_chunk(index, cur);
3117 cur = next;
3118 }
3119 if (log.is_enabled()) { // tracing
3120 log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
3121 num_chunks_returned, chunk_size_name(index), size_chunks_returned);
3122 if (index != HumongousIndex) {
3123 log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
3124 } else {
3125 log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
3126 }
3127 }
3128 }
3129
// Reports statistics of the humongous chunk dictionary only; the fixed-size
// free lists are not included here.
void ChunkManager::print_on(outputStream* out) const {
  _humongous_dictionary.report_statistics(out);
}
3133
// Adds this manager's per-index free chunk counts and sizes into 'out',
// under the expand lock.
void ChunkManager::collect_statistics(ChunkManagerStatistics* out) const {
  MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    // Byte sizes are converted to word counts for the statistics object.
    out->chunk_stats(i).add(num_free_chunks(i), size_free_chunks_in_bytes(i) / sizeof(MetaWord));
  }
}
3140
3141 // SpaceManager methods
3142
3143 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
3144 size_t chunk_sizes[] = {
3145 specialized_chunk_size(is_class_space),
3146 small_chunk_size(is_class_space),
3147 medium_chunk_size(is_class_space)
3148 };
3149
3150 // Adjust up to one of the fixed chunk sizes ...
3151 for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
3152 if (requested <= chunk_sizes[i]) {
3153 return chunk_sizes[i];
3154 }
3155 }
3156
3157 // ... or return the size as a humongous chunk.
3173 default: requested = ClassSmallChunk; break;
3174 }
3175 } else {
3176 switch (type) {
3177 case Metaspace::BootMetaspaceType: requested = Metaspace::first_chunk_word_size(); break;
3178 case Metaspace::AnonymousMetaspaceType: requested = SpecializedChunk; break;
3179 case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
3180 default: requested = SmallChunk; break;
3181 }
3182 }
3183
3184 // Adjust to one of the fixed chunk sizes (unless humongous)
3185 const size_t adjusted = adjust_initial_chunk_size(requested);
3186
3187 assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
3188 SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
3189
3190 return adjusted;
3191 }
3192
3193 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
3194 size_t count = 0;
3195 Metachunk* chunk = chunks_in_use(i);
3196 while (chunk != NULL) {
3197 count++;
3198 chunk = chunk->next();
3199 }
3200 return count;
3201 }
3202
// Prints, for each chunk list, the head chunk and its free space, then the
// global freelist summaries. The "locked_" name indicates callers are
// expected to hold the relevant locks - confirm against call sites.
void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {

  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    Metachunk* chunk = chunks_in_use(i);
    st->print("SpaceManager: %s " PTR_FORMAT,
              chunk_size_name(i), p2i(chunk));
    if (chunk != NULL) {
      st->print_cr(" free " SIZE_FORMAT,
                   chunk->free_word_size());
    } else {
      st->cr();
    }
  }

  chunk_manager()->locked_print_free_chunks(st);
  chunk_manager()->locked_print_sum_free_chunks(st);
}
3220
3221 size_t SpaceManager::calc_chunk_size(size_t word_size) {
3222
3314 // If the new chunk is humongous, it was created to serve a single large allocation. In that
3315 // case it usually makes no sense to make it the current chunk, since the next allocation would
3316 // need to allocate a new chunk anyway, while we would now prematurely retire a perfectly
3317 // good chunk which could be used for more normal allocations.
3318 bool make_current = true;
3319 if (next->get_chunk_type() == HumongousIndex &&
3320 current_chunk() != NULL) {
3321 make_current = false;
3322 }
3323 add_chunk(next, make_current);
3324 mem = next->allocate(word_size);
3325 }
3326
3327 // Track metaspace memory usage statistic.
3328 track_metaspace_memory_usage();
3329
3330 return mem;
3331 }
3332
// Collects this manager's statistics (taking _lock) and prints them.
void SpaceManager::print_on(outputStream* st) const {
  SpaceManagerStatistics stat;
  add_to_statistics(&stat); // will lock _lock.
  stat.print_on(st, 1*K, false);
}
3338
3339 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
3340 Metaspace::MetaspaceType space_type,
3341 Mutex* lock) :
3342 _mdtype(mdtype),
3343 _space_type(space_type),
3344 _capacity_words(0),
3345 _used_words(0),
3346 _overhead_words(0),
3347 _block_freelists(NULL),
3348 _lock(lock)
3349 {
3350 initialize();
3351 }
3352
// Adjusts local and global counters for a newly added chunk: its full size
// counts toward capacity, its header toward overhead.
void SpaceManager::account_for_new_chunk(const Metachunk* new_chunk) {

  assert_lock_strong(MetaspaceExpand_lock);

  _capacity_words += new_chunk->word_size();
  _overhead_words += Metachunk::overhead();

  // Adjust global counters:
  MetaspaceUtils::inc_capacity(mdtype(), new_chunk->word_size());
  MetaspaceUtils::inc_overhead(mdtype(), Metachunk::overhead());
}
3364
// Adjusts local and global used counters after 'words' were allocated from
// the current chunk.
void SpaceManager::account_for_allocation(size_t words) {
  // Note: we should be locked with the ClassloaderData-specific metaspace lock.
  // We may or may not be locked with the global metaspace expansion lock.
  assert_lock_strong(lock());

  // Add to the per SpaceManager totals. This can be done non-atomically.
  _used_words += words;

  // Adjust global counters. This will be done atomically.
  MetaspaceUtils::inc_used(mdtype(), words);
}
3376
// Rolls back this manager's contribution to the global counters; called from
// the destructor after all chunks have been returned to the freelist.
void SpaceManager::account_for_spacemanager_death() {

  assert_lock_strong(MetaspaceExpand_lock);

  MetaspaceUtils::dec_capacity(mdtype(), _capacity_words);
  MetaspaceUtils::dec_overhead(mdtype(), _overhead_words);
  MetaspaceUtils::dec_used(mdtype(), _used_words);
}
3385
3386 void SpaceManager::initialize() {
3387 Metadebug::init_allocation_fail_alot_count();
3388 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3389 _chunks_in_use[i] = NULL;
3390 }
3391 _current_chunk = NULL;
3392 log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
3393 }
3394
// Returns every chunk held by this manager to the global freelists and rolls
// back the global counters. Note the lock ordering: verify_metrics() takes
// this->_lock, so it must run before MetaspaceExpand_lock is acquired.
SpaceManager::~SpaceManager() {

  // This takes this->_lock, which can't be done while holding MetaspaceExpand_lock.
  DEBUG_ONLY(verify_metrics());

  MutexLockerEx fcl(MetaspaceExpand_lock,
                    Mutex::_no_safepoint_check_flag);

  chunk_manager()->slow_locked_verify();

  account_for_spacemanager_death();

  Log(gc, metaspace, freelist) log;
  if (log.is_trace()) {
    log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
    ResourceMark rm;
    LogStream ls(log.trace());
    locked_print_chunks_in_use_on(&ls);
    if (block_freelists() != NULL) {
      block_freelists()->print_on(&ls);
    }
  }

  // Add all the chunks in use by this space manager
  // to the global list of free chunks.

  // Follow each list of chunks-in-use and add them to the
  // free lists. Each list is NULL terminated.

  for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
    Metachunk* chunks = chunks_in_use(i);
    chunk_manager()->return_chunk_list(i, chunks);
    set_chunks_in_use(i, NULL);
  }

  chunk_manager()->slow_locked_verify();

  // The block freelist was created lazily (see deallocate()), so it may be NULL.
  if (_block_freelists != NULL) {
    delete _block_freelists;
  }
}
3436
3437 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
3438 assert_lock_strong(lock());
3439 // Allocations and deallocations are in raw_word_size
3440 size_t raw_word_size = get_allocation_word_size(word_size);
3441 // Lazily create a block_freelist
3442 if (block_freelists() == NULL) {
3443 _block_freelists = new BlockFreelist();
3444 }
3445 block_freelists()->return_block(p, raw_word_size);
3446 DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_deallocs));
3447 }
3448
// Adds a chunk to the list of chunks in use. If make_current is set, the old
// current chunk is retired first and the new chunk becomes current; either
// way the chunk is linked at the head of its size-class list.
void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {

  assert_lock_strong(_lock);
  assert(new_chunk != NULL, "Should not be NULL");
  assert(new_chunk->next() == NULL, "Should not be on a list");

  new_chunk->reset_empty();

  // Find the correct list and set the current
  // chunk for that list.
  ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());

  if (make_current) {
    // If we are to make the chunk current, retire the old current chunk and replace
    // it with the new chunk.
    retire_current_chunk();
    set_current_chunk(new_chunk);
  }

  // Add the new chunk at the head of its respective chunk list.
  new_chunk->set_next(chunks_in_use(index));
  set_chunks_in_use(index, new_chunk);

  // Adjust counters.
  account_for_new_chunk(new_chunk);

  assert(new_chunk->is_empty(), "Not ready for reuse");
  Log(gc, metaspace, freelist) log;
  if (log.is_trace()) {
    log.trace("SpaceManager::added chunk: ");
    ResourceMark rm;
    LogStream ls(log.trace());
    new_chunk->print_on(&ls);
    chunk_manager()->locked_print_free_chunks(&ls);
  }
}
3486
3487 void SpaceManager::retire_current_chunk() {
3488 if (current_chunk() != NULL) {
3489 size_t remaining_words = current_chunk()->free_word_size();
3490 if (remaining_words >= BlockFreelist::min_dictionary_size()) {
3491 MetaWord* ptr = current_chunk()->allocate(remaining_words);
3492 deallocate(ptr, remaining_words);
3493 account_for_allocation(remaining_words);
3494 }
3495 }
3496 }
3497
// Obtains a chunk of chunk_word_size words: first from the global chunk
// freelist, and failing that from the virtual space list.
Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
  // Get a chunk from the chunk freelist
  Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);

  if (next == NULL) {
    // Freelist had nothing suitable: carve a new chunk out of virtual space.
    next = vs_list()->get_new_chunk(chunk_word_size,
                                    medium_chunk_bunch());
  }

  Log(gc, metaspace, alloc) log;
  if (log.is_debug() && next != NULL &&
      SpaceManager::is_humongous(next->word_size())) {
    // NOTE(review): PTR_FORMAT is used here to print a word count - confirm intended.
    log.debug(" new humongous chunk word size " PTR_FORMAT, next->word_size());
  }

  return next;
}
3515
3516 MetaWord* SpaceManager::allocate(size_t word_size) {
3517 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3518 size_t raw_word_size = get_allocation_word_size(word_size);
3519 BlockFreelist* fl = block_freelists();
3520 MetaWord* p = NULL;
3521
3522 DEBUG_ONLY(if (VerifyMetaspace) verify_metrics_locked());
3523
3524 // Allocation from the dictionary is expensive in the sense that
3525 // the dictionary has to be searched for a size. Don't allocate
3526 // from the dictionary until it starts to get fat. Is this
3527 // a reasonable policy? Maybe an skinny dictionary is fast enough
3528 // for allocations. Do some profiling. JJJ
3529 if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
3530 p = fl->get_block(raw_word_size);
3531 if (p != NULL) {
3532 DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs_from_deallocated_blocks));
3533 }
3534 }
3535 if (p == NULL) {
3536 p = allocate_work(raw_word_size);
3537 }
3538
3539 return p;
3540 }
3541
// Returns the address of space allocated for "word_size" words, taken
// from the current chunk or, if that is exhausted, from a newly grown
// chunk. This method does not know about blocks (Metablocks); freed-block
// reuse is handled by the caller (SpaceManager::allocate).
// Expects the SpaceManager lock to be held.
MetaWord* SpaceManager::allocate_work(size_t word_size) {
  assert_lock_strong(lock());
#ifdef ASSERT
  // Optionally inject a metadata allocation failure for testing.
  if (Metadebug::test_metadata_failure()) {
    return NULL;
  }
#endif
  // Is there space in the current chunk?
  MetaWord* result = NULL;

  if (current_chunk() != NULL) {
    result = current_chunk()->allocate(word_size);
  }

  // Current chunk exhausted (or absent): acquire a new chunk and retry.
  if (result == NULL) {
    result = grow_and_allocate(word_size);
  }

  if (result != NULL) {
    // Only account for the words once we know the allocation succeeded.
    account_for_allocation(word_size);
    assert(result != (MetaWord*) chunks_in_use(MediumIndex),
           "Head of the list is being allocated");
  }

  return result;
}
3570
3571 void SpaceManager::verify() {
3572 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3573 Metachunk* curr = chunks_in_use(i);
3574 while (curr != NULL) {
3575 DEBUG_ONLY(do_verify_chunk(curr);)
3576 assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
3577 curr = curr->next();
3578 }
3579 }
3580 }
3581
3582 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
3583 assert(is_humongous(chunk->word_size()) ||
3584 chunk->word_size() == medium_chunk_size() ||
3585 chunk->word_size() == small_chunk_size() ||
3586 chunk->word_size() == specialized_chunk_size(),
3587 "Chunk size is wrong");
3588 return;
3589 }
3590
// Accumulates this space manager's usage numbers into *out, broken down
// by chunk index. Expects the SpaceManager lock to be held.
void SpaceManager::add_to_statistics_locked(SpaceManagerStatistics* out) const {
  assert_lock_strong(lock());
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    UsedChunksStatistics& chunk_stat = out->chunk_stats(i);
    Metachunk* chunk = chunks_in_use(i);
    while (chunk != NULL) {
      chunk_stat.add_num(1);
      chunk_stat.add_cap(chunk->word_size());
      chunk_stat.add_overhead(Metachunk::overhead());
      chunk_stat.add_used(chunk->used_word_size() - Metachunk::overhead());
      // Unused space in the current chunk is still allocatable and counts
      // as "free"; in any other chunk it can no longer be used and
      // therefore counts as waste.
      if (chunk != current_chunk()) {
        chunk_stat.add_waste(chunk->free_word_size());
      } else {
        chunk_stat.add_free(chunk->free_word_size());
      }
      chunk = chunk->next();
    }
  }
  // Also report the recycled-block free list, if this manager has one.
  if (block_freelists() != NULL) {
    out->add_free_blocks_info(block_freelists()->num_blocks(), block_freelists()->total_size());
  }
}
3613
3614 void SpaceManager::add_to_statistics(SpaceManagerStatistics* out) const {
3615 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3616 add_to_statistics_locked(out);
3617 }
3618
3619 #ifdef ASSERT
// Recomputes usage statistics by walking the chunk lists and checks them
// against the running counters (_capacity_words, _used_words,
// _overhead_words). Expects the SpaceManager lock to be held.
void SpaceManager::verify_metrics_locked() const {
  assert_lock_strong(lock());

  SpaceManagerStatistics stat;
  add_to_statistics_locked(&stat);

  UsedChunksStatistics chunk_stats = stat.totals();

  // Note: DEBUG_ONLY is redundant here (this method only exists under
  // #ifdef ASSERT) but harmless.
  DEBUG_ONLY(chunk_stats.check_sanity());

  assert_counter(_capacity_words, chunk_stats.cap(), "SpaceManager::_capacity_words");
  assert_counter(_used_words, chunk_stats.used(), "SpaceManager::_used_words");
  assert_counter(_overhead_words, chunk_stats.overhead(), "SpaceManager::_overhead_words");
}
3634
3635 void SpaceManager::verify_metrics() const {
3636 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3637 verify_metrics_locked();
3638 }
3639 #endif // ASSERT
3640
3641
3642
// MetaspaceUtils
// Running totals, indexed by Metaspace::MetadataType (non-class/class).
// _capacity_words and _overhead_words are only modified via the
// *_nonatomically helpers, which assert the MetaspaceExpand_lock;
// _used_words may be modified concurrently and is updated atomically.
size_t MetaspaceUtils::_capacity_words [Metaspace:: MetadataTypeCount] = {0, 0};
size_t MetaspaceUtils::_overhead_words [Metaspace:: MetadataTypeCount] = {0, 0};
volatile size_t MetaspaceUtils::_used_words [Metaspace:: MetadataTypeCount] = {0, 0};
3647
3648 // Collect used metaspace statistics. This involves walking the CLDG. The resulting
3649 // output will be the accumulated values for all live metaspaces.
3650 // Note: method does not do any locking.
3651 void MetaspaceUtils::collect_statistics(ClassLoaderMetaspaceStatistics* out) {
3652 out->reset();
3653 ClassLoaderDataGraphMetaspaceIterator iter;
3654 while (iter.repeat()) {
3655 ClassLoaderMetaspace* msp = iter.get_next();
3656 if (msp != NULL) {
3657 msp->add_to_statistics(out);
3658 }
3659 }
3660 }
3661
3662 size_t MetaspaceUtils::free_in_vs_bytes(Metaspace::MetadataType mdtype) {
3663 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3664 return list == NULL ? 0 : list->free_bytes();
3665 }
3666
3667 size_t MetaspaceUtils::free_in_vs_bytes() {
3668 return free_in_vs_bytes(Metaspace::ClassType) + free_in_vs_bytes(Metaspace::NonClassType);
3669 }
3670
3671 static void inc_stat_nonatomically(size_t* pstat, size_t words) {
3672 assert_lock_strong(MetaspaceExpand_lock);
3673 (*pstat) += words;
3674 }
3675
3676 static void dec_stat_nonatomically(size_t* pstat, size_t words) {
3677 assert_lock_strong(MetaspaceExpand_lock);
3678 const size_t size_now = *pstat;
3679 assert(size_now >= words, "About to decrement counter below zero "
3680 "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ".",
3681 size_now, words);
3682 *pstat = size_now - words;
3683 }
3684
// Atomically increments a counter that may be modified concurrently
// (used for the _used_words totals, which are bumped without the
// expand lock).
static void inc_stat_atomically(volatile size_t* pstat, size_t words) {
  Atomic::add(words, pstat);
}
3688
3689 static void dec_stat_atomically(volatile size_t* pstat, size_t words) {
3690 const size_t size_now = *pstat;
3691 assert(size_now >= words, "About to decrement counter below zero "
3692 "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ".",
3693 size_now, words);
3694 Atomic::sub(words, pstat);
3695 }
3696
// Counter update wrappers. Capacity and overhead are guarded by the
// MetaspaceExpand_lock (asserted inside the *_nonatomically helpers);
// the used counter is updated atomically.
void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
  dec_stat_nonatomically(&_capacity_words[mdtype], words);
}
void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
  inc_stat_nonatomically(&_capacity_words[mdtype], words);
}
void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
  dec_stat_atomically(&_used_words[mdtype], words);
}
void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
  inc_stat_atomically(&_used_words[mdtype], words);
}
void MetaspaceUtils::dec_overhead(Metaspace::MetadataType mdtype, size_t words) {
  dec_stat_nonatomically(&_overhead_words[mdtype], words);
}
void MetaspaceUtils::inc_overhead(Metaspace::MetadataType mdtype, size_t words) {
  inc_stat_nonatomically(&_overhead_words[mdtype], words);
}
3715
3716 size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
3717 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3718 return list == NULL ? 0 : list->reserved_bytes();
3719 }
3720
3721 size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
3722 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3723 return list == NULL ? 0 : list->committed_bytes();
3724 }
3725
// Smallest chunk size, in words, handed out for metadata allocations.
size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
3727
3728 size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
3729 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
3730 if (chunk_manager == NULL) {
3731 return 0;
3732 }
3733 chunk_manager->slow_verify();
3775 "reserved " SIZE_FORMAT "K",
3776 used_bytes()/K,
3777 capacity_bytes()/K,
3778 committed_bytes()/K,
3779 reserved_bytes()/K);
3780
3781 if (Metaspace::using_class_space()) {
3782 Metaspace::MetadataType ct = Metaspace::ClassType;
3783 out->print_cr(" class space "
3784 "used " SIZE_FORMAT "K, "
3785 "capacity " SIZE_FORMAT "K, "
3786 "committed " SIZE_FORMAT "K, "
3787 "reserved " SIZE_FORMAT "K",
3788 used_bytes(ct)/K,
3789 capacity_bytes(ct)/K,
3790 committed_bytes(ct)/K,
3791 reserved_bytes(ct)/K);
3792 }
3793 }
3794
// Closure for ClassLoaderDataGraph::cld_do(): collects per-class-loader
// metaspace statistics, accumulates running totals (overall and per
// metaspace space type), and optionally prints one line per loader as it
// walks. Must run at a safepoint.
class PrintCLDMetaspaceInfoClosure : public CLDClosure {
private:
  outputStream* const _out;              // Destination stream for optional printing.
  const size_t _scale;                   // Unit scale used when printing sizes.
  const bool _do_print;                  // If true, print a line per loader.
  const bool _break_down_by_chunktype;   // If true, split printed stats by chunk type.

public:

  // Number of loaders visited so far (also printed as the loader's ordinal).
  uintx _num_loaders;
  // Statistics accumulated over all visited loaders.
  ClassLoaderMetaspaceStatistics _stats_total;

  // The same data, broken down per metaspace space type.
  uintx _num_loaders_by_spacetype [Metaspace::MetaspaceTypeCount];
  ClassLoaderMetaspaceStatistics _stats_by_spacetype [Metaspace::MetaspaceTypeCount];

public:
  PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale, bool do_print, bool break_down_by_chunktype)
    : _out(out), _scale(scale), _do_print(do_print), _break_down_by_chunktype(break_down_by_chunktype)
    , _num_loaders(0)
  {
    memset(_num_loaders_by_spacetype, 0, sizeof(_num_loaders_by_spacetype));
  }

  void do_cld(ClassLoaderData* cld) {

    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    // Loaders without a metaspace have no metadata to count; skip them.
    ClassLoaderMetaspace* msp = cld->metaspace_or_null();
    if (msp == NULL) {
      return;
    }

    // Collect statistics for this class loader metaspace
    ClassLoaderMetaspaceStatistics this_cld_stat;
    msp->add_to_statistics(&this_cld_stat);

    // And add it to the running totals
    _stats_total.add(this_cld_stat);
    _num_loaders ++;
    _stats_by_spacetype[msp->space_type()].add(this_cld_stat);
    _num_loaders_by_spacetype[msp->space_type()] ++;

    // Optionally, print.
    if (_do_print) {

      _out->print(UINTX_FORMAT_W(4) ": ", _num_loaders);

      if (cld->is_anonymous()) {
        _out->print("ClassLoaderData " PTR_FORMAT " for anonymous class", p2i(cld));
      } else {
        // loader_name() may allocate in the resource area.
        ResourceMark rm;
        _out->print("ClassLoaderData " PTR_FORMAT " for %s", p2i(cld), cld->loader_name());
      }

      if (cld->is_unloading()) {
        _out->print(" (unloading)");
      }

      this_cld_stat.print_on(_out, _scale, _break_down_by_chunktype);
      _out->cr();

    }

  } // do_cld

};
3861
// Prints reserved/committed sizes of the virtual space lists: the
// non-class list, and - when compressed class space is in use - the
// class list plus the combined totals. Sizes are converted from bytes
// to words and scaled per 'scale'.
void MetaspaceUtils::print_vs(outputStream* out, size_t scale) {
  const size_t reserved_nonclass_words = reserved_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
  const size_t committed_nonclass_words = committed_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
  {
    // The "Non-class space" label is only needed when a class space
    // exists to distinguish from.
    if (Metaspace::using_class_space()) {
      out->print(" Non-class space: ");
    }
    print_scaled_words(out, reserved_nonclass_words, scale, 7);
    out->print(" reserved, ");
    print_scaled_words_and_percentage(out, committed_nonclass_words, reserved_nonclass_words, scale, 7);
    out->print_cr(" committed ");

    if (Metaspace::using_class_space()) {
      const size_t reserved_class_words = reserved_bytes(Metaspace::ClassType) / sizeof(MetaWord);
      const size_t committed_class_words = committed_bytes(Metaspace::ClassType) / sizeof(MetaWord);
      out->print(" Class space: ");
      print_scaled_words(out, reserved_class_words, scale, 7);
      out->print(" reserved, ");
      print_scaled_words_and_percentage(out, committed_class_words, reserved_class_words, scale, 7);
      out->print_cr(" committed ");

      const size_t reserved_words = reserved_nonclass_words + reserved_class_words;
      const size_t committed_words = committed_nonclass_words + committed_class_words;
      out->print(" Both: ");
      print_scaled_words(out, reserved_words, scale, 7);
      out->print(" reserved, ");
      print_scaled_words_and_percentage(out, committed_words, reserved_words, scale, 7);
      out->print_cr(" committed ");
    }
  }
}
3893
// This will print out a basic metaspace usage report but
// unlike print_report() is guaranteed not to lock or to walk the CLDG.
// It only reads the running totals kept in MetaspaceUtils and the chunk
// managers, which makes it safe to call from error-reporting paths.
void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {

  out->cr();
  out->print_cr("Usage:");

  if (Metaspace::using_class_space()) {
    out->print(" Non-class: ");
  }

  // In its most basic form, we do not require walking the CLDG. Instead, just print the running totals from
  // MetaspaceUtils.
  const size_t cap_nc = MetaspaceUtils::capacity_words(Metaspace::NonClassType);
  const size_t overhead_nc = MetaspaceUtils::overhead_words(Metaspace::NonClassType);
  const size_t used_nc = MetaspaceUtils::used_words(Metaspace::NonClassType);
  // Whatever capacity is neither used nor chunk overhead is free or wasted.
  const size_t free_and_waste_nc = cap_nc - overhead_nc - used_nc;

  print_scaled_words(out, cap_nc, scale, 5);
  out->print(" capacity, ");
  print_scaled_words_and_percentage(out, used_nc, cap_nc, scale, 5);
  out->print(" used, ");
  print_scaled_words_and_percentage(out, free_and_waste_nc, cap_nc, scale, 5);
  out->print(" free+waste, ");
  print_scaled_words_and_percentage(out, overhead_nc, cap_nc, scale, 5);
  out->print(" overhead. ");
  out->cr();

  // Same breakdown for the class space, plus combined totals.
  if (Metaspace::using_class_space()) {
    const size_t cap_c = MetaspaceUtils::capacity_words(Metaspace::ClassType);
    const size_t overhead_c = MetaspaceUtils::overhead_words(Metaspace::ClassType);
    const size_t used_c = MetaspaceUtils::used_words(Metaspace::ClassType);
    const size_t free_and_waste_c = cap_c - overhead_c - used_c;
    out->print(" Class: ");
    print_scaled_words(out, cap_c, scale, 5);
    out->print(" capacity, ");
    print_scaled_words_and_percentage(out, used_c, cap_c, scale, 5);
    out->print(" used, ");
    print_scaled_words_and_percentage(out, free_and_waste_c, cap_c, scale, 5);
    out->print(" free+waste, ");
    print_scaled_words_and_percentage(out, overhead_c, cap_c, scale, 5);
    out->print(" overhead. ");
    out->cr();

    out->print(" Both: ");
    const size_t cap = cap_nc + cap_c;

    print_scaled_words(out, cap, scale, 5);
    out->print(" capacity, ");
    print_scaled_words_and_percentage(out, used_nc + used_c, cap, scale, 5);
    out->print(" used, ");
    print_scaled_words_and_percentage(out, free_and_waste_nc + free_and_waste_c, cap, scale, 5);
    out->print(" free+waste, ");
    print_scaled_words_and_percentage(out, overhead_nc + overhead_c, cap, scale, 5);
    out->print(" overhead. ");
    out->cr();
  }

  out->cr();
  out->print_cr("Virtual space:");

  print_vs(out, scale);

  out->cr();
  out->print_cr("Chunk freelists:");

  if (Metaspace::using_class_space()) {
    out->print(" Non-Class: ");
  }
  out->print_human_readable_size(Metaspace::chunk_manager_metadata()->free_chunks_total_words(), scale);
  out->cr();
  if (Metaspace::using_class_space()) {
    out->print(" Class: ");
    out->print_human_readable_size(Metaspace::chunk_manager_class()->free_chunks_total_words(), scale);
    out->cr();
    out->print(" Both: ");
    out->print_human_readable_size(Metaspace::chunk_manager_class()->free_chunks_total_words() +
                                   Metaspace::chunk_manager_metadata()->free_chunks_total_words(), scale);
    out->cr();
  }
  out->cr();

}
3977
// Prints a detailed metaspace usage report. Walks the class loader data
// graph (so, unlike print_basic_report(), this is not safe from arbitrary
// contexts), accumulating per-loader statistics and optionally printing
// per-loader, per-space-type, virtual-space-list and chunk-manager detail
// sections depending on 'flags' (rf_* bits). 'scale' selects the size unit.
void MetaspaceUtils::print_report(outputStream* out, size_t scale, int flags) {

  const bool print_loaders = (flags & rf_show_loaders) > 0;
  const bool print_by_chunktype = (flags & rf_break_down_by_chunktype) > 0;
  const bool print_by_spacetype = (flags & rf_break_down_by_spacetype) > 0;

  // Some report options require walking the class loader data graph.
  PrintCLDMetaspaceInfoClosure cl(out, scale, print_loaders, print_by_chunktype);
  if (print_loaders) {
    out->cr();
    out->print_cr("Usage per loader:");
    out->cr();
  }

  ClassLoaderDataGraph::cld_do(&cl); // collect data and optionally print

  // Print totals, broken up by space type.
  if (print_by_spacetype) {
    out->cr();
    out->print_cr("Usage per space type:");
    out->cr();
    for (int space_type = (int)Metaspace::ZeroMetaspaceType;
         space_type < (int)Metaspace::MetaspaceTypeCount; space_type ++)
    {
      uintx num = cl._num_loaders_by_spacetype[space_type];
      out->print("%s (" UINTX_FORMAT " loader%s)%c",
        space_type_name((Metaspace::MetaspaceType)space_type),
        num, (num == 1 ? "" : "s"), (num > 0 ? ':' : '.'));
      if (num > 0) {
        cl._stats_by_spacetype[space_type].print_on(out, scale, print_by_chunktype);
      }
      out->cr();
    }
  }

  // Print totals for in-use data:
  out->cr();
  out->print_cr("Total Usage ( " UINTX_FORMAT " loader%s)%c",
      cl._num_loaders, (cl._num_loaders == 1 ? "" : "s"), (cl._num_loaders > 0 ? ':' : '.'));

  cl._stats_total.print_on(out, scale, print_by_chunktype);

  // -- Print Virtual space.
  out->cr();
  out->print_cr("Virtual space:");

  print_vs(out, scale);

  // -- Print VirtualSpaceList details.
  if ((flags & rf_show_vslist) > 0) {
    out->cr();
    out->print_cr("Virtual space list%s:", Metaspace::using_class_space() ? "s" : "");

    if (Metaspace::using_class_space()) {
      out->print_cr(" Non-Class:");
    }
    Metaspace::space_list()->print_on(out, scale);
    if (Metaspace::using_class_space()) {
      out->print_cr(" Class:");
      Metaspace::class_space_list()->print_on(out, scale);
    }
  }
  out->cr();

  // -- Print VirtualSpaceList map.
  if ((flags & rf_show_vsmap) > 0) {
    out->cr();
    out->print_cr("Virtual space map:");

    if (Metaspace::using_class_space()) {
      out->print_cr(" Non-Class:");
    }
    Metaspace::space_list()->print_map(out);
    if (Metaspace::using_class_space()) {
      out->print_cr(" Class:");
      Metaspace::class_space_list()->print_map(out);
    }
  }
  out->cr();

  // -- Print Freelists (ChunkManager) details
  out->cr();
  out->print_cr("Chunk freelist%s:", Metaspace::using_class_space() ? "s" : "");

  ChunkManagerStatistics non_class_cm_stat;
  Metaspace::chunk_manager_metadata()->collect_statistics(&non_class_cm_stat);

  if (Metaspace::using_class_space()) {
    out->print_cr(" Non-Class:");
  }
  non_class_cm_stat.print_on(out, scale);

  if (Metaspace::using_class_space()) {
    ChunkManagerStatistics class_cm_stat;
    Metaspace::chunk_manager_class()->collect_statistics(&class_cm_stat);
    out->print_cr(" Class:");
    class_cm_stat.print_on(out, scale);
  }

  // As a convenience, print a summary of common waste.
  out->cr();
  out->print("Waste: ");
  // For all wastages, print percentages from total. As total use the total size of memory committed for metaspace.
  const size_t committed_words = committed_bytes() / BytesPerWord;

  // NOTE(review): the outer "(" in this message is never closed - the
  // line ends with ")." closing only the inner parenthesis. Cosmetic only.
  out->print("(Percentage values refer to total committed size (");
  print_scaled_words(out, committed_words, scale);
  out->print_cr(").");

  // Print waste for in-use chunks.
  UsedChunksStatistics ucs_nonclass = cl._stats_total.nonclass_sm_stats().totals();
  UsedChunksStatistics ucs_class = cl._stats_total.class_sm_stats().totals();
  UsedChunksStatistics ucs_all;
  ucs_all.add(ucs_nonclass);
  ucs_all.add(ucs_class);
  out->print(" Waste in chunks in use: ");
  print_scaled_words_and_percentage(out, ucs_all.waste(), committed_words, scale, 6);
  out->cr();
  out->print(" Free in chunks in use: ");
  print_scaled_words_and_percentage(out, ucs_all.free(), committed_words, scale, 6);
  out->cr();

  // Print waste in free chunks.
  const size_t total_capacity_in_free_chunks =
      Metaspace::chunk_manager_metadata()->free_chunks_total_words() +
     (Metaspace::using_class_space() ? Metaspace::chunk_manager_class()->free_chunks_total_words() : 0);
  out->print(" In free chunks: ");
  print_scaled_words_and_percentage(out, total_capacity_in_free_chunks, committed_words, scale, 6);
  out->cr();

  // Print waste in deallocated blocks.
  const uintx free_blocks_num =
      cl._stats_total.nonclass_sm_stats().free_blocks_num() +
      cl._stats_total.class_sm_stats().free_blocks_num();
  const size_t free_blocks_cap_words =
      cl._stats_total.nonclass_sm_stats().free_blocks_cap_words() +
      cl._stats_total.class_sm_stats().free_blocks_cap_words();
  out->print("Deallocated from chunks in use: " UINTX_FORMAT " blocks, total size ", free_blocks_num);
  print_scaled_words_and_percentage(out, free_blocks_cap_words, committed_words, scale, 6);
  out->cr();

  // Print internal statistics
#ifdef ASSERT
  out->cr();
  out->cr();
  out->print_cr("Internal statistics:");
  out->cr();
  out->print_cr("Number of allocations: " UINTX_FORMAT ".", g_internal_statistics.num_allocs);
  out->print_cr("Number of space births: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_births);
  out->print_cr("Number of space deaths: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_deaths);
  out->print_cr("Number of virtual space node births: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_created);
  out->print_cr("Number of virtual space node deaths: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_purged);
  out->print_cr("Number of times virtual space nodes were expanded: " UINTX_FORMAT ".", g_internal_statistics.num_committed_space_expanded);
  out->print_cr("Number of deallocations: " UINTX_FORMAT " (" UINTX_FORMAT " external).", g_internal_statistics.num_deallocs, g_internal_statistics.num_external_deallocs);
  out->print_cr("Allocations from deallocated blocks: " UINTX_FORMAT ".", g_internal_statistics.num_allocs_from_deallocated_blocks);
  out->cr();
#endif

  // Print some interesting settings
  out->cr();
  out->cr();
  out->print("MaxMetaspaceSize: ");
  out->print_human_readable_size(MaxMetaspaceSize, scale);
  out->cr();
  out->print("InitialBootClassLoaderMetaspaceSize: ");
  out->print_human_readable_size(InitialBootClassLoaderMetaspaceSize, scale);
  out->cr();

  out->print("UseCompressedClassPointers: %s", UseCompressedClassPointers ? "true" : "false");
  out->cr();
  if (Metaspace::using_class_space()) {
    out->print("CompressedClassSpaceSize: ");
    out->print_human_readable_size(CompressedClassSpaceSize, scale);
  }

  out->cr();
  out->cr();

} // MetaspaceUtils::print_report()
4157
4158 // Prints an ASCII representation of the given space.
4159 void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
4160 MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
4161 const bool for_class = mdtype == Metaspace::ClassType ? true : false;
4162 VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
4163 if (vsl != NULL) {
4164 if (for_class) {
4165 if (!Metaspace::using_class_space()) {
4166 out->print_cr("No Class Space.");
4167 return;
4168 }
4169 out->print_raw("---- Metaspace Map (Class Space) ----");
4170 } else {
4171 out->print_raw("---- Metaspace Map (Non-Class Space) ----");
4172 }
4173 // Print legend:
4174 out->cr();
4175 out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
4176 out->cr();
4177 VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
4178 vsl->print_map(out);
4179 out->cr();
4180 }
4181 }
4182
4183 void MetaspaceUtils::verify_free_chunks() {
4184 Metaspace::chunk_manager_metadata()->verify();
4185 if (Metaspace::using_class_space()) {
4186 Metaspace::chunk_manager_class()->verify();
4187 }
4188 }
4189
// Debug-only consistency check: recomputes usage by walking the CLDG and
// compares the result against the running counters, printing any
// mismatch before asserting.
void MetaspaceUtils::verify_metrics() {
#ifdef ASSERT
  // Please note: there are time windows where the internal counters are out of sync with
  // reality. For example, when a newly created ClassLoaderMetaspace creates its first chunk -
  // the ClassLoaderMetaspace is not yet attached to its ClassLoaderData object and hence will
  // not be counted when iterating the CLDG. So be careful when you call this method.
  ClassLoaderMetaspaceStatistics total_stat;
  collect_statistics(&total_stat);
  UsedChunksStatistics nonclass_chunk_stat = total_stat.nonclass_sm_stats().totals();
  UsedChunksStatistics class_chunk_stat = total_stat.class_sm_stats().totals();

  // Check both metadata types; report every mismatch before asserting so
  // the log contains the full picture.
  bool mismatch = false;
  for (int i = 0; i < Metaspace::MetadataTypeCount; i ++) {
    Metaspace::MetadataType mdtype = (Metaspace::MetadataType)i;
    UsedChunksStatistics chunk_stat = total_stat.sm_stats(mdtype).totals();
    if (capacity_words(mdtype) != chunk_stat.cap() ||
        used_words(mdtype) != chunk_stat.used() ||
        overhead_words(mdtype) != chunk_stat.overhead()) {
      mismatch = true;
      tty->print_cr("MetaspaceUtils::verify_metrics: counter mismatch for mdtype=%u:", mdtype);
      tty->print_cr("Expected cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
                    capacity_words(mdtype), used_words(mdtype), overhead_words(mdtype));
      tty->print_cr("Got cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
                    chunk_stat.cap(), chunk_stat.used(), chunk_stat.overhead());
      tty->flush();
    }
  }
  assert(mismatch == false, "MetaspaceUtils::verify_metrics: counter mismatch.");
#endif
}
4220
4221
// Metaspace methods

// Static Metaspace state; presumably populated during global metaspace
// initialization (initialization code is outside this chunk - verify).
size_t Metaspace::_first_chunk_word_size = 0;
size_t Metaspace::_first_class_chunk_word_size = 0;

size_t Metaspace::_commit_alignment = 0;
size_t Metaspace::_reserve_alignment = 0;

VirtualSpaceList* Metaspace::_space_list = NULL;
VirtualSpaceList* Metaspace::_class_space_list = NULL;

ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
ChunkManager* Metaspace::_chunk_manager_class = NULL;

#define VIRTUALSPACEMULTIPLIER 2
4237
4238 #ifdef _LP64
4239 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
4240
4623
4624 return result;
4625 }
4626
4627 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
4628 tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
4629
4630 // If result is still null, we are out of memory.
4631 Log(gc, metaspace, freelist) log;
4632 if (log.is_info()) {
4633 log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
4634 is_class_space_allocation(mdtype) ? "class" : "data", word_size);
4635 ResourceMark rm;
4636 if (log.is_debug()) {
4637 if (loader_data->metaspace_or_null() != NULL) {
4638 LogStream ls(log.debug());
4639 loader_data->print_value_on(&ls);
4640 }
4641 }
4642 LogStream ls(log.info());
4643 // In case of an OOM, log out a short but still useful report.
4644 MetaspaceUtils::print_basic_report(&ls, 0);
4645 }
4646
4647 bool out_of_compressed_class_space = false;
4648 if (is_class_space_allocation(mdtype)) {
4649 ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
4650 out_of_compressed_class_space =
4651 MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
4652 (metaspace->class_chunk_size(word_size) * BytesPerWord) >
4653 CompressedClassSpaceSize;
4654 }
4655
4656 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
4657 const char* space_string = out_of_compressed_class_space ?
4658 "Compressed class space" : "Metaspace";
4659
4660 report_java_out_of_memory(space_string);
4661
4662 if (JvmtiExport::should_post_resource_exhausted()) {
4663 JvmtiExport::post_resource_exhausted(
4664 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
4699 }
4700 }
4701
4702 bool Metaspace::contains(const void* ptr) {
4703 if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
4704 return true;
4705 }
4706 return contains_non_shared(ptr);
4707 }
4708
4709 bool Metaspace::contains_non_shared(const void* ptr) {
4710 if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
4711 return true;
4712 }
4713
4714 return get_space_list(NonClassType)->contains(ptr);
4715 }
4716
// ClassLoaderMetaspace

// Creates the per-class-loader metaspace; the given lock guards both
// space managers created in initialize().
ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type)
  : _lock(lock)
  , _space_type(type)
  , _vsm(NULL)
  , _class_vsm(NULL)
{
  initialize(lock, type);
}
4727
// Deletes the space managers created in initialize(). _class_vsm is only
// created when compressed class space is in use, hence the guard.
ClassLoaderMetaspace::~ClassLoaderMetaspace() {
  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_deaths));
  delete _vsm;
  if (Metaspace::using_class_space()) {
    delete _class_vsm;
  }
}
4735
4736 void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4737 Metachunk* chunk = get_initialization_chunk(type, mdtype);
4738 if (chunk != NULL) {
4739 // Add to this manager's list of chunks in use and make it the current_chunk().
4740 get_space_manager(mdtype)->add_chunk(chunk, true);
4741 }
4742 }
4743
4744 Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4745 size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
4746
4747 // Get a chunk from the chunk freelist
4748 Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
4749
4750 if (chunk == NULL) {
4751 chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
4752 get_space_manager(mdtype)->medium_chunk_bunch());
4753 }
4754
4755 return chunk;
4756 }
4757
// Creates the space managers (the class-space one only when compressed
// class space is in use) and hands each its first chunk. Both managers
// share the same lock. Chunk acquisition happens under the
// MetaspaceExpand_lock.
void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
  Metaspace::verify_global_initialization();

  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_births));

  // Allocate SpaceManager for metadata objects.
  _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);

  if (Metaspace::using_class_space()) {
    // Allocate SpaceManager for classes.
    _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
  }

  MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);

  // Allocate chunk for metadata objects
  initialize_first_chunk(type, Metaspace::NonClassType);

  // Allocate chunk for class metadata objects
  if (Metaspace::using_class_space()) {
    initialize_first_chunk(type, Metaspace::ClassType);
  }
}
4781
4782 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4783 Metaspace::assert_not_frozen();
4784
4785 DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs));
4786
4787 // Don't use class_vsm() unless UseCompressedClassPointers is true.
4788 if (Metaspace::is_class_space_allocation(mdtype)) {
4789 return class_vsm()->allocate(word_size);
4790 } else {
4791 return vsm()->allocate(word_size);
4792 }
4793 }
4794
// Attempts to raise the GC high-water mark (capacity-until-GC) and then
// allocate. Returns NULL if allocation still fails after the HWM could
// be raised.
MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
  Metaspace::assert_not_frozen();
  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
  assert(delta_bytes > 0, "Must be");

  size_t before = 0;
  size_t after = 0;
  MetaWord* res;
  bool incremented;

  // Each thread increments the HWM at most once. Even if the thread fails to increment
  // the HWM, an allocation is still attempted. This is because another thread must then
  // have incremented the HWM and therefore the allocation might still succeed.
  do {
    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
    res = allocate(word_size, mdtype);
  } while (!incremented && res == NULL);

  if (incremented) {
    // Report the threshold change to the tracer and trace log.
    Metaspace::tracer()->report_gc_threshold(before, after,
                                             MetaspaceGCThresholdUpdater::ExpandAndAllocate);
    log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
  }

  return res;
}
4821
4822 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
4823 return (vsm()->used_words() +
4824 (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
4825 }
4826
4827 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
4828 return (vsm()->capacity_words() +
4829 (Metaspace::using_class_space() ? class_vsm()->capacity_words() : 0)) * BytesPerWord;
4830 }
4831
4832 void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
4833 Metaspace::assert_not_frozen();
4834 assert(!SafepointSynchronize::is_at_safepoint()
4835 || Thread::current()->is_VM_thread(), "should be the VM thread");
4836
4837 DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_external_deallocs));
4838
4839 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
4840
4841 if (is_class && Metaspace::using_class_space()) {
4842 class_vsm()->deallocate(ptr, word_size);
4843 } else {
4844 vsm()->deallocate(ptr, word_size);
4845 }
4846 }
4847
4848 size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
4849 assert(Metaspace::using_class_space(), "Has to use class space");
4850 return class_vsm()->calc_chunk_size(word_size);
4851 }
4852
4853 void ClassLoaderMetaspace::print_on(outputStream* out) const {
4854 // Print both class virtual space counts and metaspace.
4855 if (Verbose) {
4856 vsm()->print_on(out);
4857 if (Metaspace::using_class_space()) {
4858 class_vsm()->print_on(out);
4859 }
4860 }
4861 }
4862
4863 void ClassLoaderMetaspace::verify() {
4864 vsm()->verify();
4865 if (Metaspace::using_class_space()) {
4866 class_vsm()->verify();
4867 }
4868 }
4869
4870 void ClassLoaderMetaspace::add_to_statistics_locked(ClassLoaderMetaspaceStatistics* out) const {
4871 assert_lock_strong(lock());
4872 vsm()->add_to_statistics_locked(&out->nonclass_sm_stats());
4873 if (Metaspace::using_class_space()) {
4874 class_vsm()->add_to_statistics_locked(&out->class_sm_stats());
4875 }
4876 }
4877
4878 void ClassLoaderMetaspace::add_to_statistics(ClassLoaderMetaspaceStatistics* out) const {
4879 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
4880 add_to_statistics_locked(out);
4881 }
4882
#ifdef ASSERT
// Sanity-checks a chunk: verifies the chunk itself, then checks that it
// agrees with the occupancy map of its containing virtual space node.
static void do_verify_chunk(Metachunk* chunk) {
  guarantee(chunk != NULL, "Sanity");
  chunk->verify();
  VirtualSpaceNode* const node = chunk->container();
  OccupancyMap* const map = node->occupancy_map();
  map->verify_for_chunk(chunk);
}
#endif
4894
4895 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
4896 chunk->set_is_tagged_free(!inuse);
4897 OccupancyMap* const ocmap = chunk->container()->occupancy_map();
4898 ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
4899 }
4900
4901 /////////////// Unit tests ///////////////
5207 test_adjust_initial_chunk_size(false);
5208 test_adjust_initial_chunk_size(true);
5209 }
5210 };
5211
// Test entry point: forwards to the SpaceManagerTest friend class.
void SpaceManager_test_adjust_initial_chunk_size() {
  SpaceManagerTest::test_adjust_initial_chunk_size();
}
5215
5216 #endif // ASSERT
5217
// Plain-old-data carrier for per-index chunk counts, filled in by
// test_metaspace_retrieve_chunkmanager_statistics() for external tests.
struct chunkmanager_statistics_t {
  int num_specialized_chunks;   // count at SpecializedIndex
  int num_small_chunks;         // count at SmallIndex
  int num_medium_chunks;        // count at MediumIndex
  int num_humongous_chunks;     // count at HumongousIndex
};
5224
5225 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
5226 ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
5227 ChunkManagerStatistics stat;
5228 chunk_manager->collect_statistics(&stat);
5229 out->num_specialized_chunks = (int)stat.chunk_stats(SpecializedIndex).num();
5230 out->num_small_chunks = (int)stat.chunk_stats(SmallIndex).num();
5231 out->num_medium_chunks = (int)stat.chunk_stats(MediumIndex).num();
5232 out->num_humongous_chunks = (int)stat.chunk_stats(HumongousIndex).num();
5233 }
5234
// Plain-old-data carrier for the chunk sizes (in words) of a metadata
// type, filled in by test_metaspace_retrieve_chunk_geometry().
struct chunk_geometry_t {
  size_t specialized_chunk_word_size;
  size_t small_chunk_word_size;
  size_t medium_chunk_word_size;
};
5240
5241 extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
5242 if (mdType == Metaspace::NonClassType) {
5243 out->specialized_chunk_word_size = SpecializedChunk;
5244 out->small_chunk_word_size = SmallChunk;
5245 out->medium_chunk_word_size = MediumChunk;
5246 } else {
5247 out->specialized_chunk_word_size = ClassSpecializedChunk;
5248 out->small_chunk_word_size = ClassSmallChunk;
5249 out->medium_chunk_word_size = ClassMediumChunk;
5250 }
5251 }
|