rev 49017 : [mq]: metaspace-stat
/*
 * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"

typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

DEBUG_ONLY(bool Metaspace::_frozen = false;)

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

// Helper, returns a descriptive name for the given index.
static const char* chunk_size_name(ChunkIndex index) {
  switch (index) {
    case SpecializedIndex:
      return "specialized";
    case SmallIndex:
      return "small";
    case MediumIndex:
      return "medium";
    case HumongousIndex:
      return "humongous";
    default:
      return "Invalid index";
  }
}

enum ChunkSizes {    // in words.
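  // Note: these values are metaspace words. Assuming a 64-bit VM (BytesPerWord == 8),
  // a 128-word specialized chunk is 1 KB, small chunks are 2 KB (class) / 4 KB (non-class),
  // and medium chunks are 32 KB (class) / 64 KB (non-class).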
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bound");
  return (ChunkIndex) (i+1);
}

static const char* scale_unit(size_t scale) {
  switch(scale) {
    case 1: return "BYTES";
    case K: return "KB";
    case M: return "MB";
    case G: return "GB";
    default:
      ShouldNotReachHere();
      return NULL;
  }
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free list of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  // Return non-humongous chunk list by its index.
  ChunkList* free_chunks(ChunkIndex index);

  // Returns non-humongous chunk list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Returns the humongous chunk dictionary.
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  // Size, in metaspace words, of all chunks managed by this ChunkManager
  size_t _free_chunks_total_words;
  // Number of chunks in this ChunkManager
  size_t _free_chunks_count;

  // Update counters after a chunk was added or removed.
  void account_for_added_chunk(const Metachunk* c);
  void account_for_removed_chunk(const Metachunk* c);

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }

  struct ChunkManagerStatistics {
    size_t num_by_type[NumberOfFreeLists];
    size_t single_size_by_type[NumberOfFreeLists];
    size_t total_size_by_type[NumberOfFreeLists];
    size_t num_humongous_chunks;
    size_t total_size_humongous_chunks;
  };

  void locked_get_statistics(ChunkManagerStatistics* stat) const;
  void get_statistics(ChunkManagerStatistics* stat) const;
  static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale);

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total_words(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // Add or delete (return) a chunk to the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  ChunkIndex list_index(size_t size);

  // Map a given index to the chunk size.
  size_t size_by_index(ChunkIndex index) const;

  // Take a chunk from the ChunkManager. The chunk is expected to be in
  // the chunk manager (the freelist if non-humongous, the dictionary if
  // humongous).
  void remove_chunk(Metachunk* chunk);

  // Return a single chunk of type index to the ChunkManager.
  void return_single_chunk(ChunkIndex index, Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunk_list(ChunkIndex index, Metachunk* chunk);

  // Total of the space in the free chunks list
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks list
  size_t free_chunks_count();

  // Remove from a list by size. Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);

#define index_bounds_check(index)                                         \
  assert(index == SpecializedIndex ||                                     \
         index == SmallIndex ||                                           \
         index == MediumIndex ||                                          \
         index == HumongousIndex, "Bad index: %d", (int) index)

  size_t num_free_chunks(ChunkIndex index) const {
    index_bounds_check(index);

    if (index == HumongousIndex) {
      return _humongous_dictionary.total_free_blocks();
    }

    ssize_t count = _free_chunks[index].count();
    return count == -1 ? 0 : (size_t) count;
  }

  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
    index_bounds_check(index);

    size_t word_size = 0;
    if (index == HumongousIndex) {
      word_size = _humongous_dictionary.total_size();
    } else {
      const size_t size_per_chunk_in_words = _free_chunks[index].size();
      word_size = size_per_chunk_in_words * num_free_chunks(index);
    }

    return word_size * BytesPerWord;
  }

  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
                                         num_free_chunks(SmallIndex),
                                         num_free_chunks(MediumIndex),
                                         num_free_chunks(HumongousIndex),
                                         size_free_chunks_in_bytes(SpecializedIndex),
                                         size_free_chunks_in_bytes(SmallIndex),
                                         size_free_chunks_in_bytes(MediumIndex),
                                         size_free_chunks_in_bytes(HumongousIndex));
  }

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }

  // Prints composition (number and total size of chunks per chunk size).
  void locked_print_on(outputStream* out, size_t scale = 1) const;
  void print_on(outputStream* out, size_t scale = 1) const;

  // Prints composition (number and total size of chunks per chunk size) for all
  // (class and non-class) chunk managers.
  static void print_all_chunkmanagers_on(outputStream* out, size_t scale = 1);

};

class SmallBlocks : public CHeapObj<mtClass> {
  const static uint _small_block_max_size = sizeof(TreeChunk<Metablock, FreeList<Metablock> >)/HeapWordSize;
  const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;

 private:
  FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];

  FreeList<Metablock>& list_at(size_t word_size) {
    assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
    return _small_lists[word_size - _small_block_min_size];
  }

 public:
  SmallBlocks() {
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      _small_lists[k].set_size(i);
    }
  }

  size_t total_size() const {
    size_t result = 0;
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      result = result + _small_lists[k].count() * _small_lists[k].size();
    }
    return result;
  }

  static uint small_block_max_size() { return _small_block_max_size; }
  static uint small_block_min_size() { return _small_block_min_size; }

  MetaWord* get_block(size_t word_size) {
    if (list_at(word_size).count() > 0) {
      MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
      return new_block;
    } else {
      return NULL;
    }
  }
  void return_block(Metablock* free_chunk, size_t word_size) {
    list_at(word_size).return_chunk_at_head(free_chunk, false);
    assert(list_at(word_size).count() > 0, "Should have a chunk");
  }

  void print_on(outputStream* st) const {
    st->print_cr("SmallBlocks:");
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
    }
  }
};

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist : public CHeapObj<mtClass> {
  BlockTreeDictionary* const _dictionary;
  SmallBlocks* _small_blocks;

  // Only allocate and split from freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }
  SmallBlocks* small_blocks() {
    if (_small_blocks == NULL) {
      _small_blocks = new SmallBlocks();
    }
    return _small_blocks;
  }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get and return a block to the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() const {
    size_t result = dictionary()->total_size();
    if (_small_blocks != NULL) {
      result = result + _small_blocks->total_size();
    }
    return result;
  }

  static size_t min_dictionary_size() { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
  void print_on(outputStream* st) const;
};

// A VirtualSpaceList node.
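// Each node wraps one contiguous ReservedSpace. Metachunks are carved out of the
// committed part of the node by bumping _top, and the node tracks how many of its
// chunks are currently in use so that completely empty nodes can later be purged.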
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;

 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // address of next available space in _virtual_space;
  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uintx container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space. Delegates
  // to Virtualspace
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always adds up because all the chunk sizes are multiples of
  // the smallest chunk size.
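  // (For example, with the non-class chunk sizes and a hypothetical leftover of 1280
  // committed words, the node would hand back two 512-word small chunks plus two
  // 128-word specialized chunks.)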
  void retire(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
  void print_map(outputStream* st, bool is_class) const;
};

#define assert_is_aligned(value, alignment)                  \
  assert(is_aligned((value), (alignment)),                   \
         SIZE_FORMAT_HEX " is not aligned to "               \
         SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))

// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// byte_size is the size of the associated virtualspace.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_aligned(bytes, Metaspace::reserve_alignment());
  bool large_pages = should_commit_large_pages_when_reserving(bytes);
  _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {

  // Format:
  //   <ptr>
  //   <ptr>  . .. .               .  ..
  //          SSxSSMMMMMMMMMMMMMMMMsssXX
  //          112114444444444444444
  //   <ptr>  . .. .               .  ..
  //          SSxSSMMMMMMMMMMMMMMMMsssXX
  //          112114444444444444444

  if (bottom() == top()) {
    return;
  }

  // First line: dividers for every med-chunk-sized interval
  // Second line: a dot for the start of a chunk
  // Third line: a letter per chunk type (x,s,m,h), uppercase if in use.

  const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
  const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
  const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
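  // Each printed position in the map stands for one specialized-chunk-sized slot of
  // the node, so a full output line covers line_len * spec_chunk_size words.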

  int line_len = 100;
  const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
  line_len = (int)(section_len / spec_chunk_size);

  char* line1 = (char*)os::malloc(line_len, mtInternal);
  char* line2 = (char*)os::malloc(line_len, mtInternal);
  char* line3 = (char*)os::malloc(line_len, mtInternal);
  int pos = 0;
  const MetaWord* p = bottom();
  const Metachunk* chunk = (const Metachunk*)p;
  const MetaWord* chunk_end = p + chunk->word_size();
  while (p < top()) {
    if (pos == line_len) {
      pos = 0;
      st->fill_to(22);
      st->print_raw(line1, line_len);
      st->cr();
      st->fill_to(22);
      st->print_raw(line2, line_len);
      st->cr();
    }
    if (pos == 0) {
      st->print(PTR_FORMAT ":", p2i(p));
    }
    if (p == chunk_end) {
      chunk = (Metachunk*)p;
      chunk_end = p + chunk->word_size();
    }
    if (p == (const MetaWord*)chunk) {
      // chunk starts.
      line1[pos] = '.';
    } else {
      line1[pos] = ' ';
    }
    // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
    // chunk is in use.
    const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
    if (chunk->word_size() == spec_chunk_size) {
      line2[pos] = chunk_is_free ? 'x' : 'X';
    } else if (chunk->word_size() == small_chunk_size) {
      line2[pos] = chunk_is_free ? 's' : 'S';
    } else if (chunk->word_size() == med_chunk_size) {
      line2[pos] = chunk_is_free ? 'm' : 'M';
    } else if (chunk->word_size() > med_chunk_size) {
      line2[pos] = chunk_is_free ? 'h' : 'H';
    } else {
      ShouldNotReachHere();
    }
    p += spec_chunk_size;
    pos ++;
  }
  if (pos > 0) {
    st->fill_to(22);
    st->print_raw(line1, pos);
    st->cr();
    st->fill_to(22);
    st->print_raw(line2, pos);
    st->cr();
  }
  os::free(line1);
  os::free(line2);
  os::free(line3);
}


#ifdef ASSERT
uintx VirtualSpaceNode::container_count_slow() {
  uintx count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

  // Chunk up the unused committed space in the current
  // virtual space and add the chunks to the free list.
  void retire_current_virtual_space();

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t chunk_word_size,
                           size_t suggested_commit_granularity);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  bool contains(const void* ptr);

  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  void print_on(outputStream* st) const;
  void print_map(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;

// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  const Metaspace::MetadataType   _mdtype;

  // Type of metaspace
  const Metaspace::MetaspaceType  _space_type;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Maximum number of small chunks to allocate to a SpaceManager
  static uint const _small_chunk_limit;

  // Maximum number of specialized chunks to allocate for anonymous
  // metadata space to a SpaceManager
  static uint const _anon_metadata_specialize_chunk_limit;

  // Sum of all space in allocated chunks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
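  // (A deallocated block can therefore only be reused by the same SpaceManager,
  // i.e. by metadata belonging to the same class loader.)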
  BlockFreelist* _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const { return _block_freelists; }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Metaspace::MetaspaceType space_type,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
  static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
  static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }

  static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }

  // Accessors
  bool is_class() const { return _mdtype == Metaspace::ClassType; }

  size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
  size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
  size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }

  size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }

  size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks by the given
  // size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager.  That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in-use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Adjust the initial chunk size to match one of the fixed chunk list sizes,
  // or return the unadjusted size if the requested size is humongous.
  static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
  size_t adjust_initial_chunk_size(size_t requested) const;

  // Get the initial chunk size for this metaspace type.
  size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t chunk_word_size);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);
  // Allocates a block from a small chunk
  MetaWord* get_small_chunk_and_allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size, returns the
  // chunk size to use when expanding space for a chunk allocation.
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  // This adjusts the size given to be greater than the minimum allocation size in
  // words for data in metaspace.  Essentially the minimum size is currently 3 words.
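  // (For example, a raw request of 1 or 2 words is padded up to sizeof(Metablock)
  // and aligned to Metachunk::object_alignment(), which yields that 3-word minimum.)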
  size_t get_allocation_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
};

uint const SpaceManager::_small_chunk_limit = 4;
uint const SpaceManager::_anon_metadata_specialize_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int   SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag,
            Monitor::_safepoint_check_never);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         "Inconsistency in container_count _container_count " UINTX_FORMAT
         " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}

BlockFreelist::~BlockFreelist() {
  delete _dictionary;
  if (_small_blocks != NULL) {
    delete _small_blocks;
  }
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");

  Metablock* free_chunk = ::new (p) Metablock(word_size);
  if (word_size < SmallBlocks::small_block_max_size()) {
    small_blocks()->return_block(free_chunk, word_size);
  } else {
    dictionary()->return_chunk(free_chunk);
  }
  log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
            SIZE_FORMAT, p2i(free_chunk), word_size);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");

  // Try small_blocks first.
  if (word_size < SmallBlocks::small_block_max_size()) {
    // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
    // this space manager.
    MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
    if (new_block != NULL) {
      log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
                p2i(new_block), word_size);
      return new_block;
    }
  }

  if (word_size < BlockFreelist::min_dictionary_size()) {
    // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block = dictionary()->get_chunk(word_size);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= SmallBlocks::small_block_min_size()) {
    return_block(new_block + word_size, unused);
  }

  log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
            p2i(new_block), word_size);
  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  dictionary()->print_free_lists(st);
  if (_small_blocks != NULL) {
    _small_blocks->print_on(st);
  }
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
      "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    LogTarget(Debug, gc, metaspace, freelist) lt;
    if (lt.is_enabled()) {
      LogStream ls(lt);
      ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(&ls);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
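  // The new chunk's Metachunk header is then constructed in place at the old top;
  // its usable payload follows the header within the same chunk_word_size words.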
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned only the middle alignment of the VirtualSpace is used.
  assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                                             Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
        "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                 (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
           "Reserved start was not set properly " PTR_FORMAT
           " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
           "Reserved size was not set properly " SIZE_FORMAT
           " != " SIZE_FORMAT, reserved()->word_size(),
           _rs.size() / BytesPerWord);
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
           "[" PTR_FORMAT ", " PTR_FORMAT ", "
           PTR_FORMAT ", " PTR_FORMAT ")",
           p2i(vs), capacity / K,
           capacity == 0 ? 0 : used * 100 / capacity,
           p2i(bottom()), p2i(top()), p2i(end()),
           p2i(vs->high_boundary()));
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                        \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
         "Too much committed memory. Committed: " SIZE_FORMAT \
         " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
         MetaspaceAux::committed_bytes(), MaxMetaspaceSize);

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk has been removed from the chunks free list, update counters.
  account_for_removed_chunk(chunk);
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    DEBUG_ONLY(vsl->verify_container_count();)
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except for at
// unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(SpaceManager::expand_lock());

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  DEBUG_ONLY(verify_container_count();)
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->size_by_index(index);

    while (free_words_in_vs() >= chunk_size) {
      Metachunk* chunk = get_chunk_vs(chunk_size);
      assert(chunk != NULL, "allocation should have been successful");

      chunk_manager->return_single_chunk(index, chunk);
    }
    DEBUG_ONLY(verify_container_count();)
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return current_virtual_space()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
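// When expand_by() below has to grow the list, it asks for at least VirtualSpaceSize
// (256 K words, i.e. 2 MB with 8-byte words), rounded up to Metaspace::reserve_alignment_words().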
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
        "Reserved memory size differs from requested memory size");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  LogTarget(Trace, gc, metaspace) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    VirtualSpaceNode* vsl = current_virtual_space();
    ResourceMark rm;
    vsl->print_on(&ls);
  }
}

bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_aligned(min_words, Metaspace::commit_alignment_words());
  assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
          "The new VirtualSpace was pre-committed, so it"
          " should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size       = align_up(chunk_word_size,              Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(chunk_word_size);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    node->print_on(st);
  }
}

void VirtualSpaceList::print_map(outputStream* st) const {
  VirtualSpaceNode* list = virtual_space_list();
  VirtualSpaceListIterator iter(list);
  unsigned i = 0;
  while (iter.repeat()) {
    st->print_cr("Node %u:", i);
    VirtualSpaceNode* node = iter.get_next();
    node->print_map(st, this->is_class());
    i ++;
  }
}

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  intptr_t capacity_until_GC = _capacity_until_GC;
  intptr_t new_value = capacity_until_GC + v;

  if (new_value < capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_down(max_uintx, Metaspace::commit_alignment());
  }

  intptr_t expected = _capacity_until_GC;
  intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);

  if (expected != actual) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = capacity_until_GC;
  }
  return true;
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
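  // (MaxMetaspaceSize is effectively unlimited unless it was set explicitly on the
  // command line with -XX:MaxMetaspaceSize=<size>.)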
1684 size_t committed_bytes = MetaspaceAux::committed_bytes(); 1685 if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) { 1686 return false; 1687 } 1688 1689 return true; 1690 } 1691 1692 size_t MetaspaceGC::allowed_expansion() { 1693 size_t committed_bytes = MetaspaceAux::committed_bytes(); 1694 size_t capacity_until_gc = capacity_until_GC(); 1695 1696 assert(capacity_until_gc >= committed_bytes, 1697 "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT, 1698 capacity_until_gc, committed_bytes); 1699 1700 size_t left_until_max = MaxMetaspaceSize - committed_bytes; 1701 size_t left_until_GC = capacity_until_gc - committed_bytes; 1702 size_t left_to_commit = MIN2(left_until_GC, left_until_max); 1703 1704 return left_to_commit / BytesPerWord; 1705 } 1706 1707 void MetaspaceGC::compute_new_size() { 1708 assert(_shrink_factor <= 100, "invalid shrink factor"); 1709 uint current_shrink_factor = _shrink_factor; 1710 _shrink_factor = 0; 1711 1712 // Using committed_bytes() for used_after_gc is an overestimation, since the 1713 // chunk free lists are included in committed_bytes() and the memory in an 1714 // un-fragmented chunk free list is available for future allocations. 1715 // However, if the chunk free lists becomes fragmented, then the memory may 1716 // not be available for future allocations and the memory is therefore "in use". 1717 // Including the chunk free lists in the definition of "in use" is therefore 1718 // necessary. Not including the chunk free lists can cause capacity_until_GC to 1719 // shrink below committed_bytes() and this has caused serious bugs in the past. 1720 const size_t used_after_gc = MetaspaceAux::committed_bytes(); 1721 const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC(); 1722 1723 const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0; 1724 const double maximum_used_percentage = 1.0 - minimum_free_percentage; 1725 1726 const double min_tmp = used_after_gc / maximum_used_percentage; 1727 size_t minimum_desired_capacity = 1728 (size_t)MIN2(min_tmp, double(max_uintx)); 1729 // Don't shrink less than the initial generation size 1730 minimum_desired_capacity = MAX2(minimum_desired_capacity, 1731 MetaspaceSize); 1732 1733 log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: "); 1734 log_trace(gc, metaspace)(" minimum_free_percentage: %6.2f maximum_used_percentage: %6.2f", 1735 minimum_free_percentage, maximum_used_percentage); 1736 log_trace(gc, metaspace)(" used_after_gc : %6.1fKB", used_after_gc / (double) K); 1737 1738 1739 size_t shrink_bytes = 0; 1740 if (capacity_until_GC < minimum_desired_capacity) { 1741 // If we have less capacity below the metaspace HWM, then 1742 // increment the HWM. 
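    // Worked example (illustrative numbers only): with MinMetaspaceFreeRatio = 40
    // and used_after_gc = 60M, minimum_desired_capacity above becomes
    // 60M / (1.0 - 0.40) = 100M; if capacity_until_GC is currently 80M, the
    // expand_bytes computed below is 20M (before alignment to the commit alignment).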
1743     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1744     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
1745     // Don't expand unless it's significant
1746     if (expand_bytes >= MinMetaspaceExpansion) {
1747       size_t new_capacity_until_GC = 0;
1748       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1749       assert(succeeded, "Should always successfully increment HWM when at safepoint");
1750 
1751       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1752                                                new_capacity_until_GC,
1753                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
1754       log_trace(gc, metaspace)(" expanding: minimum_desired_capacity: %6.1fKB expand_bytes: %6.1fKB MinMetaspaceExpansion: %6.1fKB new metaspace HWM: %6.1fKB",
1755                                minimum_desired_capacity / (double) K,
1756                                expand_bytes / (double) K,
1757                                MinMetaspaceExpansion / (double) K,
1758                                new_capacity_until_GC / (double) K);
1759     }
1760     return;
1761   }
1762 
1763   // No expansion, now see if we want to shrink
1764   // We would never want to shrink more than this
1765   assert(capacity_until_GC >= minimum_desired_capacity,
1766          SIZE_FORMAT " >= " SIZE_FORMAT,
1767          capacity_until_GC, minimum_desired_capacity);
1768   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1769 
1770   // Should shrinking be considered?
1771   if (MaxMetaspaceFreeRatio < 100) {
1772     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1773     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1774     const double max_tmp = used_after_gc / minimum_used_percentage;
1775     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1776     maximum_desired_capacity = MAX2(maximum_desired_capacity,
1777                                     MetaspaceSize);
1778     log_trace(gc, metaspace)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f",
1779                              maximum_free_percentage, minimum_used_percentage);
1780     log_trace(gc, metaspace)(" minimum_desired_capacity: %6.1fKB maximum_desired_capacity: %6.1fKB",
1781                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
1782 
1783     assert(minimum_desired_capacity <= maximum_desired_capacity,
1784            "sanity check");
1785 
1786     if (capacity_until_GC > maximum_desired_capacity) {
1787       // Capacity too large, compute shrinking size
1788       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1789       // We don't want to shrink all the way back to initSize if people call
1790       // System.gc(), because some programs do that between "phases" and then
1791       // we'd just have to grow the heap up again for the next phase. So we
1792       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1793       // on the third call, and 100% by the fourth call. But if we recompute
1794       // size without shrinking, it goes back to 0%.
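      // Damping example (illustrative numbers): with a constant 40M excess,
      // successive shrink attempts apply factors of 0%, 10%, 40% and 100%,
      // i.e. shrink by 0M, 4M, 16M and finally the full 40M. Note that the
      // integer division below happens first: shrink_bytes / 100 is computed
      // and then multiplied by current_shrink_factor.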
1795 shrink_bytes = shrink_bytes / 100 * current_shrink_factor; 1796 1797 shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment()); 1798 1799 assert(shrink_bytes <= max_shrink_bytes, 1800 "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, 1801 shrink_bytes, max_shrink_bytes); 1802 if (current_shrink_factor == 0) { 1803 _shrink_factor = 10; 1804 } else { 1805 _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100); 1806 } 1807 log_trace(gc, metaspace)(" shrinking: initThreshold: %.1fK maximum_desired_capacity: %.1fK", 1808 MetaspaceSize / (double) K, maximum_desired_capacity / (double) K); 1809 log_trace(gc, metaspace)(" shrink_bytes: %.1fK current_shrink_factor: %d new shrink factor: %d MinMetaspaceExpansion: %.1fK", 1810 shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K); 1811 } 1812 } 1813 1814 // Don't shrink unless it's significant 1815 if (shrink_bytes >= MinMetaspaceExpansion && 1816 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { 1817 size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes); 1818 Metaspace::tracer()->report_gc_threshold(capacity_until_GC, 1819 new_capacity_until_GC, 1820 MetaspaceGCThresholdUpdater::ComputeNewSize); 1821 } 1822 } 1823 1824 // Metadebug methods 1825 1826 void Metadebug::init_allocation_fail_alot_count() { 1827 if (MetadataAllocationFailALot) { 1828 _allocation_fail_alot_count = 1829 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0)); 1830 } 1831 } 1832 1833 #ifdef ASSERT 1834 bool Metadebug::test_metadata_failure() { 1835 if (MetadataAllocationFailALot && 1836 Threads::is_vm_complete()) { 1837 if (_allocation_fail_alot_count > 0) { 1838 _allocation_fail_alot_count--; 1839 } else { 1840 log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot"); 1841 init_allocation_fail_alot_count(); 1842 return true; 1843 } 1844 } 1845 return false; 1846 } 1847 #endif 1848 1849 // ChunkManager methods 1850 size_t ChunkManager::free_chunks_total_words() { 1851 return _free_chunks_total_words; 1852 } 1853 1854 size_t ChunkManager::free_chunks_total_bytes() { 1855 return free_chunks_total_words() * BytesPerWord; 1856 } 1857 1858 // Update internal accounting after a chunk was added 1859 void ChunkManager::account_for_added_chunk(const Metachunk* c) { 1860 assert_lock_strong(SpaceManager::expand_lock()); 1861 _free_chunks_count ++; 1862 _free_chunks_total_words += c->word_size(); 1863 } 1864 1865 // Update internal accounting after a chunk was removed 1866 void ChunkManager::account_for_removed_chunk(const Metachunk* c) { 1867 assert_lock_strong(SpaceManager::expand_lock()); 1868 assert(_free_chunks_count >= 1, 1869 "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count); 1870 assert(_free_chunks_total_words >= c->word_size(), 1871 "ChunkManager::_free_chunks_total: about to go negative" 1872 "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total_words, c->word_size()); 1873 _free_chunks_count --; 1874 _free_chunks_total_words -= c->word_size(); 1875 } 1876 1877 size_t ChunkManager::free_chunks_count() { 1878 #ifdef ASSERT 1879 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) { 1880 MutexLockerEx cl(SpaceManager::expand_lock(), 1881 Mutex::_no_safepoint_check_flag); 1882 // This lock is only needed in debug because the verification 1883 // of the _free_chunks_totals walks the list of free chunks 1884 
slow_locked_verify_free_chunks_count(); 1885 } 1886 #endif 1887 return _free_chunks_count; 1888 } 1889 1890 ChunkIndex ChunkManager::list_index(size_t size) { 1891 if (size_by_index(SpecializedIndex) == size) { 1892 return SpecializedIndex; 1893 } 1894 if (size_by_index(SmallIndex) == size) { 1895 return SmallIndex; 1896 } 1897 const size_t med_size = size_by_index(MediumIndex); 1898 if (med_size == size) { 1899 return MediumIndex; 1900 } 1901 1902 assert(size > med_size, "Not a humongous chunk"); 1903 return HumongousIndex; 1904 } 1905 1906 size_t ChunkManager::size_by_index(ChunkIndex index) const { 1907 index_bounds_check(index); 1908 assert(index != HumongousIndex, "Do not call for humongous chunks."); 1909 return _free_chunks[index].size(); 1910 } 1911 1912 void ChunkManager::locked_verify_free_chunks_total() { 1913 assert_lock_strong(SpaceManager::expand_lock()); 1914 assert(sum_free_chunks() == _free_chunks_total_words, 1915 "_free_chunks_total " SIZE_FORMAT " is not the" 1916 " same as sum " SIZE_FORMAT, _free_chunks_total_words, 1917 sum_free_chunks()); 1918 } 1919 1920 void ChunkManager::locked_verify_free_chunks_count() { 1921 assert_lock_strong(SpaceManager::expand_lock()); 1922 assert(sum_free_chunks_count() == _free_chunks_count, 1923 "_free_chunks_count " SIZE_FORMAT " is not the" 1924 " same as sum " SIZE_FORMAT, _free_chunks_count, 1925 sum_free_chunks_count()); 1926 } 1927 1928 void ChunkManager::verify() { 1929 MutexLockerEx cl(SpaceManager::expand_lock(), 1930 Mutex::_no_safepoint_check_flag); 1931 locked_verify(); 1932 } 1933 1934 void ChunkManager::locked_verify() { 1935 locked_verify_free_chunks_count(); 1936 locked_verify_free_chunks_total(); 1937 } 1938 1939 ChunkList* ChunkManager::free_chunks(ChunkIndex index) { 1940 assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex, 1941 "Bad index: %d", (int)index); 1942 1943 return &_free_chunks[index]; 1944 } 1945 1946 // These methods that sum the free chunk lists are used in printing 1947 // methods that are used in product builds. 
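// Informal sketch of what the two sums below compute (not a strict contract):
//   sum_free_chunks()       = sum over the three fixed-size free lists of
//                             list->count() * list->size()
//                             + humongous_dictionary()->total_size()
//   sum_free_chunks_count() = sum over the same lists of list->count()
//                             + humongous_dictionary()->total_free_blocks()
// Both walks require expand_lock() and are what the locked_verify_* functions
// above compare against the cached _free_chunks_total_words / _free_chunks_count.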
1948 size_t ChunkManager::sum_free_chunks() { 1949 assert_lock_strong(SpaceManager::expand_lock()); 1950 size_t result = 0; 1951 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { 1952 ChunkList* list = free_chunks(i); 1953 1954 if (list == NULL) { 1955 continue; 1956 } 1957 1958 result = result + list->count() * list->size(); 1959 } 1960 result = result + humongous_dictionary()->total_size(); 1961 return result; 1962 } 1963 1964 size_t ChunkManager::sum_free_chunks_count() { 1965 assert_lock_strong(SpaceManager::expand_lock()); 1966 size_t count = 0; 1967 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { 1968 ChunkList* list = free_chunks(i); 1969 if (list == NULL) { 1970 continue; 1971 } 1972 count = count + list->count(); 1973 } 1974 count = count + humongous_dictionary()->total_free_blocks(); 1975 return count; 1976 } 1977 1978 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) { 1979 ChunkIndex index = list_index(word_size); 1980 assert(index < HumongousIndex, "No humongous list"); 1981 return free_chunks(index); 1982 } 1983 1984 Metachunk* ChunkManager::free_chunks_get(size_t word_size) { 1985 assert_lock_strong(SpaceManager::expand_lock()); 1986 1987 slow_locked_verify(); 1988 1989 Metachunk* chunk = NULL; 1990 if (list_index(word_size) != HumongousIndex) { 1991 ChunkList* free_list = find_free_chunks_list(word_size); 1992 assert(free_list != NULL, "Sanity check"); 1993 1994 chunk = free_list->head(); 1995 1996 if (chunk == NULL) { 1997 return NULL; 1998 } 1999 2000 // Remove the chunk as the head of the list. 2001 free_list->remove_chunk(chunk); 2002 2003 log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT, 2004 p2i(free_list), p2i(chunk), chunk->word_size()); 2005 } else { 2006 chunk = humongous_dictionary()->get_chunk(word_size); 2007 2008 if (chunk == NULL) { 2009 return NULL; 2010 } 2011 2012 log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT, 2013 chunk->word_size(), word_size, chunk->word_size() - word_size); 2014 } 2015 2016 // Chunk has been removed from the chunk manager; update counters. 2017 account_for_removed_chunk(chunk); 2018 2019 // Remove it from the links to this freelist 2020 chunk->set_next(NULL); 2021 chunk->set_prev(NULL); 2022 2023 // Chunk is no longer on any freelist. Setting to false make container_count_slow() 2024 // work. 
2025 chunk->set_is_tagged_free(false); 2026 chunk->container()->inc_container_count(); 2027 2028 slow_locked_verify(); 2029 return chunk; 2030 } 2031 2032 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) { 2033 assert_lock_strong(SpaceManager::expand_lock()); 2034 slow_locked_verify(); 2035 2036 // Take from the beginning of the list 2037 Metachunk* chunk = free_chunks_get(word_size); 2038 if (chunk == NULL) { 2039 return NULL; 2040 } 2041 2042 assert((word_size <= chunk->word_size()) || 2043 (list_index(chunk->word_size()) == HumongousIndex), 2044 "Non-humongous variable sized chunk"); 2045 LogTarget(Debug, gc, metaspace, freelist) lt; 2046 if (lt.is_enabled()) { 2047 size_t list_count; 2048 if (list_index(word_size) < HumongousIndex) { 2049 ChunkList* list = find_free_chunks_list(word_size); 2050 list_count = list->count(); 2051 } else { 2052 list_count = humongous_dictionary()->total_count(); 2053 } 2054 LogStream ls(lt); 2055 ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ", 2056 p2i(this), p2i(chunk), chunk->word_size(), list_count); 2057 ResourceMark rm; 2058 locked_print_free_chunks(&ls); 2059 } 2060 2061 return chunk; 2062 } 2063 2064 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) { 2065 assert_lock_strong(SpaceManager::expand_lock()); 2066 assert(chunk != NULL, "Expected chunk."); 2067 assert(chunk->container() != NULL, "Container should have been set."); 2068 assert(chunk->is_tagged_free() == false, "Chunk should be in use."); 2069 index_bounds_check(index); 2070 2071 // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not 2072 // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary 2073 // keeps tree node pointers in the chunk payload area which mangle will overwrite. 2074 NOT_PRODUCT(chunk->mangle(badMetaWordVal);) 2075 2076 if (index != HumongousIndex) { 2077 // Return non-humongous chunk to freelist. 2078 ChunkList* list = free_chunks(index); 2079 assert(list->size() == chunk->word_size(), "Wrong chunk type."); 2080 list->return_chunk_at_head(chunk); 2081 log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.", 2082 chunk_size_name(index), p2i(chunk)); 2083 } else { 2084 // Return humongous chunk to dictionary. 2085 assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type."); 2086 assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0, 2087 "Humongous chunk has wrong alignment."); 2088 _humongous_dictionary.return_chunk(chunk); 2089 log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.", 2090 chunk_size_name(index), p2i(chunk), chunk->word_size()); 2091 } 2092 chunk->container()->dec_container_count(); 2093 chunk->set_is_tagged_free(true); 2094 2095 // Chunk has been added; update counters. 
2096 account_for_added_chunk(chunk); 2097 2098 } 2099 2100 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) { 2101 index_bounds_check(index); 2102 if (chunks == NULL) { 2103 return; 2104 } 2105 LogTarget(Trace, gc, metaspace, freelist) log; 2106 if (log.is_enabled()) { // tracing 2107 log.print("returning list of %s chunks...", chunk_size_name(index)); 2108 } 2109 unsigned num_chunks_returned = 0; 2110 size_t size_chunks_returned = 0; 2111 Metachunk* cur = chunks; 2112 while (cur != NULL) { 2113 // Capture the next link before it is changed 2114 // by the call to return_chunk_at_head(); 2115 Metachunk* next = cur->next(); 2116 if (log.is_enabled()) { // tracing 2117 num_chunks_returned ++; 2118 size_chunks_returned += cur->word_size(); 2119 } 2120 return_single_chunk(index, cur); 2121 cur = next; 2122 } 2123 if (log.is_enabled()) { // tracing 2124 log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".", 2125 num_chunks_returned, chunk_size_name(index), size_chunks_returned); 2126 if (index != HumongousIndex) { 2127 log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size()); 2128 } else { 2129 log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count()); 2130 } 2131 } 2132 } 2133 2134 void ChunkManager::locked_get_statistics(ChunkManagerStatistics* stat) const { 2135 assert_lock_strong(SpaceManager::expand_lock()); 2136 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { 2137 stat->num_by_type[i] = num_free_chunks(i); 2138 stat->single_size_by_type[i] = size_by_index(i); 2139 stat->total_size_by_type[i] = size_free_chunks_in_bytes(i); 2140 } 2141 stat->num_humongous_chunks = num_free_chunks(HumongousIndex); 2142 stat->total_size_humongous_chunks = size_free_chunks_in_bytes(HumongousIndex); 2143 } 2144 2145 void ChunkManager::get_statistics(ChunkManagerStatistics* stat) const { 2146 MutexLockerEx cl(SpaceManager::expand_lock(), 2147 Mutex::_no_safepoint_check_flag); 2148 locked_get_statistics(stat); 2149 } 2150 2151 void ChunkManager::print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale) { 2152 size_t total = 0; 2153 assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale"); 2154 2155 const char* unit = scale_unit(scale); 2156 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { 2157 out->print(" " SIZE_FORMAT " %s (" SIZE_FORMAT " bytes) chunks, total ", 2158 stat->num_by_type[i], chunk_size_name(i), 2159 stat->single_size_by_type[i]); 2160 if (scale == 1) { 2161 out->print_cr(SIZE_FORMAT " bytes", stat->total_size_by_type[i]); 2162 } else { 2163 out->print_cr("%.2f%s", (float)stat->total_size_by_type[i] / scale, unit); 2164 } 2165 2166 total += stat->total_size_by_type[i]; 2167 } 2168 2169 2170 total += stat->total_size_humongous_chunks; 2171 2172 if (scale == 1) { 2173 out->print_cr(" " SIZE_FORMAT " humongous chunks, total " SIZE_FORMAT " bytes", 2174 stat->num_humongous_chunks, stat->total_size_humongous_chunks); 2175 2176 out->print_cr(" total size: " SIZE_FORMAT " bytes.", total); 2177 } else { 2178 out->print_cr(" " SIZE_FORMAT " humongous chunks, total %.2f%s", 2179 stat->num_humongous_chunks, 2180 (float)stat->total_size_humongous_chunks / scale, unit); 2181 2182 out->print_cr(" total size: %.2f%s.", (float)total / scale, unit); 2183 } 2184 2185 } 2186 2187 void ChunkManager::locked_print_on(outputStream* out, size_t scale = 1) const { 2188 
assert_lock_strong(SpaceManager::expand_lock()); 2189 ChunkManagerStatistics stat; 2190 locked_get_statistics(&stat); 2191 print_statistics(&stat, out, scale); 2192 } 2193 2194 void ChunkManager::print_on(outputStream* out, size_t scale = 1) const { 2195 ChunkManagerStatistics stat; 2196 { 2197 // Print out of lock protection 2198 MutexLockerEx cl(SpaceManager::expand_lock(), 2199 Mutex::_no_safepoint_check_flag); 2200 locked_get_statistics(&stat); 2201 } 2202 print_statistics(&stat, out, scale); 2203 } 2204 2205 void ChunkManager::print_all_chunkmanagers_on(outputStream* out, size_t scale) { 2206 assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale"); 2207 2208 out->print_cr("Chunkmanager (non-class):"); 2209 const ChunkManager* const non_class_cm = Metaspace::chunk_manager_metadata(); 2210 if (non_class_cm != NULL) { 2211 non_class_cm->print_on(out, scale); 2212 } else { 2213 out->print_cr("unavailable."); 2214 } 2215 out->print_cr("Chunkmanager (class):"); 2216 const ChunkManager* const class_cm = Metaspace::chunk_manager_class(); 2217 if (class_cm != NULL) { 2218 class_cm->print_on(out, scale); 2219 } else { 2220 out->print_cr("unavailable."); 2221 } 2222 } 2223 2224 // SpaceManager methods 2225 2226 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) { 2227 size_t chunk_sizes[] = { 2228 specialized_chunk_size(is_class_space), 2229 small_chunk_size(is_class_space), 2230 medium_chunk_size(is_class_space) 2231 }; 2232 2233 // Adjust up to one of the fixed chunk sizes ... 2234 for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) { 2235 if (requested <= chunk_sizes[i]) { 2236 return chunk_sizes[i]; 2237 } 2238 } 2239 2240 // ... or return the size as a humongous chunk. 2241 return requested; 2242 } 2243 2244 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const { 2245 return adjust_initial_chunk_size(requested, is_class()); 2246 } 2247 2248 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const { 2249 size_t requested; 2250 2251 if (is_class()) { 2252 switch (type) { 2253 case Metaspace::BootMetaspaceType: requested = Metaspace::first_class_chunk_word_size(); break; 2254 case Metaspace::AnonymousMetaspaceType: requested = ClassSpecializedChunk; break; 2255 case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break; 2256 default: requested = ClassSmallChunk; break; 2257 } 2258 } else { 2259 switch (type) { 2260 case Metaspace::BootMetaspaceType: requested = Metaspace::first_chunk_word_size(); break; 2261 case Metaspace::AnonymousMetaspaceType: requested = SpecializedChunk; break; 2262 case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break; 2263 default: requested = SmallChunk; break; 2264 } 2265 } 2266 2267 // Adjust to one of the fixed chunk sizes (unless humongous) 2268 const size_t adjusted = adjust_initial_chunk_size(requested); 2269 2270 assert(adjusted != 0, "Incorrect initial chunk size. 
Requested: "
2271          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
2272 
2273   return adjusted;
2274 }
2275 
2276 size_t SpaceManager::sum_free_in_chunks_in_use() const {
2277   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2278   size_t free = 0;
2279   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2280     Metachunk* chunk = chunks_in_use(i);
2281     while (chunk != NULL) {
2282       free += chunk->free_word_size();
2283       chunk = chunk->next();
2284     }
2285   }
2286   return free;
2287 }
2288 
2289 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
2290   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2291   size_t result = 0;
2292   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2293     result += sum_waste_in_chunks_in_use(i);
2294   }
2295 
2296   return result;
2297 }
2298 
2299 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
2300   size_t result = 0;
2301   Metachunk* chunk = chunks_in_use(index);
2302   // Count the free space in all the chunks but not the
2303   // current chunk from which allocations are still being done.
2304   while (chunk != NULL) {
2305     if (chunk != current_chunk()) {
2306       result += chunk->free_word_size();
2307     }
2308     chunk = chunk->next();
2309   }
2310   return result;
2311 }
2312 
2313 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
2314   // For CMS use "allocated_chunks_words()" which does not need the
2315   // Metaspace lock. For the other collectors sum over the chunk
2316   // lists. sum_capacity_in_chunks_in_use() is too expensive to use
2317   // in product builds, so allocated_chunks_words() is the value that
2318   // should normally be used; the walk over the lists is kept as a
2319   // cross-check that allocated_chunks_words() returns the same value
2320   // as sum_capacity_in_chunks_in_use(), which is the definitive
2321   // answer.
2322 if (UseConcMarkSweepGC) { 2323 return allocated_chunks_words(); 2324 } else { 2325 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 2326 size_t sum = 0; 2327 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2328 Metachunk* chunk = chunks_in_use(i); 2329 while (chunk != NULL) { 2330 sum += chunk->word_size(); 2331 chunk = chunk->next(); 2332 } 2333 } 2334 return sum; 2335 } 2336 } 2337 2338 size_t SpaceManager::sum_count_in_chunks_in_use() { 2339 size_t count = 0; 2340 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2341 count = count + sum_count_in_chunks_in_use(i); 2342 } 2343 2344 return count; 2345 } 2346 2347 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) { 2348 size_t count = 0; 2349 Metachunk* chunk = chunks_in_use(i); 2350 while (chunk != NULL) { 2351 count++; 2352 chunk = chunk->next(); 2353 } 2354 return count; 2355 } 2356 2357 2358 size_t SpaceManager::sum_used_in_chunks_in_use() const { 2359 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 2360 size_t used = 0; 2361 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2362 Metachunk* chunk = chunks_in_use(i); 2363 while (chunk != NULL) { 2364 used += chunk->used_word_size(); 2365 chunk = chunk->next(); 2366 } 2367 } 2368 return used; 2369 } 2370 2371 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const { 2372 2373 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2374 Metachunk* chunk = chunks_in_use(i); 2375 st->print("SpaceManager: %s " PTR_FORMAT, 2376 chunk_size_name(i), p2i(chunk)); 2377 if (chunk != NULL) { 2378 st->print_cr(" free " SIZE_FORMAT, 2379 chunk->free_word_size()); 2380 } else { 2381 st->cr(); 2382 } 2383 } 2384 2385 chunk_manager()->print_on(st, 1024); 2386 } 2387 2388 size_t SpaceManager::calc_chunk_size(size_t word_size) { 2389 2390 // Decide between a small chunk and a medium chunk. Up to 2391 // _small_chunk_limit small chunks can be allocated. 2392 // After that a medium chunk is preferred. 2393 size_t chunk_word_size; 2394 2395 // Special case for anonymous metadata space. 2396 // Anonymous metadata space is usually small, with majority within 1K - 2K range and 2397 // rarely about 4K (64-bits JVM). 2398 // Instead of jumping to SmallChunk after initial chunk exhausted, keeping allocation 2399 // from SpecializeChunk up to _anon_metadata_specialize_chunk_limit (4) reduces space waste 2400 // from 60+% to around 30%. 2401 if (_space_type == Metaspace::AnonymousMetaspaceType && 2402 _mdtype == Metaspace::NonClassType && 2403 sum_count_in_chunks_in_use(SpecializedIndex) < _anon_metadata_specialize_chunk_limit && 2404 word_size + Metachunk::overhead() <= SpecializedChunk) { 2405 return SpecializedChunk; 2406 } 2407 2408 if (chunks_in_use(MediumIndex) == NULL && 2409 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) { 2410 chunk_word_size = (size_t) small_chunk_size(); 2411 if (word_size + Metachunk::overhead() > small_chunk_size()) { 2412 chunk_word_size = medium_chunk_size(); 2413 } 2414 } else { 2415 chunk_word_size = medium_chunk_size(); 2416 } 2417 2418 // Might still need a humongous chunk. Enforce 2419 // humongous allocations sizes to be aligned up to 2420 // the smallest chunk size. 
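  // Informal note: if word_size + Metachunk::overhead() is larger than
  // medium_chunk_size(), the humongous-sized value computed below wins the
  // MAX2 further down and the request is served by a humongous chunk whose
  // size is that sum aligned up to smallest_chunk_size().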
2421 size_t if_humongous_sized_chunk = 2422 align_up(word_size + Metachunk::overhead(), 2423 smallest_chunk_size()); 2424 chunk_word_size = 2425 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk); 2426 2427 assert(!SpaceManager::is_humongous(word_size) || 2428 chunk_word_size == if_humongous_sized_chunk, 2429 "Size calculation is wrong, word_size " SIZE_FORMAT 2430 " chunk_word_size " SIZE_FORMAT, 2431 word_size, chunk_word_size); 2432 Log(gc, metaspace, alloc) log; 2433 if (log.is_debug() && SpaceManager::is_humongous(word_size)) { 2434 log.debug("Metadata humongous allocation:"); 2435 log.debug(" word_size " PTR_FORMAT, word_size); 2436 log.debug(" chunk_word_size " PTR_FORMAT, chunk_word_size); 2437 log.debug(" chunk overhead " PTR_FORMAT, Metachunk::overhead()); 2438 } 2439 return chunk_word_size; 2440 } 2441 2442 void SpaceManager::track_metaspace_memory_usage() { 2443 if (is_init_completed()) { 2444 if (is_class()) { 2445 MemoryService::track_compressed_class_memory_usage(); 2446 } 2447 MemoryService::track_metaspace_memory_usage(); 2448 } 2449 } 2450 2451 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) { 2452 assert(vs_list()->current_virtual_space() != NULL, 2453 "Should have been set"); 2454 assert(current_chunk() == NULL || 2455 current_chunk()->allocate(word_size) == NULL, 2456 "Don't need to expand"); 2457 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 2458 2459 if (log_is_enabled(Trace, gc, metaspace, freelist)) { 2460 size_t words_left = 0; 2461 size_t words_used = 0; 2462 if (current_chunk() != NULL) { 2463 words_left = current_chunk()->free_word_size(); 2464 words_used = current_chunk()->used_word_size(); 2465 } 2466 log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left", 2467 word_size, words_used, words_left); 2468 } 2469 2470 // Get another chunk 2471 size_t chunk_word_size = calc_chunk_size(word_size); 2472 Metachunk* next = get_new_chunk(chunk_word_size); 2473 2474 MetaWord* mem = NULL; 2475 2476 // If a chunk was available, add it to the in-use chunk list 2477 // and do an allocation from it. 2478 if (next != NULL) { 2479 // Add to this manager's list of chunks in use. 2480 add_chunk(next, false); 2481 mem = next->allocate(word_size); 2482 } 2483 2484 // Track metaspace memory usage statistic. 2485 track_metaspace_memory_usage(); 2486 2487 return mem; 2488 } 2489 2490 void SpaceManager::print_on(outputStream* st) const { 2491 2492 for (ChunkIndex i = ZeroIndex; 2493 i < NumberOfInUseLists ; 2494 i = next_chunk_index(i) ) { 2495 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT, 2496 p2i(chunks_in_use(i)), 2497 chunks_in_use(i) == NULL ? 
0 : chunks_in_use(i)->word_size()); 2498 } 2499 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT 2500 " Humongous " SIZE_FORMAT, 2501 sum_waste_in_chunks_in_use(SmallIndex), 2502 sum_waste_in_chunks_in_use(MediumIndex), 2503 sum_waste_in_chunks_in_use(HumongousIndex)); 2504 // block free lists 2505 if (block_freelists() != NULL) { 2506 st->print_cr("total in block free lists " SIZE_FORMAT, 2507 block_freelists()->total_size()); 2508 } 2509 } 2510 2511 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype, 2512 Metaspace::MetaspaceType space_type, 2513 Mutex* lock) : 2514 _mdtype(mdtype), 2515 _space_type(space_type), 2516 _allocated_blocks_words(0), 2517 _allocated_chunks_words(0), 2518 _allocated_chunks_count(0), 2519 _block_freelists(NULL), 2520 _lock(lock) 2521 { 2522 initialize(); 2523 } 2524 2525 void SpaceManager::inc_size_metrics(size_t words) { 2526 assert_lock_strong(SpaceManager::expand_lock()); 2527 // Total of allocated Metachunks and allocated Metachunks count 2528 // for each SpaceManager 2529 _allocated_chunks_words = _allocated_chunks_words + words; 2530 _allocated_chunks_count++; 2531 // Global total of capacity in allocated Metachunks 2532 MetaspaceAux::inc_capacity(mdtype(), words); 2533 // Global total of allocated Metablocks. 2534 // used_words_slow() includes the overhead in each 2535 // Metachunk so include it in the used when the 2536 // Metachunk is first added (so only added once per 2537 // Metachunk). 2538 MetaspaceAux::inc_used(mdtype(), Metachunk::overhead()); 2539 } 2540 2541 void SpaceManager::inc_used_metrics(size_t words) { 2542 // Add to the per SpaceManager total 2543 Atomic::add(words, &_allocated_blocks_words); 2544 // Add to the global total 2545 MetaspaceAux::inc_used(mdtype(), words); 2546 } 2547 2548 void SpaceManager::dec_total_from_size_metrics() { 2549 MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words()); 2550 MetaspaceAux::dec_used(mdtype(), allocated_blocks_words()); 2551 // Also deduct the overhead per Metachunk 2552 MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead()); 2553 } 2554 2555 void SpaceManager::initialize() { 2556 Metadebug::init_allocation_fail_alot_count(); 2557 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2558 _chunks_in_use[i] = NULL; 2559 } 2560 _current_chunk = NULL; 2561 log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this)); 2562 } 2563 2564 SpaceManager::~SpaceManager() { 2565 // This call this->_lock which can't be done while holding expand_lock() 2566 assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(), 2567 "sum_capacity_in_chunks_in_use() " SIZE_FORMAT 2568 " allocated_chunks_words() " SIZE_FORMAT, 2569 sum_capacity_in_chunks_in_use(), allocated_chunks_words()); 2570 2571 MutexLockerEx fcl(SpaceManager::expand_lock(), 2572 Mutex::_no_safepoint_check_flag); 2573 2574 chunk_manager()->slow_locked_verify(); 2575 2576 dec_total_from_size_metrics(); 2577 2578 Log(gc, metaspace, freelist) log; 2579 if (log.is_trace()) { 2580 log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this)); 2581 ResourceMark rm; 2582 LogStream ls(log.trace()); 2583 locked_print_chunks_in_use_on(&ls); 2584 if (block_freelists() != NULL) { 2585 block_freelists()->print_on(&ls); 2586 } 2587 } 2588 2589 // Add all the chunks in use by this space manager 2590 // to the global list of free chunks. 2591 2592 // Follow each list of chunks-in-use and add them to the 2593 // free lists. Each list is NULL terminated. 
2594 
2595   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
2596     Metachunk* chunks = chunks_in_use(i);
2597     chunk_manager()->return_chunk_list(i, chunks);
2598     set_chunks_in_use(i, NULL);
2599   }
2600 
2601   chunk_manager()->slow_locked_verify();
2602 
2603   if (_block_freelists != NULL) {
2604     delete _block_freelists;
2605   }
2606 }
2607 
2608 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2609   assert_lock_strong(_lock);
2610   // Allocations and deallocations are in raw_word_size
2611   size_t raw_word_size = get_allocation_word_size(word_size);
2612   // Lazily create a block_freelist
2613   if (block_freelists() == NULL) {
2614     _block_freelists = new BlockFreelist();
2615   }
2616   block_freelists()->return_block(p, raw_word_size);
2617 }
2618 
2619 // Adds a chunk to the list of chunks in use.
2620 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2621 
2622   assert(new_chunk != NULL, "Should not be NULL");
2623   assert(new_chunk->next() == NULL, "Should not be on a list");
2624 
2625   new_chunk->reset_empty();
2626 
2627   // Find the correct list and set the current
2628   // chunk for that list.
2629   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
2630 
2631   if (index != HumongousIndex) {
2632     retire_current_chunk();
2633     set_current_chunk(new_chunk);
2634     new_chunk->set_next(chunks_in_use(index));
2635     set_chunks_in_use(index, new_chunk);
2636   } else {
2637     // For null class loader data and DumpSharedSpaces, the first chunk isn't
2638     // small, so small will be null. Link this first chunk as the current
2639     // chunk.
2640     if (make_current) {
2641       // Set as the current chunk but otherwise treat as a humongous chunk.
2642       set_current_chunk(new_chunk);
2643     }
2644     // Link at head. The _current_chunk only points to a humongous chunk for
2645     // the null class loader metaspace (class and data virtual space managers);
2646     // since new humongous chunks are linked at the head, it will never point
2647     // to the tail of the humongous chunks list.
2648 new_chunk->set_next(chunks_in_use(HumongousIndex)); 2649 set_chunks_in_use(HumongousIndex, new_chunk); 2650 2651 assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency"); 2652 } 2653 2654 // Add to the running sum of capacity 2655 inc_size_metrics(new_chunk->word_size()); 2656 2657 assert(new_chunk->is_empty(), "Not ready for reuse"); 2658 Log(gc, metaspace, freelist) log; 2659 if (log.is_trace()) { 2660 log.trace("SpaceManager::add_chunk: " SIZE_FORMAT ") ", sum_count_in_chunks_in_use()); 2661 ResourceMark rm; 2662 LogStream ls(log.trace()); 2663 new_chunk->print_on(&ls); 2664 chunk_manager()->locked_print_free_chunks(&ls); 2665 } 2666 } 2667 2668 void SpaceManager::retire_current_chunk() { 2669 if (current_chunk() != NULL) { 2670 size_t remaining_words = current_chunk()->free_word_size(); 2671 if (remaining_words >= BlockFreelist::min_dictionary_size()) { 2672 MetaWord* ptr = current_chunk()->allocate(remaining_words); 2673 deallocate(ptr, remaining_words); 2674 inc_used_metrics(remaining_words); 2675 } 2676 } 2677 } 2678 2679 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) { 2680 // Get a chunk from the chunk freelist 2681 Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size); 2682 2683 if (next == NULL) { 2684 next = vs_list()->get_new_chunk(chunk_word_size, 2685 medium_chunk_bunch()); 2686 } 2687 2688 Log(gc, metaspace, alloc) log; 2689 if (log.is_debug() && next != NULL && 2690 SpaceManager::is_humongous(next->word_size())) { 2691 log.debug(" new humongous chunk word size " PTR_FORMAT, next->word_size()); 2692 } 2693 2694 return next; 2695 } 2696 2697 /* 2698 * The policy is to allocate up to _small_chunk_limit small chunks 2699 * after which only medium chunks are allocated. This is done to 2700 * reduce fragmentation. In some cases, this can result in a lot 2701 * of small chunks being allocated to the point where it's not 2702 * possible to expand. If this happens, there may be no medium chunks 2703 * available and OOME would be thrown. Instead of doing that, 2704 * if the allocation request size fits in a small chunk, an attempt 2705 * will be made to allocate a small chunk. 2706 */ 2707 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) { 2708 size_t raw_word_size = get_allocation_word_size(word_size); 2709 2710 if (raw_word_size + Metachunk::overhead() > small_chunk_size()) { 2711 return NULL; 2712 } 2713 2714 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 2715 MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag); 2716 2717 Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size()); 2718 2719 MetaWord* mem = NULL; 2720 2721 if (chunk != NULL) { 2722 // Add chunk to the in-use chunk list and do an allocation from it. 2723 // Add to this manager's list of chunks in use. 2724 add_chunk(chunk, false); 2725 mem = chunk->allocate(raw_word_size); 2726 2727 inc_used_metrics(raw_word_size); 2728 2729 // Track metaspace memory usage statistic. 2730 track_metaspace_memory_usage(); 2731 } 2732 2733 return mem; 2734 } 2735 2736 MetaWord* SpaceManager::allocate(size_t word_size) { 2737 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 2738 size_t raw_word_size = get_allocation_word_size(word_size); 2739 BlockFreelist* fl = block_freelists(); 2740 MetaWord* p = NULL; 2741 // Allocation from the dictionary is expensive in the sense that 2742 // the dictionary has to be searched for a size. Don't allocate 2743 // from the dictionary until it starts to get fat. 
Is this 2744 // a reasonable policy? Maybe an skinny dictionary is fast enough 2745 // for allocations. Do some profiling. JJJ 2746 if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) { 2747 p = fl->get_block(raw_word_size); 2748 } 2749 if (p == NULL) { 2750 p = allocate_work(raw_word_size); 2751 } 2752 2753 return p; 2754 } 2755 2756 // Returns the address of spaced allocated for "word_size". 2757 // This methods does not know about blocks (Metablocks) 2758 MetaWord* SpaceManager::allocate_work(size_t word_size) { 2759 assert_lock_strong(_lock); 2760 #ifdef ASSERT 2761 if (Metadebug::test_metadata_failure()) { 2762 return NULL; 2763 } 2764 #endif 2765 // Is there space in the current chunk? 2766 MetaWord* result = NULL; 2767 2768 if (current_chunk() != NULL) { 2769 result = current_chunk()->allocate(word_size); 2770 } 2771 2772 if (result == NULL) { 2773 result = grow_and_allocate(word_size); 2774 } 2775 2776 if (result != NULL) { 2777 inc_used_metrics(word_size); 2778 assert(result != (MetaWord*) chunks_in_use(MediumIndex), 2779 "Head of the list is being allocated"); 2780 } 2781 2782 return result; 2783 } 2784 2785 void SpaceManager::verify() { 2786 // If there are blocks in the dictionary, then 2787 // verification of chunks does not work since 2788 // being in the dictionary alters a chunk. 2789 if (block_freelists() != NULL && block_freelists()->total_size() == 0) { 2790 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2791 Metachunk* curr = chunks_in_use(i); 2792 while (curr != NULL) { 2793 curr->verify(); 2794 verify_chunk_size(curr); 2795 curr = curr->next(); 2796 } 2797 } 2798 } 2799 } 2800 2801 void SpaceManager::verify_chunk_size(Metachunk* chunk) { 2802 assert(is_humongous(chunk->word_size()) || 2803 chunk->word_size() == medium_chunk_size() || 2804 chunk->word_size() == small_chunk_size() || 2805 chunk->word_size() == specialized_chunk_size(), 2806 "Chunk size is wrong"); 2807 return; 2808 } 2809 2810 #ifdef ASSERT 2811 void SpaceManager::verify_allocated_blocks_words() { 2812 // Verification is only guaranteed at a safepoint. 2813 assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(), 2814 "Verification can fail if the applications is running"); 2815 assert(allocated_blocks_words() == sum_used_in_chunks_in_use(), 2816 "allocation total is not consistent " SIZE_FORMAT 2817 " vs " SIZE_FORMAT, 2818 allocated_blocks_words(), sum_used_in_chunks_in_use()); 2819 } 2820 2821 #endif 2822 2823 void SpaceManager::dump(outputStream* const out) const { 2824 size_t curr_total = 0; 2825 size_t waste = 0; 2826 uint i = 0; 2827 size_t used = 0; 2828 size_t capacity = 0; 2829 2830 // Add up statistics for all chunks in this SpaceManager. 2831 for (ChunkIndex index = ZeroIndex; 2832 index < NumberOfInUseLists; 2833 index = next_chunk_index(index)) { 2834 for (Metachunk* curr = chunks_in_use(index); 2835 curr != NULL; 2836 curr = curr->next()) { 2837 out->print("%d) ", i++); 2838 curr->print_on(out); 2839 curr_total += curr->word_size(); 2840 used += curr->used_word_size(); 2841 capacity += curr->word_size(); 2842 waste += curr->free_word_size() + curr->overhead();; 2843 } 2844 } 2845 2846 if (log_is_enabled(Trace, gc, metaspace, freelist)) { 2847 if (block_freelists() != NULL) block_freelists()->print_on(out); 2848 } 2849 2850 size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size(); 2851 // Free space isn't wasted. 
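  // Informally: the waste accumulated above is the sum over all chunks of
  // free_word_size() + overhead(); the free words still usable in the current
  // chunk are subtracted below because they are not actually wasted.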
2852 waste -= free; 2853 2854 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT 2855 " free " SIZE_FORMAT " capacity " SIZE_FORMAT 2856 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste); 2857 } 2858 2859 // MetaspaceAux 2860 2861 2862 size_t MetaspaceAux::_capacity_words[] = {0, 0}; 2863 volatile size_t MetaspaceAux::_used_words[] = {0, 0}; 2864 2865 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) { 2866 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2867 return list == NULL ? 0 : list->free_bytes(); 2868 } 2869 2870 size_t MetaspaceAux::free_bytes() { 2871 return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType); 2872 } 2873 2874 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) { 2875 assert_lock_strong(SpaceManager::expand_lock()); 2876 assert(words <= capacity_words(mdtype), 2877 "About to decrement below 0: words " SIZE_FORMAT 2878 " is greater than _capacity_words[%u] " SIZE_FORMAT, 2879 words, mdtype, capacity_words(mdtype)); 2880 _capacity_words[mdtype] -= words; 2881 } 2882 2883 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) { 2884 assert_lock_strong(SpaceManager::expand_lock()); 2885 // Needs to be atomic 2886 _capacity_words[mdtype] += words; 2887 } 2888 2889 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) { 2890 assert(words <= used_words(mdtype), 2891 "About to decrement below 0: words " SIZE_FORMAT 2892 " is greater than _used_words[%u] " SIZE_FORMAT, 2893 words, mdtype, used_words(mdtype)); 2894 // For CMS deallocation of the Metaspaces occurs during the 2895 // sweep which is a concurrent phase. Protection by the expand_lock() 2896 // is not enough since allocation is on a per Metaspace basis 2897 // and protected by the Metaspace lock. 2898 Atomic::sub(words, &_used_words[mdtype]); 2899 } 2900 2901 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) { 2902 // _used_words tracks allocations for 2903 // each piece of metadata. Those allocations are 2904 // generally done concurrently by different application 2905 // threads so must be done atomically. 2906 Atomic::add(words, &_used_words[mdtype]); 2907 } 2908 2909 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) { 2910 size_t used = 0; 2911 ClassLoaderDataGraphMetaspaceIterator iter; 2912 while (iter.repeat()) { 2913 Metaspace* msp = iter.get_next(); 2914 // Sum allocated_blocks_words for each metaspace 2915 if (msp != NULL) { 2916 used += msp->used_words_slow(mdtype); 2917 } 2918 } 2919 return used * BytesPerWord; 2920 } 2921 2922 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) { 2923 size_t free = 0; 2924 ClassLoaderDataGraphMetaspaceIterator iter; 2925 while (iter.repeat()) { 2926 Metaspace* msp = iter.get_next(); 2927 if (msp != NULL) { 2928 free += msp->free_words_slow(mdtype); 2929 } 2930 } 2931 return free * BytesPerWord; 2932 } 2933 2934 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) { 2935 if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) { 2936 return 0; 2937 } 2938 // Don't count the space in the freelists. That space will be 2939 // added to the capacity calculation as needed. 
2940 size_t capacity = 0; 2941 ClassLoaderDataGraphMetaspaceIterator iter; 2942 while (iter.repeat()) { 2943 Metaspace* msp = iter.get_next(); 2944 if (msp != NULL) { 2945 capacity += msp->capacity_words_slow(mdtype); 2946 } 2947 } 2948 return capacity * BytesPerWord; 2949 } 2950 2951 size_t MetaspaceAux::capacity_bytes_slow() { 2952 #ifdef PRODUCT 2953 // Use capacity_bytes() in PRODUCT instead of this function. 2954 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT"); 2955 #endif 2956 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType); 2957 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType); 2958 assert(capacity_bytes() == class_capacity + non_class_capacity, 2959 "bad accounting: capacity_bytes() " SIZE_FORMAT 2960 " class_capacity + non_class_capacity " SIZE_FORMAT 2961 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT, 2962 capacity_bytes(), class_capacity + non_class_capacity, 2963 class_capacity, non_class_capacity); 2964 2965 return class_capacity + non_class_capacity; 2966 } 2967 2968 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) { 2969 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2970 return list == NULL ? 0 : list->reserved_bytes(); 2971 } 2972 2973 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) { 2974 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2975 return list == NULL ? 0 : list->committed_bytes(); 2976 } 2977 2978 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); } 2979 2980 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) { 2981 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype); 2982 if (chunk_manager == NULL) { 2983 return 0; 2984 } 2985 chunk_manager->slow_verify(); 2986 return chunk_manager->free_chunks_total_words(); 2987 } 2988 2989 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) { 2990 return free_chunks_total_words(mdtype) * BytesPerWord; 2991 } 2992 2993 size_t MetaspaceAux::free_chunks_total_words() { 2994 return free_chunks_total_words(Metaspace::ClassType) + 2995 free_chunks_total_words(Metaspace::NonClassType); 2996 } 2997 2998 size_t MetaspaceAux::free_chunks_total_bytes() { 2999 return free_chunks_total_words() * BytesPerWord; 3000 } 3001 3002 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) { 3003 return Metaspace::get_chunk_manager(mdtype) != NULL; 3004 } 3005 3006 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) { 3007 if (!has_chunk_free_list(mdtype)) { 3008 return MetaspaceChunkFreeListSummary(); 3009 } 3010 3011 const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype); 3012 return cm->chunk_free_list_summary(); 3013 } 3014 3015 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) { 3016 log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)", 3017 prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K); 3018 } 3019 3020 void MetaspaceAux::print_on(outputStream* out) { 3021 Metaspace::MetadataType nct = Metaspace::NonClassType; 3022 3023 out->print_cr(" Metaspace " 3024 "used " SIZE_FORMAT "K, " 3025 "capacity " SIZE_FORMAT "K, " 3026 "committed " SIZE_FORMAT "K, " 3027 "reserved " SIZE_FORMAT "K", 3028 used_bytes()/K, 3029 capacity_bytes()/K, 3030 committed_bytes()/K, 3031 reserved_bytes()/K); 3032 3033 if (Metaspace::using_class_space()) { 3034 Metaspace::MetadataType 
ct = Metaspace::ClassType; 3035 out->print_cr(" class space " 3036 "used " SIZE_FORMAT "K, " 3037 "capacity " SIZE_FORMAT "K, " 3038 "committed " SIZE_FORMAT "K, " 3039 "reserved " SIZE_FORMAT "K", 3040 used_bytes(ct)/K, 3041 capacity_bytes(ct)/K, 3042 committed_bytes(ct)/K, 3043 reserved_bytes(ct)/K); 3044 } 3045 } 3046 3047 // Print information for class space and data space separately. 3048 // This is almost the same as above. 3049 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) { 3050 size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype); 3051 size_t capacity_bytes = capacity_bytes_slow(mdtype); 3052 size_t used_bytes = used_bytes_slow(mdtype); 3053 size_t free_bytes = free_bytes_slow(mdtype); 3054 size_t used_and_free = used_bytes + free_bytes + 3055 free_chunks_capacity_bytes; 3056 out->print_cr(" Chunk accounting: (used in chunks " SIZE_FORMAT 3057 "K + unused in chunks " SIZE_FORMAT "K + " 3058 " capacity in free chunks " SIZE_FORMAT "K) = " SIZE_FORMAT 3059 "K capacity in allocated chunks " SIZE_FORMAT "K", 3060 used_bytes / K, 3061 free_bytes / K, 3062 free_chunks_capacity_bytes / K, 3063 used_and_free / K, 3064 capacity_bytes / K); 3065 // Accounting can only be correct if we got the values during a safepoint 3066 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong"); 3067 } 3068 3069 // Print total fragmentation for class metaspaces 3070 void MetaspaceAux::print_class_waste(outputStream* out) { 3071 assert(Metaspace::using_class_space(), "class metaspace not used"); 3072 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0; 3073 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0; 3074 ClassLoaderDataGraphMetaspaceIterator iter; 3075 while (iter.repeat()) { 3076 Metaspace* msp = iter.get_next(); 3077 if (msp != NULL) { 3078 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 3079 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 3080 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex); 3081 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex); 3082 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex); 3083 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex); 3084 cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex); 3085 } 3086 } 3087 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 3088 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 3089 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 3090 "large count " SIZE_FORMAT, 3091 cls_specialized_count, cls_specialized_waste, 3092 cls_small_count, cls_small_waste, 3093 cls_medium_count, cls_medium_waste, cls_humongous_count); 3094 } 3095 3096 // Print total fragmentation for data and class metaspaces separately 3097 void MetaspaceAux::print_waste(outputStream* out) { 3098 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0; 3099 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0; 3100 3101 ClassLoaderDataGraphMetaspaceIterator iter; 3102 while (iter.repeat()) { 3103 Metaspace* msp = iter.get_next(); 3104 if (msp != NULL) { 3105 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 3106 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 3107 small_waste += 
msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex); 3108 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex); 3109 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex); 3110 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex); 3111 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex); 3112 } 3113 } 3114 out->print_cr("Total fragmentation waste (words) doesn't count free space"); 3115 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 3116 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 3117 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 3118 "large count " SIZE_FORMAT, 3119 specialized_count, specialized_waste, small_count, 3120 small_waste, medium_count, medium_waste, humongous_count); 3121 if (Metaspace::using_class_space()) { 3122 print_class_waste(out); 3123 } 3124 } 3125 3126 class MetadataStats VALUE_OBJ_CLASS_SPEC { 3127 private: 3128 size_t _capacity; 3129 size_t _used; 3130 size_t _free; 3131 size_t _waste; 3132 3133 public: 3134 MetadataStats() : _capacity(0), _used(0), _free(0), _waste(0) { } 3135 MetadataStats(size_t capacity, size_t used, size_t free, size_t waste) 3136 : _capacity(capacity), _used(used), _free(free), _waste(waste) { } 3137 3138 void add(const MetadataStats& stats) { 3139 _capacity += stats.capacity(); 3140 _used += stats.used(); 3141 _free += stats.free(); 3142 _waste += stats.waste(); 3143 } 3144 3145 size_t capacity() const { return _capacity; } 3146 size_t used() const { return _used; } 3147 size_t free() const { return _free; } 3148 size_t waste() const { return _waste; } 3149 3150 void print_on(outputStream* out, size_t scale) const; 3151 }; 3152 3153 3154 void MetadataStats::print_on(outputStream* out, size_t scale) const { 3155 const char* unit = scale_unit(scale); 3156 out->print_cr("capacity=%10.2f%s used=%10.2f%s free=%10.2f%s waste=%10.2f%s", 3157 (float)capacity() / scale, unit, 3158 (float)used() / scale, unit, 3159 (float)free() / scale, unit, 3160 (float)waste() / scale, unit); 3161 } 3162 3163 class PrintCLDMetaspaceInfoClosure : public CLDClosure { 3164 private: 3165 outputStream* _out; 3166 size_t _scale; 3167 3168 size_t _total_count; 3169 MetadataStats _total_metadata; 3170 MetadataStats _total_class; 3171 3172 size_t _total_anon_count; 3173 MetadataStats _total_anon_metadata; 3174 MetadataStats _total_anon_class; 3175 3176 public: 3177 PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale = K) 3178 : _out(out), _scale(scale), _total_count(0), _total_anon_count(0) { } 3179 3180 ~PrintCLDMetaspaceInfoClosure() { 3181 print_summary(); 3182 } 3183 3184 void do_cld(ClassLoaderData* cld) { 3185 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); 3186 3187 if (cld->is_unloading()) return; 3188 Metaspace* msp = cld->metaspace_or_null(); 3189 if (msp == NULL) { 3190 return; 3191 } 3192 3193 bool anonymous = false; 3194 if (cld->is_anonymous()) { 3195 _out->print_cr("ClassLoader: for anonymous class"); 3196 anonymous = true; 3197 } else { 3198 ResourceMark rm; 3199 _out->print_cr("ClassLoader: %s", cld->loader_name()); 3200 } 3201 3202 print_metaspace(msp, anonymous); 3203 _out->cr(); 3204 } 3205 3206 private: 3207 void print_metaspace(Metaspace* msp, bool anonymous); 3208 void print_summary() const; 3209 }; 3210 3211 void PrintCLDMetaspaceInfoClosure::print_metaspace(Metaspace* msp, bool anonymous){ 3212 assert(msp != NULL, "Sanity"); 3213 SpaceManager* vsm = msp->vsm(); 3214 const char* unit = scale_unit(_scale); 3215 3216 size_t capacity = 
vsm->sum_capacity_in_chunks_in_use() * BytesPerWord; 3217 size_t used = vsm->sum_used_in_chunks_in_use() * BytesPerWord; 3218 size_t free = vsm->sum_free_in_chunks_in_use() * BytesPerWord; 3219 size_t waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord; 3220 3221 _total_count ++; 3222 MetadataStats metadata_stats(capacity, used, free, waste); 3223 _total_metadata.add(metadata_stats); 3224 3225 if (anonymous) { 3226 _total_anon_count ++; 3227 _total_anon_metadata.add(metadata_stats); 3228 } 3229 3230 _out->print(" Metadata "); 3231 metadata_stats.print_on(_out, _scale); 3232 3233 if (Metaspace::using_class_space()) { 3234 vsm = msp->class_vsm(); 3235 3236 capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord; 3237 used = vsm->sum_used_in_chunks_in_use() * BytesPerWord; 3238 free = vsm->sum_free_in_chunks_in_use() * BytesPerWord; 3239 waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord; 3240 3241 MetadataStats class_stats(capacity, used, free, waste); 3242 _total_class.add(class_stats); 3243 3244 if (anonymous) { 3245 _total_anon_class.add(class_stats); 3246 } 3247 3248 _out->print(" Class data "); 3249 class_stats.print_on(_out, _scale); 3250 } 3251 } 3252 3253 void PrintCLDMetaspaceInfoClosure::print_summary() const { 3254 const char* unit = scale_unit(_scale); 3255 _out->cr(); 3256 _out->print_cr("Summary:"); 3257 3258 MetadataStats total; 3259 total.add(_total_metadata); 3260 total.add(_total_class); 3261 3262 _out->print(" Total class loaders=" SIZE_FORMAT_W(6) " ", _total_count); 3263 total.print_on(_out, _scale); 3264 3265 _out->print(" Metadata "); 3266 _total_metadata.print_on(_out, _scale); 3267 3268 if (Metaspace::using_class_space()) { 3269 _out->print(" Class data "); 3270 _total_class.print_on(_out, _scale); 3271 } 3272 _out->cr(); 3273 3274 MetadataStats total_anon; 3275 total_anon.add(_total_anon_metadata); 3276 total_anon.add(_total_anon_class); 3277 3278 _out->print("For anonymous classes=" SIZE_FORMAT_W(6) " ", _total_anon_count); 3279 total_anon.print_on(_out, _scale); 3280 3281 _out->print(" Metadata "); 3282 _total_anon_metadata.print_on(_out, _scale); 3283 3284 if (Metaspace::using_class_space()) { 3285 _out->print(" Class data "); 3286 _total_anon_class.print_on(_out, _scale); 3287 } 3288 } 3289 3290 void MetaspaceAux::print_metadata_for_nmt(outputStream* out, size_t scale) { 3291 const char* unit = scale_unit(scale); 3292 out->print_cr("Metaspaces:"); 3293 out->print_cr(" Metadata space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s", 3294 reserved_bytes(Metaspace::NonClassType) / scale, unit, 3295 committed_bytes(Metaspace::NonClassType) / scale, unit); 3296 if (Metaspace::using_class_space()) { 3297 out->print_cr(" Class space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s", 3298 reserved_bytes(Metaspace::ClassType) / scale, unit, 3299 committed_bytes(Metaspace::ClassType) / scale, unit); 3300 } 3301 3302 out->cr(); 3303 ChunkManager::print_all_chunkmanagers_on(out, scale); 3304 3305 out->cr(); 3306 out->print_cr("Per-classloader metadata:"); 3307 out->cr(); 3308 3309 PrintCLDMetaspaceInfoClosure cl(out, scale); 3310 ClassLoaderDataGraph::cld_do(&cl); 3311 } 3312 3313 3314 // Dump global metaspace things from the end of ClassLoaderDataGraph 3315 void MetaspaceAux::dump(outputStream* out) { 3316 out->print_cr("All Metaspace:"); 3317 out->print("data space: "); print_on(out, Metaspace::NonClassType); 3318 out->print("class space: "); print_on(out, Metaspace::ClassType); 3319 print_waste(out); 3320 } 3321 
3322 // Prints an ASCII representation of the given space.
3323 void MetaspaceAux::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
3324   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3325   const bool for_class = (mdtype == Metaspace::ClassType);
3326   VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
3327   if (vsl != NULL) {
3328     if (for_class) {
3329       if (!Metaspace::using_class_space()) {
3330         out->print_cr("No Class Space.");
3331         return;
3332       }
3333       out->print_raw("---- Metaspace Map (Class Space) ----");
3334     } else {
3335       out->print_raw("---- Metaspace Map (Non-Class Space) ----");
3336     }
3337     // Print legend:
3338     out->cr();
3339     out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
3340     out->cr();
3341 
3342     vsl->print_map(out);
3343     out->cr();
3344   }
3345 }
3346 
3347 void MetaspaceAux::verify_free_chunks() {
3348   Metaspace::chunk_manager_metadata()->verify();
3349   if (Metaspace::using_class_space()) {
3350     Metaspace::chunk_manager_class()->verify();
3351   }
3352 }
3353 
3354 void MetaspaceAux::verify_capacity() {
3355 #ifdef ASSERT
3356   size_t running_sum_capacity_bytes = capacity_bytes();
3357   // For purposes of the running sum of capacity, verify against capacity
3358   size_t capacity_in_use_bytes = capacity_bytes_slow();
3359   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
3360          "capacity_words() * BytesPerWord " SIZE_FORMAT
3361          " capacity_bytes_slow() " SIZE_FORMAT,
3362          running_sum_capacity_bytes, capacity_in_use_bytes);
3363   for (Metaspace::MetadataType i = Metaspace::ClassType;
3364        i < Metaspace::MetadataTypeCount;
3365        i = (Metaspace::MetadataType)(i + 1)) {
3366     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
3367     assert(capacity_bytes(i) == capacity_in_use_bytes,
3368            "capacity_bytes(%u) " SIZE_FORMAT
3369            " capacity_bytes_slow(%u) " SIZE_FORMAT,
3370            i, capacity_bytes(i), i, capacity_in_use_bytes);
3371   }
3372 #endif
3373 }
3374 
3375 void MetaspaceAux::verify_used() {
3376 #ifdef ASSERT
3377   size_t running_sum_used_bytes = used_bytes();
3378   // For purposes of the running sum of used, verify against used
3379   size_t used_in_use_bytes = used_bytes_slow();
3380   assert(used_bytes() == used_in_use_bytes,
3381          "used_bytes() " SIZE_FORMAT
3382          " used_bytes_slow() " SIZE_FORMAT,
3383          used_bytes(), used_in_use_bytes);
3384   for (Metaspace::MetadataType i = Metaspace::ClassType;
3385        i < Metaspace::MetadataTypeCount;
3386        i = (Metaspace::MetadataType)(i + 1)) {
3387     size_t used_in_use_bytes = used_bytes_slow(i);
3388     assert(used_bytes(i) == used_in_use_bytes,
3389            "used_bytes(%u) " SIZE_FORMAT
3390            " used_bytes_slow(%u) " SIZE_FORMAT,
3391            i, used_bytes(i), i, used_in_use_bytes);
3392   }
3393 #endif
3394 }
3395 
3396 void MetaspaceAux::verify_metrics() {
3397   verify_capacity();
3398   verify_used();
3399 }
3400 
3401 
3402 // Metaspace methods
3403 
3404 size_t Metaspace::_first_chunk_word_size = 0;
3405 size_t Metaspace::_first_class_chunk_word_size = 0;
3406 
3407 size_t Metaspace::_commit_alignment = 0;
3408 size_t Metaspace::_reserve_alignment = 0;
3409 
3410 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
3411   initialize(lock, type);
3412 }
3413 
3414 Metaspace::~Metaspace() {
3415   delete _vsm;
3416   if (using_class_space()) {
3417     delete _class_vsm;
3418   }
3419 }
3420 
3421 VirtualSpaceList*
Metaspace::_space_list = NULL; 3422 VirtualSpaceList* Metaspace::_class_space_list = NULL; 3423 3424 ChunkManager* Metaspace::_chunk_manager_metadata = NULL; 3425 ChunkManager* Metaspace::_chunk_manager_class = NULL; 3426 3427 #define VIRTUALSPACEMULTIPLIER 2 3428 3429 #ifdef _LP64 3430 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1); 3431 3432 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) { 3433 assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class."); 3434 // Figure out the narrow_klass_base and the narrow_klass_shift. The 3435 // narrow_klass_base is the lower of the metaspace base and the cds base 3436 // (if cds is enabled). The narrow_klass_shift depends on the distance 3437 // between the lower base and higher address. 3438 address lower_base; 3439 address higher_address; 3440 #if INCLUDE_CDS 3441 if (UseSharedSpaces) { 3442 higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()), 3443 (address)(metaspace_base + compressed_class_space_size())); 3444 lower_base = MIN2(metaspace_base, cds_base); 3445 } else 3446 #endif 3447 { 3448 higher_address = metaspace_base + compressed_class_space_size(); 3449 lower_base = metaspace_base; 3450 3451 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes; 3452 // If compressed class space fits in lower 32G, we don't need a base. 3453 if (higher_address <= (address)klass_encoding_max) { 3454 lower_base = 0; // Effectively lower base is zero. 3455 } 3456 } 3457 3458 Universe::set_narrow_klass_base(lower_base); 3459 3460 // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See 3461 // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for 3462 // how dump time narrow_klass_shift is set. Although, CDS can work 3463 // with zero-shift mode also, to be consistent with AOT it uses 3464 // LogKlassAlignmentInBytes for klass shift so archived java heap objects 3465 // can be used at same time as AOT code. 3466 if (!UseSharedSpaces 3467 && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) { 3468 Universe::set_narrow_klass_shift(0); 3469 } else { 3470 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes); 3471 } 3472 AOTLoader::set_narrow_klass_shift(); 3473 } 3474 3475 #if INCLUDE_CDS 3476 // Return TRUE if the specified metaspace_base and cds_base are close enough 3477 // to work with compressed klass pointers. 3478 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) { 3479 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS"); 3480 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 3481 address lower_base = MIN2((address)metaspace_base, cds_base); 3482 address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()), 3483 (address)(metaspace_base + compressed_class_space_size())); 3484 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax); 3485 } 3486 #endif 3487 3488 // Try to allocate the metaspace at the requested addr. 
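// In outline, the reservation strategy implemented below is: first try to reserve the
// compressed class space at requested_addr; on AARCH64/AIX, probe a range of candidate
// addresses (4G-aligned on AARCH64) so the narrow klass base can be encoded cheaply;
// with CDS, keep moving the request upward in 1G steps while the address still allows
// compressed klass pointers together with the archive; finally fall back to reserving
// anywhere, and exit during initialization if even that fails. If the final address is
// not CDS-compatible, sharing is switched off. The narrow klass base and shift are then
// derived from the resulting layout.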
3489 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) { 3490 assert(!DumpSharedSpaces, "compress klass space is allocated by MetaspaceShared class."); 3491 assert(using_class_space(), "called improperly"); 3492 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 3493 assert(compressed_class_space_size() < KlassEncodingMetaspaceMax, 3494 "Metaspace size is too big"); 3495 assert_is_aligned(requested_addr, _reserve_alignment); 3496 assert_is_aligned(cds_base, _reserve_alignment); 3497 assert_is_aligned(compressed_class_space_size(), _reserve_alignment); 3498 3499 // Don't use large pages for the class space. 3500 bool large_pages = false; 3501 3502 #if !(defined(AARCH64) || defined(AIX)) 3503 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(), 3504 _reserve_alignment, 3505 large_pages, 3506 requested_addr); 3507 #else // AARCH64 3508 ReservedSpace metaspace_rs; 3509 3510 // Our compressed klass pointers may fit nicely into the lower 32 3511 // bits. 3512 if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) { 3513 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3514 _reserve_alignment, 3515 large_pages, 3516 requested_addr); 3517 } 3518 3519 if (! metaspace_rs.is_reserved()) { 3520 // Aarch64: Try to align metaspace so that we can decode a compressed 3521 // klass with a single MOVK instruction. We can do this iff the 3522 // compressed class base is a multiple of 4G. 3523 // Aix: Search for a place where we can find memory. If we need to load 3524 // the base, 4G alignment is helpful, too. 3525 size_t increment = AARCH64_ONLY(4*)G; 3526 for (char *a = align_up(requested_addr, increment); 3527 a < (char*)(1024*G); 3528 a += increment) { 3529 if (a == (char *)(32*G)) { 3530 // Go faster from here on. Zero-based is no longer possible. 3531 increment = 4*G; 3532 } 3533 3534 #if INCLUDE_CDS 3535 if (UseSharedSpaces 3536 && ! can_use_cds_with_metaspace_addr(a, cds_base)) { 3537 // We failed to find an aligned base that will reach. Fall 3538 // back to using our requested addr. 3539 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3540 _reserve_alignment, 3541 large_pages, 3542 requested_addr); 3543 break; 3544 } 3545 #endif 3546 3547 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3548 _reserve_alignment, 3549 large_pages, 3550 a); 3551 if (metaspace_rs.is_reserved()) 3552 break; 3553 } 3554 } 3555 3556 #endif // AARCH64 3557 3558 if (!metaspace_rs.is_reserved()) { 3559 #if INCLUDE_CDS 3560 if (UseSharedSpaces) { 3561 size_t increment = align_up(1*G, _reserve_alignment); 3562 3563 // Keep trying to allocate the metaspace, increasing the requested_addr 3564 // by 1GB each time, until we reach an address that will no longer allow 3565 // use of CDS with compressed klass pointers. 3566 char *addr = requested_addr; 3567 while (!metaspace_rs.is_reserved() && (addr + increment > addr) && 3568 can_use_cds_with_metaspace_addr(addr + increment, cds_base)) { 3569 addr = addr + increment; 3570 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3571 _reserve_alignment, large_pages, addr); 3572 } 3573 } 3574 #endif 3575 // If no successful allocation then try to allocate the space anywhere. If 3576 // that fails then OOM doom. At this point we cannot try allocating the 3577 // metaspace as if UseCompressedClassPointers is off because too much 3578 // initialization has happened that depends on UseCompressedClassPointers. 
3579 // So, UseCompressedClassPointers cannot be turned off at this point. 3580 if (!metaspace_rs.is_reserved()) { 3581 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3582 _reserve_alignment, large_pages); 3583 if (!metaspace_rs.is_reserved()) { 3584 vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes", 3585 compressed_class_space_size())); 3586 } 3587 } 3588 } 3589 3590 // If we got here then the metaspace got allocated. 3591 MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass); 3592 3593 #if INCLUDE_CDS 3594 // Verify that we can use shared spaces. Otherwise, turn off CDS. 3595 if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) { 3596 FileMapInfo::stop_sharing_and_unmap( 3597 "Could not allocate metaspace at a compatible address"); 3598 } 3599 #endif 3600 set_narrow_klass_base_and_shift((address)metaspace_rs.base(), 3601 UseSharedSpaces ? (address)cds_base : 0); 3602 3603 initialize_class_space(metaspace_rs); 3604 3605 LogTarget(Trace, gc, metaspace) lt; 3606 if (lt.is_enabled()) { 3607 ResourceMark rm; 3608 LogStream ls(lt); 3609 print_compressed_class_space(&ls, requested_addr); 3610 } 3611 } 3612 3613 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) { 3614 st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d", 3615 p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift()); 3616 if (_class_space_list != NULL) { 3617 address base = (address)_class_space_list->current_virtual_space()->bottom(); 3618 st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT, 3619 compressed_class_space_size(), p2i(base)); 3620 if (requested_addr != 0) { 3621 st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr)); 3622 } 3623 st->cr(); 3624 } 3625 } 3626 3627 // For UseCompressedClassPointers the class space is reserved above the top of 3628 // the Java heap. The argument passed in is at the base of the compressed space. 3629 void Metaspace::initialize_class_space(ReservedSpace rs) { 3630 // The reserved space size may be bigger because of alignment, esp with UseLargePages 3631 assert(rs.size() >= CompressedClassSpaceSize, 3632 SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize); 3633 assert(using_class_space(), "Must be using class space"); 3634 _class_space_list = new VirtualSpaceList(rs); 3635 _chunk_manager_class = new ChunkManager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk); 3636 3637 if (!_class_space_list->initialization_succeeded()) { 3638 vm_exit_during_initialization("Failed to setup compressed class space virtual space list."); 3639 } 3640 } 3641 3642 #endif 3643 3644 void Metaspace::ergo_initialize() { 3645 if (DumpSharedSpaces) { 3646 // Using large pages when dumping the shared archive is currently not implemented. 3647 FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false); 3648 } 3649 3650 size_t page_size = os::vm_page_size(); 3651 if (UseLargePages && UseLargePagesInMetaspace) { 3652 page_size = os::large_page_size(); 3653 } 3654 3655 _commit_alignment = page_size; 3656 _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity()); 3657 3658 // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will 3659 // override if MaxMetaspaceSize was set on the command line or not. 3660 // This information is needed later to conform to the specification of the 3661 // java.lang.management.MemoryUsage API. 
3662 // 3663 // Ideally, we would be able to set the default value of MaxMetaspaceSize in 3664 // globals.hpp to the aligned value, but this is not possible, since the 3665 // alignment depends on other flags being parsed. 3666 MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment); 3667 3668 if (MetaspaceSize > MaxMetaspaceSize) { 3669 MetaspaceSize = MaxMetaspaceSize; 3670 } 3671 3672 MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment); 3673 3674 assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize"); 3675 3676 MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment); 3677 MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment); 3678 3679 CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment); 3680 3681 // Initial virtual space size will be calculated at global_initialize() 3682 size_t min_metaspace_sz = 3683 VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize; 3684 if (UseCompressedClassPointers) { 3685 if ((min_metaspace_sz + CompressedClassSpaceSize) > MaxMetaspaceSize) { 3686 if (min_metaspace_sz >= MaxMetaspaceSize) { 3687 vm_exit_during_initialization("MaxMetaspaceSize is too small."); 3688 } else { 3689 FLAG_SET_ERGO(size_t, CompressedClassSpaceSize, 3690 MaxMetaspaceSize - min_metaspace_sz); 3691 } 3692 } 3693 } else if (min_metaspace_sz >= MaxMetaspaceSize) { 3694 FLAG_SET_ERGO(size_t, InitialBootClassLoaderMetaspaceSize, 3695 min_metaspace_sz); 3696 } 3697 3698 set_compressed_class_space_size(CompressedClassSpaceSize); 3699 } 3700 3701 void Metaspace::global_initialize() { 3702 MetaspaceGC::initialize(); 3703 3704 #if INCLUDE_CDS 3705 if (DumpSharedSpaces) { 3706 MetaspaceShared::initialize_dumptime_shared_and_meta_spaces(); 3707 } else if (UseSharedSpaces) { 3708 // If any of the archived space fails to map, UseSharedSpaces 3709 // is reset to false. Fall through to the 3710 // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class 3711 // metaspace. 3712 MetaspaceShared::initialize_runtime_shared_and_meta_spaces(); 3713 } 3714 3715 if (!DumpSharedSpaces && !UseSharedSpaces) 3716 #endif // INCLUDE_CDS 3717 { 3718 #ifdef _LP64 3719 if (using_class_space()) { 3720 char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment); 3721 allocate_metaspace_compressed_klass_ptrs(base, 0); 3722 } 3723 #endif // _LP64 3724 } 3725 3726 // Initialize these before initializing the VirtualSpaceList 3727 _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord; 3728 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size); 3729 // Make the first class chunk bigger than a medium chunk so it's not put 3730 // on the medium chunk list. The next chunk will be small and progress 3731 // from there. This size calculated by -version. 3732 _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6, 3733 (CompressedClassSpaceSize/BytesPerWord)*2); 3734 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size); 3735 // Arbitrarily set the initial virtual space to a multiple 3736 // of the boot class loader size. 3737 size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size; 3738 word_size = align_up(word_size, Metaspace::reserve_alignment_words()); 3739 3740 // Initialize the list of virtual spaces. 
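// As in initialize_class_space() above, the VirtualSpaceList and its ChunkManager are
// created as a pair: the list manages the reserved and committed memory, while the
// chunk manager keeps the free chunks carved out of it.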
3741 _space_list = new VirtualSpaceList(word_size); 3742 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); 3743 3744 if (!_space_list->initialization_succeeded()) { 3745 vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL); 3746 } 3747 3748 _tracer = new MetaspaceTracer(); 3749 } 3750 3751 void Metaspace::post_initialize() { 3752 MetaspaceGC::post_initialize(); 3753 } 3754 3755 void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) { 3756 Metachunk* chunk = get_initialization_chunk(type, mdtype); 3757 if (chunk != NULL) { 3758 // Add to this manager's list of chunks in use and current_chunk(). 3759 get_space_manager(mdtype)->add_chunk(chunk, true); 3760 } 3761 } 3762 3763 Metachunk* Metaspace::get_initialization_chunk(MetaspaceType type, MetadataType mdtype) { 3764 size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type); 3765 3766 // Get a chunk from the chunk freelist 3767 Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size); 3768 3769 if (chunk == NULL) { 3770 chunk = get_space_list(mdtype)->get_new_chunk(chunk_word_size, 3771 get_space_manager(mdtype)->medium_chunk_bunch()); 3772 } 3773 3774 return chunk; 3775 } 3776 3777 void Metaspace::verify_global_initialization() { 3778 assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized"); 3779 assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized"); 3780 3781 if (using_class_space()) { 3782 assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized"); 3783 assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized"); 3784 } 3785 } 3786 3787 void Metaspace::initialize(Mutex* lock, MetaspaceType type) { 3788 verify_global_initialization(); 3789 3790 // Allocate SpaceManager for metadata objects. 3791 _vsm = new SpaceManager(NonClassType, type, lock); 3792 3793 if (using_class_space()) { 3794 // Allocate SpaceManager for classes. 3795 _class_vsm = new SpaceManager(ClassType, type, lock); 3796 } 3797 3798 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 3799 3800 // Allocate chunk for metadata objects 3801 initialize_first_chunk(type, NonClassType); 3802 3803 // Allocate chunk for class metadata objects 3804 if (using_class_space()) { 3805 initialize_first_chunk(type, ClassType); 3806 } 3807 } 3808 3809 size_t Metaspace::align_word_size_up(size_t word_size) { 3810 size_t byte_size = word_size * wordSize; 3811 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize; 3812 } 3813 3814 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) { 3815 assert(!_frozen, "sanity"); 3816 // Don't use class_vsm() unless UseCompressedClassPointers is true. 3817 if (is_class_space_allocation(mdtype)) { 3818 return class_vsm()->allocate(word_size); 3819 } else { 3820 return vsm()->allocate(word_size); 3821 } 3822 } 3823 3824 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) { 3825 assert(!_frozen, "sanity"); 3826 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord); 3827 assert(delta_bytes > 0, "Must be"); 3828 3829 size_t before = 0; 3830 size_t after = 0; 3831 MetaWord* res; 3832 bool incremented; 3833 3834 // Each thread increments the HWM at most once. Even if the thread fails to increment 3835 // the HWM, an allocation is still attempted. 
This is because another thread must then 3836 // have incremented the HWM and therefore the allocation might still succeed. 3837 do { 3838 incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before); 3839 res = allocate(word_size, mdtype); 3840 } while (!incremented && res == NULL); 3841 3842 if (incremented) { 3843 tracer()->report_gc_threshold(before, after, 3844 MetaspaceGCThresholdUpdater::ExpandAndAllocate); 3845 log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after); 3846 } 3847 3848 return res; 3849 } 3850 3851 size_t Metaspace::used_words_slow(MetadataType mdtype) const { 3852 if (mdtype == ClassType) { 3853 return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0; 3854 } else { 3855 return vsm()->sum_used_in_chunks_in_use(); // includes overhead! 3856 } 3857 } 3858 3859 size_t Metaspace::free_words_slow(MetadataType mdtype) const { 3860 assert(!_frozen, "sanity"); 3861 if (mdtype == ClassType) { 3862 return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0; 3863 } else { 3864 return vsm()->sum_free_in_chunks_in_use(); 3865 } 3866 } 3867 3868 // Space capacity in the Metaspace. It includes 3869 // space in the list of chunks from which allocations 3870 // have been made. Don't include space in the global freelist and 3871 // in the space available in the dictionary which 3872 // is already counted in some chunk. 3873 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const { 3874 if (mdtype == ClassType) { 3875 return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0; 3876 } else { 3877 return vsm()->sum_capacity_in_chunks_in_use(); 3878 } 3879 } 3880 3881 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const { 3882 return used_words_slow(mdtype) * BytesPerWord; 3883 } 3884 3885 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const { 3886 return capacity_words_slow(mdtype) * BytesPerWord; 3887 } 3888 3889 size_t Metaspace::allocated_blocks_bytes() const { 3890 return vsm()->allocated_blocks_bytes() + 3891 (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0); 3892 } 3893 3894 size_t Metaspace::allocated_chunks_bytes() const { 3895 return vsm()->allocated_chunks_bytes() + 3896 (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0); 3897 } 3898 3899 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) { 3900 assert(!_frozen, "sanity"); 3901 assert(!SafepointSynchronize::is_at_safepoint() 3902 || Thread::current()->is_VM_thread(), "should be the VM thread"); 3903 3904 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag); 3905 3906 if (is_class && using_class_space()) { 3907 class_vsm()->deallocate(ptr, word_size); 3908 } else { 3909 vsm()->deallocate(ptr, word_size); 3910 } 3911 } 3912 3913 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, 3914 MetaspaceObj::Type type, TRAPS) { 3915 assert(!_frozen, "sanity"); 3916 if (HAS_PENDING_EXCEPTION) { 3917 assert(false, "Should not allocate with exception pending"); 3918 return NULL; // caller does a CHECK_NULL too 3919 } 3920 3921 assert(loader_data != NULL, "Should never pass around a NULL loader_data. " 3922 "ClassLoaderData::the_null_class_loader_data() should have been used."); 3923 3924 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType; 3925 3926 // Try to allocate metadata. 
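// Fallback chain: (1) allocate from the loader's metaspace; (2) on failure, and only
// once bootstrapping is complete, ask the heap to satisfy the failed metadata
// allocation (which may clean out memory first); (3) as a last resort, try to get a
// small chunk and allocate from it; (4) if all of that fails, report a metadata OOME.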
3927 MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); 3928 3929 if (result == NULL) { 3930 tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype); 3931 3932 // Allocation failed. 3933 if (is_init_completed()) { 3934 // Only start a GC if the bootstrapping has completed. 3935 3936 // Try to clean out some memory and retry. 3937 result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype); 3938 } 3939 } 3940 3941 if (result == NULL) { 3942 SpaceManager* sm; 3943 if (is_class_space_allocation(mdtype)) { 3944 sm = loader_data->metaspace_non_null()->class_vsm(); 3945 } else { 3946 sm = loader_data->metaspace_non_null()->vsm(); 3947 } 3948 3949 result = sm->get_small_chunk_and_allocate(word_size); 3950 3951 if (result == NULL) { 3952 report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL); 3953 } 3954 } 3955 3956 // Zero initialize. 3957 Copy::fill_to_words((HeapWord*)result, word_size, 0); 3958 3959 return result; 3960 } 3961 3962 size_t Metaspace::class_chunk_size(size_t word_size) { 3963 assert(using_class_space(), "Has to use class space"); 3964 return class_vsm()->calc_chunk_size(word_size); 3965 } 3966 3967 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) { 3968 tracer()->report_metadata_oom(loader_data, word_size, type, mdtype); 3969 3970 // If result is still null, we are out of memory. 3971 Log(gc, metaspace, freelist) log; 3972 if (log.is_info()) { 3973 log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT, 3974 is_class_space_allocation(mdtype) ? "class" : "data", word_size); 3975 ResourceMark rm; 3976 if (log.is_debug()) { 3977 if (loader_data->metaspace_or_null() != NULL) { 3978 LogStream ls(log.debug()); 3979 loader_data->dump(&ls); 3980 } 3981 } 3982 LogStream ls(log.info()); 3983 MetaspaceAux::dump(&ls); 3984 MetaspaceAux::print_metaspace_map(&ls, mdtype); 3985 ChunkManager::print_all_chunkmanagers_on(&ls); 3986 } 3987 3988 bool out_of_compressed_class_space = false; 3989 if (is_class_space_allocation(mdtype)) { 3990 Metaspace* metaspace = loader_data->metaspace_non_null(); 3991 out_of_compressed_class_space = 3992 MetaspaceAux::committed_bytes(Metaspace::ClassType) + 3993 (metaspace->class_chunk_size(word_size) * BytesPerWord) > 3994 CompressedClassSpaceSize; 3995 } 3996 3997 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support 3998 const char* space_string = out_of_compressed_class_space ? 
3999 "Compressed class space" : "Metaspace"; 4000 4001 report_java_out_of_memory(space_string); 4002 4003 if (JvmtiExport::should_post_resource_exhausted()) { 4004 JvmtiExport::post_resource_exhausted( 4005 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, 4006 space_string); 4007 } 4008 4009 if (!is_init_completed()) { 4010 vm_exit_during_initialization("OutOfMemoryError", space_string); 4011 } 4012 4013 if (out_of_compressed_class_space) { 4014 THROW_OOP(Universe::out_of_memory_error_class_metaspace()); 4015 } else { 4016 THROW_OOP(Universe::out_of_memory_error_metaspace()); 4017 } 4018 } 4019 4020 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) { 4021 switch (mdtype) { 4022 case Metaspace::ClassType: return "Class"; 4023 case Metaspace::NonClassType: return "Metadata"; 4024 default: 4025 assert(false, "Got bad mdtype: %d", (int) mdtype); 4026 return NULL; 4027 } 4028 } 4029 4030 void Metaspace::purge(MetadataType mdtype) { 4031 get_space_list(mdtype)->purge(get_chunk_manager(mdtype)); 4032 } 4033 4034 void Metaspace::purge() { 4035 MutexLockerEx cl(SpaceManager::expand_lock(), 4036 Mutex::_no_safepoint_check_flag); 4037 purge(NonClassType); 4038 if (using_class_space()) { 4039 purge(ClassType); 4040 } 4041 } 4042 4043 void Metaspace::print_on(outputStream* out) const { 4044 // Print both class virtual space counts and metaspace. 4045 if (Verbose) { 4046 vsm()->print_on(out); 4047 if (using_class_space()) { 4048 class_vsm()->print_on(out); 4049 } 4050 } 4051 } 4052 4053 bool Metaspace::contains(const void* ptr) { 4054 if (MetaspaceShared::is_in_shared_metaspace(ptr)) { 4055 return true; 4056 } 4057 return contains_non_shared(ptr); 4058 } 4059 4060 bool Metaspace::contains_non_shared(const void* ptr) { 4061 if (using_class_space() && get_space_list(ClassType)->contains(ptr)) { 4062 return true; 4063 } 4064 4065 return get_space_list(NonClassType)->contains(ptr); 4066 } 4067 4068 void Metaspace::verify() { 4069 vsm()->verify(); 4070 if (using_class_space()) { 4071 class_vsm()->verify(); 4072 } 4073 } 4074 4075 void Metaspace::dump(outputStream* const out) const { 4076 out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm())); 4077 vsm()->dump(out); 4078 if (using_class_space()) { 4079 out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm())); 4080 class_vsm()->dump(out); 4081 } 4082 } 4083 4084 /////////////// Unit tests /////////////// 4085 4086 #ifndef PRODUCT 4087 4088 class TestMetaspaceAuxTest : AllStatic { 4089 public: 4090 static void test_reserved() { 4091 size_t reserved = MetaspaceAux::reserved_bytes(); 4092 4093 assert(reserved > 0, "assert"); 4094 4095 size_t committed = MetaspaceAux::committed_bytes(); 4096 assert(committed <= reserved, "assert"); 4097 4098 size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType); 4099 assert(reserved_metadata > 0, "assert"); 4100 assert(reserved_metadata <= reserved, "assert"); 4101 4102 if (UseCompressedClassPointers) { 4103 size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType); 4104 assert(reserved_class > 0, "assert"); 4105 assert(reserved_class < reserved, "assert"); 4106 } 4107 } 4108 4109 static void test_committed() { 4110 size_t committed = MetaspaceAux::committed_bytes(); 4111 4112 assert(committed > 0, "assert"); 4113 4114 size_t reserved = MetaspaceAux::reserved_bytes(); 4115 assert(committed <= reserved, "assert"); 4116 4117 size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType); 4118 assert(committed_metadata > 0, 
"assert"); 4119 assert(committed_metadata <= committed, "assert"); 4120 4121 if (UseCompressedClassPointers) { 4122 size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType); 4123 assert(committed_class > 0, "assert"); 4124 assert(committed_class < committed, "assert"); 4125 } 4126 } 4127 4128 static void test_virtual_space_list_large_chunk() { 4129 VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity()); 4130 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 4131 // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be 4132 // vm_allocation_granularity aligned on Windows. 4133 size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord)); 4134 large_size += (os::vm_page_size()/BytesPerWord); 4135 vs_list->get_new_chunk(large_size, 0); 4136 } 4137 4138 static void test() { 4139 test_reserved(); 4140 test_committed(); 4141 test_virtual_space_list_large_chunk(); 4142 } 4143 }; 4144 4145 void TestMetaspaceAux_test() { 4146 TestMetaspaceAuxTest::test(); 4147 } 4148 4149 class TestVirtualSpaceNodeTest { 4150 static void chunk_up(size_t words_left, size_t& num_medium_chunks, 4151 size_t& num_small_chunks, 4152 size_t& num_specialized_chunks) { 4153 num_medium_chunks = words_left / MediumChunk; 4154 words_left = words_left % MediumChunk; 4155 4156 num_small_chunks = words_left / SmallChunk; 4157 words_left = words_left % SmallChunk; 4158 // how many specialized chunks can we get? 4159 num_specialized_chunks = words_left / SpecializedChunk; 4160 assert(words_left % SpecializedChunk == 0, "should be nothing left"); 4161 } 4162 4163 public: 4164 static void test() { 4165 MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 4166 const size_t vsn_test_size_words = MediumChunk * 4; 4167 const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord; 4168 4169 // The chunk sizes must be multiples of eachother, or this will fail 4170 STATIC_ASSERT(MediumChunk % SmallChunk == 0); 4171 STATIC_ASSERT(SmallChunk % SpecializedChunk == 0); 4172 4173 { // No committed memory in VSN 4174 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); 4175 VirtualSpaceNode vsn(vsn_test_size_bytes); 4176 vsn.initialize(); 4177 vsn.retire(&cm); 4178 assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN"); 4179 } 4180 4181 { // All of VSN is committed, half is used by chunks 4182 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); 4183 VirtualSpaceNode vsn(vsn_test_size_bytes); 4184 vsn.initialize(); 4185 vsn.expand_by(vsn_test_size_words, vsn_test_size_words); 4186 vsn.get_chunk_vs(MediumChunk); 4187 vsn.get_chunk_vs(MediumChunk); 4188 vsn.retire(&cm); 4189 assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks"); 4190 assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up"); 4191 } 4192 4193 const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord; 4194 // This doesn't work for systems with vm_page_size >= 16K. 
4195 if (page_chunks < MediumChunk) { 4196 // 4 pages of VSN is committed, some is used by chunks 4197 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); 4198 VirtualSpaceNode vsn(vsn_test_size_bytes); 4199 4200 vsn.initialize(); 4201 vsn.expand_by(page_chunks, page_chunks); 4202 vsn.get_chunk_vs(SmallChunk); 4203 vsn.get_chunk_vs(SpecializedChunk); 4204 vsn.retire(&cm); 4205 4206 // committed - used = words left to retire 4207 const size_t words_left = page_chunks - SmallChunk - SpecializedChunk; 4208 4209 size_t num_medium_chunks, num_small_chunks, num_spec_chunks; 4210 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks); 4211 4212 assert(num_medium_chunks == 0, "should not get any medium chunks"); 4213 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks"); 4214 assert(cm.sum_free_chunks() == words_left, "sizes should add up"); 4215 } 4216 4217 { // Half of VSN is committed, a humongous chunk is used 4218 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); 4219 VirtualSpaceNode vsn(vsn_test_size_bytes); 4220 vsn.initialize(); 4221 vsn.expand_by(MediumChunk * 2, MediumChunk * 2); 4222 vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk 4223 vsn.retire(&cm); 4224 4225 const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk); 4226 size_t num_medium_chunks, num_small_chunks, num_spec_chunks; 4227 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks); 4228 4229 assert(num_medium_chunks == 0, "should not get any medium chunks"); 4230 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks"); 4231 assert(cm.sum_free_chunks() == words_left, "sizes should add up"); 4232 } 4233 4234 } 4235 4236 #define assert_is_available_positive(word_size) \ 4237 assert(vsn.is_available(word_size), \ 4238 #word_size ": " PTR_FORMAT " bytes were not available in " \ 4239 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \ 4240 (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end())); 4241 4242 #define assert_is_available_negative(word_size) \ 4243 assert(!vsn.is_available(word_size), \ 4244 #word_size ": " PTR_FORMAT " bytes should not be available in " \ 4245 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \ 4246 (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end())); 4247 4248 static void test_is_available_positive() { 4249 // Reserve some memory. 4250 VirtualSpaceNode vsn(os::vm_allocation_granularity()); 4251 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); 4252 4253 // Commit some memory. 4254 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord; 4255 bool expanded = vsn.expand_by(commit_word_size, commit_word_size); 4256 assert(expanded, "Failed to commit"); 4257 4258 // Check that is_available accepts the committed size. 4259 assert_is_available_positive(commit_word_size); 4260 4261 // Check that is_available accepts half the committed size. 4262 size_t expand_word_size = commit_word_size / 2; 4263 assert_is_available_positive(expand_word_size); 4264 } 4265 4266 static void test_is_available_negative() { 4267 // Reserve some memory. 4268 VirtualSpaceNode vsn(os::vm_allocation_granularity()); 4269 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); 4270 4271 // Commit some memory. 
4272 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord; 4273 bool expanded = vsn.expand_by(commit_word_size, commit_word_size); 4274 assert(expanded, "Failed to commit"); 4275 4276 // Check that is_available doesn't accept a too large size. 4277 size_t two_times_commit_word_size = commit_word_size * 2; 4278 assert_is_available_negative(two_times_commit_word_size); 4279 } 4280 4281 static void test_is_available_overflow() { 4282 // Reserve some memory. 4283 VirtualSpaceNode vsn(os::vm_allocation_granularity()); 4284 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); 4285 4286 // Commit some memory. 4287 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord; 4288 bool expanded = vsn.expand_by(commit_word_size, commit_word_size); 4289 assert(expanded, "Failed to commit"); 4290 4291 // Calculate a size that will overflow the virtual space size. 4292 void* virtual_space_max = (void*)(uintptr_t)-1; 4293 size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1); 4294 size_t overflow_size = bottom_to_max + BytesPerWord; 4295 size_t overflow_word_size = overflow_size / BytesPerWord; 4296 4297 // Check that is_available can handle the overflow. 4298 assert_is_available_negative(overflow_word_size); 4299 } 4300 4301 static void test_is_available() { 4302 TestVirtualSpaceNodeTest::test_is_available_positive(); 4303 TestVirtualSpaceNodeTest::test_is_available_negative(); 4304 TestVirtualSpaceNodeTest::test_is_available_overflow(); 4305 } 4306 }; 4307 4308 void TestVirtualSpaceNode_test() { 4309 TestVirtualSpaceNodeTest::test(); 4310 TestVirtualSpaceNodeTest::test_is_available(); 4311 } 4312 4313 // The following test is placed here instead of a gtest / unittest file 4314 // because the ChunkManager class is only available in this file. 4315 void ChunkManager_test_list_index() { 4316 ChunkManager manager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk); 4317 4318 // Test previous bug where a query for a humongous class metachunk, 4319 // incorrectly matched the non-class medium metachunk size. 4320 { 4321 assert(MediumChunk > ClassMediumChunk, "Precondition for test"); 4322 4323 ChunkIndex index = manager.list_index(MediumChunk); 4324 4325 assert(index == HumongousIndex, 4326 "Requested size is larger than ClassMediumChunk," 4327 " so should return HumongousIndex. Got index: %d", (int)index); 4328 } 4329 4330 // Check the specified sizes as well. 4331 { 4332 ChunkIndex index = manager.list_index(ClassSpecializedChunk); 4333 assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index); 4334 } 4335 { 4336 ChunkIndex index = manager.list_index(ClassSmallChunk); 4337 assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index); 4338 } 4339 { 4340 ChunkIndex index = manager.list_index(ClassMediumChunk); 4341 assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index); 4342 } 4343 { 4344 ChunkIndex index = manager.list_index(ClassMediumChunk + 1); 4345 assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index); 4346 } 4347 } 4348 4349 #endif // !PRODUCT 4350 4351 #ifdef ASSERT 4352 4353 // ChunkManagerReturnTest stresses taking/returning chunks from the ChunkManager. It takes and 4354 // returns chunks from/to the ChunkManager while keeping track of the expected ChunkManager 4355 // content. 
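// The test is driven through the setup_chunkmanager_returntests(),
// run_chunkmanager_returntests() and teardown_chunkmanager_returntests() entry points
// defined further down in this file.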
4356 class ChunkManagerReturnTestImpl : public CHeapObj<mtClass> { 4357 4358 VirtualSpaceNode _vsn; 4359 ChunkManager _cm; 4360 4361 // The expected content of the chunk manager. 4362 unsigned _chunks_in_chunkmanager; 4363 size_t _words_in_chunkmanager; 4364 4365 // A fixed size pool of chunks. Chunks may be in the chunk manager (free) or not (in use). 4366 static const int num_chunks = 256; 4367 Metachunk* _pool[num_chunks]; 4368 4369 // Helper, return a random position into the chunk pool. 4370 static int get_random_position() { 4371 return os::random() % num_chunks; 4372 } 4373 4374 // Asserts that ChunkManager counters match expectations. 4375 void assert_counters() { 4376 assert(_vsn.container_count() == num_chunks - _chunks_in_chunkmanager, "vsn counter mismatch."); 4377 assert(_cm.free_chunks_count() == _chunks_in_chunkmanager, "cm counter mismatch."); 4378 assert(_cm.free_chunks_total_words() == _words_in_chunkmanager, "cm counter mismatch."); 4379 } 4380 4381 // Get a random chunk size. Equal chance to get spec/med/small chunk size or 4382 // a humongous chunk size. The latter itself is random in the range of [med+spec..4*med). 4383 size_t get_random_chunk_size() { 4384 const size_t sizes [] = { SpecializedChunk, SmallChunk, MediumChunk }; 4385 const int rand = os::random() % 4; 4386 if (rand < 3) { 4387 return sizes[rand]; 4388 } else { 4389 // Note: this affects the max. size of space (see _vsn initialization in ctor). 4390 return align_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk); 4391 } 4392 } 4393 4394 // Starting at pool index <start>+1, find the next chunk tagged as either free or in use, depending 4395 // on <is_free>. Search wraps. Returns its position, or -1 if no matching chunk was found. 4396 int next_matching_chunk(int start, bool is_free) const { 4397 assert(start >= 0 && start < num_chunks, "invalid parameter"); 4398 int pos = start; 4399 do { 4400 if (++pos == num_chunks) { 4401 pos = 0; 4402 } 4403 if (_pool[pos]->is_tagged_free() == is_free) { 4404 return pos; 4405 } 4406 } while (pos != start); 4407 return -1; 4408 } 4409 4410 // A structure to keep information about a chunk list including which 4411 // chunks are part of this list. This is needed to keep information about a chunk list 4412 // we will to return to the ChunkManager, because the original list will be destroyed. 4413 struct AChunkList { 4414 Metachunk* head; 4415 Metachunk* all[num_chunks]; 4416 size_t size; 4417 int num; 4418 ChunkIndex index; 4419 }; 4420 4421 // Assemble, from the in-use chunks (not in the chunk manager) in the pool, 4422 // a random chunk list of max. length <list_size> of chunks with the same 4423 // ChunkIndex (chunk size). 4424 // Returns false if list cannot be assembled. List is returned in the <out> 4425 // structure. Returned list may be smaller than <list_size>. 4426 bool assemble_random_chunklist(AChunkList* out, int list_size) { 4427 // Choose a random in-use chunk from the pool... 4428 const int headpos = next_matching_chunk(get_random_position(), false); 4429 if (headpos == -1) { 4430 return false; 4431 } 4432 Metachunk* const head = _pool[headpos]; 4433 out->all[0] = head; 4434 assert(head->is_tagged_free() == false, "Chunk state mismatch"); 4435 // ..then go from there, chain it up with up to list_size - 1 number of other 4436 // in-use chunks of the same index. 
4437 const ChunkIndex index = _cm.list_index(head->word_size()); 4438 int num_added = 1; 4439 size_t size_added = head->word_size(); 4440 int pos = headpos; 4441 Metachunk* tail = head; 4442 do { 4443 pos = next_matching_chunk(pos, false); 4444 if (pos != headpos) { 4445 Metachunk* c = _pool[pos]; 4446 assert(c->is_tagged_free() == false, "Chunk state mismatch"); 4447 if (index == _cm.list_index(c->word_size())) { 4448 tail->set_next(c); 4449 c->set_prev(tail); 4450 tail = c; 4451 out->all[num_added] = c; 4452 num_added ++; 4453 size_added += c->word_size(); 4454 } 4455 } 4456 } while (num_added < list_size && pos != headpos); 4457 out->head = head; 4458 out->index = index; 4459 out->size = size_added; 4460 out->num = num_added; 4461 return true; 4462 } 4463 4464 // Take a single random chunk from the ChunkManager. 4465 bool take_single_random_chunk_from_chunkmanager() { 4466 assert_counters(); 4467 _cm.locked_verify(); 4468 int pos = next_matching_chunk(get_random_position(), true); 4469 if (pos == -1) { 4470 return false; 4471 } 4472 Metachunk* c = _pool[pos]; 4473 assert(c->is_tagged_free(), "Chunk state mismatch"); 4474 // Note: instead of using ChunkManager::remove_chunk on this one chunk, we call 4475 // ChunkManager::free_chunks_get() with this chunk's word size. We really want 4476 // to exercise ChunkManager::free_chunks_get() because that one gets called for 4477 // normal chunk allocation. 4478 Metachunk* c2 = _cm.free_chunks_get(c->word_size()); 4479 assert(c2 != NULL, "Unexpected."); 4480 assert(!c2->is_tagged_free(), "Chunk state mismatch"); 4481 assert(c2->next() == NULL && c2->prev() == NULL, "Chunk should be outside of a list."); 4482 _chunks_in_chunkmanager --; 4483 _words_in_chunkmanager -= c->word_size(); 4484 assert_counters(); 4485 _cm.locked_verify(); 4486 return true; 4487 } 4488 4489 // Returns a single random chunk to the chunk manager. Returns false if that 4490 // was not possible (all chunks are already in the chunk manager). 4491 bool return_single_random_chunk_to_chunkmanager() { 4492 assert_counters(); 4493 _cm.locked_verify(); 4494 int pos = next_matching_chunk(get_random_position(), false); 4495 if (pos == -1) { 4496 return false; 4497 } 4498 Metachunk* c = _pool[pos]; 4499 assert(c->is_tagged_free() == false, "wrong chunk information"); 4500 _cm.return_single_chunk(_cm.list_index(c->word_size()), c); 4501 _chunks_in_chunkmanager ++; 4502 _words_in_chunkmanager += c->word_size(); 4503 assert(c->is_tagged_free() == true, "wrong chunk information"); 4504 assert_counters(); 4505 _cm.locked_verify(); 4506 return true; 4507 } 4508 4509 // Return a random chunk list to the chunk manager. Returns the length of the 4510 // returned list. 4511 int return_random_chunk_list_to_chunkmanager(int list_size) { 4512 assert_counters(); 4513 _cm.locked_verify(); 4514 AChunkList aChunkList; 4515 if (!assemble_random_chunklist(&aChunkList, list_size)) { 4516 return 0; 4517 } 4518 // Before returning chunks are returned, they should be tagged in use. 4519 for (int i = 0; i < aChunkList.num; i ++) { 4520 assert(!aChunkList.all[i]->is_tagged_free(), "chunk state mismatch."); 4521 } 4522 _cm.return_chunk_list(aChunkList.index, aChunkList.head); 4523 _chunks_in_chunkmanager += aChunkList.num; 4524 _words_in_chunkmanager += aChunkList.size; 4525 // After all chunks are returned, check that they are now tagged free. 
4526     for (int i = 0; i < aChunkList.num; i ++) {
4527       assert(aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4528     }
4529     assert_counters();
4530     _cm.locked_verify();
4531     return aChunkList.num;
4532   }
4533 
4534 public:
4535 
4536   ChunkManagerReturnTestImpl()
4537     : _vsn(align_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
4538     , _cm(SpecializedChunk, SmallChunk, MediumChunk)
4539     , _chunks_in_chunkmanager(0)
4540     , _words_in_chunkmanager(0)
4541   {
4542     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4543     // Allocate virtual space and allocate random chunks. Keep these chunks in the _pool. These chunks are
4544     // "in use", because they have not yet been added to any chunk manager.
4545     _vsn.initialize();
4546     _vsn.expand_by(_vsn.reserved_words(), _vsn.reserved_words());
4547     for (int i = 0; i < num_chunks; i ++) {
4548       const size_t size = get_random_chunk_size();
4549       _pool[i] = _vsn.get_chunk_vs(size);
4550       assert(_pool[i] != NULL, "allocation failed");
4551     }
4552     assert_counters();
4553     _cm.locked_verify();
4554   }
4555 
4556   // Test entry point.
4557   // Return some chunks to the chunk manager (return phase). Take some chunks out (take phase). Repeat.
4558   // Chunks are chosen randomly. The number of chunks to return or take is chosen randomly, but is affected
4559   // by the <phase_length_factor> argument: a factor of 0.0 will cause the test to quickly alternate between
4560   // returning and taking, whereas a factor of 1.0 will take/return all chunks from/to the
4561   // chunk manager, thereby emptying or filling it completely.
4562   void do_test(float phase_length_factor) {
4563     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4564     assert_counters();
4565     // Execute n operations, each operation being the move of a single chunk to/from the chunk manager.
4566     const int num_max_ops = num_chunks * 100;
4567     int num_ops = num_max_ops;
4568     const int average_phase_length = (int)(phase_length_factor * num_chunks);
4569     int num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
4570     bool return_phase = true;
4571     while (num_ops > 0) {
4572       int chunks_moved = 0;
4573       if (return_phase) {
4574         // Randomly switch between returning a single chunk or a random length chunk list.
4575         if (os::random() % 2 == 0) {
4576           if (return_single_random_chunk_to_chunkmanager()) {
4577             chunks_moved = 1;
4578           }
4579         } else {
4580           const int list_length = MAX2(1, (os::random() % num_ops_until_switch));
4581           chunks_moved = return_random_chunk_list_to_chunkmanager(list_length);
4582         }
4583       } else {
4584         // Breathe out (take phase).
4585 if (take_single_random_chunk_from_chunkmanager()) { 4586 chunks_moved = 1; 4587 } 4588 } 4589 num_ops -= chunks_moved; 4590 num_ops_until_switch -= chunks_moved; 4591 if (chunks_moved == 0 || num_ops_until_switch <= 0) { 4592 return_phase = !return_phase; 4593 num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4)); 4594 } 4595 } 4596 } 4597 }; 4598 4599 void* setup_chunkmanager_returntests() { 4600 ChunkManagerReturnTestImpl* p = new ChunkManagerReturnTestImpl(); 4601 return p; 4602 } 4603 4604 void teardown_chunkmanager_returntests(void* p) { 4605 delete (ChunkManagerReturnTestImpl*) p; 4606 } 4607 4608 void run_chunkmanager_returntests(void* p, float phase_length) { 4609 ChunkManagerReturnTestImpl* test = (ChunkManagerReturnTestImpl*) p; 4610 test->do_test(phase_length); 4611 } 4612 4613 // The following test is placed here instead of a gtest / unittest file 4614 // because the ChunkManager class is only available in this file. 4615 class SpaceManagerTest : AllStatic { 4616 friend void SpaceManager_test_adjust_initial_chunk_size(); 4617 4618 static void test_adjust_initial_chunk_size(bool is_class) { 4619 const size_t smallest = SpaceManager::smallest_chunk_size(is_class); 4620 const size_t normal = SpaceManager::small_chunk_size(is_class); 4621 const size_t medium = SpaceManager::medium_chunk_size(is_class); 4622 4623 #define test_adjust_initial_chunk_size(value, expected, is_class_value) \ 4624 do { \ 4625 size_t v = value; \ 4626 size_t e = expected; \ 4627 assert(SpaceManager::adjust_initial_chunk_size(v, (is_class_value)) == e, \ 4628 "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, v); \ 4629 } while (0) 4630 4631 // Smallest (specialized) 4632 test_adjust_initial_chunk_size(1, smallest, is_class); 4633 test_adjust_initial_chunk_size(smallest - 1, smallest, is_class); 4634 test_adjust_initial_chunk_size(smallest, smallest, is_class); 4635 4636 // Small 4637 test_adjust_initial_chunk_size(smallest + 1, normal, is_class); 4638 test_adjust_initial_chunk_size(normal - 1, normal, is_class); 4639 test_adjust_initial_chunk_size(normal, normal, is_class); 4640 4641 // Medium 4642 test_adjust_initial_chunk_size(normal + 1, medium, is_class); 4643 test_adjust_initial_chunk_size(medium - 1, medium, is_class); 4644 test_adjust_initial_chunk_size(medium, medium, is_class); 4645 4646 // Humongous 4647 test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class); 4648 4649 #undef test_adjust_initial_chunk_size 4650 } 4651 4652 static void test_adjust_initial_chunk_size() { 4653 test_adjust_initial_chunk_size(false); 4654 test_adjust_initial_chunk_size(true); 4655 } 4656 }; 4657 4658 void SpaceManager_test_adjust_initial_chunk_size() { 4659 SpaceManagerTest::test_adjust_initial_chunk_size(); 4660 } 4661 4662 #endif // ASSERT --- EOF ---