/*
 * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to true to enable slow integrity checking of the free chunk lists.
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bounds");
  return (ChunkIndex) (i+1);
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
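// The ChunkManager keeps one FreeList per fixed chunk size (specialized,
// small, medium) plus a tree dictionary for variable-sized humongous chunks.
// As a rough orientation -- illustrative arithmetic only, the authoritative
// values are the ChunkSizes enum above -- on a 64-bit VM (BytesPerWord == 8)
// the non-class chunk sizes are 1 KB, 4 KB and 64 KB, and the class-space
// chunk sizes are 1 KB, 2 KB and 32 KB.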
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free lists of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Total word size and count of the free chunks in all lists
  // of this ChunkManager.
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
             _free_chunks_total > 0,
             "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // Allocate (remove) a chunk from the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  static ChunkIndex list_index(size_t size);

  // Remove the chunk from its freelist.  It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks list
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks list
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Remove from a list by size.  Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);

#define index_bounds_check(index)                                         \
  assert(index == SpecializedIndex ||                                     \
         index == SmallIndex ||                                           \
         index == MediumIndex ||                                          \
         index == HumongousIndex, "Bad index: %d", (int) index)

  size_t num_free_chunks(ChunkIndex index) const {
    index_bounds_check(index);

    if (index == HumongousIndex) {
      return _humongous_dictionary.total_free_blocks();
    }

    ssize_t count = _free_chunks[index].count();
    return count == -1 ? 0 : (size_t) count;
  }

  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
    index_bounds_check(index);

    size_t word_size = 0;
    if (index == HumongousIndex) {
      word_size = _humongous_dictionary.total_size();
    } else {
      const size_t size_per_chunk_in_words = _free_chunks[index].size();
      word_size = size_per_chunk_in_words * num_free_chunks(index);
    }

    return word_size * BytesPerWord;
  }

  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
                                         num_free_chunks(SmallIndex),
                                         num_free_chunks(MediumIndex),
                                         num_free_chunks(HumongousIndex),
                                         size_free_chunks_in_bytes(SpecializedIndex),
                                         size_free_chunks_in_bytes(SmallIndex),
                                         size_free_chunks_in_bytes(MediumIndex),
                                         size_free_chunks_in_bytes(HumongousIndex));
  }

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;
};

class SmallBlocks : public CHeapObj<mtClass> {
  const static uint _small_block_max_size = sizeof(TreeChunk<Metablock, FreeList<Metablock> >)/HeapWordSize;
  const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;

 private:
  FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];

  FreeList<Metablock>& list_at(size_t word_size) {
    assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
    return _small_lists[word_size - _small_block_min_size];
  }

 public:
  SmallBlocks() {
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      _small_lists[k].set_size(i);
    }
  }

  size_t total_size() const {
    size_t result = 0;
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      result = result + _small_lists[k].count() * _small_lists[k].size();
    }
    return result;
  }

  static uint small_block_max_size() { return _small_block_max_size; }
  static uint small_block_min_size() { return _small_block_min_size; }

  MetaWord* get_block(size_t word_size) {
    if (list_at(word_size).count() > 0) {
      MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
      return new_block;
    } else {
      return NULL;
    }
  }
  void return_block(Metablock* free_chunk, size_t word_size) {
    list_at(word_size).return_chunk_at_head(free_chunk, false);
    assert(list_at(word_size).count() > 0, "Should have a chunk");
  }

  void print_on(outputStream* st) const {
    st->print_cr("SmallBlocks:");
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
    }
  }
};

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
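// Typical use -- a sketch of the interplay, not code from this file:
//   BlockFreelist bf;
//   bf.return_block(p, word_size);           // deallocate into the freelist
//   MetaWord* q = bf.get_block(word_size);   // may split a larger block
// Blocks smaller than SmallBlocks::small_block_max_size() words live in the
// per-size SmallBlocks lists; larger blocks live in the tree dictionary
// (see get_block() and return_block() below).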
class BlockFreelist : public CHeapObj<mtClass> {
  BlockTreeDictionary* const _dictionary;
  SmallBlocks* _small_blocks;

  // Only allocate and split from freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }
  SmallBlocks* small_blocks() {
    if (_small_blocks == NULL) {
      _small_blocks = new SmallBlocks();
    }
    return _small_blocks;
  }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get and return a block to the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() const {
    size_t result = dictionary()->total_size();
    if (_small_blocks != NULL) {
      result = result + _small_blocks->total_size();
    }
    return result;
  }

  static size_t min_dictionary_size() { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
  void print_on(outputStream* st) const;
};

// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  // Address of the next available space in _virtual_space.
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;

 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uintx container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to VirtualSpace.
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always adds up because all the chunk sizes are multiples of
  // the smallest chunk size.
  void retire(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

#define assert_is_ptr_aligned(ptr, alignment) \
  assert(is_ptr_aligned(ptr, alignment),      \
         PTR_FORMAT " is not aligned to "     \
         SIZE_FORMAT, p2i(ptr), alignment)

#define assert_is_size_aligned(size, alignment) \
  assert(is_size_aligned(size, alignment),      \
         SIZE_FORMAT " is not aligned to "      \
         SIZE_FORMAT, size, alignment)


// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// byte_size is the size of the associated virtualspace.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

#if INCLUDE_CDS
  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // at a configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get a mmap region anywhere if the SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    MetaspaceShared::initialize_shared_rs(&_rs);
  } else
#endif
  {
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uintx VirtualSpaceNode::container_count_slow() {
  uintx count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif
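// Memory layout of a VirtualSpaceNode -- an illustrative sketch of the
// invariants used by purge() and take_from_committed(), not additional
// specification:
//
//   bottom()                       top()                         end()
//      |---- carved-up chunks ----|---- free_words_in_vs() ----|
//
// Chunks are carved contiguously from bottom() upward by bumping _top, so
// walking from first_chunk() to top() visits every chunk in the node, and
// [top(), end()) is committed but not yet handed out.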
// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

  // Chunk up the unused committed space in the current
  // virtual space and add the chunks to the free list.
  void retire_current_virtual_space();

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  bool contains(const void* ptr);

  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;

// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Maximum number of small chunks to allocate to a SpaceManager
  static uint const _small_chunk_limit;

  // Sum of all space in allocated blocks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist* _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const { return _block_freelists; }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  bool is_class() { return _mdtype == Metaspace::ClassType; }

  // Accessors
  size_t specialized_chunk_size() { return (size_t) (is_class() ? ClassSpecializedChunk : SpecializedChunk); }
  size_t small_chunk_size()       { return (size_t) (is_class() ? ClassSmallChunk : SmallChunk); }
  size_t medium_chunk_size()      { return (size_t) (is_class() ? ClassMediumChunk : MediumChunk); }
  size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }

  size_t smallest_chunk_size()    { return specialized_chunk_size(); }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks
  // by the given size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager.  That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in-use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);
  // Allocates a block from a small chunk
  MetaWord* get_small_chunk_and_allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // returns the chunk size to use when expanding space for an allocation.
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  // This adjusts the size given to be greater than the minimum allocation size in
  // words for data in metaspace.  Essentially the minimum size is currently 3 words.
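  // Worked example (an illustration, assuming a 64-bit VM where
  // sizeof(Metablock) is 3 words == 24 bytes and
  // Metachunk::object_alignment() is 8 bytes): a request for 1 word
  // (8 bytes) becomes MAX2(8, 24) = 24 bytes, which is already aligned,
  // i.e. 3 words.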
  size_t get_allocation_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag,
            Monitor::_safepoint_check_never);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         "Inconsistency in container_count _container_count " UINTX_FORMAT
         " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}

BlockFreelist::~BlockFreelist() {
  delete _dictionary;
  if (_small_blocks != NULL) {
    delete _small_blocks;
  }
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");

  Metablock* free_chunk = ::new (p) Metablock(word_size);
  if (word_size < SmallBlocks::small_block_max_size()) {
    small_blocks()->return_block(free_chunk, word_size);
  } else {
    dictionary()->return_chunk(free_chunk);
  }
  log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
            SIZE_FORMAT, p2i(free_chunk), word_size);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");

  // Try small_blocks first.
  if (word_size < SmallBlocks::small_block_max_size()) {
    // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
    // this space manager.
    MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
    if (new_block != NULL) {
      log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
                p2i(new_block), word_size);
      return new_block;
    }
  }

  if (word_size < BlockFreelist::min_dictionary_size()) {
    // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= SmallBlocks::small_block_min_size()) {
    return_block(new_block + word_size, unused);
  }

  log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
            p2i(new_block), word_size);
  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  dictionary()->print_free_lists(st);
  if (_small_blocks != NULL) {
    _small_blocks->print_on(st);
  }
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
      "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    Log(gc, metaspace, freelist) log;
    log.debug("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
    // Dump some information about the virtual space that is nearly full
    ResourceMark rm;
    print_on(log.debug_stream());
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned only the middle alignment of the VirtualSpace is used.
  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agrees.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                                             Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
        "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                 (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
           "Reserved start was not set properly " PTR_FORMAT
           " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
           "Reserved size was not set properly " SIZE_FORMAT
           " != " SIZE_FORMAT, reserved()->word_size(),
           _rs.size() / BytesPerWord);
  }

  return result;
}
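// Note on expand_by() above -- an illustrative example, not additional
// specification. Assuming, for illustration, a 64 KB commit granularity:
// a request of min_words = 8 K words (64 KB) with preferred_words = 32 K
// words (256 KB) commits MIN2(256 KB, uncommitted) bytes, i.e. the
// preferred amount whenever enough reserved space remains, and fails only
// if even the 64 KB minimum does not fit in the uncommitted remainder.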
void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
           "[" PTR_FORMAT ", " PTR_FORMAT ", "
           PTR_FORMAT ", " PTR_FORMAT ")",
           p2i(vs), capacity / K,
           capacity == 0 ? 0 : used * 100 / capacity,
           p2i(bottom()), p2i(top()), p2i(end()),
           p2i(vs->high_boundary()));
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                        \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
         "Too much committed memory. Committed: " SIZE_FORMAT \
         " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
         MetaspaceAux::committed_bytes(), MaxMetaspaceSize);

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->word_size());
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    DEBUG_ONLY(vsl->verify_container_count();)
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except for at
// unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(SpaceManager::expand_lock());

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  DEBUG_ONLY(verify_container_count();)
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->free_chunks(index)->size();

    while (free_words_in_vs() >= chunk_size) {
      Metachunk* chunk = get_chunk_vs(chunk_size);
      assert(chunk != NULL, "allocation should have been successful");

      chunk_manager->return_chunks(index, chunk);
      chunk_manager->inc_free_chunks_total(chunk_size);
    }
    DEBUG_ONLY(verify_container_count();)
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
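// For scale -- illustrative arithmetic only: VirtualSpaceSize is 256 K words,
// so on a 64-bit VM each new node in the non-class list normally reserves
// 256 K * 8 = 2 MB (more if a single allocation needs it; see the
// MAX2(VirtualSpaceSize, preferred_words) computation in expand_by() above).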
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
        "Reserved memory size differs from requested memory size");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (log_is_enabled(Trace, gc, metaspace)) {
    Log(gc, metaspace) log;
    VirtualSpaceNode* vsl = current_virtual_space();
    ResourceMark rm;
    vsl->print_on(log.trace_stream());
  }
}

bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_size_aligned(min_words, Metaspace::commit_alignment_words());
  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
          "The new VirtualSpace was pre-committed, so it "
          "should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    node->print_on(st);
  }
}

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With the perm gen, increases in the perm
// gen were bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
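// Worked example of this step policy -- illustrative flag values only:
// with MinMetaspaceExpansion = 256 KB and MaxMetaspaceExpansion = 4 MB,
// a 100 KB shortfall grows the HWM by 256 KB, a 1 MB shortfall by 4 MB,
// and a 6 MB shortfall by 6 MB + 256 KB (all rounded to the commit
// alignment).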
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_size_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  size_t capacity_until_GC = (size_t) _capacity_until_GC;
  size_t new_value = capacity_until_GC + v;

  if (new_value < capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
  }

  intptr_t expected = (intptr_t) capacity_until_GC;
  intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);

  if (expected != actual) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = capacity_until_GC;
  }
  return true;
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
         capacity_until_gc, committed_bytes);

  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);

  return left_to_commit / BytesPerWord;
}
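// Worked example for allowed_expansion() above -- illustrative numbers only:
// with committed_bytes = 40 MB, capacity_until_GC = 60 MB and
// MaxMetaspaceSize = 100 MB, left_until_GC = 20 MB and left_until_max
// = 60 MB, so up to 20 MB (returned in words) may still be committed
// before the GC threshold is hit.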
1606 size_t expand_bytes = minimum_desired_capacity - capacity_until_GC; 1607 expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment()); 1608 // Don't expand unless it's significant 1609 if (expand_bytes >= MinMetaspaceExpansion) { 1610 size_t new_capacity_until_GC = 0; 1611 bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC); 1612 assert(succeeded, "Should always successfully increment HWM when at safepoint"); 1613 1614 Metaspace::tracer()->report_gc_threshold(capacity_until_GC, 1615 new_capacity_until_GC, 1616 MetaspaceGCThresholdUpdater::ComputeNewSize); 1617 log_trace(gc, metaspace)(" expanding: minimum_desired_capacity: %6.1fKB expand_bytes: %6.1fKB MinMetaspaceExpansion: %6.1fKB new metaspace HWM: %6.1fKB", 1618 minimum_desired_capacity / (double) K, 1619 expand_bytes / (double) K, 1620 MinMetaspaceExpansion / (double) K, 1621 new_capacity_until_GC / (double) K); 1622 } 1623 return; 1624 } 1625 1626 // No expansion, now see if we want to shrink 1627 // We would never want to shrink more than this 1628 assert(capacity_until_GC >= minimum_desired_capacity, 1629 SIZE_FORMAT " >= " SIZE_FORMAT, 1630 capacity_until_GC, minimum_desired_capacity); 1631 size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity; 1632 1633 // Should shrinking be considered? 1634 if (MaxMetaspaceFreeRatio < 100) { 1635 const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0; 1636 const double minimum_used_percentage = 1.0 - maximum_free_percentage; 1637 const double max_tmp = used_after_gc / minimum_used_percentage; 1638 size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx)); 1639 maximum_desired_capacity = MAX2(maximum_desired_capacity, 1640 MetaspaceSize); 1641 log_trace(gc, metaspace)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f", 1642 maximum_free_percentage, minimum_used_percentage); 1643 log_trace(gc, metaspace)(" minimum_desired_capacity: %6.1fKB maximum_desired_capacity: %6.1fKB", 1644 minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K); 1645 1646 assert(minimum_desired_capacity <= maximum_desired_capacity, 1647 "sanity check"); 1648 1649 if (capacity_until_GC > maximum_desired_capacity) { 1650 // Capacity too large, compute shrinking size 1651 shrink_bytes = capacity_until_GC - maximum_desired_capacity; 1652 // We don't want to shrink all the way back to initSize if people call 1653 // System.gc(), because some programs do that between "phases" and then 1654 // we'd just have to grow the heap up again for the next phase. So we 1655 // damp the shrinking: 0% on the first call, 10% on the second call, 40% 1656 // on the third call, and 100% by the fourth call. But if we recompute 1657 // size without shrinking, it goes back to 0%.
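// Concretely: consecutive shrink-eligible collections release 0%, 10%, 40%,
// and then 100% of the computed excess via the factor updates below; a
// recomputation that does not shrink resets the factor to 0.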
1658 shrink_bytes = shrink_bytes / 100 * current_shrink_factor; 1659 1660 shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment()); 1661 1662 assert(shrink_bytes <= max_shrink_bytes, 1663 "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, 1664 shrink_bytes, max_shrink_bytes); 1665 if (current_shrink_factor == 0) { 1666 _shrink_factor = 10; 1667 } else { 1668 _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100); 1669 } 1670 log_trace(gc, metaspace)(" shrinking: initThreshold: %.1fK maximum_desired_capacity: %.1fK", 1671 MetaspaceSize / (double) K, maximum_desired_capacity / (double) K); 1672 log_trace(gc, metaspace)(" shrink_bytes: %.1fK current_shrink_factor: %d new shrink factor: %d MinMetaspaceExpansion: %.1fK", 1673 shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K); 1674 } 1675 } 1676 1677 // Don't shrink unless it's significant 1678 if (shrink_bytes >= MinMetaspaceExpansion && 1679 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { 1680 size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes); 1681 Metaspace::tracer()->report_gc_threshold(capacity_until_GC, 1682 new_capacity_until_GC, 1683 MetaspaceGCThresholdUpdater::ComputeNewSize); 1684 } 1685 } 1686 1687 // Metadebug methods 1688 1689 void Metadebug::init_allocation_fail_alot_count() { 1690 if (MetadataAllocationFailALot) { 1691 _allocation_fail_alot_count = 1692 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0)); 1693 } 1694 } 1695 1696 #ifdef ASSERT 1697 bool Metadebug::test_metadata_failure() { 1698 if (MetadataAllocationFailALot && 1699 Threads::is_vm_complete()) { 1700 if (_allocation_fail_alot_count > 0) { 1701 _allocation_fail_alot_count--; 1702 } else { 1703 log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot"); 1704 init_allocation_fail_alot_count(); 1705 return true; 1706 } 1707 } 1708 return false; 1709 } 1710 #endif 1711 1712 // ChunkManager methods 1713 1714 size_t ChunkManager::free_chunks_total_words() { 1715 return _free_chunks_total; 1716 } 1717 1718 size_t ChunkManager::free_chunks_total_bytes() { 1719 return free_chunks_total_words() * BytesPerWord; 1720 } 1721 1722 size_t ChunkManager::free_chunks_count() { 1723 #ifdef ASSERT 1724 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) { 1725 MutexLockerEx cl(SpaceManager::expand_lock(), 1726 Mutex::_no_safepoint_check_flag); 1727 // This lock is only needed in debug because the verification 1728 // of the _free_chunks_totals walks the list of free chunks 1729 slow_locked_verify_free_chunks_count(); 1730 } 1731 #endif 1732 return _free_chunks_count; 1733 } 1734 1735 void ChunkManager::locked_verify_free_chunks_total() { 1736 assert_lock_strong(SpaceManager::expand_lock()); 1737 assert(sum_free_chunks() == _free_chunks_total, 1738 "_free_chunks_total " SIZE_FORMAT " is not the" 1739 " same as sum " SIZE_FORMAT, _free_chunks_total, 1740 sum_free_chunks()); 1741 } 1742 1743 void ChunkManager::verify_free_chunks_total() { 1744 MutexLockerEx cl(SpaceManager::expand_lock(), 1745 Mutex::_no_safepoint_check_flag); 1746 locked_verify_free_chunks_total(); 1747 } 1748 1749 void ChunkManager::locked_verify_free_chunks_count() { 1750 assert_lock_strong(SpaceManager::expand_lock()); 1751 assert(sum_free_chunks_count() == _free_chunks_count, 1752 "_free_chunks_count " SIZE_FORMAT " is not the" 1753 " same as sum " SIZE_FORMAT, _free_chunks_count, 1754 sum_free_chunks_count()); 
1755 } 1756 1757 void ChunkManager::verify_free_chunks_count() { 1758 #ifdef ASSERT 1759 MutexLockerEx cl(SpaceManager::expand_lock(), 1760 Mutex::_no_safepoint_check_flag); 1761 locked_verify_free_chunks_count(); 1762 #endif 1763 } 1764 1765 void ChunkManager::verify() { 1766 MutexLockerEx cl(SpaceManager::expand_lock(), 1767 Mutex::_no_safepoint_check_flag); 1768 locked_verify(); 1769 } 1770 1771 void ChunkManager::locked_verify() { 1772 locked_verify_free_chunks_count(); 1773 locked_verify_free_chunks_total(); 1774 } 1775 1776 void ChunkManager::locked_print_free_chunks(outputStream* st) { 1777 assert_lock_strong(SpaceManager::expand_lock()); 1778 st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, 1779 _free_chunks_total, _free_chunks_count); 1780 } 1781 1782 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) { 1783 assert_lock_strong(SpaceManager::expand_lock()); 1784 st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, 1785 sum_free_chunks(), sum_free_chunks_count()); 1786 } 1787 ChunkList* ChunkManager::free_chunks(ChunkIndex index) { 1788 return &_free_chunks[index]; 1789 } 1790 1791 // These methods that sum the free chunk lists are used in printing 1792 // methods that are used in product builds. 1793 size_t ChunkManager::sum_free_chunks() { 1794 assert_lock_strong(SpaceManager::expand_lock()); 1795 size_t result = 0; 1796 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { 1797 ChunkList* list = free_chunks(i); 1798 1799 if (list == NULL) { 1800 continue; 1801 } 1802 1803 result = result + list->count() * list->size(); 1804 } 1805 result = result + humongous_dictionary()->total_size(); 1806 return result; 1807 } 1808 1809 size_t ChunkManager::sum_free_chunks_count() { 1810 assert_lock_strong(SpaceManager::expand_lock()); 1811 size_t count = 0; 1812 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { 1813 ChunkList* list = free_chunks(i); 1814 if (list == NULL) { 1815 continue; 1816 } 1817 count = count + list->count(); 1818 } 1819 count = count + humongous_dictionary()->total_free_blocks(); 1820 return count; 1821 } 1822 1823 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) { 1824 ChunkIndex index = list_index(word_size); 1825 assert(index < HumongousIndex, "No humongous list"); 1826 return free_chunks(index); 1827 } 1828 1829 Metachunk* ChunkManager::free_chunks_get(size_t word_size) { 1830 assert_lock_strong(SpaceManager::expand_lock()); 1831 1832 slow_locked_verify(); 1833 1834 Metachunk* chunk = NULL; 1835 if (list_index(word_size) != HumongousIndex) { 1836 ChunkList* free_list = find_free_chunks_list(word_size); 1837 assert(free_list != NULL, "Sanity check"); 1838 1839 chunk = free_list->head(); 1840 1841 if (chunk == NULL) { 1842 return NULL; 1843 } 1844 1845 // Remove the chunk as the head of the list. 
1846 free_list->remove_chunk(chunk); 1847 1848 log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT, 1849 p2i(free_list), p2i(chunk), chunk->word_size()); 1850 } else { 1851 chunk = humongous_dictionary()->get_chunk( 1852 word_size, 1853 FreeBlockDictionary<Metachunk>::atLeast); 1854 1855 if (chunk == NULL) { 1856 return NULL; 1857 } 1858 1859 log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT, 1860 chunk->word_size(), word_size, chunk->word_size() - word_size); 1861 } 1862 1863 // Chunk is being removed from the chunks free list. 1864 dec_free_chunks_total(chunk->word_size()); 1865 1866 // Remove it from the links to this freelist 1867 chunk->set_next(NULL); 1868 chunk->set_prev(NULL); 1869 #ifdef ASSERT 1870 // Chunk is no longer on any freelist. Setting to false makes container_count_slow() 1871 // work. 1872 chunk->set_is_tagged_free(false); 1873 #endif 1874 chunk->container()->inc_container_count(); 1875 1876 slow_locked_verify(); 1877 return chunk; 1878 } 1879 1880 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) { 1881 assert_lock_strong(SpaceManager::expand_lock()); 1882 slow_locked_verify(); 1883 1884 // Take from the beginning of the list 1885 Metachunk* chunk = free_chunks_get(word_size); 1886 if (chunk == NULL) { 1887 return NULL; 1888 } 1889 1890 assert((word_size <= chunk->word_size()) || 1891 (list_index(chunk->word_size()) == HumongousIndex), 1892 "Non-humongous variable sized chunk"); 1893 Log(gc, metaspace, freelist) log; 1894 if (log.is_debug()) { 1895 size_t list_count; 1896 if (list_index(word_size) < HumongousIndex) { 1897 ChunkList* list = find_free_chunks_list(word_size); 1898 list_count = list->count(); 1899 } else { 1900 list_count = humongous_dictionary()->total_count(); 1901 } 1902 log.debug("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ", 1903 p2i(this), p2i(chunk), chunk->word_size(), list_count); 1904 ResourceMark rm; 1905 locked_print_free_chunks(log.debug_stream()); 1906 } 1907 1908 return chunk; 1909 } 1910 1911 void ChunkManager::print_on(outputStream* out) const { 1912 const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics(out); 1913 } 1914 1915 // SpaceManager methods 1916 1917 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type, 1918 size_t* chunk_word_size, 1919 size_t* class_chunk_word_size) { 1920 switch (type) { 1921 case Metaspace::BootMetaspaceType: 1922 *chunk_word_size = Metaspace::first_chunk_word_size(); 1923 *class_chunk_word_size = Metaspace::first_class_chunk_word_size(); 1924 break; 1925 case Metaspace::ROMetaspaceType: 1926 *chunk_word_size = SharedReadOnlySize / wordSize; 1927 *class_chunk_word_size = ClassSpecializedChunk; 1928 break; 1929 case Metaspace::ReadWriteMetaspaceType: 1930 *chunk_word_size = SharedReadWriteSize / wordSize; 1931 *class_chunk_word_size = ClassSpecializedChunk; 1932 break; 1933 case Metaspace::AnonymousMetaspaceType: 1934 case Metaspace::ReflectionMetaspaceType: 1935 *chunk_word_size = SpecializedChunk; 1936 *class_chunk_word_size = ClassSpecializedChunk; 1937 break; 1938 default: 1939 *chunk_word_size = SmallChunk; 1940 *class_chunk_word_size = ClassSmallChunk; 1941 break; 1942 } 1943 assert(*chunk_word_size != 0 && *class_chunk_word_size != 0, 1944 "Initial chunk sizes bad: data " SIZE_FORMAT 1945 " class "
SIZE_FORMAT, 1946 *chunk_word_size, *class_chunk_word_size); 1947 } 1948 1949 size_t SpaceManager::sum_free_in_chunks_in_use() const { 1950 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 1951 size_t free = 0; 1952 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1953 Metachunk* chunk = chunks_in_use(i); 1954 while (chunk != NULL) { 1955 free += chunk->free_word_size(); 1956 chunk = chunk->next(); 1957 } 1958 } 1959 return free; 1960 } 1961 1962 size_t SpaceManager::sum_waste_in_chunks_in_use() const { 1963 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 1964 size_t result = 0; 1965 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1966 result += sum_waste_in_chunks_in_use(i); 1967 } 1968 1969 return result; 1970 } 1971 1972 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const { 1973 size_t result = 0; 1974 Metachunk* chunk = chunks_in_use(index); 1975 // Count the free space in all the chunks except the 1976 // current chunk, from which allocations are still being done. 1977 while (chunk != NULL) { 1978 if (chunk != current_chunk()) { 1979 result += chunk->free_word_size(); 1980 } 1981 chunk = chunk->next(); 1982 } 1983 return result; 1984 } 1985 1986 size_t SpaceManager::sum_capacity_in_chunks_in_use() const { 1987 // For CMS use "allocated_chunks_words()", which does not need the 1988 // Metaspace lock. For the other collectors sum over the 1989 // lists. Use both methods as a check that "allocated_chunks_words()" 1990 // is correct. That is, sum_capacity_in_chunks_in_use() is too expensive 1991 // to use in the product, so allocated_chunks_words() should be used, 1992 // but allow for checking that allocated_chunks_words() returns the same 1993 // value as sum_capacity_in_chunks_in_use(), which is the definitive 1994 // answer.
1995 if (UseConcMarkSweepGC) { 1996 return allocated_chunks_words(); 1997 } else { 1998 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 1999 size_t sum = 0; 2000 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2001 Metachunk* chunk = chunks_in_use(i); 2002 while (chunk != NULL) { 2003 sum += chunk->word_size(); 2004 chunk = chunk->next(); 2005 } 2006 } 2007 return sum; 2008 } 2009 } 2010 2011 size_t SpaceManager::sum_count_in_chunks_in_use() { 2012 size_t count = 0; 2013 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2014 count = count + sum_count_in_chunks_in_use(i); 2015 } 2016 2017 return count; 2018 } 2019 2020 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) { 2021 size_t count = 0; 2022 Metachunk* chunk = chunks_in_use(i); 2023 while (chunk != NULL) { 2024 count++; 2025 chunk = chunk->next(); 2026 } 2027 return count; 2028 } 2029 2030 2031 size_t SpaceManager::sum_used_in_chunks_in_use() const { 2032 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 2033 size_t used = 0; 2034 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2035 Metachunk* chunk = chunks_in_use(i); 2036 while (chunk != NULL) { 2037 used += chunk->used_word_size(); 2038 chunk = chunk->next(); 2039 } 2040 } 2041 return used; 2042 } 2043 2044 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const { 2045 2046 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2047 Metachunk* chunk = chunks_in_use(i); 2048 st->print("SpaceManager: %s " PTR_FORMAT, 2049 chunk_size_name(i), p2i(chunk)); 2050 if (chunk != NULL) { 2051 st->print_cr(" free " SIZE_FORMAT, 2052 chunk->free_word_size()); 2053 } else { 2054 st->cr(); 2055 } 2056 } 2057 2058 chunk_manager()->locked_print_free_chunks(st); 2059 chunk_manager()->locked_print_sum_free_chunks(st); 2060 } 2061 2062 size_t SpaceManager::calc_chunk_size(size_t word_size) { 2063 2064 // Decide between a small chunk and a medium chunk. Up to 2065 // _small_chunk_limit small chunks can be allocated. 2066 // After that a medium chunk is preferred. 2067 size_t chunk_word_size; 2068 if (chunks_in_use(MediumIndex) == NULL && 2069 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) { 2070 chunk_word_size = (size_t) small_chunk_size(); 2071 if (word_size + Metachunk::overhead() > small_chunk_size()) { 2072 chunk_word_size = medium_chunk_size(); 2073 } 2074 } else { 2075 chunk_word_size = medium_chunk_size(); 2076 } 2077 2078 // Might still need a humongous chunk. Enforce 2079 // humongous allocation sizes to be aligned up to 2080 // the smallest chunk size.
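// Illustration (using the non-class sizes from the ChunkSizes enum, SmallChunk
// = 512 words and MediumChunk = 8 * K words): a request gets a small chunk only
// while no medium chunk is in use, fewer than _small_chunk_limit small chunks
// are in use, and word_size plus overhead fits in one; otherwise a medium chunk
// is chosen; a request too large even for a medium chunk is humongous and is
// rounded up below to a multiple of the smallest chunk size.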
2081 size_t if_humongous_sized_chunk = 2082 align_size_up(word_size + Metachunk::overhead(), 2083 smallest_chunk_size()); 2084 chunk_word_size = 2085 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk); 2086 2087 assert(!SpaceManager::is_humongous(word_size) || 2088 chunk_word_size == if_humongous_sized_chunk, 2089 "Size calculation is wrong, word_size " SIZE_FORMAT 2090 " chunk_word_size " SIZE_FORMAT, 2091 word_size, chunk_word_size); 2092 Log(gc, metaspace, alloc) log; 2093 if (log.is_debug() && SpaceManager::is_humongous(word_size)) { 2094 log.debug("Metadata humongous allocation:"); 2095 log.debug(" word_size " PTR_FORMAT, word_size); 2096 log.debug(" chunk_word_size " PTR_FORMAT, chunk_word_size); 2097 log.debug(" chunk overhead " PTR_FORMAT, Metachunk::overhead()); 2098 } 2099 return chunk_word_size; 2100 } 2101 2102 void SpaceManager::track_metaspace_memory_usage() { 2103 if (is_init_completed()) { 2104 if (is_class()) { 2105 MemoryService::track_compressed_class_memory_usage(); 2106 } 2107 MemoryService::track_metaspace_memory_usage(); 2108 } 2109 } 2110 2111 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) { 2112 assert(vs_list()->current_virtual_space() != NULL, 2113 "Should have been set"); 2114 assert(current_chunk() == NULL || 2115 current_chunk()->allocate(word_size) == NULL, 2116 "Don't need to expand"); 2117 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 2118 2119 if (log_is_enabled(Trace, gc, metaspace, freelist)) { 2120 size_t words_left = 0; 2121 size_t words_used = 0; 2122 if (current_chunk() != NULL) { 2123 words_left = current_chunk()->free_word_size(); 2124 words_used = current_chunk()->used_word_size(); 2125 } 2126 log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left", 2127 word_size, words_used, words_left); 2128 } 2129 2130 // Get another chunk 2131 size_t grow_chunks_by_words = calc_chunk_size(word_size); 2132 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words); 2133 2134 MetaWord* mem = NULL; 2135 2136 // If a chunk was available, add it to the in-use chunk list 2137 // and do an allocation from it. 2138 if (next != NULL) { 2139 // Add to this manager's list of chunks in use. 2140 add_chunk(next, false); 2141 mem = next->allocate(word_size); 2142 } 2143 2144 // Track metaspace memory usage statistic. 2145 track_metaspace_memory_usage(); 2146 2147 return mem; 2148 } 2149 2150 void SpaceManager::print_on(outputStream* st) const { 2151 2152 for (ChunkIndex i = ZeroIndex; 2153 i < NumberOfInUseLists ; 2154 i = next_chunk_index(i) ) { 2155 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT, 2156 p2i(chunks_in_use(i)), 2157 chunks_in_use(i) == NULL ? 
0 : chunks_in_use(i)->word_size()); 2158 } 2159 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT 2160 " Humongous " SIZE_FORMAT, 2161 sum_waste_in_chunks_in_use(SmallIndex), 2162 sum_waste_in_chunks_in_use(MediumIndex), 2163 sum_waste_in_chunks_in_use(HumongousIndex)); 2164 // block free lists 2165 if (block_freelists() != NULL) { 2166 st->print_cr("total in block free lists " SIZE_FORMAT, 2167 block_freelists()->total_size()); 2168 } 2169 } 2170 2171 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype, 2172 Mutex* lock) : 2173 _mdtype(mdtype), 2174 _allocated_blocks_words(0), 2175 _allocated_chunks_words(0), 2176 _allocated_chunks_count(0), 2177 _block_freelists(NULL), 2178 _lock(lock) 2179 { 2180 initialize(); 2181 } 2182 2183 void SpaceManager::inc_size_metrics(size_t words) { 2184 assert_lock_strong(SpaceManager::expand_lock()); 2185 // Total of allocated Metachunks and allocated Metachunks count 2186 // for each SpaceManager 2187 _allocated_chunks_words = _allocated_chunks_words + words; 2188 _allocated_chunks_count++; 2189 // Global total of capacity in allocated Metachunks 2190 MetaspaceAux::inc_capacity(mdtype(), words); 2191 // Global total of allocated Metablocks. 2192 // used_words_slow() includes the overhead in each 2193 // Metachunk so include it in the used when the 2194 // Metachunk is first added (so only added once per 2195 // Metachunk). 2196 MetaspaceAux::inc_used(mdtype(), Metachunk::overhead()); 2197 } 2198 2199 void SpaceManager::inc_used_metrics(size_t words) { 2200 // Add to the per SpaceManager total 2201 Atomic::add_ptr(words, &_allocated_blocks_words); 2202 // Add to the global total 2203 MetaspaceAux::inc_used(mdtype(), words); 2204 } 2205 2206 void SpaceManager::dec_total_from_size_metrics() { 2207 MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words()); 2208 MetaspaceAux::dec_used(mdtype(), allocated_blocks_words()); 2209 // Also deduct the overhead per Metachunk 2210 MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead()); 2211 } 2212 2213 void SpaceManager::initialize() { 2214 Metadebug::init_allocation_fail_alot_count(); 2215 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2216 _chunks_in_use[i] = NULL; 2217 } 2218 _current_chunk = NULL; 2219 log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this)); 2220 } 2221 2222 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) { 2223 if (chunks == NULL) { 2224 return; 2225 } 2226 ChunkList* list = free_chunks(index); 2227 assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes"); 2228 assert_lock_strong(SpaceManager::expand_lock()); 2229 Metachunk* cur = chunks; 2230 2231 // This returns chunks one at a time. 
If a new 2232 // class List can be created that is a base class 2233 // of FreeList, then something like FreeList::prepend() 2234 // could be used in place of this loop 2235 while (cur != NULL) { 2236 assert(cur->container() != NULL, "Container should have been set"); 2237 cur->container()->dec_container_count(); 2238 // Capture the next link before it is changed 2239 // by the call to return_chunk_at_head(); 2240 Metachunk* next = cur->next(); 2241 DEBUG_ONLY(cur->set_is_tagged_free(true);) 2242 NOT_PRODUCT(cur->mangle(badMetaWordVal);) 2243 list->return_chunk_at_head(cur); 2244 cur = next; 2245 } 2246 } 2247 2248 SpaceManager::~SpaceManager() { 2249 // The assert below takes this->_lock, which can't be done while holding expand_lock() 2250 assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(), 2251 "sum_capacity_in_chunks_in_use() " SIZE_FORMAT 2252 " allocated_chunks_words() " SIZE_FORMAT, 2253 sum_capacity_in_chunks_in_use(), allocated_chunks_words()); 2254 2255 MutexLockerEx fcl(SpaceManager::expand_lock(), 2256 Mutex::_no_safepoint_check_flag); 2257 2258 chunk_manager()->slow_locked_verify(); 2259 2260 dec_total_from_size_metrics(); 2261 2262 Log(gc, metaspace, freelist) log; 2263 if (log.is_trace()) { 2264 log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this)); 2265 ResourceMark rm; 2266 locked_print_chunks_in_use_on(log.trace_stream()); 2267 if (block_freelists() != NULL) { 2268 block_freelists()->print_on(log.trace_stream()); 2269 } 2270 } 2271 2272 // Have to update before the chunks_in_use lists are emptied 2273 // below. 2274 chunk_manager()->inc_free_chunks_total(allocated_chunks_words(), 2275 sum_count_in_chunks_in_use()); 2276 2277 // Add all the chunks in use by this space manager 2278 // to the global list of free chunks. 2279 2280 // Follow each list of chunks-in-use and add them to the 2281 // free lists. Each list is NULL terminated. 2282 2283 for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) { 2284 log.trace("returned " SIZE_FORMAT " %s chunks to freelist", sum_count_in_chunks_in_use(i), chunk_size_name(i)); 2285 Metachunk* chunks = chunks_in_use(i); 2286 chunk_manager()->return_chunks(i, chunks); 2287 set_chunks_in_use(i, NULL); 2288 log.trace("updated freelist count " SSIZE_FORMAT " %s", chunk_manager()->free_chunks(i)->count(), chunk_size_name(i)); 2289 assert(i != HumongousIndex, "Humongous chunks are handled explicitly later"); 2290 } 2291 2292 // The medium chunk case may be optimized by passing the head and 2293 // tail of the medium chunk list to add_at_head(). The tail is often 2294 // the current chunk but there are probably exceptions. 2295 2296 // Humongous chunks 2297 log.trace("returned " SIZE_FORMAT " %s humongous chunks to dictionary", 2298 sum_count_in_chunks_in_use(HumongousIndex), chunk_size_name(HumongousIndex)); 2299 log.trace("Humongous chunk dictionary: "); 2300 // Humongous chunks are never the current chunk.
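// Humongous chunks have arbitrary sizes, so below they are returned to the
// size-keyed humongous dictionary rather than to one of the fixed-size
// freelists.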
2301 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex); 2302 2303 while (humongous_chunks != NULL) { 2304 DEBUG_ONLY(humongous_chunks->set_is_tagged_free(true);) 2305 NOT_PRODUCT(humongous_chunks->mangle(badMetaWordVal);) 2306 log.trace(PTR_FORMAT " (" SIZE_FORMAT ") ", p2i(humongous_chunks), humongous_chunks->word_size()); 2307 assert(humongous_chunks->word_size() == (size_t) 2308 align_size_up(humongous_chunks->word_size(), 2309 smallest_chunk_size()), 2310 "Humongous chunk size is wrong: word size " SIZE_FORMAT 2311 " granularity " SIZE_FORMAT, 2312 humongous_chunks->word_size(), smallest_chunk_size()); 2313 Metachunk* next_humongous_chunks = humongous_chunks->next(); 2314 humongous_chunks->container()->dec_container_count(); 2315 chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks); 2316 humongous_chunks = next_humongous_chunks; 2317 } 2318 log.trace("updated dictionary count " SIZE_FORMAT " %s", chunk_manager()->humongous_dictionary()->total_count(), chunk_size_name(HumongousIndex)); 2319 chunk_manager()->slow_locked_verify(); 2320 2321 if (_block_freelists != NULL) { 2322 delete _block_freelists; 2323 } 2324 } 2325 2326 const char* SpaceManager::chunk_size_name(ChunkIndex index) const { 2327 switch (index) { 2328 case SpecializedIndex: 2329 return "Specialized"; 2330 case SmallIndex: 2331 return "Small"; 2332 case MediumIndex: 2333 return "Medium"; 2334 case HumongousIndex: 2335 return "Humongous"; 2336 default: 2337 return NULL; 2338 } 2339 } 2340 2341 ChunkIndex ChunkManager::list_index(size_t size) { 2342 switch (size) { 2343 case SpecializedChunk: 2344 assert(SpecializedChunk == ClassSpecializedChunk, 2345 "Need branch for ClassSpecializedChunk"); 2346 return SpecializedIndex; 2347 case SmallChunk: 2348 case ClassSmallChunk: 2349 return SmallIndex; 2350 case MediumChunk: 2351 case ClassMediumChunk: 2352 return MediumIndex; 2353 default: 2354 assert(size > MediumChunk || size > ClassMediumChunk, 2355 "Not a humongous chunk"); 2356 return HumongousIndex; 2357 } 2358 } 2359 2360 void SpaceManager::deallocate(MetaWord* p, size_t word_size) { 2361 assert_lock_strong(_lock); 2362 // Allocations and deallocations are in raw_word_size 2363 size_t raw_word_size = get_allocation_word_size(word_size); 2364 // Lazily create a block_freelist 2365 if (block_freelists() == NULL) { 2366 _block_freelists = new BlockFreelist(); 2367 } 2368 block_freelists()->return_block(p, raw_word_size); 2369 } 2370 2371 // Adds a chunk to the list of chunks in use. 2372 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) { 2373 2374 assert(new_chunk != NULL, "Should not be NULL"); 2375 assert(new_chunk->next() == NULL, "Should not be on a list"); 2376 2377 new_chunk->reset_empty(); 2378 2379 // Find the correct list and set the current 2380 // chunk for that list. 2381 ChunkIndex index = ChunkManager::list_index(new_chunk->word_size()); 2382 2383 if (index != HumongousIndex) { 2384 retire_current_chunk(); 2385 set_current_chunk(new_chunk); 2386 new_chunk->set_next(chunks_in_use(index)); 2387 set_chunks_in_use(index, new_chunk); 2388 } else { 2389 // For null class loader data and DumpSharedSpaces, the first chunk isn't 2390 // small, so small will be null. Link this first chunk as the current 2391 // chunk. 2392 if (make_current) { 2393 // Set as the current chunk but otherwise treat as a humongous chunk. 2394 set_current_chunk(new_chunk); 2395 } 2396 // Link at head.
The _current_chunk only points to a humongous chunk for 2397 // the null class loader metaspace (the class and data virtual space managers), 2398 // so _current_chunk will not point to the tail 2399 // of the humongous chunks list. 2400 new_chunk->set_next(chunks_in_use(HumongousIndex)); 2401 set_chunks_in_use(HumongousIndex, new_chunk); 2402 2403 assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency"); 2404 } 2405 2406 // Add to the running sum of capacity 2407 inc_size_metrics(new_chunk->word_size()); 2408 2409 assert(new_chunk->is_empty(), "Not ready for reuse"); 2410 Log(gc, metaspace, freelist) log; 2411 if (log.is_trace()) { 2412 log.trace("SpaceManager::add_chunk: " SIZE_FORMAT, sum_count_in_chunks_in_use()); 2413 ResourceMark rm; 2414 outputStream* out = log.trace_stream(); 2415 new_chunk->print_on(out); 2416 chunk_manager()->locked_print_free_chunks(out); 2417 } 2418 } 2419 2420 void SpaceManager::retire_current_chunk() { 2421 if (current_chunk() != NULL) { 2422 size_t remaining_words = current_chunk()->free_word_size(); 2423 if (remaining_words >= BlockFreelist::min_dictionary_size()) { 2424 MetaWord* ptr = current_chunk()->allocate(remaining_words); 2425 deallocate(ptr, remaining_words); 2426 inc_used_metrics(remaining_words); 2427 } 2428 } 2429 } 2430 2431 Metachunk* SpaceManager::get_new_chunk(size_t word_size, 2432 size_t grow_chunks_by_words) { 2433 // Get a chunk from the chunk freelist 2434 Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words); 2435 2436 if (next == NULL) { 2437 next = vs_list()->get_new_chunk(word_size, 2438 grow_chunks_by_words, 2439 medium_chunk_bunch()); 2440 } 2441 2442 Log(gc, metaspace, alloc) log; 2443 if (log.is_debug() && next != NULL && 2444 SpaceManager::is_humongous(next->word_size())) { 2445 log.debug(" new humongous chunk word size " PTR_FORMAT, next->word_size()); 2446 } 2447 2448 return next; 2449 } 2450 2451 /* 2452 * The policy is to allocate up to _small_chunk_limit small chunks 2453 * after which only medium chunks are allocated. This is done to 2454 * reduce fragmentation. In some cases, this can result in a lot 2455 * of small chunks being allocated to the point where it's not 2456 * possible to expand. If this happens, there may be no medium chunks 2457 * available and OOME would be thrown. Instead of doing that, 2458 * if the allocation request size fits in a small chunk, an attempt 2459 * will be made to allocate a small chunk. 2460 */ 2461 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) { 2462 size_t raw_word_size = get_allocation_word_size(word_size); 2463 2464 if (raw_word_size + Metachunk::overhead() > small_chunk_size()) { 2465 return NULL; 2466 } 2467 2468 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 2469 MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag); 2470 2471 Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size()); 2472 2473 MetaWord* mem = NULL; 2474 2475 if (chunk != NULL) { 2476 // Add chunk to the in-use chunk list and do an allocation from it. 2477 // Add to this manager's list of chunks in use. 2478 add_chunk(chunk, false); 2479 mem = chunk->allocate(raw_word_size); 2480 2481 inc_used_metrics(raw_word_size); 2482 2483 // Track metaspace memory usage statistic.
2484 track_metaspace_memory_usage(); 2485 } 2486 2487 return mem; 2488 } 2489 2490 MetaWord* SpaceManager::allocate(size_t word_size) { 2491 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 2492 size_t raw_word_size = get_allocation_word_size(word_size); 2493 BlockFreelist* fl = block_freelists(); 2494 MetaWord* p = NULL; 2495 // Allocation from the dictionary is expensive in the sense that 2496 // the dictionary has to be searched for a size. Don't allocate 2497 // from the dictionary until it starts to get fat. Is this 2498 // a reasonable policy? Maybe a skinny dictionary is fast enough 2499 // for allocations. Do some profiling. JJJ 2500 if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) { 2501 p = fl->get_block(raw_word_size); 2502 } 2503 if (p == NULL) { 2504 p = allocate_work(raw_word_size); 2505 } 2506 2507 return p; 2508 } 2509 2510 // Returns the address of space allocated for "word_size". 2511 // This method does not know about blocks (Metablocks) 2512 MetaWord* SpaceManager::allocate_work(size_t word_size) { 2513 assert_lock_strong(_lock); 2514 #ifdef ASSERT 2515 if (Metadebug::test_metadata_failure()) { 2516 return NULL; 2517 } 2518 #endif 2519 // Is there space in the current chunk? 2520 MetaWord* result = NULL; 2521 2522 // For DumpSharedSpaces, only allocate out of the current chunk which is 2523 // never null because we gave it the size we wanted. Caller reports out 2524 // of memory if this returns null. 2525 if (DumpSharedSpaces) { 2526 assert(current_chunk() != NULL, "should never happen"); 2527 inc_used_metrics(word_size); 2528 return current_chunk()->allocate(word_size); // caller handles null result 2529 } 2530 2531 if (current_chunk() != NULL) { 2532 result = current_chunk()->allocate(word_size); 2533 } 2534 2535 if (result == NULL) { 2536 result = grow_and_allocate(word_size); 2537 } 2538 2539 if (result != NULL) { 2540 inc_used_metrics(word_size); 2541 assert(result != (MetaWord*) chunks_in_use(MediumIndex), 2542 "Head of the list is being allocated"); 2543 } 2544 2545 return result; 2546 } 2547 2548 void SpaceManager::verify() { 2549 // If there are blocks in the dictionary, then 2550 // verification of chunks does not work since 2551 // being in the dictionary alters a chunk. 2552 if (block_freelists() != NULL && block_freelists()->total_size() == 0) { 2553 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2554 Metachunk* curr = chunks_in_use(i); 2555 while (curr != NULL) { 2556 curr->verify(); 2557 verify_chunk_size(curr); 2558 curr = curr->next(); 2559 } 2560 } 2561 } 2562 } 2563 2564 void SpaceManager::verify_chunk_size(Metachunk* chunk) { 2565 assert(is_humongous(chunk->word_size()) || 2566 chunk->word_size() == medium_chunk_size() || 2567 chunk->word_size() == small_chunk_size() || 2568 chunk->word_size() == specialized_chunk_size(), 2569 "Chunk size is wrong"); 2570 return; 2571 } 2572 2573 #ifdef ASSERT 2574 void SpaceManager::verify_allocated_blocks_words() { 2575 // Verification is only guaranteed at a safepoint.
2576 assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(), 2577 "Verification can fail if the application is running"); 2578 assert(allocated_blocks_words() == sum_used_in_chunks_in_use(), 2579 "allocation total is not consistent " SIZE_FORMAT 2580 " vs " SIZE_FORMAT, 2581 allocated_blocks_words(), sum_used_in_chunks_in_use()); 2582 } 2583 2584 #endif 2585 2586 void SpaceManager::dump(outputStream* const out) const { 2587 size_t curr_total = 0; 2588 size_t waste = 0; 2589 uint i = 0; 2590 size_t used = 0; 2591 size_t capacity = 0; 2592 2593 // Add up statistics for all chunks in this SpaceManager. 2594 for (ChunkIndex index = ZeroIndex; 2595 index < NumberOfInUseLists; 2596 index = next_chunk_index(index)) { 2597 for (Metachunk* curr = chunks_in_use(index); 2598 curr != NULL; 2599 curr = curr->next()) { 2600 out->print("%d) ", i++); 2601 curr->print_on(out); 2602 curr_total += curr->word_size(); 2603 used += curr->used_word_size(); 2604 capacity += curr->word_size(); 2605 waste += curr->free_word_size() + curr->overhead(); 2606 } 2607 } 2608 2609 if (log_is_enabled(Trace, gc, metaspace, freelist)) { 2610 if (block_freelists() != NULL) block_freelists()->print_on(out); 2611 } 2612 2613 size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size(); 2614 // Free space isn't wasted. 2615 waste -= free; 2616 2617 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT 2618 " free " SIZE_FORMAT " capacity " SIZE_FORMAT 2619 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste); 2620 } 2621 2622 // MetaspaceAux 2623 2624 2625 size_t MetaspaceAux::_capacity_words[] = {0, 0}; 2626 size_t MetaspaceAux::_used_words[] = {0, 0}; 2627 2628 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) { 2629 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2630 return list == NULL ? 0 : list->free_bytes(); 2631 } 2632 2633 size_t MetaspaceAux::free_bytes() { 2634 return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType); 2635 } 2636 2637 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) { 2638 assert_lock_strong(SpaceManager::expand_lock()); 2639 assert(words <= capacity_words(mdtype), 2640 "About to decrement below 0: words " SIZE_FORMAT 2641 " is greater than _capacity_words[%u] " SIZE_FORMAT, 2642 words, mdtype, capacity_words(mdtype)); 2643 _capacity_words[mdtype] -= words; 2644 } 2645 2646 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) { 2647 assert_lock_strong(SpaceManager::expand_lock()); 2648 // Needs to be atomic 2649 _capacity_words[mdtype] += words; 2650 } 2651 2652 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) { 2653 assert(words <= used_words(mdtype), 2654 "About to decrement below 0: words " SIZE_FORMAT 2655 " is greater than _used_words[%u] " SIZE_FORMAT, 2656 words, mdtype, used_words(mdtype)); 2657 // For CMS, deallocation of the Metaspaces occurs during the 2658 // sweep, which is a concurrent phase. Protection by the expand_lock() 2659 // is not enough since allocation is on a per Metaspace basis 2660 // and protected by the Metaspace lock. 2661 jlong minus_words = (jlong) - (jlong) words; 2662 Atomic::add_ptr(minus_words, &_used_words[mdtype]); 2663 } 2664 2665 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) { 2666 // _used_words tracks allocations for 2667 // each piece of metadata.
Those allocations are 2668 // generally done concurrently by different application 2669 // threads so must be done atomically. 2670 Atomic::add_ptr(words, &_used_words[mdtype]); 2671 } 2672 2673 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) { 2674 size_t used = 0; 2675 ClassLoaderDataGraphMetaspaceIterator iter; 2676 while (iter.repeat()) { 2677 Metaspace* msp = iter.get_next(); 2678 // Sum allocated_blocks_words for each metaspace 2679 if (msp != NULL) { 2680 used += msp->used_words_slow(mdtype); 2681 } 2682 } 2683 return used * BytesPerWord; 2684 } 2685 2686 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) { 2687 size_t free = 0; 2688 ClassLoaderDataGraphMetaspaceIterator iter; 2689 while (iter.repeat()) { 2690 Metaspace* msp = iter.get_next(); 2691 if (msp != NULL) { 2692 free += msp->free_words_slow(mdtype); 2693 } 2694 } 2695 return free * BytesPerWord; 2696 } 2697 2698 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) { 2699 if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) { 2700 return 0; 2701 } 2702 // Don't count the space in the freelists. That space will be 2703 // added to the capacity calculation as needed. 2704 size_t capacity = 0; 2705 ClassLoaderDataGraphMetaspaceIterator iter; 2706 while (iter.repeat()) { 2707 Metaspace* msp = iter.get_next(); 2708 if (msp != NULL) { 2709 capacity += msp->capacity_words_slow(mdtype); 2710 } 2711 } 2712 return capacity * BytesPerWord; 2713 } 2714 2715 size_t MetaspaceAux::capacity_bytes_slow() { 2716 #ifdef PRODUCT 2717 // Use capacity_bytes() in PRODUCT instead of this function. 2718 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT"); 2719 #endif 2720 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType); 2721 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType); 2722 assert(capacity_bytes() == class_capacity + non_class_capacity, 2723 "bad accounting: capacity_bytes() " SIZE_FORMAT 2724 " class_capacity + non_class_capacity " SIZE_FORMAT 2725 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT, 2726 capacity_bytes(), class_capacity + non_class_capacity, 2727 class_capacity, non_class_capacity); 2728 2729 return class_capacity + non_class_capacity; 2730 } 2731 2732 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) { 2733 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2734 return list == NULL ? 0 : list->reserved_bytes(); 2735 } 2736 2737 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) { 2738 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2739 return list == NULL ? 
0 : list->committed_bytes(); 2740 } 2741 2742 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); } 2743 2744 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) { 2745 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype); 2746 if (chunk_manager == NULL) { 2747 return 0; 2748 } 2749 chunk_manager->slow_verify(); 2750 return chunk_manager->free_chunks_total_words(); 2751 } 2752 2753 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) { 2754 return free_chunks_total_words(mdtype) * BytesPerWord; 2755 } 2756 2757 size_t MetaspaceAux::free_chunks_total_words() { 2758 return free_chunks_total_words(Metaspace::ClassType) + 2759 free_chunks_total_words(Metaspace::NonClassType); 2760 } 2761 2762 size_t MetaspaceAux::free_chunks_total_bytes() { 2763 return free_chunks_total_words() * BytesPerWord; 2764 } 2765 2766 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) { 2767 return Metaspace::get_chunk_manager(mdtype) != NULL; 2768 } 2769 2770 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) { 2771 if (!has_chunk_free_list(mdtype)) { 2772 return MetaspaceChunkFreeListSummary(); 2773 } 2774 2775 const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype); 2776 return cm->chunk_free_list_summary(); 2777 } 2778 2779 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) { 2780 log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)", 2781 prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K); 2782 } 2783 2784 void MetaspaceAux::print_on(outputStream* out) { 2785 Metaspace::MetadataType nct = Metaspace::NonClassType; 2786 2787 out->print_cr(" Metaspace " 2788 "used " SIZE_FORMAT "K, " 2789 "capacity " SIZE_FORMAT "K, " 2790 "committed " SIZE_FORMAT "K, " 2791 "reserved " SIZE_FORMAT "K", 2792 used_bytes()/K, 2793 capacity_bytes()/K, 2794 committed_bytes()/K, 2795 reserved_bytes()/K); 2796 2797 if (Metaspace::using_class_space()) { 2798 Metaspace::MetadataType ct = Metaspace::ClassType; 2799 out->print_cr(" class space " 2800 "used " SIZE_FORMAT "K, " 2801 "capacity " SIZE_FORMAT "K, " 2802 "committed " SIZE_FORMAT "K, " 2803 "reserved " SIZE_FORMAT "K", 2804 used_bytes(ct)/K, 2805 capacity_bytes(ct)/K, 2806 committed_bytes(ct)/K, 2807 reserved_bytes(ct)/K); 2808 } 2809 } 2810 2811 // Print information for class space and data space separately. 2812 // This is almost the same as above. 
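// At a safepoint the accounting printed below must balance:
// used_bytes + free_bytes + free_chunks_capacity_bytes == capacity_bytes
// (see the assert at the end of this method).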
2813 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) { 2814 size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype); 2815 size_t capacity_bytes = capacity_bytes_slow(mdtype); 2816 size_t used_bytes = used_bytes_slow(mdtype); 2817 size_t free_bytes = free_bytes_slow(mdtype); 2818 size_t used_and_free = used_bytes + free_bytes + 2819 free_chunks_capacity_bytes; 2820 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT 2821 "K + unused in chunks " SIZE_FORMAT "K + " 2822 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT 2823 "K capacity in allocated chunks " SIZE_FORMAT "K", 2824 used_bytes / K, 2825 free_bytes / K, 2826 free_chunks_capacity_bytes / K, 2827 used_and_free / K, 2828 capacity_bytes / K); 2829 // Accounting can only be correct if we got the values during a safepoint 2830 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong"); 2831 } 2832 2833 // Print total fragmentation for class metaspaces 2834 void MetaspaceAux::print_class_waste(outputStream* out) { 2835 assert(Metaspace::using_class_space(), "class metaspace not used"); 2836 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0; 2837 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0; 2838 ClassLoaderDataGraphMetaspaceIterator iter; 2839 while (iter.repeat()) { 2840 Metaspace* msp = iter.get_next(); 2841 if (msp != NULL) { 2842 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2843 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2844 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2845 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex); 2846 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2847 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex); 2848 cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2849 } 2850 } 2851 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2852 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2853 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2854 "large count " SIZE_FORMAT, 2855 cls_specialized_count, cls_specialized_waste, 2856 cls_small_count, cls_small_waste, 2857 cls_medium_count, cls_medium_waste, cls_humongous_count); 2858 } 2859 2860 // Print total fragmentation for data and class metaspaces separately 2861 void MetaspaceAux::print_waste(outputStream* out) { 2862 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0; 2863 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0; 2864 2865 ClassLoaderDataGraphMetaspaceIterator iter; 2866 while (iter.repeat()) { 2867 Metaspace* msp = iter.get_next(); 2868 if (msp != NULL) { 2869 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2870 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2871 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2872 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex); 2873 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2874 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex); 2875 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2876 } 2877 } 2878 out->print_cr("Total fragmentation waste (words) doesn't count 
free space"); 2879 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2880 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2881 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2882 "large count " SIZE_FORMAT, 2883 specialized_count, specialized_waste, small_count, 2884 small_waste, medium_count, medium_waste, humongous_count); 2885 if (Metaspace::using_class_space()) { 2886 print_class_waste(out); 2887 } 2888 } 2889 2890 // Dump global metaspace things from the end of ClassLoaderDataGraph 2891 void MetaspaceAux::dump(outputStream* out) { 2892 out->print_cr("All Metaspace:"); 2893 out->print("data space: "); print_on(out, Metaspace::NonClassType); 2894 out->print("class space: "); print_on(out, Metaspace::ClassType); 2895 print_waste(out); 2896 } 2897 2898 void MetaspaceAux::verify_free_chunks() { 2899 Metaspace::chunk_manager_metadata()->verify(); 2900 if (Metaspace::using_class_space()) { 2901 Metaspace::chunk_manager_class()->verify(); 2902 } 2903 } 2904 2905 void MetaspaceAux::verify_capacity() { 2906 #ifdef ASSERT 2907 size_t running_sum_capacity_bytes = capacity_bytes(); 2908 // For purposes of the running sum of capacity, verify against capacity 2909 size_t capacity_in_use_bytes = capacity_bytes_slow(); 2910 assert(running_sum_capacity_bytes == capacity_in_use_bytes, 2911 "capacity_words() * BytesPerWord " SIZE_FORMAT 2912 " capacity_bytes_slow()" SIZE_FORMAT, 2913 running_sum_capacity_bytes, capacity_in_use_bytes); 2914 for (Metaspace::MetadataType i = Metaspace::ClassType; 2915 i < Metaspace:: MetadataTypeCount; 2916 i = (Metaspace::MetadataType)(i + 1)) { 2917 size_t capacity_in_use_bytes = capacity_bytes_slow(i); 2918 assert(capacity_bytes(i) == capacity_in_use_bytes, 2919 "capacity_bytes(%u) " SIZE_FORMAT 2920 " capacity_bytes_slow(%u)" SIZE_FORMAT, 2921 i, capacity_bytes(i), i, capacity_in_use_bytes); 2922 } 2923 #endif 2924 } 2925 2926 void MetaspaceAux::verify_used() { 2927 #ifdef ASSERT 2928 size_t running_sum_used_bytes = used_bytes(); 2929 // For purposes of the running sum of used, verify against used 2930 size_t used_in_use_bytes = used_bytes_slow(); 2931 assert(used_bytes() == used_in_use_bytes, 2932 "used_bytes() " SIZE_FORMAT 2933 " used_bytes_slow()" SIZE_FORMAT, 2934 used_bytes(), used_in_use_bytes); 2935 for (Metaspace::MetadataType i = Metaspace::ClassType; 2936 i < Metaspace:: MetadataTypeCount; 2937 i = (Metaspace::MetadataType)(i + 1)) { 2938 size_t used_in_use_bytes = used_bytes_slow(i); 2939 assert(used_bytes(i) == used_in_use_bytes, 2940 "used_bytes(%u) " SIZE_FORMAT 2941 " used_bytes_slow(%u)" SIZE_FORMAT, 2942 i, used_bytes(i), i, used_in_use_bytes); 2943 } 2944 #endif 2945 } 2946 2947 void MetaspaceAux::verify_metrics() { 2948 verify_capacity(); 2949 verify_used(); 2950 } 2951 2952 2953 // Metaspace methods 2954 2955 size_t Metaspace::_first_chunk_word_size = 0; 2956 size_t Metaspace::_first_class_chunk_word_size = 0; 2957 2958 size_t Metaspace::_commit_alignment = 0; 2959 size_t Metaspace::_reserve_alignment = 0; 2960 2961 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) { 2962 initialize(lock, type); 2963 } 2964 2965 Metaspace::~Metaspace() { 2966 delete _vsm; 2967 if (using_class_space()) { 2968 delete _class_vsm; 2969 } 2970 } 2971 2972 VirtualSpaceList* Metaspace::_space_list = NULL; 2973 VirtualSpaceList* Metaspace::_class_space_list = NULL; 2974 2975 ChunkManager* Metaspace::_chunk_manager_metadata = NULL; 2976 ChunkManager* Metaspace::_chunk_manager_class = NULL; 2977 2978 #define VIRTUALSPACEMULTIPLIER 2 2979 2980 #ifdef _LP64 2981 
static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1); 2982 2983 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) { 2984 // Figure out the narrow_klass_base and the narrow_klass_shift. The 2985 // narrow_klass_base is the lower of the metaspace base and the cds base 2986 // (if cds is enabled). The narrow_klass_shift depends on the distance 2987 // between the lower base and higher address. 2988 address lower_base; 2989 address higher_address; 2990 #if INCLUDE_CDS 2991 if (UseSharedSpaces) { 2992 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 2993 (address)(metaspace_base + compressed_class_space_size())); 2994 lower_base = MIN2(metaspace_base, cds_base); 2995 } else 2996 #endif 2997 { 2998 higher_address = metaspace_base + compressed_class_space_size(); 2999 lower_base = metaspace_base; 3000 3001 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes; 3002 // If compressed class space fits in lower 32G, we don't need a base. 3003 if (higher_address <= (address)klass_encoding_max) { 3004 lower_base = 0; // Effectively lower base is zero. 3005 } 3006 } 3007 3008 Universe::set_narrow_klass_base(lower_base); 3009 3010 if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) { 3011 Universe::set_narrow_klass_shift(0); 3012 } else { 3013 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces"); 3014 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes); 3015 } 3016 AOTLoader::set_narrow_klass_shift(); 3017 } 3018 3019 #if INCLUDE_CDS 3020 // Return TRUE if the specified metaspace_base and cds_base are close enough 3021 // to work with compressed klass pointers. 3022 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) { 3023 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS"); 3024 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 3025 address lower_base = MIN2((address)metaspace_base, cds_base); 3026 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 3027 (address)(metaspace_base + compressed_class_space_size())); 3028 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax); 3029 } 3030 #endif 3031 3032 // Try to allocate the metaspace at the requested addr. 3033 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) { 3034 assert(using_class_space(), "called improperly"); 3035 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 3036 assert(compressed_class_space_size() < KlassEncodingMetaspaceMax, 3037 "Metaspace size is too big"); 3038 assert_is_ptr_aligned(requested_addr, _reserve_alignment); 3039 assert_is_ptr_aligned(cds_base, _reserve_alignment); 3040 assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment); 3041 3042 // Don't use large pages for the class space. 3043 bool large_pages = false; 3044 3045 #if !(defined(AARCH64) || defined(AIX)) 3046 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(), 3047 _reserve_alignment, 3048 large_pages, 3049 requested_addr); 3050 #else // AARCH64 3051 ReservedSpace metaspace_rs; 3052 3053 // Our compressed klass pointers may fit nicely into the lower 32 3054 // bits. 3055 if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) { 3056 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3057 _reserve_alignment, 3058 large_pages, 3059 requested_addr); 3060 } 3061 3062 if (! 
metaspace_rs.is_reserved()) { 3063 // Aarch64: Try to align metaspace so that we can decode a compressed 3064 // klass with a single MOVK instruction. We can do this iff the 3065 // compressed class base is a multiple of 4G. 3066 // Aix: Search for a place where we can find memory. If we need to load 3067 // the base, 4G alignment is helpful, too. 3068 size_t increment = AARCH64_ONLY(4*)G; 3069 for (char *a = (char*)align_ptr_up(requested_addr, increment); 3070 a < (char*)(1024*G); 3071 a += increment) { 3072 if (a == (char *)(32*G)) { 3073 // Go faster from here on. Zero-based is no longer possible. 3074 increment = 4*G; 3075 } 3076 3077 #if INCLUDE_CDS 3078 if (UseSharedSpaces 3079 && ! can_use_cds_with_metaspace_addr(a, cds_base)) { 3080 // We failed to find an aligned base that will reach. Fall 3081 // back to using our requested addr. 3082 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3083 _reserve_alignment, 3084 large_pages, 3085 requested_addr); 3086 break; 3087 } 3088 #endif 3089 3090 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3091 _reserve_alignment, 3092 large_pages, 3093 a); 3094 if (metaspace_rs.is_reserved()) 3095 break; 3096 } 3097 } 3098 3099 #endif // AARCH64 3100 3101 if (!metaspace_rs.is_reserved()) { 3102 #if INCLUDE_CDS 3103 if (UseSharedSpaces) { 3104 size_t increment = align_size_up(1*G, _reserve_alignment); 3105 3106 // Keep trying to allocate the metaspace, increasing the requested_addr 3107 // by 1GB each time, until we reach an address that will no longer allow 3108 // use of CDS with compressed klass pointers. 3109 char *addr = requested_addr; 3110 while (!metaspace_rs.is_reserved() && (addr + increment > addr) && 3111 can_use_cds_with_metaspace_addr(addr + increment, cds_base)) { 3112 addr = addr + increment; 3113 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3114 _reserve_alignment, large_pages, addr); 3115 } 3116 } 3117 #endif 3118 // If no successful allocation then try to allocate the space anywhere. If 3119 // that fails then OOM doom. At this point we cannot try allocating the 3120 // metaspace as if UseCompressedClassPointers is off because too much 3121 // initialization has happened that depends on UseCompressedClassPointers. 3122 // So, UseCompressedClassPointers cannot be turned off at this point. 3123 if (!metaspace_rs.is_reserved()) { 3124 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3125 _reserve_alignment, large_pages); 3126 if (!metaspace_rs.is_reserved()) { 3127 vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes", 3128 compressed_class_space_size())); 3129 } 3130 } 3131 } 3132 3133 // If we got here then the metaspace got allocated. 3134 MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass); 3135 3136 #if INCLUDE_CDS 3137 // Verify that we can use shared spaces. Otherwise, turn off CDS. 3138 if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) { 3139 FileMapInfo::stop_sharing_and_unmap( 3140 "Could not allocate metaspace at a compatible address"); 3141 } 3142 #endif 3143 set_narrow_klass_base_and_shift((address)metaspace_rs.base(), 3144 UseSharedSpaces ? 
(address)cds_base : 0); 3145 3146 initialize_class_space(metaspace_rs); 3147 3148 if (log_is_enabled(Trace, gc, metaspace)) { 3149 Log(gc, metaspace) log; 3150 ResourceMark rm; 3151 print_compressed_class_space(log.trace_stream(), requested_addr); 3152 } 3153 } 3154 3155 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) { 3156 st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d", 3157 p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift()); 3158 if (_class_space_list != NULL) { 3159 address base = (address)_class_space_list->current_virtual_space()->bottom(); 3160 st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT, 3161 compressed_class_space_size(), p2i(base)); 3162 if (requested_addr != 0) { 3163 st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr)); 3164 } 3165 st->cr(); 3166 } 3167 } 3168 3169 // For UseCompressedClassPointers the class space is reserved above the top of 3170 // the Java heap. The argument passed in is at the base of the compressed space. 3171 void Metaspace::initialize_class_space(ReservedSpace rs) { 3172 // The reserved space size may be bigger because of alignment, esp with UseLargePages 3173 assert(rs.size() >= CompressedClassSpaceSize, 3174 SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize); 3175 assert(using_class_space(), "Must be using class space"); 3176 _class_space_list = new VirtualSpaceList(rs); 3177 _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk); 3178 3179 if (!_class_space_list->initialization_succeeded()) { 3180 vm_exit_during_initialization("Failed to setup compressed class space virtual space list."); 3181 } 3182 } 3183 3184 #endif 3185 3186 void Metaspace::ergo_initialize() { 3187 if (DumpSharedSpaces) { 3188 // Using large pages when dumping the shared archive is currently not implemented. 3189 FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false); 3190 } 3191 3192 size_t page_size = os::vm_page_size(); 3193 if (UseLargePages && UseLargePagesInMetaspace) { 3194 page_size = os::large_page_size(); 3195 } 3196 3197 _commit_alignment = page_size; 3198 _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity()); 3199 3200 // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will 3201 // override if MaxMetaspaceSize was set on the command line or not. 3202 // This information is needed later to conform to the specification of the 3203 // java.lang.management.MemoryUsage API. 3204 // 3205 // Ideally, we would be able to set the default value of MaxMetaspaceSize in 3206 // globals.hpp to the aligned value, but this is not possible, since the 3207 // alignment depends on other flags being parsed. 
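  // Illustrative example (assumed semantics; align_size_down_bounded rounds a
  // value down to the given alignment but never returns less than the
  // alignment itself). With a hypothetical 2M _reserve_alignment:
  //   align_size_down_bounded(100*M + 13*K, 2*M) == 100*M  // rounded down
  //   align_size_down_bounded(1*M,          2*M) == 2*M    // bounded from below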
3208 MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment); 3209 3210 if (MetaspaceSize > MaxMetaspaceSize) { 3211 MetaspaceSize = MaxMetaspaceSize; 3212 } 3213 3214 MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment); 3215 3216 assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize"); 3217 3218 MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment); 3219 MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment); 3220 3221 CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment); 3222 set_compressed_class_space_size(CompressedClassSpaceSize); 3223 } 3224 3225 void Metaspace::global_initialize() { 3226 MetaspaceGC::initialize(); 3227 3228 // Initialize the alignment for shared spaces. 3229 int max_alignment = os::vm_allocation_granularity(); 3230 size_t cds_total = 0; 3231 3232 MetaspaceShared::set_max_alignment(max_alignment); 3233 3234 if (DumpSharedSpaces) { 3235 #if INCLUDE_CDS 3236 MetaspaceShared::estimate_regions_size(); 3237 3238 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment); 3239 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment); 3240 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment); 3241 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment); 3242 3243 // Initialize with the sum of the shared space sizes. The read-only 3244 // and read write metaspace chunks will be allocated out of this and the 3245 // remainder is the misc code and data chunks. 3246 cds_total = FileMapInfo::shared_spaces_size(); 3247 cds_total = align_size_up(cds_total, _reserve_alignment); 3248 _space_list = new VirtualSpaceList(cds_total/wordSize); 3249 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); 3250 3251 if (!_space_list->initialization_succeeded()) { 3252 vm_exit_during_initialization("Unable to dump shared archive.", NULL); 3253 } 3254 3255 #ifdef _LP64 3256 if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) { 3257 vm_exit_during_initialization("Unable to dump shared archive.", 3258 err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space (" 3259 SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed " 3260 "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(), 3261 cds_total + compressed_class_space_size(), UnscaledClassSpaceMax)); 3262 } 3263 3264 // Set the compressed klass pointer base so that decoding of these pointers works 3265 // properly when creating the shared archive. 
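    // For reference, a sketch of the decode this base/shift pair enables
    // (assumed from the compressed-klass scheme, not spelled out in this file):
    // with shift == 0 a narrow klass value decodes as
    //   Klass* k = (Klass*)(narrow_klass_base + (uintptr_t)narrow_klass);
    // so every archived Klass must lie within UnscaledClassSpaceMax (4G) of
    // the base, which the size check above enforces.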
3266     assert(UseCompressedOops && UseCompressedClassPointers,
3267       "UseCompressedOops and UseCompressedClassPointers must be set");
3268     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3269     log_develop_trace(gc, metaspace)("Setting narrow_klass_base to Address: " PTR_FORMAT,
3270                                      p2i(_space_list->current_virtual_space()->bottom()));
3271 
3272     Universe::set_narrow_klass_shift(0);
3273 #endif // _LP64
3274 #endif // INCLUDE_CDS
3275   } else {
3276 #if INCLUDE_CDS
3277     if (UseSharedSpaces) {
3278       // If using shared space, open the file that contains the shared space
3279       // and map in the memory before initializing the rest of metaspace (so
3280       // the addresses don't conflict).
3281       address cds_address = NULL;
3282       FileMapInfo* mapinfo = new FileMapInfo();
3283 
3284       // Open the shared archive file, read and validate the header. If
3285       // initialization fails, shared spaces [UseSharedSpaces] are
3286       // disabled and the file is closed.
3287       // Map in the spaces now, too.
3288       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3289         cds_total = FileMapInfo::shared_spaces_size();
3290         cds_address = (address)mapinfo->header()->region_addr(0);
3291 #ifdef _LP64
3292         if (using_class_space()) {
3293           char* cds_end = (char*)(cds_address + cds_total);
3294           cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3295           // If UseCompressedClassPointers is set then allocate the metaspace area
3296           // above the heap and above the CDS area (if it exists).
3297           allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3298           // Map the shared string space after the compressed class pointers have
3299           // been configured, because string mapping relies on that setup to work.
3300           mapinfo->map_string_regions();
3301         }
3302 #endif // _LP64
3303       } else {
3304         assert(!mapinfo->is_open() && !UseSharedSpaces,
3305                "archive file not closed or shared spaces not disabled.");
3306       }
3307     }
3308 #endif // INCLUDE_CDS
3309 
3310 #ifdef _LP64
3311     if (!UseSharedSpaces && using_class_space()) {
3312       char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3313       allocate_metaspace_compressed_klass_ptrs(base, 0);
3314     }
3315 #endif // _LP64
3316 
3317     // Initialize these before initializing the VirtualSpaceList.
3318     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3319     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3320     // Make the first class chunk bigger than a medium chunk so it's not put
3321     // on the medium chunk list. The next chunk will be small and progress
3322     // from there. This size was calibrated by running -version.
3323     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3324                                         (CompressedClassSpaceSize/BytesPerWord)*2);
3325     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3326     // Arbitrarily set the initial virtual space to a multiple
3327     // of the boot class loader size.
3328     size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3329     word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3330 
3331     // Initialize the list of virtual spaces.
3332 _space_list = new VirtualSpaceList(word_size); 3333 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); 3334 3335 if (!_space_list->initialization_succeeded()) { 3336 vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL); 3337 } 3338 } 3339 3340 _tracer = new MetaspaceTracer(); 3341 } 3342 3343 void Metaspace::post_initialize() { 3344 MetaspaceGC::post_initialize(); 3345 } 3346 3347 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype, 3348 size_t chunk_word_size, 3349 size_t chunk_bunch) { 3350 // Get a chunk from the chunk freelist 3351 Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size); 3352 if (chunk != NULL) { 3353 return chunk; 3354 } 3355 3356 return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch); 3357 } 3358 3359 void Metaspace::initialize(Mutex* lock, MetaspaceType type) { 3360 3361 assert(space_list() != NULL, 3362 "Metadata VirtualSpaceList has not been initialized"); 3363 assert(chunk_manager_metadata() != NULL, 3364 "Metadata ChunkManager has not been initialized"); 3365 3366 _vsm = new SpaceManager(NonClassType, lock); 3367 if (_vsm == NULL) { 3368 return; 3369 } 3370 size_t word_size; 3371 size_t class_word_size; 3372 vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size); 3373 3374 if (using_class_space()) { 3375 assert(class_space_list() != NULL, 3376 "Class VirtualSpaceList has not been initialized"); 3377 assert(chunk_manager_class() != NULL, 3378 "Class ChunkManager has not been initialized"); 3379 3380 // Allocate SpaceManager for classes. 3381 _class_vsm = new SpaceManager(ClassType, lock); 3382 if (_class_vsm == NULL) { 3383 return; 3384 } 3385 } 3386 3387 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 3388 3389 // Allocate chunk for metadata objects 3390 Metachunk* new_chunk = get_initialization_chunk(NonClassType, 3391 word_size, 3392 vsm()->medium_chunk_bunch()); 3393 // For dumping shared archive, report error if allocation has failed. 3394 if (DumpSharedSpaces && new_chunk == NULL) { 3395 report_insufficient_metaspace(MetaspaceAux::committed_bytes() + word_size * BytesPerWord); 3396 } 3397 assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks"); 3398 if (new_chunk != NULL) { 3399 // Add to this manager's list of chunks in use and current_chunk(). 3400 vsm()->add_chunk(new_chunk, true); 3401 } 3402 3403 // Allocate chunk for class metadata objects 3404 if (using_class_space()) { 3405 Metachunk* class_chunk = get_initialization_chunk(ClassType, 3406 class_word_size, 3407 class_vsm()->medium_chunk_bunch()); 3408 if (class_chunk != NULL) { 3409 class_vsm()->add_chunk(class_chunk, true); 3410 } else { 3411 // For dumping shared archive, report error if allocation has failed. 3412 if (DumpSharedSpaces) { 3413 report_insufficient_metaspace(MetaspaceAux::committed_bytes() + class_word_size * BytesPerWord); 3414 } 3415 } 3416 } 3417 3418 _alloc_record_head = NULL; 3419 _alloc_record_tail = NULL; 3420 } 3421 3422 size_t Metaspace::align_word_size_up(size_t word_size) { 3423 size_t byte_size = word_size * wordSize; 3424 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize; 3425 } 3426 3427 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) { 3428 // DumpSharedSpaces doesn't use class metadata area (yet) 3429 // Also, don't use class_vsm() unless UseCompressedClassPointers is true. 
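  // A note on the predicate below (an assumption based on its use here; see
  // the declaration in metaspace.hpp): is_class_space_allocation(mdtype) is
  // expected to hold only when mdtype == ClassType and using_class_space() is
  // true, so class_vsm() is never touched when compressed class pointers are off.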
3430 if (is_class_space_allocation(mdtype)) { 3431 return class_vsm()->allocate(word_size); 3432 } else { 3433 return vsm()->allocate(word_size); 3434 } 3435 } 3436 3437 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) { 3438 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord); 3439 assert(delta_bytes > 0, "Must be"); 3440 3441 size_t before = 0; 3442 size_t after = 0; 3443 MetaWord* res; 3444 bool incremented; 3445 3446 // Each thread increments the HWM at most once. Even if the thread fails to increment 3447 // the HWM, an allocation is still attempted. This is because another thread must then 3448 // have incremented the HWM and therefore the allocation might still succeed. 3449 do { 3450 incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before); 3451 res = allocate(word_size, mdtype); 3452 } while (!incremented && res == NULL); 3453 3454 if (incremented) { 3455 tracer()->report_gc_threshold(before, after, 3456 MetaspaceGCThresholdUpdater::ExpandAndAllocate); 3457 log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after); 3458 } 3459 3460 return res; 3461 } 3462 3463 // Space allocated in the Metaspace. This may 3464 // be across several metadata virtual spaces. 3465 char* Metaspace::bottom() const { 3466 assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces"); 3467 return (char*)vsm()->current_chunk()->bottom(); 3468 } 3469 3470 size_t Metaspace::used_words_slow(MetadataType mdtype) const { 3471 if (mdtype == ClassType) { 3472 return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0; 3473 } else { 3474 return vsm()->sum_used_in_chunks_in_use(); // includes overhead! 3475 } 3476 } 3477 3478 size_t Metaspace::free_words_slow(MetadataType mdtype) const { 3479 if (mdtype == ClassType) { 3480 return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0; 3481 } else { 3482 return vsm()->sum_free_in_chunks_in_use(); 3483 } 3484 } 3485 3486 // Space capacity in the Metaspace. It includes 3487 // space in the list of chunks from which allocations 3488 // have been made. Don't include space in the global freelist and 3489 // in the space available in the dictionary which 3490 // is already counted in some chunk. 3491 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const { 3492 if (mdtype == ClassType) { 3493 return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0; 3494 } else { 3495 return vsm()->sum_capacity_in_chunks_in_use(); 3496 } 3497 } 3498 3499 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const { 3500 return used_words_slow(mdtype) * BytesPerWord; 3501 } 3502 3503 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const { 3504 return capacity_words_slow(mdtype) * BytesPerWord; 3505 } 3506 3507 size_t Metaspace::allocated_blocks_bytes() const { 3508 return vsm()->allocated_blocks_bytes() + 3509 (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0); 3510 } 3511 3512 size_t Metaspace::allocated_chunks_bytes() const { 3513 return vsm()->allocated_chunks_bytes() + 3514 (using_class_space() ? 
class_vsm()->allocated_chunks_bytes() : 0); 3515 } 3516 3517 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) { 3518 assert(!SafepointSynchronize::is_at_safepoint() 3519 || Thread::current()->is_VM_thread(), "should be the VM thread"); 3520 3521 if (DumpSharedSpaces && PrintSharedSpaces) { 3522 record_deallocation(ptr, vsm()->get_allocation_word_size(word_size)); 3523 } 3524 3525 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag); 3526 3527 if (is_class && using_class_space()) { 3528 class_vsm()->deallocate(ptr, word_size); 3529 } else { 3530 vsm()->deallocate(ptr, word_size); 3531 } 3532 } 3533 3534 3535 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, 3536 bool read_only, MetaspaceObj::Type type, TRAPS) { 3537 if (HAS_PENDING_EXCEPTION) { 3538 assert(false, "Should not allocate with exception pending"); 3539 return NULL; // caller does a CHECK_NULL too 3540 } 3541 3542 assert(loader_data != NULL, "Should never pass around a NULL loader_data. " 3543 "ClassLoaderData::the_null_class_loader_data() should have been used."); 3544 3545 // Allocate in metaspaces without taking out a lock, because it deadlocks 3546 // with the SymbolTable_lock. Dumping is single threaded for now. We'll have 3547 // to revisit this for application class data sharing. 3548 if (DumpSharedSpaces) { 3549 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity"); 3550 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace(); 3551 MetaWord* result = space->allocate(word_size, NonClassType); 3552 if (result == NULL) { 3553 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite); 3554 } 3555 if (PrintSharedSpaces) { 3556 space->record_allocation(result, type, space->vsm()->get_allocation_word_size(word_size)); 3557 } 3558 3559 // Zero initialize. 3560 Copy::fill_to_words((HeapWord*)result, word_size, 0); 3561 3562 return result; 3563 } 3564 3565 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType; 3566 3567 // Try to allocate metadata. 3568 MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); 3569 3570 if (result == NULL) { 3571 tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype); 3572 3573 // Allocation failed. 3574 if (is_init_completed()) { 3575 // Only start a GC if the bootstrapping has completed. 3576 3577 // Try to clean out some memory and retry. 3578 result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation( 3579 loader_data, word_size, mdtype); 3580 } 3581 } 3582 3583 if (result == NULL) { 3584 SpaceManager* sm; 3585 if (is_class_space_allocation(mdtype)) { 3586 sm = loader_data->metaspace_non_null()->class_vsm(); 3587 } else { 3588 sm = loader_data->metaspace_non_null()->vsm(); 3589 } 3590 3591 result = sm->get_small_chunk_and_allocate(word_size); 3592 3593 if (result == NULL) { 3594 report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL); 3595 } 3596 } 3597 3598 // Zero initialize. 
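  // The fill below zeroes word_size words starting at result; conceptually
  // (pseudocode only, a HeapWord is not directly assignable like this):
  //   for (size_t i = 0; i < word_size; i++) result_words[i] = 0;
  // so callers always receive zero-initialized metadata.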
3599 Copy::fill_to_words((HeapWord*)result, word_size, 0); 3600 3601 return result; 3602 } 3603 3604 size_t Metaspace::class_chunk_size(size_t word_size) { 3605 assert(using_class_space(), "Has to use class space"); 3606 return class_vsm()->calc_chunk_size(word_size); 3607 } 3608 3609 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) { 3610 tracer()->report_metadata_oom(loader_data, word_size, type, mdtype); 3611 3612 // If result is still null, we are out of memory. 3613 Log(gc, metaspace, freelist) log; 3614 if (log.is_info()) { 3615 log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT, 3616 is_class_space_allocation(mdtype) ? "class" : "data", word_size); 3617 ResourceMark rm; 3618 outputStream* out = log.info_stream(); 3619 if (loader_data->metaspace_or_null() != NULL) { 3620 loader_data->dump(out); 3621 } 3622 MetaspaceAux::dump(out); 3623 } 3624 3625 bool out_of_compressed_class_space = false; 3626 if (is_class_space_allocation(mdtype)) { 3627 Metaspace* metaspace = loader_data->metaspace_non_null(); 3628 out_of_compressed_class_space = 3629 MetaspaceAux::committed_bytes(Metaspace::ClassType) + 3630 (metaspace->class_chunk_size(word_size) * BytesPerWord) > 3631 CompressedClassSpaceSize; 3632 } 3633 3634 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support 3635 const char* space_string = out_of_compressed_class_space ? 3636 "Compressed class space" : "Metaspace"; 3637 3638 report_java_out_of_memory(space_string); 3639 3640 if (JvmtiExport::should_post_resource_exhausted()) { 3641 JvmtiExport::post_resource_exhausted( 3642 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, 3643 space_string); 3644 } 3645 3646 if (!is_init_completed()) { 3647 vm_exit_during_initialization("OutOfMemoryError", space_string); 3648 } 3649 3650 if (out_of_compressed_class_space) { 3651 THROW_OOP(Universe::out_of_memory_error_class_metaspace()); 3652 } else { 3653 THROW_OOP(Universe::out_of_memory_error_metaspace()); 3654 } 3655 } 3656 3657 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) { 3658 switch (mdtype) { 3659 case Metaspace::ClassType: return "Class"; 3660 case Metaspace::NonClassType: return "Metadata"; 3661 default: 3662 assert(false, "Got bad mdtype: %d", (int) mdtype); 3663 return NULL; 3664 } 3665 } 3666 3667 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) { 3668 assert(DumpSharedSpaces, "sanity"); 3669 3670 int byte_size = (int)word_size * wordSize; 3671 AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size); 3672 3673 if (_alloc_record_head == NULL) { 3674 _alloc_record_head = _alloc_record_tail = rec; 3675 } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) { 3676 _alloc_record_tail->_next = rec; 3677 _alloc_record_tail = rec; 3678 } else { 3679 // slow linear search, but this doesn't happen that often, and only when dumping 3680 for (AllocRecord *old = _alloc_record_head; old; old = old->_next) { 3681 if (old->_ptr == ptr) { 3682 assert(old->_type == MetaspaceObj::DeallocatedType, "sanity"); 3683 int remain_bytes = old->_byte_size - byte_size; 3684 assert(remain_bytes >= 0, "sanity"); 3685 old->_type = type; 3686 3687 if (remain_bytes == 0) { 3688 delete(rec); 3689 } else { 3690 address remain_ptr = address(ptr) + byte_size; 3691 rec->_ptr = remain_ptr; 3692 rec->_byte_size = remain_bytes; 3693 rec->_type = MetaspaceObj::DeallocatedType; 3694 rec->_next = old->_next; 3695 
old->_byte_size = byte_size; 3696 old->_next = rec; 3697 } 3698 return; 3699 } 3700 } 3701 assert(0, "reallocating a freed pointer that was not recorded"); 3702 } 3703 } 3704 3705 void Metaspace::record_deallocation(void* ptr, size_t word_size) { 3706 assert(DumpSharedSpaces, "sanity"); 3707 3708 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) { 3709 if (rec->_ptr == ptr) { 3710 assert(rec->_byte_size == (int)word_size * wordSize, "sanity"); 3711 rec->_type = MetaspaceObj::DeallocatedType; 3712 return; 3713 } 3714 } 3715 3716 assert(0, "deallocating a pointer that was not recorded"); 3717 } 3718 3719 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) { 3720 assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces"); 3721 3722 address last_addr = (address)bottom(); 3723 3724 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) { 3725 address ptr = rec->_ptr; 3726 if (last_addr < ptr) { 3727 closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr); 3728 } 3729 closure->doit(ptr, rec->_type, rec->_byte_size); 3730 last_addr = ptr + rec->_byte_size; 3731 } 3732 3733 address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType); 3734 if (last_addr < top) { 3735 closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr); 3736 } 3737 } 3738 3739 void Metaspace::purge(MetadataType mdtype) { 3740 get_space_list(mdtype)->purge(get_chunk_manager(mdtype)); 3741 } 3742 3743 void Metaspace::purge() { 3744 MutexLockerEx cl(SpaceManager::expand_lock(), 3745 Mutex::_no_safepoint_check_flag); 3746 purge(NonClassType); 3747 if (using_class_space()) { 3748 purge(ClassType); 3749 } 3750 } 3751 3752 void Metaspace::print_on(outputStream* out) const { 3753 // Print both class virtual space counts and metaspace. 
3754   if (Verbose) {
3755     vsm()->print_on(out);
3756     if (using_class_space()) {
3757       class_vsm()->print_on(out);
3758     }
3759   }
3760 }
3761 
3762 bool Metaspace::contains(const void* ptr) {
3763   if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
3764     return true;
3765   }
3766 
3767   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
3768     return true;
3769   }
3770 
3771   return get_space_list(NonClassType)->contains(ptr);
3772 }
3773 
3774 void Metaspace::verify() {
3775   vsm()->verify();
3776   if (using_class_space()) {
3777     class_vsm()->verify();
3778   }
3779 }
3780 
3781 void Metaspace::dump(outputStream* const out) const {
3782   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
3783   vsm()->dump(out);
3784   if (using_class_space()) {
3785     out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
3786     class_vsm()->dump(out);
3787   }
3788 }
3789 
3790 /////////////// Unit tests ///////////////
3791 
3792 #ifndef PRODUCT
3793 
3794 class TestMetaspaceAuxTest : AllStatic {
3795  public:
3796   static void test_reserved() {
3797     size_t reserved = MetaspaceAux::reserved_bytes();
3798 
3799     assert(reserved > 0, "assert");
3800 
3801     size_t committed = MetaspaceAux::committed_bytes();
3802     assert(committed <= reserved, "assert");
3803 
3804     size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3805     assert(reserved_metadata > 0, "assert");
3806     assert(reserved_metadata <= reserved, "assert");
3807 
3808     if (UseCompressedClassPointers) {
3809       size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3810       assert(reserved_class > 0, "assert");
3811       assert(reserved_class < reserved, "assert");
3812     }
3813   }
3814 
3815   static void test_committed() {
3816     size_t committed = MetaspaceAux::committed_bytes();
3817 
3818     assert(committed > 0, "assert");
3819 
3820     size_t reserved = MetaspaceAux::reserved_bytes();
3821     assert(committed <= reserved, "assert");
3822 
3823     size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3824     assert(committed_metadata > 0, "assert");
3825     assert(committed_metadata <= committed, "assert");
3826 
3827     if (UseCompressedClassPointers) {
3828       size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3829       assert(committed_class > 0, "assert");
3830       assert(committed_class < committed, "assert");
3831     }
3832   }
3833 
3834   static void test_virtual_space_list_large_chunk() {
3835     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3836     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3837     // Use a size larger than VirtualSpaceSize (256k) and add one page so the
3838     // size is _not_ vm_allocation_granularity aligned on Windows.
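    // Worked example (illustrative; assumes 4K pages and 8-byte words):
    //   2*256*K                          == 524288 words
    //   os::vm_page_size()/BytesPerWord  ==    512 words
    // giving 524288 + 512 + 512 == 525312 words == 4202496 bytes, which is
    // deliberately not a multiple of the Windows allocation granularity
    // (typically 64K).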
3839     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3840     large_size += (os::vm_page_size()/BytesPerWord);
3841     vs_list->get_new_chunk(large_size, large_size, 0);
3842   }
3843 
3844   static void test() {
3845     test_reserved();
3846     test_committed();
3847     test_virtual_space_list_large_chunk();
3848   }
3849 };
3850 
3851 void TestMetaspaceAux_test() {
3852   TestMetaspaceAuxTest::test();
3853 }
3854 
3855 class TestVirtualSpaceNodeTest {
3856   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3857                                           size_t& num_small_chunks,
3858                                           size_t& num_specialized_chunks) {
3859     num_medium_chunks = words_left / MediumChunk;
3860     words_left = words_left % MediumChunk;
3861 
3862     num_small_chunks = words_left / SmallChunk;
3863     words_left = words_left % SmallChunk;
3864     // How many specialized chunks can we get?
3865     num_specialized_chunks = words_left / SpecializedChunk;
3866     assert(words_left % SpecializedChunk == 0, "should be nothing left");
3867   }
3868 
3869  public:
3870   static void test() {
3871     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3872     const size_t vsn_test_size_words = MediumChunk * 4;
3873     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3874 
3875     // The chunk sizes must be multiples of each other, or this will fail.
3876     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3877     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3878 
3879     { // No committed memory in the VSN.
3880       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3881       VirtualSpaceNode vsn(vsn_test_size_bytes);
3882       vsn.initialize();
3883       vsn.retire(&cm);
3884       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3885     }
3886 
3887     { // All of the VSN is committed, half is used by chunks.
3888       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3889       VirtualSpaceNode vsn(vsn_test_size_bytes);
3890       vsn.initialize();
3891       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3892       vsn.get_chunk_vs(MediumChunk);
3893       vsn.get_chunk_vs(MediumChunk);
3894       vsn.retire(&cm);
3895       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3896       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3897     }
3898 
3899     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3900     // This doesn't work for systems with vm_page_size >= 16K.
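    // Illustrative arithmetic (assuming 8-byte words): with 16K pages,
    // page_chunks == 4*16K/8 == 8192 words, which already equals MediumChunk
    // (8*K words), so the small/specialized remainder scenario below cannot
    // be constructed on such systems and is skipped.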
3901 if (page_chunks < MediumChunk) { 3902 // 4 pages of VSN is committed, some is used by chunks 3903 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); 3904 VirtualSpaceNode vsn(vsn_test_size_bytes); 3905 3906 vsn.initialize(); 3907 vsn.expand_by(page_chunks, page_chunks); 3908 vsn.get_chunk_vs(SmallChunk); 3909 vsn.get_chunk_vs(SpecializedChunk); 3910 vsn.retire(&cm); 3911 3912 // committed - used = words left to retire 3913 const size_t words_left = page_chunks - SmallChunk - SpecializedChunk; 3914 3915 size_t num_medium_chunks, num_small_chunks, num_spec_chunks; 3916 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks); 3917 3918 assert(num_medium_chunks == 0, "should not get any medium chunks"); 3919 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks"); 3920 assert(cm.sum_free_chunks() == words_left, "sizes should add up"); 3921 } 3922 3923 { // Half of VSN is committed, a humongous chunk is used 3924 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); 3925 VirtualSpaceNode vsn(vsn_test_size_bytes); 3926 vsn.initialize(); 3927 vsn.expand_by(MediumChunk * 2, MediumChunk * 2); 3928 vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk 3929 vsn.retire(&cm); 3930 3931 const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk); 3932 size_t num_medium_chunks, num_small_chunks, num_spec_chunks; 3933 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks); 3934 3935 assert(num_medium_chunks == 0, "should not get any medium chunks"); 3936 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks"); 3937 assert(cm.sum_free_chunks() == words_left, "sizes should add up"); 3938 } 3939 3940 } 3941 3942 #define assert_is_available_positive(word_size) \ 3943 assert(vsn.is_available(word_size), \ 3944 #word_size ": " PTR_FORMAT " bytes were not available in " \ 3945 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \ 3946 (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end())); 3947 3948 #define assert_is_available_negative(word_size) \ 3949 assert(!vsn.is_available(word_size), \ 3950 #word_size ": " PTR_FORMAT " bytes should not be available in " \ 3951 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \ 3952 (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end())); 3953 3954 static void test_is_available_positive() { 3955 // Reserve some memory. 3956 VirtualSpaceNode vsn(os::vm_allocation_granularity()); 3957 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); 3958 3959 // Commit some memory. 3960 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord; 3961 bool expanded = vsn.expand_by(commit_word_size, commit_word_size); 3962 assert(expanded, "Failed to commit"); 3963 3964 // Check that is_available accepts the committed size. 3965 assert_is_available_positive(commit_word_size); 3966 3967 // Check that is_available accepts half the committed size. 3968 size_t expand_word_size = commit_word_size / 2; 3969 assert_is_available_positive(expand_word_size); 3970 } 3971 3972 static void test_is_available_negative() { 3973 // Reserve some memory. 3974 VirtualSpaceNode vsn(os::vm_allocation_granularity()); 3975 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); 3976 3977 // Commit some memory. 
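    // For scale (illustrative, platform-dependent values): with a 4K
    // allocation granularity and 8-byte words this commits 512 words; on
    // Windows, where the granularity is typically 64K, it commits 8192 words.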
3978 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord; 3979 bool expanded = vsn.expand_by(commit_word_size, commit_word_size); 3980 assert(expanded, "Failed to commit"); 3981 3982 // Check that is_available doesn't accept a too large size. 3983 size_t two_times_commit_word_size = commit_word_size * 2; 3984 assert_is_available_negative(two_times_commit_word_size); 3985 } 3986 3987 static void test_is_available_overflow() { 3988 // Reserve some memory. 3989 VirtualSpaceNode vsn(os::vm_allocation_granularity()); 3990 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); 3991 3992 // Commit some memory. 3993 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord; 3994 bool expanded = vsn.expand_by(commit_word_size, commit_word_size); 3995 assert(expanded, "Failed to commit"); 3996 3997 // Calculate a size that will overflow the virtual space size. 3998 void* virtual_space_max = (void*)(uintptr_t)-1; 3999 size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1); 4000 size_t overflow_size = bottom_to_max + BytesPerWord; 4001 size_t overflow_word_size = overflow_size / BytesPerWord; 4002 4003 // Check that is_available can handle the overflow. 4004 assert_is_available_negative(overflow_word_size); 4005 } 4006 4007 static void test_is_available() { 4008 TestVirtualSpaceNodeTest::test_is_available_positive(); 4009 TestVirtualSpaceNodeTest::test_is_available_negative(); 4010 TestVirtualSpaceNodeTest::test_is_available_overflow(); 4011 } 4012 }; 4013 4014 void TestVirtualSpaceNode_test() { 4015 TestVirtualSpaceNodeTest::test(); 4016 TestVirtualSpaceNodeTest::test_is_available(); 4017 } 4018 #endif
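// How these hooks are usually driven (an assumption about the test harness,
// not something this file states): in non-product builds the internal VM test
// runner, e.g.
//   java -XX:+ExecuteInternalVMTests -version
// is expected to invoke TestMetaspaceAux_test() and TestVirtualSpaceNode_test().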