/*
 * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

// Helper, returns a descriptive name for the given index.
static const char* chunk_size_name(ChunkIndex index) {
  switch (index) {
    case SpecializedIndex:
      return "specialized";
    case SmallIndex:
      return "small";
    case MediumIndex:
      return "medium";
    case HumongousIndex:
      return "humongous";
    default:
      return "Invalid index";
  }
}
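// For scale (an illustrative note, assuming a 64-bit VM where BytesPerWord == 8):
// the chunk tiers defined below correspond to 1 KB (specialized), 4 KB (small)
// and 64 KB (medium) chunks for the non-class metaspace, and 1 KB, 2 KB and
// 32 KB for the class space. Any request larger than a medium chunk is
// treated as humongous.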
enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bound");
  return (ChunkIndex) (i+1);
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free lists of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  // Return non-humongous chunk list by its index.
  ChunkList* free_chunks(ChunkIndex index);

  // Returns non-humongous chunk list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Returns the humongous chunk dictionary.
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  // Size, in metaspace words, of all chunks managed by this ChunkManager
  size_t _free_chunks_total;
  // Number of chunks in this ChunkManager
  size_t _free_chunks_count;

  // Update counters after a chunk was added or removed.
  void account_for_added_chunk(const Metachunk* c);
  void account_for_removed_chunk(const Metachunk* c);

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // Remove (allocate) a chunk from the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  ChunkIndex list_index(size_t size);

  // Map a given index to the chunk size.
  size_t size_by_index(ChunkIndex index);

  // Take a chunk from the ChunkManager. The chunk is expected to be in
  // the chunk manager (the freelist if non-humongous, the dictionary if
  // humongous).
  void remove_chunk(Metachunk* chunk);

  // Return a single chunk of type index to the ChunkManager.
  void return_single_chunk(ChunkIndex index, Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunk_list(ChunkIndex index, Metachunk* chunk);

  // Total of the space in the free chunks list
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks list
  size_t free_chunks_count();

  // Remove from a list by size. Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);

#define index_bounds_check(index)                                         \
  assert(index == SpecializedIndex ||                                     \
         index == SmallIndex ||                                           \
         index == MediumIndex ||                                          \
         index == HumongousIndex, "Bad index: %d", (int) index)

  size_t num_free_chunks(ChunkIndex index) const {
    index_bounds_check(index);

    if (index == HumongousIndex) {
      return _humongous_dictionary.total_free_blocks();
    }

    ssize_t count = _free_chunks[index].count();
    return count == -1 ? 0 : (size_t) count;
  }

  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
    index_bounds_check(index);

    size_t word_size = 0;
    if (index == HumongousIndex) {
      word_size = _humongous_dictionary.total_size();
    } else {
      const size_t size_per_chunk_in_words = _free_chunks[index].size();
      word_size = size_per_chunk_in_words * num_free_chunks(index);
    }

    return word_size * BytesPerWord;
  }

  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
                                         num_free_chunks(SmallIndex),
                                         num_free_chunks(MediumIndex),
                                         num_free_chunks(HumongousIndex),
                                         size_free_chunks_in_bytes(SpecializedIndex),
                                         size_free_chunks_in_bytes(SmallIndex),
                                         size_free_chunks_in_bytes(MediumIndex),
                                         size_free_chunks_in_bytes(HumongousIndex));
  }

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;
};
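// How sizes map to the free lists above (illustrative; the mapping itself is
// implemented by ChunkManager::list_index() elsewhere in this file): a chunk
// whose word size exactly matches one of the fixed sizes set in the
// constructor lands on the corresponding free list; every other size is
// treated as humongous and kept in the dictionary, keyed by size.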
class SmallBlocks : public CHeapObj<mtClass> {
  const static uint _small_block_max_size = sizeof(TreeChunk<Metablock, FreeList<Metablock> >)/HeapWordSize;
  const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;

 private:
  FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];

  FreeList<Metablock>& list_at(size_t word_size) {
    assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
    return _small_lists[word_size - _small_block_min_size];
  }

 public:
  SmallBlocks() {
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      _small_lists[k].set_size(i);
    }
  }

  size_t total_size() const {
    size_t result = 0;
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      result = result + _small_lists[k].count() * _small_lists[k].size();
    }
    return result;
  }

  static uint small_block_max_size() { return _small_block_max_size; }
  static uint small_block_min_size() { return _small_block_min_size; }

  MetaWord* get_block(size_t word_size) {
    if (list_at(word_size).count() > 0) {
      MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
      return new_block;
    } else {
      return NULL;
    }
  }
  void return_block(Metablock* free_chunk, size_t word_size) {
    list_at(word_size).return_chunk_at_head(free_chunk, false);
    assert(list_at(word_size).count() > 0, "Should have a chunk");
  }

  void print_on(outputStream* st) const {
    st->print_cr("SmallBlocks:");
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
    }
  }
};

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist : public CHeapObj<mtClass> {
  BlockTreeDictionary* const _dictionary;
  SmallBlocks* _small_blocks;

  // Only allocate and split from the freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }
  SmallBlocks* small_blocks() {
    if (_small_blocks == NULL) {
      _small_blocks = new SmallBlocks();
    }
    return _small_blocks;
  }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get a block from, or return a block to, the free list.
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() const {
    size_t result = dictionary()->total_size();
    if (_small_blocks != NULL) {
      result = result + _small_blocks->total_size();
    }
    return result;
  }

  static size_t min_dictionary_size() { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
  void print_on(outputStream* st) const;
};
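// An illustrative round trip through a BlockFreelist, assuming a 10-word
// block stays below SmallBlocks::small_block_max_size(): return_block(p, 10)
// files the block in _small_blocks, and a later get_block(10) hands it back.
// The WasteMultiplier guard bounds fragmentation on the dictionary path: if
// the best fit for a 10-word request were a 100-word block, get_block()
// would re-file the block and return NULL, since 100 > 4 * 10 and splitting
// it would waste too much of the block.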
// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;

 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }

  // Address of the next available space in _virtual_space
  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uintx container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space. Delegates
  // to VirtualSpace.
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always adds up because all the chunk sizes are multiples of
  // the smallest chunk size.
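  // For example (illustrative, non-class chunk sizes): with 1152 free words
  // left in the node, retire() carves out two 512-word small chunks and one
  // 128-word specialized chunk (1152 == 2 * 512 + 128), leaving no waste.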
  void retire(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

#define assert_is_aligned(value, alignment)                  \
  assert(is_aligned((value), (alignment)),                   \
         SIZE_FORMAT_HEX " is not aligned to "               \
         SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))

// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// byte_size is the size of the associated virtual space.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_aligned(bytes, Metaspace::reserve_alignment());

#if INCLUDE_CDS
  // This allocates memory with mmap. For DumpSharedSpaces, try to reserve
  // the space at a configurable address, generally at the top of the Java
  // heap, so other memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = align_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get a mmap region anywhere if the SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    if (!_rs.is_reserved()) {
      vm_exit_during_initialization("Unable to allocate memory for shared space",
        err_msg(SIZE_FORMAT " bytes.", bytes));
    }
    MetaspaceShared::initialize_shared_rs(&_rs);
  } else
#endif
  {
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uintx VirtualSpaceNode::container_count_slow() {
  uintx count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists. Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list. This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

  // Chunk up the unused committed space in the current
  // virtual space and add the chunks to the free list.
  void retire_current_virtual_space();

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t chunk_word_size,
                           size_t suggested_commit_granularity);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  bool contains(const void* ptr);

  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;

// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager. Allocations
  // are done from the current chunk. The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Maximum number of small chunks to allocate to a SpaceManager
  static uint const _small_chunk_limit;

  // Sum of all space in allocated chunks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist* _block_freelists;

  // protects virtual space and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const { return _block_freelists; }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };
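  // An illustrative consequence of the fixed geometry above: a non-class
  // SpaceManager expands committed space in "bunches" of
  // MediumChunk * MediumChunkMultiple == 8 K words * 4 == 32 K words
  // (see medium_chunk_bunch() below).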
  static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
  static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
  static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }

  static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }

  // Accessors
  bool is_class() const { return _mdtype == Metaspace::ClassType; }

  size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
  size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
  size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }

  size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }

  size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size. This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks
  // by the given size. This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager. That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in-use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Adjust the initial chunk size to match one of the fixed chunk list sizes,
  // or return the unadjusted size if the requested size is humongous.
  static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
  size_t adjust_initial_chunk_size(size_t requested) const;

  // Get the initial chunk size for this metaspace type.
  size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t chunk_word_size);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);
  // Allocates a block from a small chunk
  MetaWord* get_small_chunk_and_allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // return the chunk size to use when expanding space for a chunk allocation.
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  // This adjusts the size given to be greater than the minimum allocation size in
  // words for data in metaspace. Essentially the minimum size is currently 3 words.
  size_t get_allocation_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
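  // Worked example for get_allocation_word_size() (illustrative, assuming a
  // 64-bit VM): a request for 1 word (8 bytes) is raised to sizeof(Metablock)
  // and then aligned to Metachunk::object_alignment(), which yields the
  // 3-word (24-byte) minimum mentioned above; larger requests are only
  // rounded up to the alignment.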
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag,
            Monitor::_safepoint_check_never);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         "Inconsistency in container_count _container_count " UINTX_FORMAT
         " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}

BlockFreelist::~BlockFreelist() {
  delete _dictionary;
  if (_small_blocks != NULL) {
    delete _small_blocks;
  }
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");

  Metablock* free_chunk = ::new (p) Metablock(word_size);
  if (word_size < SmallBlocks::small_block_max_size()) {
    small_blocks()->return_block(free_chunk, word_size);
  } else {
    dictionary()->return_chunk(free_chunk);
  }
  log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
            SIZE_FORMAT, p2i(free_chunk), word_size);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");

  // Try small_blocks first.
  if (word_size < SmallBlocks::small_block_max_size()) {
    // Don't create small_blocks() until needed. small_blocks() allocates the small block list for
    // this space manager.
    MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
    if (new_block != NULL) {
      log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
                p2i(new_block), word_size);
      return new_block;
    }
  }

  if (word_size < BlockFreelist::min_dictionary_size()) {
    // If allocation in small blocks fails, this is Dark Matter. Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= SmallBlocks::small_block_min_size()) {
    return_block(new_block + word_size, unused);
  }

  log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
            p2i(new_block), word_size);
  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  dictionary()->print_free_lists(st);
  if (_small_blocks != NULL) {
    _small_blocks->print_on(st);
  }
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}
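// Taken together the three sizes above obey a simple bump-pointer layout
// invariant: bottom() <= top() <= end(), used_words_in_vs() covers
// [bottom, top), free_words_in_vs() covers [top, end), and therefore
// used_words_in_vs() + free_words_in_vs() == capacity_words_in_vs().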
// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging. Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
         "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    Log(gc, metaspace, freelist) log;
    log.debug("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
    // Dump some information about the virtual space that is nearly full
    ResourceMark rm;
    print_on(log.debug_stream());
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned only the middle alignment of the VirtualSpace is used.
  assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                                             Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
           "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                           (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
           "Reserved start was not set properly " PTR_FORMAT
           " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
           "Reserved size was not set properly " SIZE_FORMAT
           " != " SIZE_FORMAT, reserved()->word_size(),
           _rs.size() / BytesPerWord);
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
               "[" PTR_FORMAT ", " PTR_FORMAT ", "
               PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(vs), capacity / K,
               capacity == 0 ? 0 : used * 100 / capacity,
               p2i(bottom()), p2i(top()), p2i(end()),
               p2i(vs->high_boundary()));
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                        \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
         "Too much committed memory. Committed: " SIZE_FORMAT \
         " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
         MetaspaceAux::committed_bytes(), MaxMetaspaceSize);

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk has been removed from the chunks free list, update counters.
  account_for_removed_chunk(chunk);
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count. Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    DEBUG_ONLY(vsl->verify_container_count();)
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
1198 assert(vsl == virtual_space_list(), "Expected to be the first node"); 1199 set_virtual_space_list(vsl->next()); 1200 } else { 1201 prev_vsl->set_next(vsl->next()); 1202 } 1203 1204 vsl->purge(chunk_manager); 1205 dec_reserved_words(vsl->reserved_words()); 1206 dec_committed_words(vsl->committed_words()); 1207 dec_virtual_space_count(); 1208 purged_vsl = vsl; 1209 delete vsl; 1210 } else { 1211 prev_vsl = vsl; 1212 } 1213 } 1214 #ifdef ASSERT 1215 if (purged_vsl != NULL) { 1216 // List should be stable enough to use an iterator here. 1217 VirtualSpaceListIterator iter(virtual_space_list()); 1218 while (iter.repeat()) { 1219 VirtualSpaceNode* vsl = iter.get_next(); 1220 assert(vsl != purged_vsl, "Purge of vsl failed"); 1221 } 1222 } 1223 #endif 1224 } 1225 1226 1227 // This function looks at the mmap regions in the metaspace without locking. 1228 // The chunks are added with store ordering and not deleted except for at 1229 // unloading time during a safepoint. 1230 bool VirtualSpaceList::contains(const void* ptr) { 1231 // List should be stable enough to use an iterator here because removing virtual 1232 // space nodes is only allowed at a safepoint. 1233 VirtualSpaceListIterator iter(virtual_space_list()); 1234 while (iter.repeat()) { 1235 VirtualSpaceNode* vsn = iter.get_next(); 1236 if (vsn->contains(ptr)) { 1237 return true; 1238 } 1239 } 1240 return false; 1241 } 1242 1243 void VirtualSpaceList::retire_current_virtual_space() { 1244 assert_lock_strong(SpaceManager::expand_lock()); 1245 1246 VirtualSpaceNode* vsn = current_virtual_space(); 1247 1248 ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() : 1249 Metaspace::chunk_manager_metadata(); 1250 1251 vsn->retire(cm); 1252 } 1253 1254 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) { 1255 DEBUG_ONLY(verify_container_count();) 1256 for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) { 1257 ChunkIndex index = (ChunkIndex)i; 1258 size_t chunk_size = chunk_manager->size_by_index(index); 1259 1260 while (free_words_in_vs() >= chunk_size) { 1261 Metachunk* chunk = get_chunk_vs(chunk_size); 1262 assert(chunk != NULL, "allocation should have been successful"); 1263 1264 chunk_manager->return_single_chunk(index, chunk); 1265 } 1266 DEBUG_ONLY(verify_container_count();) 1267 } 1268 assert(free_words_in_vs() == 0, "should be empty now"); 1269 } 1270 1271 VirtualSpaceList::VirtualSpaceList(size_t word_size) : 1272 _is_class(false), 1273 _virtual_space_list(NULL), 1274 _current_virtual_space(NULL), 1275 _reserved_words(0), 1276 _committed_words(0), 1277 _virtual_space_count(0) { 1278 MutexLockerEx cl(SpaceManager::expand_lock(), 1279 Mutex::_no_safepoint_check_flag); 1280 create_new_virtual_space(word_size); 1281 } 1282 1283 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) : 1284 _is_class(true), 1285 _virtual_space_list(NULL), 1286 _current_virtual_space(NULL), 1287 _reserved_words(0), 1288 _committed_words(0), 1289 _virtual_space_count(0) { 1290 MutexLockerEx cl(SpaceManager::expand_lock(), 1291 Mutex::_no_safepoint_check_flag); 1292 VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs); 1293 bool succeeded = class_entry->initialize(); 1294 if (succeeded) { 1295 link_vs(class_entry); 1296 } 1297 } 1298 1299 size_t VirtualSpaceList::free_bytes() { 1300 return virtual_space_list()->free_words_in_vs() * BytesPerWord; 1301 } 1302 1303 // Allocate another meta virtual space and add it to the list. 
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
           "Reserved memory size differs from requested memory size");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (log_is_enabled(Trace, gc, metaspace)) {
    Log(gc, metaspace) log;
    VirtualSpaceNode* vsl = current_virtual_space();
    ResourceMark rm;
    vsl->print_on(log.trace_stream());
  }
}

bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
  assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
             "The new VirtualSpace was pre-committed, so it"
             " should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size       = align_up(chunk_word_size,              Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(chunk_word_size);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    node->print_on(st);
  }
}

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed. If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata. With the perm gen, increases of the perm
// gen were bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion; the
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces. The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GCs. New flags can be implemented
// if really needed. MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM. MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.
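// A sketch of the resulting step function (illustrative): an aligned request
// below MinMetaspaceExpansion raises the HWM by MinMetaspaceExpansion; a
// request between MinMetaspaceExpansion and MaxMetaspaceExpansion raises it
// by MaxMetaspaceExpansion; anything larger raises it by the aligned request
// plus MinMetaspaceExpansion, so the very next small allocation does not
// immediately hit the new HWM again.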
// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon. If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus MinMetaspaceExpansion.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  size_t capacity_until_GC = (size_t) _capacity_until_GC;
  size_t new_value = capacity_until_GC + v;

  if (new_value < capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_down(max_uintx, Metaspace::commit_alignment());
  }

  intptr_t expected = (intptr_t) capacity_until_GC;
  intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);

  if (expected != actual) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = capacity_until_GC;
  }
  return true;
}
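// inc_capacity_until_GC() returns false only when the cmpxchg above lost a
// race, so a caller that must raise the HWM can simply retry. A minimal
// sketch (illustrative only; real call sites add tracing and may recompute
// the delta between attempts):
//
//   size_t new_cap, old_cap;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, &new_cap, &old_cap)) {
//     // Another thread moved _capacity_until_GC; try again.
//   }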
size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
         capacity_until_gc, committed_bytes);

  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);

  return left_to_commit / BytesPerWord;
}
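// Worked example for the expansion check below (illustrative): with
// used_after_gc == 60 MB and MinMetaspaceFreeRatio == 40, the maximum used
// percentage is 0.6, so minimum_desired_capacity == 60 MB / 0.6 == 100 MB;
// the HWM is grown if it is below that value and never shrunk beneath it.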
1625     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1626     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
1627     // Don't expand unless it's significant
1628     if (expand_bytes >= MinMetaspaceExpansion) {
1629       size_t new_capacity_until_GC = 0;
1630       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1631       assert(succeeded, "Should always successfully increment HWM when at safepoint");
1632 
1633       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1634                                                new_capacity_until_GC,
1635                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
1636       log_trace(gc, metaspace)(" expanding: minimum_desired_capacity: %6.1fKB expand_bytes: %6.1fKB MinMetaspaceExpansion: %6.1fKB new metaspace HWM: %6.1fKB",
1637                                minimum_desired_capacity / (double) K,
1638                                expand_bytes / (double) K,
1639                                MinMetaspaceExpansion / (double) K,
1640                                new_capacity_until_GC / (double) K);
1641     }
1642     return;
1643   }
1644 
1645   // No expansion, now see if we want to shrink
1646   // We would never want to shrink more than this
1647   assert(capacity_until_GC >= minimum_desired_capacity,
1648          SIZE_FORMAT " >= " SIZE_FORMAT,
1649          capacity_until_GC, minimum_desired_capacity);
1650   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1651 
1652   // Should shrinking be considered?
1653   if (MaxMetaspaceFreeRatio < 100) {
1654     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1655     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1656     const double max_tmp = used_after_gc / minimum_used_percentage;
1657     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1658     maximum_desired_capacity = MAX2(maximum_desired_capacity,
1659                                     MetaspaceSize);
1660     log_trace(gc, metaspace)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f",
1661                              maximum_free_percentage, minimum_used_percentage);
1662     log_trace(gc, metaspace)(" minimum_desired_capacity: %6.1fKB maximum_desired_capacity: %6.1fKB",
1663                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
1664 
1665     assert(minimum_desired_capacity <= maximum_desired_capacity,
1666            "sanity check");
1667 
1668     if (capacity_until_GC > maximum_desired_capacity) {
1669       // Capacity too large, compute shrinking size
1670       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1671       // We don't want to shrink all the way back to initSize if people call
1672       // System.gc(), because some programs do that between "phases" and then
1673       // we'd just have to grow the heap up again for the next phase. So we
1674       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1675       // on the third call, and 100% by the fourth call. But if we recompute
1676       // size without shrinking, it goes back to 0%.
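// Worked example of the damping (hypothetical sizes): suppose each call
// finds 100M of excess capacity above maximum_desired_capacity.
// Call 1: factor  0%  -> shrink 0M,  next factor 10%.
// Call 2: factor 10%  -> shrink 10M, next factor 40%.
// Call 3: factor 40%  -> shrink 40M, next factor 100%.
// Call 4: factor 100% -> shrink the full remaining excess.
// A recomputation that does not shrink resets the factor to 0%.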
1677 shrink_bytes = shrink_bytes / 100 * current_shrink_factor; 1678 1679 shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment()); 1680 1681 assert(shrink_bytes <= max_shrink_bytes, 1682 "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, 1683 shrink_bytes, max_shrink_bytes); 1684 if (current_shrink_factor == 0) { 1685 _shrink_factor = 10; 1686 } else { 1687 _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100); 1688 } 1689 log_trace(gc, metaspace)(" shrinking: initThreshold: %.1fK maximum_desired_capacity: %.1fK", 1690 MetaspaceSize / (double) K, maximum_desired_capacity / (double) K); 1691 log_trace(gc, metaspace)(" shrink_bytes: %.1fK current_shrink_factor: %d new shrink factor: %d MinMetaspaceExpansion: %.1fK", 1692 shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K); 1693 } 1694 } 1695 1696 // Don't shrink unless it's significant 1697 if (shrink_bytes >= MinMetaspaceExpansion && 1698 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { 1699 size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes); 1700 Metaspace::tracer()->report_gc_threshold(capacity_until_GC, 1701 new_capacity_until_GC, 1702 MetaspaceGCThresholdUpdater::ComputeNewSize); 1703 } 1704 } 1705 1706 // Metadebug methods 1707 1708 void Metadebug::init_allocation_fail_alot_count() { 1709 if (MetadataAllocationFailALot) { 1710 _allocation_fail_alot_count = 1711 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0)); 1712 } 1713 } 1714 1715 #ifdef ASSERT 1716 bool Metadebug::test_metadata_failure() { 1717 if (MetadataAllocationFailALot && 1718 Threads::is_vm_complete()) { 1719 if (_allocation_fail_alot_count > 0) { 1720 _allocation_fail_alot_count--; 1721 } else { 1722 log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot"); 1723 init_allocation_fail_alot_count(); 1724 return true; 1725 } 1726 } 1727 return false; 1728 } 1729 #endif 1730 1731 // ChunkManager methods 1732 1733 size_t ChunkManager::free_chunks_total_words() { 1734 return _free_chunks_total; 1735 } 1736 1737 size_t ChunkManager::free_chunks_total_bytes() { 1738 return free_chunks_total_words() * BytesPerWord; 1739 } 1740 1741 // Update internal accounting after a chunk was added 1742 void ChunkManager::account_for_added_chunk(const Metachunk* c) { 1743 assert_lock_strong(SpaceManager::expand_lock()); 1744 _free_chunks_count ++; 1745 _free_chunks_total += c->word_size(); 1746 } 1747 1748 // Update internal accounting after a chunk was removed 1749 void ChunkManager::account_for_removed_chunk(const Metachunk* c) { 1750 assert_lock_strong(SpaceManager::expand_lock()); 1751 assert(_free_chunks_count >= 1, 1752 "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count); 1753 assert(_free_chunks_total >= c->word_size(), 1754 "ChunkManager::_free_chunks_total: about to go negative" 1755 "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size()); 1756 _free_chunks_count --; 1757 _free_chunks_total -= c->word_size(); 1758 } 1759 1760 size_t ChunkManager::free_chunks_count() { 1761 #ifdef ASSERT 1762 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) { 1763 MutexLockerEx cl(SpaceManager::expand_lock(), 1764 Mutex::_no_safepoint_check_flag); 1765 // This lock is only needed in debug because the verification 1766 // of the _free_chunks_totals walks the list of free chunks 1767 
slow_locked_verify_free_chunks_count(); 1768 } 1769 #endif 1770 return _free_chunks_count; 1771 } 1772 1773 ChunkIndex ChunkManager::list_index(size_t size) { 1774 if (size_by_index(SpecializedIndex) == size) { 1775 return SpecializedIndex; 1776 } 1777 if (size_by_index(SmallIndex) == size) { 1778 return SmallIndex; 1779 } 1780 const size_t med_size = size_by_index(MediumIndex); 1781 if (med_size == size) { 1782 return MediumIndex; 1783 } 1784 1785 assert(size > med_size, "Not a humongous chunk"); 1786 return HumongousIndex; 1787 } 1788 1789 size_t ChunkManager::size_by_index(ChunkIndex index) { 1790 index_bounds_check(index); 1791 assert(index != HumongousIndex, "Do not call for humongous chunks."); 1792 return free_chunks(index)->size(); 1793 } 1794 1795 void ChunkManager::locked_verify_free_chunks_total() { 1796 assert_lock_strong(SpaceManager::expand_lock()); 1797 assert(sum_free_chunks() == _free_chunks_total, 1798 "_free_chunks_total " SIZE_FORMAT " is not the" 1799 " same as sum " SIZE_FORMAT, _free_chunks_total, 1800 sum_free_chunks()); 1801 } 1802 1803 void ChunkManager::verify_free_chunks_total() { 1804 MutexLockerEx cl(SpaceManager::expand_lock(), 1805 Mutex::_no_safepoint_check_flag); 1806 locked_verify_free_chunks_total(); 1807 } 1808 1809 void ChunkManager::locked_verify_free_chunks_count() { 1810 assert_lock_strong(SpaceManager::expand_lock()); 1811 assert(sum_free_chunks_count() == _free_chunks_count, 1812 "_free_chunks_count " SIZE_FORMAT " is not the" 1813 " same as sum " SIZE_FORMAT, _free_chunks_count, 1814 sum_free_chunks_count()); 1815 } 1816 1817 void ChunkManager::verify_free_chunks_count() { 1818 #ifdef ASSERT 1819 MutexLockerEx cl(SpaceManager::expand_lock(), 1820 Mutex::_no_safepoint_check_flag); 1821 locked_verify_free_chunks_count(); 1822 #endif 1823 } 1824 1825 void ChunkManager::verify() { 1826 MutexLockerEx cl(SpaceManager::expand_lock(), 1827 Mutex::_no_safepoint_check_flag); 1828 locked_verify(); 1829 } 1830 1831 void ChunkManager::locked_verify() { 1832 locked_verify_free_chunks_count(); 1833 locked_verify_free_chunks_total(); 1834 } 1835 1836 void ChunkManager::locked_print_free_chunks(outputStream* st) { 1837 assert_lock_strong(SpaceManager::expand_lock()); 1838 st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, 1839 _free_chunks_total, _free_chunks_count); 1840 } 1841 1842 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) { 1843 assert_lock_strong(SpaceManager::expand_lock()); 1844 st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, 1845 sum_free_chunks(), sum_free_chunks_count()); 1846 } 1847 1848 ChunkList* ChunkManager::free_chunks(ChunkIndex index) { 1849 assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex, 1850 "Bad index: %d", (int)index); 1851 1852 return &_free_chunks[index]; 1853 } 1854 1855 // These methods that sum the free chunk lists are used in printing 1856 // methods that are used in product builds. 
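// For example (illustrative numbers): a ChunkManager whose small freelist
// holds 3 chunks of 512 words and whose humongous dictionary totals 10000
// words reports sum_free_chunks() = 3 * 512 + 10000 = 11536 words, matching
// the loops below.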
1857 size_t ChunkManager::sum_free_chunks() {
1858   assert_lock_strong(SpaceManager::expand_lock());
1859   size_t result = 0;
1860   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1861     ChunkList* list = free_chunks(i);
1862 
1863     if (list == NULL) {
1864       continue;
1865     }
1866 
1867     result = result + list->count() * list->size();
1868   }
1869   result = result + humongous_dictionary()->total_size();
1870   return result;
1871 }
1872 
1873 size_t ChunkManager::sum_free_chunks_count() {
1874   assert_lock_strong(SpaceManager::expand_lock());
1875   size_t count = 0;
1876   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1877     ChunkList* list = free_chunks(i);
1878     if (list == NULL) {
1879       continue;
1880     }
1881     count = count + list->count();
1882   }
1883   count = count + humongous_dictionary()->total_free_blocks();
1884   return count;
1885 }
1886 
1887 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1888   ChunkIndex index = list_index(word_size);
1889   assert(index < HumongousIndex, "No humongous list");
1890   return free_chunks(index);
1891 }
1892 
1893 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1894   assert_lock_strong(SpaceManager::expand_lock());
1895 
1896   slow_locked_verify();
1897 
1898   Metachunk* chunk = NULL;
1899   if (list_index(word_size) != HumongousIndex) {
1900     ChunkList* free_list = find_free_chunks_list(word_size);
1901     assert(free_list != NULL, "Sanity check");
1902 
1903     chunk = free_list->head();
1904 
1905     if (chunk == NULL) {
1906       return NULL;
1907     }
1908 
1909     // Remove the chunk as the head of the list.
1910     free_list->remove_chunk(chunk);
1911 
1912     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1913                                        p2i(free_list), p2i(chunk), chunk->word_size());
1914   } else {
1915     chunk = humongous_dictionary()->get_chunk(
1916       word_size,
1917       FreeBlockDictionary<Metachunk>::atLeast);
1918 
1919     if (chunk == NULL) {
1920       return NULL;
1921     }
1922 
1923     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
1924                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
1925   }
1926 
1927   // Chunk has been removed from the chunk manager; update counters.
1928   account_for_removed_chunk(chunk);
1929 
1930   // Remove it from the links to this freelist
1931   chunk->set_next(NULL);
1932   chunk->set_prev(NULL);
1933 #ifdef ASSERT
1934   // Chunk is no longer on any freelist. Setting to false makes container_count_slow()
1935   // work.
1936 chunk->set_is_tagged_free(false); 1937 #endif 1938 chunk->container()->inc_container_count(); 1939 1940 slow_locked_verify(); 1941 return chunk; 1942 } 1943 1944 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) { 1945 assert_lock_strong(SpaceManager::expand_lock()); 1946 slow_locked_verify(); 1947 1948 // Take from the beginning of the list 1949 Metachunk* chunk = free_chunks_get(word_size); 1950 if (chunk == NULL) { 1951 return NULL; 1952 } 1953 1954 assert((word_size <= chunk->word_size()) || 1955 (list_index(chunk->word_size()) == HumongousIndex), 1956 "Non-humongous variable sized chunk"); 1957 Log(gc, metaspace, freelist) log; 1958 if (log.is_debug()) { 1959 size_t list_count; 1960 if (list_index(word_size) < HumongousIndex) { 1961 ChunkList* list = find_free_chunks_list(word_size); 1962 list_count = list->count(); 1963 } else { 1964 list_count = humongous_dictionary()->total_count(); 1965 } 1966 log.debug("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ", 1967 p2i(this), p2i(chunk), chunk->word_size(), list_count); 1968 ResourceMark rm; 1969 locked_print_free_chunks(log.debug_stream()); 1970 } 1971 1972 return chunk; 1973 } 1974 1975 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) { 1976 assert_lock_strong(SpaceManager::expand_lock()); 1977 assert(chunk != NULL, "Expected chunk."); 1978 assert(chunk->container() != NULL, "Container should have been set."); 1979 assert(chunk->is_tagged_free() == false, "Chunk should be in use."); 1980 index_bounds_check(index); 1981 1982 // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not 1983 // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary 1984 // keeps tree node pointers in the chunk payload area which mangle will overwrite. 1985 NOT_PRODUCT(chunk->mangle(badMetaWordVal);) 1986 1987 if (index != HumongousIndex) { 1988 // Return non-humongous chunk to freelist. 1989 ChunkList* list = free_chunks(index); 1990 assert(list->size() == chunk->word_size(), "Wrong chunk type."); 1991 list->return_chunk_at_head(chunk); 1992 log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.", 1993 chunk_size_name(index), p2i(chunk)); 1994 } else { 1995 // Return humongous chunk to dictionary. 1996 assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type."); 1997 assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0, 1998 "Humongous chunk has wrong alignment."); 1999 _humongous_dictionary.return_chunk(chunk); 2000 log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.", 2001 chunk_size_name(index), p2i(chunk), chunk->word_size()); 2002 } 2003 chunk->container()->dec_container_count(); 2004 DEBUG_ONLY(chunk->set_is_tagged_free(true);) 2005 2006 // Chunk has been added; update counters. 
2007 account_for_added_chunk(chunk); 2008 2009 } 2010 2011 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) { 2012 index_bounds_check(index); 2013 if (chunks == NULL) { 2014 return; 2015 } 2016 LogTarget(Trace, gc, metaspace, freelist) log; 2017 if (log.is_enabled()) { // tracing 2018 log.print("returning list of %s chunks...", chunk_size_name(index)); 2019 } 2020 unsigned num_chunks_returned = 0; 2021 size_t size_chunks_returned = 0; 2022 Metachunk* cur = chunks; 2023 while (cur != NULL) { 2024 // Capture the next link before it is changed 2025 // by the call to return_chunk_at_head(); 2026 Metachunk* next = cur->next(); 2027 if (log.is_enabled()) { // tracing 2028 num_chunks_returned ++; 2029 size_chunks_returned += cur->word_size(); 2030 } 2031 return_single_chunk(index, cur); 2032 cur = next; 2033 } 2034 if (log.is_enabled()) { // tracing 2035 log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".", 2036 num_chunks_returned, chunk_size_name(index), size_chunks_returned); 2037 if (index != HumongousIndex) { 2038 log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size()); 2039 } else { 2040 log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count()); 2041 } 2042 } 2043 } 2044 2045 void ChunkManager::print_on(outputStream* out) const { 2046 const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics(out); 2047 } 2048 2049 // SpaceManager methods 2050 2051 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) { 2052 size_t chunk_sizes[] = { 2053 specialized_chunk_size(is_class_space), 2054 small_chunk_size(is_class_space), 2055 medium_chunk_size(is_class_space) 2056 }; 2057 2058 // Adjust up to one of the fixed chunk sizes ... 2059 for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) { 2060 if (requested <= chunk_sizes[i]) { 2061 return chunk_sizes[i]; 2062 } 2063 } 2064 2065 // ... or return the size as a humongous chunk. 
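// Illustrative mapping for the non-class space, using the word sizes from
// the ChunkSizes enum (SpecializedChunk 128, SmallChunk 512, MediumChunk 8K):
// requested 100 -> 128, requested 300 -> 512, requested 5000 -> 8192, and
// requested 20000 exceeds all three, so it falls through to the humongous
// case below and is returned unchanged.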
2066   return requested;
2067 }
2068 
2069 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
2070   return adjust_initial_chunk_size(requested, is_class());
2071 }
2072 
2073 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
2074   size_t requested;
2075 
2076   if (is_class()) {
2077     switch (type) {
2078     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
2079     case Metaspace::ROMetaspaceType:         requested = ClassSpecializedChunk; break;
2080     case Metaspace::ReadWriteMetaspaceType:  requested = ClassSpecializedChunk; break;
2081     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
2082     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
2083     default:                                 requested = ClassSmallChunk; break;
2084     }
2085   } else {
2086     switch (type) {
2087     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
2088     case Metaspace::ROMetaspaceType:         requested = SharedReadOnlySize / wordSize; break;
2089     case Metaspace::ReadWriteMetaspaceType:  requested = SharedReadWriteSize / wordSize; break;
2090     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
2091     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
2092     default:                                 requested = SmallChunk; break;
2093     }
2094   }
2095 
2096   // Adjust to one of the fixed chunk sizes (unless humongous)
2097   const size_t adjusted = adjust_initial_chunk_size(requested);
2098 
2099   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
2100          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
2101 
2102   return adjusted;
2103 }
2104 
2105 size_t SpaceManager::sum_free_in_chunks_in_use() const {
2106   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2107   size_t free = 0;
2108   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2109     Metachunk* chunk = chunks_in_use(i);
2110     while (chunk != NULL) {
2111       free += chunk->free_word_size();
2112       chunk = chunk->next();
2113     }
2114   }
2115   return free;
2116 }
2117 
2118 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
2119   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2120   size_t result = 0;
2121   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2122     result += sum_waste_in_chunks_in_use(i);
2123   }
2124 
2125   return result;
2126 }
2127 
2128 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
2129   size_t result = 0;
2130   Metachunk* chunk = chunks_in_use(index);
2131   // Count the free space in all the chunks but not the
2132   // current chunk from which allocations are still being done.
2133   while (chunk != NULL) {
2134     if (chunk != current_chunk()) {
2135       result += chunk->free_word_size();
2136     }
2137     chunk = chunk->next();
2138   }
2139   return result;
2140 }
2141 
2142 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
2143   // For CMS use "allocated_chunks_words()" which does not need the
2144   // Metaspace lock. For the other collectors sum over the
2145   // lists. Use both methods as a check that "allocated_chunks_words()"
2146   // is correct. That is, sum_capacity_in_chunks_in_use() is too expensive
2147   // to use in the product and allocated_chunks_words() should be used
2148   // but allow for checking that allocated_chunks_words() returns the same
2149   // value as sum_capacity_in_chunks_in_use() which is the definitive
2150   // answer.
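// The debug-time cross-check referred to above happens in ~SpaceManager(),
// which asserts sum_capacity_in_chunks_in_use() == allocated_chunks_words()
// before returning this manager's chunks to the ChunkManager.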
2151   if (UseConcMarkSweepGC) {
2152     return allocated_chunks_words();
2153   } else {
2154     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2155     size_t sum = 0;
2156     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2157       Metachunk* chunk = chunks_in_use(i);
2158       while (chunk != NULL) {
2159         sum += chunk->word_size();
2160         chunk = chunk->next();
2161       }
2162     }
2163     return sum;
2164   }
2165 }
2166 
2167 size_t SpaceManager::sum_count_in_chunks_in_use() {
2168   size_t count = 0;
2169   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2170     count = count + sum_count_in_chunks_in_use(i);
2171   }
2172 
2173   return count;
2174 }
2175 
2176 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
2177   size_t count = 0;
2178   Metachunk* chunk = chunks_in_use(i);
2179   while (chunk != NULL) {
2180     count++;
2181     chunk = chunk->next();
2182   }
2183   return count;
2184 }
2185 
2186 
2187 size_t SpaceManager::sum_used_in_chunks_in_use() const {
2188   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2189   size_t used = 0;
2190   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2191     Metachunk* chunk = chunks_in_use(i);
2192     while (chunk != NULL) {
2193       used += chunk->used_word_size();
2194       chunk = chunk->next();
2195     }
2196   }
2197   return used;
2198 }
2199 
2200 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
2201 
2202   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2203     Metachunk* chunk = chunks_in_use(i);
2204     st->print("SpaceManager: %s " PTR_FORMAT,
2205               chunk_size_name(i), p2i(chunk));
2206     if (chunk != NULL) {
2207       st->print_cr(" free " SIZE_FORMAT,
2208                    chunk->free_word_size());
2209     } else {
2210       st->cr();
2211     }
2212   }
2213 
2214   chunk_manager()->locked_print_free_chunks(st);
2215   chunk_manager()->locked_print_sum_free_chunks(st);
2216 }
2217 
2218 size_t SpaceManager::calc_chunk_size(size_t word_size) {
2219 
2220   // Decide between a small chunk and a medium chunk. Up to
2221   // _small_chunk_limit small chunks can be allocated.
2222   // After that a medium chunk is preferred.
2223   size_t chunk_word_size;
2224   if (chunks_in_use(MediumIndex) == NULL &&
2225       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2226     chunk_word_size = (size_t) small_chunk_size();
2227     if (word_size + Metachunk::overhead() > small_chunk_size()) {
2228       chunk_word_size = medium_chunk_size();
2229     }
2230   } else {
2231     chunk_word_size = medium_chunk_size();
2232   }
2233 
2234   // Might still need a humongous chunk. Enforce
2235   // humongous allocation sizes to be aligned up to
2236   // the smallest chunk size.
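// Illustrative arithmetic (hypothetical numbers): for word_size = 10000 with
// a chunk overhead of 8 words and a smallest chunk size of 128 words,
// if_humongous_sized_chunk = align_up(10008, 128) = 10112 words, which then
// wins the MAX2 below whenever it exceeds the small/medium choice above.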
2237 size_t if_humongous_sized_chunk = 2238 align_up(word_size + Metachunk::overhead(), 2239 smallest_chunk_size()); 2240 chunk_word_size = 2241 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk); 2242 2243 assert(!SpaceManager::is_humongous(word_size) || 2244 chunk_word_size == if_humongous_sized_chunk, 2245 "Size calculation is wrong, word_size " SIZE_FORMAT 2246 " chunk_word_size " SIZE_FORMAT, 2247 word_size, chunk_word_size); 2248 Log(gc, metaspace, alloc) log; 2249 if (log.is_debug() && SpaceManager::is_humongous(word_size)) { 2250 log.debug("Metadata humongous allocation:"); 2251 log.debug(" word_size " PTR_FORMAT, word_size); 2252 log.debug(" chunk_word_size " PTR_FORMAT, chunk_word_size); 2253 log.debug(" chunk overhead " PTR_FORMAT, Metachunk::overhead()); 2254 } 2255 return chunk_word_size; 2256 } 2257 2258 void SpaceManager::track_metaspace_memory_usage() { 2259 if (is_init_completed()) { 2260 if (is_class()) { 2261 MemoryService::track_compressed_class_memory_usage(); 2262 } 2263 MemoryService::track_metaspace_memory_usage(); 2264 } 2265 } 2266 2267 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) { 2268 assert(vs_list()->current_virtual_space() != NULL, 2269 "Should have been set"); 2270 assert(current_chunk() == NULL || 2271 current_chunk()->allocate(word_size) == NULL, 2272 "Don't need to expand"); 2273 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 2274 2275 if (log_is_enabled(Trace, gc, metaspace, freelist)) { 2276 size_t words_left = 0; 2277 size_t words_used = 0; 2278 if (current_chunk() != NULL) { 2279 words_left = current_chunk()->free_word_size(); 2280 words_used = current_chunk()->used_word_size(); 2281 } 2282 log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left", 2283 word_size, words_used, words_left); 2284 } 2285 2286 // Get another chunk 2287 size_t chunk_word_size = calc_chunk_size(word_size); 2288 Metachunk* next = get_new_chunk(chunk_word_size); 2289 2290 MetaWord* mem = NULL; 2291 2292 // If a chunk was available, add it to the in-use chunk list 2293 // and do an allocation from it. 2294 if (next != NULL) { 2295 // Add to this manager's list of chunks in use. 2296 add_chunk(next, false); 2297 mem = next->allocate(word_size); 2298 } 2299 2300 // Track metaspace memory usage statistic. 2301 track_metaspace_memory_usage(); 2302 2303 return mem; 2304 } 2305 2306 void SpaceManager::print_on(outputStream* st) const { 2307 2308 for (ChunkIndex i = ZeroIndex; 2309 i < NumberOfInUseLists ; 2310 i = next_chunk_index(i) ) { 2311 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT, 2312 p2i(chunks_in_use(i)), 2313 chunks_in_use(i) == NULL ? 
0 : chunks_in_use(i)->word_size());
2314   }
2315   st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2316                " Humongous " SIZE_FORMAT,
2317                sum_waste_in_chunks_in_use(SmallIndex),
2318                sum_waste_in_chunks_in_use(MediumIndex),
2319                sum_waste_in_chunks_in_use(HumongousIndex));
2320   // block free lists
2321   if (block_freelists() != NULL) {
2322     st->print_cr("total in block free lists " SIZE_FORMAT,
2323                  block_freelists()->total_size());
2324   }
2325 }
2326 
2327 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2328                            Mutex* lock) :
2329   _mdtype(mdtype),
2330   _allocated_blocks_words(0),
2331   _allocated_chunks_words(0),
2332   _allocated_chunks_count(0),
2333   _block_freelists(NULL),
2334   _lock(lock)
2335 {
2336   initialize();
2337 }
2338 
2339 void SpaceManager::inc_size_metrics(size_t words) {
2340   assert_lock_strong(SpaceManager::expand_lock());
2341   // Total of allocated Metachunks and allocated Metachunks count
2342   // for each SpaceManager
2343   _allocated_chunks_words = _allocated_chunks_words + words;
2344   _allocated_chunks_count++;
2345   // Global total of capacity in allocated Metachunks
2346   MetaspaceAux::inc_capacity(mdtype(), words);
2347   // Global total of allocated Metablocks.
2348   // used_words_slow() includes the overhead in each
2349   // Metachunk so include it in the used when the
2350   // Metachunk is first added (so only added once per
2351   // Metachunk).
2352   MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2353 }
2354 
2355 void SpaceManager::inc_used_metrics(size_t words) {
2356   // Add to the per SpaceManager total
2357   Atomic::add_ptr(words, &_allocated_blocks_words);
2358   // Add to the global total
2359   MetaspaceAux::inc_used(mdtype(), words);
2360 }
2361 
2362 void SpaceManager::dec_total_from_size_metrics() {
2363   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2364   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2365   // Also deduct the overhead per Metachunk
2366   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2367 }
2368 
2369 void SpaceManager::initialize() {
2370   Metadebug::init_allocation_fail_alot_count();
2371   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2372     _chunks_in_use[i] = NULL;
2373   }
2374   _current_chunk = NULL;
2375   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
2376 }
2377 
2378 SpaceManager::~SpaceManager() {
2379   // This takes this->_lock (via sum_capacity_in_chunks_in_use()), which can't be done while holding expand_lock()
2380   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2381          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2382          " allocated_chunks_words() " SIZE_FORMAT,
2383          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2384 
2385   MutexLockerEx fcl(SpaceManager::expand_lock(),
2386                     Mutex::_no_safepoint_check_flag);
2387 
2388   chunk_manager()->slow_locked_verify();
2389 
2390   dec_total_from_size_metrics();
2391 
2392   Log(gc, metaspace, freelist) log;
2393   if (log.is_trace()) {
2394     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
2395     ResourceMark rm;
2396     locked_print_chunks_in_use_on(log.trace_stream());
2397     if (block_freelists() != NULL) {
2398       block_freelists()->print_on(log.trace_stream());
2399     }
2400   }
2401 
2402   // Add all the chunks in use by this space manager
2403   // to the global list of free chunks.
2404 
2405   // Follow each list of chunks-in-use and add them to the
2406   // free lists. Each list is NULL terminated.
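// For instance, a SpaceManager holding two small chunks and one humongous
// chunk returns the small list to the ChunkManager's small freelist and the
// humongous chunk to the humongous dictionary (via return_chunk_list()),
// leaving all four chunks_in_use heads NULL afterwards.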
2407 
2408   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
2409     Metachunk* chunks = chunks_in_use(i);
2410     chunk_manager()->return_chunk_list(i, chunks);
2411     set_chunks_in_use(i, NULL);
2412   }
2413 
2414   chunk_manager()->slow_locked_verify();
2415 
2416   if (_block_freelists != NULL) {
2417     delete _block_freelists;
2418   }
2419 }
2420 
2421 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2422   assert_lock_strong(_lock);
2423   // Allocations and deallocations are in raw_word_size
2424   size_t raw_word_size = get_allocation_word_size(word_size);
2425   // Lazily create a block_freelist
2426   if (block_freelists() == NULL) {
2427     _block_freelists = new BlockFreelist();
2428   }
2429   block_freelists()->return_block(p, raw_word_size);
2430 }
2431 
2432 // Adds a chunk to the list of chunks in use.
2433 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2434 
2435   assert(new_chunk != NULL, "Should not be NULL");
2436   assert(new_chunk->next() == NULL, "Should not be on a list");
2437 
2438   new_chunk->reset_empty();
2439 
2440   // Find the correct list and set the current
2441   // chunk for that list.
2442   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
2443 
2444   if (index != HumongousIndex) {
2445     retire_current_chunk();
2446     set_current_chunk(new_chunk);
2447     new_chunk->set_next(chunks_in_use(index));
2448     set_chunks_in_use(index, new_chunk);
2449   } else {
2450     // For null class loader data and DumpSharedSpaces, the first chunk isn't
2451     // small, so small will be null. Link this first chunk as the current
2452     // chunk.
2453     if (make_current) {
2454       // Set as the current chunk but otherwise treat as a humongous chunk.
2455       set_current_chunk(new_chunk);
2456     }
2457     // Link at head. The _current_chunk only points to a humongous chunk for
2458     // the null class loader metaspace (class and data virtual space managers);
2459     // since new humongous chunks are linked at the head, it will not point
2460     // to the tail of the humongous chunks list.
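// Purely illustrative: if the humongous list currently holds H1 -> H2 and a
// new chunk H0 arrives, the two statements below produce H0 -> H1 -> H2,
// i.e. chunks_in_use(HumongousIndex) then points at H0.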
2461 new_chunk->set_next(chunks_in_use(HumongousIndex)); 2462 set_chunks_in_use(HumongousIndex, new_chunk); 2463 2464 assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency"); 2465 } 2466 2467 // Add to the running sum of capacity 2468 inc_size_metrics(new_chunk->word_size()); 2469 2470 assert(new_chunk->is_empty(), "Not ready for reuse"); 2471 Log(gc, metaspace, freelist) log; 2472 if (log.is_trace()) { 2473 log.trace("SpaceManager::add_chunk: " SIZE_FORMAT ") ", sum_count_in_chunks_in_use()); 2474 ResourceMark rm; 2475 outputStream* out = log.trace_stream(); 2476 new_chunk->print_on(out); 2477 chunk_manager()->locked_print_free_chunks(out); 2478 } 2479 } 2480 2481 void SpaceManager::retire_current_chunk() { 2482 if (current_chunk() != NULL) { 2483 size_t remaining_words = current_chunk()->free_word_size(); 2484 if (remaining_words >= BlockFreelist::min_dictionary_size()) { 2485 MetaWord* ptr = current_chunk()->allocate(remaining_words); 2486 deallocate(ptr, remaining_words); 2487 inc_used_metrics(remaining_words); 2488 } 2489 } 2490 } 2491 2492 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) { 2493 // Get a chunk from the chunk freelist 2494 Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size); 2495 2496 if (next == NULL) { 2497 next = vs_list()->get_new_chunk(chunk_word_size, 2498 medium_chunk_bunch()); 2499 } 2500 2501 Log(gc, metaspace, alloc) log; 2502 if (log.is_debug() && next != NULL && 2503 SpaceManager::is_humongous(next->word_size())) { 2504 log.debug(" new humongous chunk word size " PTR_FORMAT, next->word_size()); 2505 } 2506 2507 return next; 2508 } 2509 2510 /* 2511 * The policy is to allocate up to _small_chunk_limit small chunks 2512 * after which only medium chunks are allocated. This is done to 2513 * reduce fragmentation. In some cases, this can result in a lot 2514 * of small chunks being allocated to the point where it's not 2515 * possible to expand. If this happens, there may be no medium chunks 2516 * available and OOME would be thrown. Instead of doing that, 2517 * if the allocation request size fits in a small chunk, an attempt 2518 * will be made to allocate a small chunk. 2519 */ 2520 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) { 2521 size_t raw_word_size = get_allocation_word_size(word_size); 2522 2523 if (raw_word_size + Metachunk::overhead() > small_chunk_size()) { 2524 return NULL; 2525 } 2526 2527 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 2528 MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag); 2529 2530 Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size()); 2531 2532 MetaWord* mem = NULL; 2533 2534 if (chunk != NULL) { 2535 // Add chunk to the in-use chunk list and do an allocation from it. 2536 // Add to this manager's list of chunks in use. 2537 add_chunk(chunk, false); 2538 mem = chunk->allocate(raw_word_size); 2539 2540 inc_used_metrics(raw_word_size); 2541 2542 // Track metaspace memory usage statistic. 2543 track_metaspace_memory_usage(); 2544 } 2545 2546 return mem; 2547 } 2548 2549 MetaWord* SpaceManager::allocate(size_t word_size) { 2550 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 2551 size_t raw_word_size = get_allocation_word_size(word_size); 2552 BlockFreelist* fl = block_freelists(); 2553 MetaWord* p = NULL; 2554 // Allocation from the dictionary is expensive in the sense that 2555 // the dictionary has to be searched for a size. 
Don't allocate
2556   // from the dictionary until it starts to get fat. Is this
2557   // a reasonable policy? Maybe a skinny dictionary is fast enough
2558   // for allocations. Do some profiling. JJJ
2559   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
2560     p = fl->get_block(raw_word_size);
2561   }
2562   if (p == NULL) {
2563     p = allocate_work(raw_word_size);
2564   }
2565 
2566   return p;
2567 }
2568 
2569 // Returns the address of space allocated for "word_size".
2570 // This method does not know about blocks (Metablocks)
2571 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2572   assert_lock_strong(_lock);
2573 #ifdef ASSERT
2574   if (Metadebug::test_metadata_failure()) {
2575     return NULL;
2576   }
2577 #endif
2578   // Is there space in the current chunk?
2579   MetaWord* result = NULL;
2580 
2581   // For DumpSharedSpaces, only allocate out of the current chunk which is
2582   // never null because we gave it the size we wanted. Caller reports out
2583   // of memory if this returns null.
2584   if (DumpSharedSpaces) {
2585     assert(current_chunk() != NULL, "should never happen");
2586     inc_used_metrics(word_size);
2587     return current_chunk()->allocate(word_size); // caller handles null result
2588   }
2589 
2590   if (current_chunk() != NULL) {
2591     result = current_chunk()->allocate(word_size);
2592   }
2593 
2594   if (result == NULL) {
2595     result = grow_and_allocate(word_size);
2596   }
2597 
2598   if (result != NULL) {
2599     inc_used_metrics(word_size);
2600     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2601            "Head of the list is being allocated");
2602   }
2603 
2604   return result;
2605 }
2606 
2607 void SpaceManager::verify() {
2608   // If there are blocks in the dictionary, then
2609   // verification of chunks does not work since
2610   // being in the dictionary alters a chunk.
2611   if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
2612     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2613       Metachunk* curr = chunks_in_use(i);
2614       while (curr != NULL) {
2615         curr->verify();
2616         verify_chunk_size(curr);
2617         curr = curr->next();
2618       }
2619     }
2620   }
2621 }
2622 
2623 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2624   assert(is_humongous(chunk->word_size()) ||
2625          chunk->word_size() == medium_chunk_size() ||
2626          chunk->word_size() == small_chunk_size() ||
2627          chunk->word_size() == specialized_chunk_size(),
2628          "Chunk size is wrong");
2629   return;
2630 }
2631 
2632 #ifdef ASSERT
2633 void SpaceManager::verify_allocated_blocks_words() {
2634   // Verification is only guaranteed at a safepoint.
2635   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2636          "Verification can fail if the application is running");
2637   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2638          "allocation total is not consistent " SIZE_FORMAT
2639          " vs " SIZE_FORMAT,
2640          allocated_blocks_words(), sum_used_in_chunks_in_use());
2641 }
2642 
2643 #endif
2644 
2645 void SpaceManager::dump(outputStream* const out) const {
2646   size_t curr_total = 0;
2647   size_t waste = 0;
2648   uint i = 0;
2649   size_t used = 0;
2650   size_t capacity = 0;
2651 
2652   // Add up statistics for all chunks in this SpaceManager.
2653   for (ChunkIndex index = ZeroIndex;
2654        index < NumberOfInUseLists;
2655        index = next_chunk_index(index)) {
2656     for (Metachunk* curr = chunks_in_use(index);
2657          curr != NULL;
2658          curr = curr->next()) {
2659       out->print("%d) ", i++);
2660       curr->print_on(out);
2661       curr_total += curr->word_size();
2662       used += curr->used_word_size();
2663       capacity += curr->word_size();
2664       waste += curr->free_word_size() + curr->overhead();
2665     }
2666   }
2667 
2668   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2669     if (block_freelists() != NULL) block_freelists()->print_on(out);
2670   }
2671 
2672   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2673   // Free space isn't wasted.
2674   waste -= free;
2675 
2676   out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT
2677                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2678                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2679 }
2680 
2681 // MetaspaceAux
2682 
2683 
2684 size_t MetaspaceAux::_capacity_words[] = {0, 0};
2685 size_t MetaspaceAux::_used_words[] = {0, 0};
2686 
2687 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2688   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2689   return list == NULL ? 0 : list->free_bytes();
2690 }
2691 
2692 size_t MetaspaceAux::free_bytes() {
2693   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2694 }
2695 
2696 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2697   assert_lock_strong(SpaceManager::expand_lock());
2698   assert(words <= capacity_words(mdtype),
2699          "About to decrement below 0: words " SIZE_FORMAT
2700          " is greater than _capacity_words[%u] " SIZE_FORMAT,
2701          words, mdtype, capacity_words(mdtype));
2702   _capacity_words[mdtype] -= words;
2703 }
2704 
2705 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2706   assert_lock_strong(SpaceManager::expand_lock());
2707   // Needs to be atomic
2708   _capacity_words[mdtype] += words;
2709 }
2710 
2711 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2712   assert(words <= used_words(mdtype),
2713          "About to decrement below 0: words " SIZE_FORMAT
2714          " is greater than _used_words[%u] " SIZE_FORMAT,
2715          words, mdtype, used_words(mdtype));
2716   // For CMS deallocation of the Metaspaces occurs during the
2717   // sweep which is a concurrent phase. Protection by the expand_lock()
2718   // is not enough since allocation is on a per Metaspace basis
2719   // and protected by the Metaspace lock.
2720   jlong minus_words = (jlong) - (jlong) words;
2721   Atomic::add_ptr(minus_words, &_used_words[mdtype]);
2722 }
2723 
2724 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2725   // _used_words tracks allocations for
2726   // each piece of metadata. Those allocations are
2727   // generally done concurrently by different application
2728   // threads so must be done atomically.
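// E.g. two loader threads concurrently adding 10 words each to a count of
// 100 must end at 120; a plain read-modify-write could let both threads
// read 100 and both store 110, losing an update, which is why the atomic
// add below is used instead of "+=".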
2729 Atomic::add_ptr(words, &_used_words[mdtype]); 2730 } 2731 2732 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) { 2733 size_t used = 0; 2734 ClassLoaderDataGraphMetaspaceIterator iter; 2735 while (iter.repeat()) { 2736 Metaspace* msp = iter.get_next(); 2737 // Sum allocated_blocks_words for each metaspace 2738 if (msp != NULL) { 2739 used += msp->used_words_slow(mdtype); 2740 } 2741 } 2742 return used * BytesPerWord; 2743 } 2744 2745 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) { 2746 size_t free = 0; 2747 ClassLoaderDataGraphMetaspaceIterator iter; 2748 while (iter.repeat()) { 2749 Metaspace* msp = iter.get_next(); 2750 if (msp != NULL) { 2751 free += msp->free_words_slow(mdtype); 2752 } 2753 } 2754 return free * BytesPerWord; 2755 } 2756 2757 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) { 2758 if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) { 2759 return 0; 2760 } 2761 // Don't count the space in the freelists. That space will be 2762 // added to the capacity calculation as needed. 2763 size_t capacity = 0; 2764 ClassLoaderDataGraphMetaspaceIterator iter; 2765 while (iter.repeat()) { 2766 Metaspace* msp = iter.get_next(); 2767 if (msp != NULL) { 2768 capacity += msp->capacity_words_slow(mdtype); 2769 } 2770 } 2771 return capacity * BytesPerWord; 2772 } 2773 2774 size_t MetaspaceAux::capacity_bytes_slow() { 2775 #ifdef PRODUCT 2776 // Use capacity_bytes() in PRODUCT instead of this function. 2777 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT"); 2778 #endif 2779 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType); 2780 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType); 2781 assert(capacity_bytes() == class_capacity + non_class_capacity, 2782 "bad accounting: capacity_bytes() " SIZE_FORMAT 2783 " class_capacity + non_class_capacity " SIZE_FORMAT 2784 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT, 2785 capacity_bytes(), class_capacity + non_class_capacity, 2786 class_capacity, non_class_capacity); 2787 2788 return class_capacity + non_class_capacity; 2789 } 2790 2791 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) { 2792 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2793 return list == NULL ? 0 : list->reserved_bytes(); 2794 } 2795 2796 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) { 2797 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2798 return list == NULL ? 
0 : list->committed_bytes(); 2799 } 2800 2801 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); } 2802 2803 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) { 2804 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype); 2805 if (chunk_manager == NULL) { 2806 return 0; 2807 } 2808 chunk_manager->slow_verify(); 2809 return chunk_manager->free_chunks_total_words(); 2810 } 2811 2812 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) { 2813 return free_chunks_total_words(mdtype) * BytesPerWord; 2814 } 2815 2816 size_t MetaspaceAux::free_chunks_total_words() { 2817 return free_chunks_total_words(Metaspace::ClassType) + 2818 free_chunks_total_words(Metaspace::NonClassType); 2819 } 2820 2821 size_t MetaspaceAux::free_chunks_total_bytes() { 2822 return free_chunks_total_words() * BytesPerWord; 2823 } 2824 2825 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) { 2826 return Metaspace::get_chunk_manager(mdtype) != NULL; 2827 } 2828 2829 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) { 2830 if (!has_chunk_free_list(mdtype)) { 2831 return MetaspaceChunkFreeListSummary(); 2832 } 2833 2834 const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype); 2835 return cm->chunk_free_list_summary(); 2836 } 2837 2838 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) { 2839 log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)", 2840 prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K); 2841 } 2842 2843 void MetaspaceAux::print_on(outputStream* out) { 2844 Metaspace::MetadataType nct = Metaspace::NonClassType; 2845 2846 out->print_cr(" Metaspace " 2847 "used " SIZE_FORMAT "K, " 2848 "capacity " SIZE_FORMAT "K, " 2849 "committed " SIZE_FORMAT "K, " 2850 "reserved " SIZE_FORMAT "K", 2851 used_bytes()/K, 2852 capacity_bytes()/K, 2853 committed_bytes()/K, 2854 reserved_bytes()/K); 2855 2856 if (Metaspace::using_class_space()) { 2857 Metaspace::MetadataType ct = Metaspace::ClassType; 2858 out->print_cr(" class space " 2859 "used " SIZE_FORMAT "K, " 2860 "capacity " SIZE_FORMAT "K, " 2861 "committed " SIZE_FORMAT "K, " 2862 "reserved " SIZE_FORMAT "K", 2863 used_bytes(ct)/K, 2864 capacity_bytes(ct)/K, 2865 committed_bytes(ct)/K, 2866 reserved_bytes(ct)/K); 2867 } 2868 } 2869 2870 // Print information for class space and data space separately. 2871 // This is almost the same as above. 
2872 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) { 2873 size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype); 2874 size_t capacity_bytes = capacity_bytes_slow(mdtype); 2875 size_t used_bytes = used_bytes_slow(mdtype); 2876 size_t free_bytes = free_bytes_slow(mdtype); 2877 size_t used_and_free = used_bytes + free_bytes + 2878 free_chunks_capacity_bytes; 2879 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT 2880 "K + unused in chunks " SIZE_FORMAT "K + " 2881 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT 2882 "K capacity in allocated chunks " SIZE_FORMAT "K", 2883 used_bytes / K, 2884 free_bytes / K, 2885 free_chunks_capacity_bytes / K, 2886 used_and_free / K, 2887 capacity_bytes / K); 2888 // Accounting can only be correct if we got the values during a safepoint 2889 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong"); 2890 } 2891 2892 // Print total fragmentation for class metaspaces 2893 void MetaspaceAux::print_class_waste(outputStream* out) { 2894 assert(Metaspace::using_class_space(), "class metaspace not used"); 2895 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0; 2896 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0; 2897 ClassLoaderDataGraphMetaspaceIterator iter; 2898 while (iter.repeat()) { 2899 Metaspace* msp = iter.get_next(); 2900 if (msp != NULL) { 2901 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2902 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2903 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2904 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex); 2905 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2906 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex); 2907 cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2908 } 2909 } 2910 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2911 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2912 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2913 "large count " SIZE_FORMAT, 2914 cls_specialized_count, cls_specialized_waste, 2915 cls_small_count, cls_small_waste, 2916 cls_medium_count, cls_medium_waste, cls_humongous_count); 2917 } 2918 2919 // Print total fragmentation for data and class metaspaces separately 2920 void MetaspaceAux::print_waste(outputStream* out) { 2921 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0; 2922 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0; 2923 2924 ClassLoaderDataGraphMetaspaceIterator iter; 2925 while (iter.repeat()) { 2926 Metaspace* msp = iter.get_next(); 2927 if (msp != NULL) { 2928 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2929 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2930 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2931 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex); 2932 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2933 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex); 2934 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2935 } 2936 } 2937 out->print_cr("Total fragmentation waste (words) doesn't count 
free space"); 2938 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2939 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2940 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2941 "large count " SIZE_FORMAT, 2942 specialized_count, specialized_waste, small_count, 2943 small_waste, medium_count, medium_waste, humongous_count); 2944 if (Metaspace::using_class_space()) { 2945 print_class_waste(out); 2946 } 2947 } 2948 2949 // Dump global metaspace things from the end of ClassLoaderDataGraph 2950 void MetaspaceAux::dump(outputStream* out) { 2951 out->print_cr("All Metaspace:"); 2952 out->print("data space: "); print_on(out, Metaspace::NonClassType); 2953 out->print("class space: "); print_on(out, Metaspace::ClassType); 2954 print_waste(out); 2955 } 2956 2957 void MetaspaceAux::verify_free_chunks() { 2958 Metaspace::chunk_manager_metadata()->verify(); 2959 if (Metaspace::using_class_space()) { 2960 Metaspace::chunk_manager_class()->verify(); 2961 } 2962 } 2963 2964 void MetaspaceAux::verify_capacity() { 2965 #ifdef ASSERT 2966 size_t running_sum_capacity_bytes = capacity_bytes(); 2967 // For purposes of the running sum of capacity, verify against capacity 2968 size_t capacity_in_use_bytes = capacity_bytes_slow(); 2969 assert(running_sum_capacity_bytes == capacity_in_use_bytes, 2970 "capacity_words() * BytesPerWord " SIZE_FORMAT 2971 " capacity_bytes_slow()" SIZE_FORMAT, 2972 running_sum_capacity_bytes, capacity_in_use_bytes); 2973 for (Metaspace::MetadataType i = Metaspace::ClassType; 2974 i < Metaspace:: MetadataTypeCount; 2975 i = (Metaspace::MetadataType)(i + 1)) { 2976 size_t capacity_in_use_bytes = capacity_bytes_slow(i); 2977 assert(capacity_bytes(i) == capacity_in_use_bytes, 2978 "capacity_bytes(%u) " SIZE_FORMAT 2979 " capacity_bytes_slow(%u)" SIZE_FORMAT, 2980 i, capacity_bytes(i), i, capacity_in_use_bytes); 2981 } 2982 #endif 2983 } 2984 2985 void MetaspaceAux::verify_used() { 2986 #ifdef ASSERT 2987 size_t running_sum_used_bytes = used_bytes(); 2988 // For purposes of the running sum of used, verify against used 2989 size_t used_in_use_bytes = used_bytes_slow(); 2990 assert(used_bytes() == used_in_use_bytes, 2991 "used_bytes() " SIZE_FORMAT 2992 " used_bytes_slow()" SIZE_FORMAT, 2993 used_bytes(), used_in_use_bytes); 2994 for (Metaspace::MetadataType i = Metaspace::ClassType; 2995 i < Metaspace:: MetadataTypeCount; 2996 i = (Metaspace::MetadataType)(i + 1)) { 2997 size_t used_in_use_bytes = used_bytes_slow(i); 2998 assert(used_bytes(i) == used_in_use_bytes, 2999 "used_bytes(%u) " SIZE_FORMAT 3000 " used_bytes_slow(%u)" SIZE_FORMAT, 3001 i, used_bytes(i), i, used_in_use_bytes); 3002 } 3003 #endif 3004 } 3005 3006 void MetaspaceAux::verify_metrics() { 3007 verify_capacity(); 3008 verify_used(); 3009 } 3010 3011 3012 // Metaspace methods 3013 3014 size_t Metaspace::_first_chunk_word_size = 0; 3015 size_t Metaspace::_first_class_chunk_word_size = 0; 3016 3017 size_t Metaspace::_commit_alignment = 0; 3018 size_t Metaspace::_reserve_alignment = 0; 3019 3020 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) { 3021 initialize(lock, type); 3022 } 3023 3024 Metaspace::~Metaspace() { 3025 delete _vsm; 3026 if (using_class_space()) { 3027 delete _class_vsm; 3028 } 3029 } 3030 3031 VirtualSpaceList* Metaspace::_space_list = NULL; 3032 VirtualSpaceList* Metaspace::_class_space_list = NULL; 3033 3034 ChunkManager* Metaspace::_chunk_manager_metadata = NULL; 3035 ChunkManager* Metaspace::_chunk_manager_class = NULL; 3036 3037 #define VIRTUALSPACEMULTIPLIER 2 3038 3039 #ifdef _LP64 3040 
static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1); 3041 3042 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) { 3043 // Figure out the narrow_klass_base and the narrow_klass_shift. The 3044 // narrow_klass_base is the lower of the metaspace base and the cds base 3045 // (if cds is enabled). The narrow_klass_shift depends on the distance 3046 // between the lower base and higher address. 3047 address lower_base; 3048 address higher_address; 3049 #if INCLUDE_CDS 3050 if (UseSharedSpaces) { 3051 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 3052 (address)(metaspace_base + compressed_class_space_size())); 3053 lower_base = MIN2(metaspace_base, cds_base); 3054 } else 3055 #endif 3056 { 3057 higher_address = metaspace_base + compressed_class_space_size(); 3058 lower_base = metaspace_base; 3059 3060 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes; 3061 // If compressed class space fits in lower 32G, we don't need a base. 3062 if (higher_address <= (address)klass_encoding_max) { 3063 lower_base = 0; // Effectively lower base is zero. 3064 } 3065 } 3066 3067 Universe::set_narrow_klass_base(lower_base); 3068 3069 if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) { 3070 Universe::set_narrow_klass_shift(0); 3071 } else { 3072 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces"); 3073 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes); 3074 } 3075 AOTLoader::set_narrow_klass_shift(); 3076 } 3077 3078 #if INCLUDE_CDS 3079 // Return TRUE if the specified metaspace_base and cds_base are close enough 3080 // to work with compressed klass pointers. 3081 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) { 3082 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS"); 3083 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 3084 address lower_base = MIN2((address)metaspace_base, cds_base); 3085 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 3086 (address)(metaspace_base + compressed_class_space_size())); 3087 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax); 3088 } 3089 #endif 3090 3091 // Try to allocate the metaspace at the requested addr. 3092 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) { 3093 assert(using_class_space(), "called improperly"); 3094 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 3095 assert(compressed_class_space_size() < KlassEncodingMetaspaceMax, 3096 "Metaspace size is too big"); 3097 assert_is_aligned(requested_addr, _reserve_alignment); 3098 assert_is_aligned(cds_base, _reserve_alignment); 3099 assert_is_aligned(compressed_class_space_size(), _reserve_alignment); 3100 3101 // Don't use large pages for the class space. 3102 bool large_pages = false; 3103 3104 #if !(defined(AARCH64) || defined(AIX)) 3105 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(), 3106 _reserve_alignment, 3107 large_pages, 3108 requested_addr); 3109 #else // AARCH64 3110 ReservedSpace metaspace_rs; 3111 3112 // Our compressed klass pointers may fit nicely into the lower 32 3113 // bits. 3114 if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) { 3115 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3116 _reserve_alignment, 3117 large_pages, 3118 requested_addr); 3119 } 3120 3121 if (! 
metaspace_rs.is_reserved()) { 3122 // Aarch64: Try to align metaspace so that we can decode a compressed 3123 // klass with a single MOVK instruction. We can do this iff the 3124 // compressed class base is a multiple of 4G. 3125 // Aix: Search for a place where we can find memory. If we need to load 3126 // the base, 4G alignment is helpful, too. 3127 size_t increment = AARCH64_ONLY(4*)G; 3128 for (char *a = align_up(requested_addr, increment); 3129 a < (char*)(1024*G); 3130 a += increment) { 3131 if (a == (char *)(32*G)) { 3132 // Go faster from here on. Zero-based is no longer possible. 3133 increment = 4*G; 3134 } 3135 3136 #if INCLUDE_CDS 3137 if (UseSharedSpaces 3138 && ! can_use_cds_with_metaspace_addr(a, cds_base)) { 3139 // We failed to find an aligned base that will reach. Fall 3140 // back to using our requested addr. 3141 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3142 _reserve_alignment, 3143 large_pages, 3144 requested_addr); 3145 break; 3146 } 3147 #endif 3148 3149 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3150 _reserve_alignment, 3151 large_pages, 3152 a); 3153 if (metaspace_rs.is_reserved()) 3154 break; 3155 } 3156 } 3157 3158 #endif // AARCH64 3159 3160 if (!metaspace_rs.is_reserved()) { 3161 #if INCLUDE_CDS 3162 if (UseSharedSpaces) { 3163 size_t increment = align_up(1*G, _reserve_alignment); 3164 3165 // Keep trying to allocate the metaspace, increasing the requested_addr 3166 // by 1GB each time, until we reach an address that will no longer allow 3167 // use of CDS with compressed klass pointers. 3168 char *addr = requested_addr; 3169 while (!metaspace_rs.is_reserved() && (addr + increment > addr) && 3170 can_use_cds_with_metaspace_addr(addr + increment, cds_base)) { 3171 addr = addr + increment; 3172 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3173 _reserve_alignment, large_pages, addr); 3174 } 3175 } 3176 #endif 3177 // If no successful allocation then try to allocate the space anywhere. If 3178 // that fails then OOM doom. At this point we cannot try allocating the 3179 // metaspace as if UseCompressedClassPointers is off because too much 3180 // initialization has happened that depends on UseCompressedClassPointers. 3181 // So, UseCompressedClassPointers cannot be turned off at this point. 3182 if (!metaspace_rs.is_reserved()) { 3183 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3184 _reserve_alignment, large_pages); 3185 if (!metaspace_rs.is_reserved()) { 3186 vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes", 3187 compressed_class_space_size())); 3188 } 3189 } 3190 } 3191 3192 // If we got here then the metaspace got allocated. 3193 MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass); 3194 3195 #if INCLUDE_CDS 3196 // Verify that we can use shared spaces. Otherwise, turn off CDS. 3197 if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) { 3198 FileMapInfo::stop_sharing_and_unmap( 3199 "Could not allocate metaspace at a compatible address"); 3200 } 3201 #endif 3202 set_narrow_klass_base_and_shift((address)metaspace_rs.base(), 3203 UseSharedSpaces ? 
void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
  st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
               p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
  if (_class_space_list != NULL) {
    address base = (address)_class_space_list->current_virtual_space()->bottom();
    st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
              compressed_class_space_size(), p2i(base));
    if (requested_addr != 0) {
      st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
    }
    st->cr();
  }
}

// For UseCompressedClassPointers the class space is reserved above the top of
// the Java heap. The argument passed in is at the base of the compressed space.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, esp with UseLargePages.
  assert(rs.size() >= CompressedClassSpaceSize,
         SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
  assert(using_class_space(), "Must be using class space");
  _class_space_list = new VirtualSpaceList(rs);
  _chunk_manager_class = new ChunkManager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);

  if (!_class_space_list->initialization_succeeded()) {
    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
  }
}

#endif

void Metaspace::ergo_initialize() {
  if (DumpSharedSpaces) {
    // Using large pages when dumping the shared archive is currently not implemented.
    FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
  }

  size_t page_size = os::vm_page_size();
  if (UseLargePages && UseLargePagesInMetaspace) {
    page_size = os::large_page_size();
  }

  _commit_alignment  = page_size;
  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());

  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this would
  // clobber the record of whether MaxMetaspaceSize was set on the command
  // line or not. This information is needed later to conform to the
  // specification of the java.lang.management.MemoryUsage API.
  //
  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
  // globals.hpp to the aligned value, but this is not possible, since the
  // alignment depends on other flags being parsed.
  MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);

  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);

  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");

  MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
  MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);

  CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
  set_compressed_class_space_size(CompressedClassSpaceSize);
}

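// Illustrative note (not part of the VM): align_down_bounded(size, alignment)
// rounds size down to a multiple of alignment, but never below the alignment
// itself. For example, with a 64K reserve alignment, a MaxMetaspaceSize of
// 100000K would become 99968K (the largest 64K multiple that fits), while a
// pathological 1K request would be bumped up to 64K rather than down to zero.
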
void Metaspace::global_initialize() {
  MetaspaceGC::initialize();

  // Initialize the alignment for shared spaces.
  int max_alignment = os::vm_allocation_granularity();
  size_t cds_total = 0;

  MetaspaceShared::set_max_alignment(max_alignment);

  if (DumpSharedSpaces) {
#if INCLUDE_CDS
    MetaspaceShared::estimate_regions_size();

    SharedReadOnlySize  = align_up(SharedReadOnlySize,  max_alignment);
    SharedReadWriteSize = align_up(SharedReadWriteSize, max_alignment);
    SharedMiscDataSize  = align_up(SharedMiscDataSize,  max_alignment);
    SharedMiscCodeSize  = align_up(SharedMiscCodeSize,  max_alignment);

    // Initialize with the sum of the shared space sizes. The read-only
    // and read-write metaspace chunks will be allocated out of this and the
    // remainder is the misc code and data chunks.
    cds_total = FileMapInfo::shared_spaces_size();
    cds_total = align_up(cds_total, _reserve_alignment);
    _space_list = new VirtualSpaceList(cds_total/wordSize);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

    if (!_space_list->initialization_succeeded()) {
      vm_exit_during_initialization("Unable to dump shared archive.", NULL);
    }

#ifdef _LP64
    if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
      vm_exit_during_initialization("Unable to dump shared archive.",
          err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
                  SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
                  "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(),
                  cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
    }

    // Set the compressed klass pointer base so that decoding of these pointers works
    // properly when creating the shared archive.
    assert(UseCompressedOops && UseCompressedClassPointers,
           "UseCompressedOops and UseCompressedClassPointers must be set");
    Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
    log_develop_trace(gc, metaspace)("Setting narrow_klass_base to Address: " PTR_FORMAT,
                                     p2i(_space_list->current_virtual_space()->bottom()));

    Universe::set_narrow_klass_shift(0);
#endif // _LP64
#endif // INCLUDE_CDS
  } else {
#if INCLUDE_CDS
    if (UseSharedSpaces) {
      // If using shared space, open the file that contains the shared space
      // and map in the memory before initializing the rest of metaspace (so
      // the addresses don't conflict).
      address cds_address = NULL;
      FileMapInfo* mapinfo = new FileMapInfo();

      // Open the shared archive file, read and validate the header. If
      // initialization fails, shared spaces [UseSharedSpaces] are
      // disabled and the file is closed.
      // Map in the spaces now, too.
      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
        cds_total = FileMapInfo::shared_spaces_size();
        cds_address = (address)mapinfo->header()->region_addr(0);
#ifdef _LP64
        if (using_class_space()) {
          char* cds_end = (char*)(cds_address + cds_total);
          cds_end = align_up(cds_end, _reserve_alignment);
          // If UseCompressedClassPointers is set then allocate the metaspace area
          // above the heap and above the CDS area (if it exists).
          allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
          // Map the shared string space after compressed pointers
          // because it relies on the compressed class pointer setup to work.
          mapinfo->map_string_regions();
        }
#endif // _LP64
      } else {
        assert(!mapinfo->is_open() && !UseSharedSpaces,
               "archive file not closed or shared spaces not disabled.");
      }
    }
#endif // INCLUDE_CDS

#ifdef _LP64
    if (!UseSharedSpaces && using_class_space()) {
      char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
      allocate_metaspace_compressed_klass_ptrs(base, 0);
    }
#endif // _LP64

    // Initialize these before initializing the VirtualSpaceList.
    _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
    _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
    // Make the first class chunk bigger than a medium chunk so it's not put
    // on the medium chunk list. The next chunk will be small and progress
    // from there. This size was calibrated by running -version.
    _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
                                        (CompressedClassSpaceSize/BytesPerWord)*2);
    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
    // Arbitrarily set the initial virtual space to a multiple
    // of the boot class loader size.
    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
    word_size = align_up(word_size, Metaspace::reserve_alignment_words());

    // Initialize the list of virtual spaces.
    _space_list = new VirtualSpaceList(word_size);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

    if (!_space_list->initialization_succeeded()) {
      vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
    }
  }

  _tracer = new MetaspaceTracer();
}

void Metaspace::post_initialize() {
  MetaspaceGC::post_initialize();
}

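// Illustrative address-space layout (not part of the VM), for a 64-bit run
// with both CDS and compressed class pointers enabled:
//
//   low addresses                                            high addresses
//   [ Java heap ][ CDS archive (mapped file) ][ compressed class space ]
//                ^ cds_address                ^ cds_end (reserve-aligned)
//
// narrow_klass_base is the lower of the CDS base and the class space base,
// so Klass* values in both regions stay encodable with one base/shift pair.
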
void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) {
  Metachunk* chunk = get_initialization_chunk(type, mdtype);
  if (chunk != NULL) {
    // Add to this manager's list of chunks in use and current_chunk().
    get_space_manager(mdtype)->add_chunk(chunk, true);
  }
}

Metachunk* Metaspace::get_initialization_chunk(MetaspaceType type, MetadataType mdtype) {
  size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);

  // Get a chunk from the chunk freelist.
  Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);

  if (chunk == NULL) {
    chunk = get_space_list(mdtype)->get_new_chunk(chunk_word_size,
                                                  get_space_manager(mdtype)->medium_chunk_bunch());
  }

  // For dumping shared archive, report error if allocation has failed.
  if (DumpSharedSpaces && chunk == NULL) {
    report_insufficient_metaspace(MetaspaceAux::committed_bytes() + chunk_word_size * BytesPerWord);
  }

  return chunk;
}

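// Illustrative note (not part of the VM): chunk acquisition is a two-level
// fallback. The global ChunkManager freelist is consulted first, which is a
// cheap reuse of a previously returned chunk of the right size class; only if
// that comes up empty is the VirtualSpaceList asked to carve a brand-new
// chunk out of reserved virtual memory. Churny class loading and unloading
// can therefore recycle chunks instead of repeatedly growing the mapping.
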
void Metaspace::verify_global_initialization() {
  assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
  assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");

  if (using_class_space()) {
    assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
    assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
  }
}

void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
  verify_global_initialization();

  // Allocate SpaceManager for metadata objects.
  _vsm = new SpaceManager(NonClassType, lock);

  if (using_class_space()) {
    // Allocate SpaceManager for classes.
    _class_vsm = new SpaceManager(ClassType, lock);
  }

  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);

  // Allocate chunk for metadata objects.
  initialize_first_chunk(type, NonClassType);

  // Allocate chunk for class metadata objects.
  if (using_class_space()) {
    initialize_first_chunk(type, ClassType);
  }

  _alloc_record_head = NULL;
  _alloc_record_tail = NULL;
}

size_t Metaspace::align_word_size_up(size_t word_size) {
  size_t byte_size = word_size * wordSize;
  return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
}

MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
  // DumpSharedSpaces doesn't use the class metadata area (yet).
  // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
  if (is_class_space_allocation(mdtype)) {
    return class_vsm()->allocate(word_size);
  } else {
    return vsm()->allocate(word_size);
  }
}

MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
  assert(delta_bytes > 0, "Must be");

  size_t before = 0;
  size_t after  = 0;
  MetaWord* res;
  bool incremented;

  // Each thread increments the HWM at most once. Even if the thread fails to increment
  // the HWM, an allocation is still attempted. This is because another thread must then
  // have incremented the HWM and therefore the allocation might still succeed.
  do {
    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
    res = allocate(word_size, mdtype);
  } while (!incremented && res == NULL);

  if (incremented) {
    tracer()->report_gc_threshold(before, after,
                                  MetaspaceGCThresholdUpdater::ExpandAndAllocate);
    log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
  }

  return res;
}

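// Illustrative race walkthrough (not part of the VM): suppose threads T1 and
// T2 both fail their first allocate() and enter the loop above. T1 wins the
// increment in inc_capacity_until_GC() (incremented == true), raises the HWM,
// and exits after one iteration with whatever allocate() returned. T2 loses
// the increment (incremented == false) but retries allocate(), which can now
// succeed under the capacity T1 just added; T2 only keeps looping while both
// the increment and the allocation keep failing.
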
// Space allocated in the Metaspace. This may
// be across several metadata virtual spaces.
char* Metaspace::bottom() const {
  assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
  return (char*)vsm()->current_chunk()->bottom();
}

size_t Metaspace::used_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
  }
}

size_t Metaspace::free_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_free_in_chunks_in_use();
  }
}

// Space capacity in the Metaspace. It includes
// space in the list of chunks from which allocations
// have been made. Space in the global freelist is not
// included, nor is the space available in the dictionary,
// since that is already counted in some chunk.
size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_capacity_in_chunks_in_use();
  }
}

size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
  return used_words_slow(mdtype) * BytesPerWord;
}

size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
  return capacity_words_slow(mdtype) * BytesPerWord;
}

size_t Metaspace::allocated_blocks_bytes() const {
  return vsm()->allocated_blocks_bytes() +
         (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
}

size_t Metaspace::allocated_chunks_bytes() const {
  return vsm()->allocated_chunks_bytes() +
         (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
}

void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  assert(!SafepointSynchronize::is_at_safepoint()
         || Thread::current()->is_VM_thread(), "should be the VM thread");

  if (DumpSharedSpaces && log_is_enabled(Info, cds)) {
    record_deallocation(ptr, vsm()->get_allocation_word_size(word_size));
  }

  MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);

  if (is_class && using_class_space()) {
    class_vsm()->deallocate(ptr, word_size);
  } else {
    vsm()->deallocate(ptr, word_size);
  }
}

" 3589 "ClassLoaderData::the_null_class_loader_data() should have been used."); 3590 3591 // Allocate in metaspaces without taking out a lock, because it deadlocks 3592 // with the SymbolTable_lock. Dumping is single threaded for now. We'll have 3593 // to revisit this for application class data sharing. 3594 if (DumpSharedSpaces) { 3595 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity"); 3596 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace(); 3597 MetaWord* result = space->allocate(word_size, NonClassType); 3598 if (result == NULL) { 3599 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite); 3600 } 3601 if (log_is_enabled(Info, cds)) { 3602 space->record_allocation(result, type, space->vsm()->get_allocation_word_size(word_size)); 3603 } 3604 3605 // Zero initialize. 3606 Copy::fill_to_words((HeapWord*)result, word_size, 0); 3607 3608 return result; 3609 } 3610 3611 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType; 3612 3613 // Try to allocate metadata. 3614 MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); 3615 3616 if (result == NULL) { 3617 tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype); 3618 3619 // Allocation failed. 3620 if (is_init_completed()) { 3621 // Only start a GC if the bootstrapping has completed. 3622 3623 // Try to clean out some memory and retry. 3624 result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation( 3625 loader_data, word_size, mdtype); 3626 } 3627 } 3628 3629 if (result == NULL) { 3630 SpaceManager* sm; 3631 if (is_class_space_allocation(mdtype)) { 3632 sm = loader_data->metaspace_non_null()->class_vsm(); 3633 } else { 3634 sm = loader_data->metaspace_non_null()->vsm(); 3635 } 3636 3637 result = sm->get_small_chunk_and_allocate(word_size); 3638 3639 if (result == NULL) { 3640 report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL); 3641 } 3642 } 3643 3644 // Zero initialize. 3645 Copy::fill_to_words((HeapWord*)result, word_size, 0); 3646 3647 return result; 3648 } 3649 3650 size_t Metaspace::class_chunk_size(size_t word_size) { 3651 assert(using_class_space(), "Has to use class space"); 3652 return class_vsm()->calc_chunk_size(word_size); 3653 } 3654 3655 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) { 3656 tracer()->report_metadata_oom(loader_data, word_size, type, mdtype); 3657 3658 // If result is still null, we are out of memory. 3659 Log(gc, metaspace, freelist) log; 3660 if (log.is_info()) { 3661 log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT, 3662 is_class_space_allocation(mdtype) ? "class" : "data", word_size); 3663 ResourceMark rm; 3664 outputStream* out = log.info_stream(); 3665 if (loader_data->metaspace_or_null() != NULL) { 3666 loader_data->dump(out); 3667 } 3668 MetaspaceAux::dump(out); 3669 } 3670 3671 bool out_of_compressed_class_space = false; 3672 if (is_class_space_allocation(mdtype)) { 3673 Metaspace* metaspace = loader_data->metaspace_non_null(); 3674 out_of_compressed_class_space = 3675 MetaspaceAux::committed_bytes(Metaspace::ClassType) + 3676 (metaspace->class_chunk_size(word_size) * BytesPerWord) > 3677 CompressedClassSpaceSize; 3678 } 3679 3680 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support 3681 const char* space_string = out_of_compressed_class_space ? 
3682 "Compressed class space" : "Metaspace"; 3683 3684 report_java_out_of_memory(space_string); 3685 3686 if (JvmtiExport::should_post_resource_exhausted()) { 3687 JvmtiExport::post_resource_exhausted( 3688 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, 3689 space_string); 3690 } 3691 3692 if (!is_init_completed()) { 3693 vm_exit_during_initialization("OutOfMemoryError", space_string); 3694 } 3695 3696 if (out_of_compressed_class_space) { 3697 THROW_OOP(Universe::out_of_memory_error_class_metaspace()); 3698 } else { 3699 THROW_OOP(Universe::out_of_memory_error_metaspace()); 3700 } 3701 } 3702 3703 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) { 3704 switch (mdtype) { 3705 case Metaspace::ClassType: return "Class"; 3706 case Metaspace::NonClassType: return "Metadata"; 3707 default: 3708 assert(false, "Got bad mdtype: %d", (int) mdtype); 3709 return NULL; 3710 } 3711 } 3712 3713 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) { 3714 assert(DumpSharedSpaces, "sanity"); 3715 3716 int byte_size = (int)word_size * wordSize; 3717 AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size); 3718 3719 if (_alloc_record_head == NULL) { 3720 _alloc_record_head = _alloc_record_tail = rec; 3721 } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) { 3722 _alloc_record_tail->_next = rec; 3723 _alloc_record_tail = rec; 3724 } else { 3725 // slow linear search, but this doesn't happen that often, and only when dumping 3726 for (AllocRecord *old = _alloc_record_head; old; old = old->_next) { 3727 if (old->_ptr == ptr) { 3728 assert(old->_type == MetaspaceObj::DeallocatedType, "sanity"); 3729 int remain_bytes = old->_byte_size - byte_size; 3730 assert(remain_bytes >= 0, "sanity"); 3731 old->_type = type; 3732 3733 if (remain_bytes == 0) { 3734 delete(rec); 3735 } else { 3736 address remain_ptr = address(ptr) + byte_size; 3737 rec->_ptr = remain_ptr; 3738 rec->_byte_size = remain_bytes; 3739 rec->_type = MetaspaceObj::DeallocatedType; 3740 rec->_next = old->_next; 3741 old->_byte_size = byte_size; 3742 old->_next = rec; 3743 } 3744 return; 3745 } 3746 } 3747 assert(0, "reallocating a freed pointer that was not recorded"); 3748 } 3749 } 3750 3751 void Metaspace::record_deallocation(void* ptr, size_t word_size) { 3752 assert(DumpSharedSpaces, "sanity"); 3753 3754 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) { 3755 if (rec->_ptr == ptr) { 3756 assert(rec->_byte_size == (int)word_size * wordSize, "sanity"); 3757 rec->_type = MetaspaceObj::DeallocatedType; 3758 return; 3759 } 3760 } 3761 3762 assert(0, "deallocating a pointer that was not recorded"); 3763 } 3764 3765 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) { 3766 assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces"); 3767 3768 address last_addr = (address)bottom(); 3769 3770 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) { 3771 address ptr = rec->_ptr; 3772 if (last_addr < ptr) { 3773 closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr); 3774 } 3775 closure->doit(ptr, rec->_type, rec->_byte_size); 3776 last_addr = ptr + rec->_byte_size; 3777 } 3778 3779 address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType); 3780 if (last_addr < top) { 3781 closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr); 3782 } 3783 } 3784 3785 void Metaspace::purge(MetadataType mdtype) { 3786 get_space_list(mdtype)->purge(get_chunk_manager(mdtype)); 3787 } 3788 3789 
void Metaspace::purge(MetadataType mdtype) {
  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
}

void Metaspace::purge() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  purge(NonClassType);
  if (using_class_space()) {
    purge(ClassType);
  }
}

void Metaspace::print_on(outputStream* out) const {
  // Print both class virtual space counts and metaspace.
  if (Verbose) {
    vsm()->print_on(out);
    if (using_class_space()) {
      class_vsm()->print_on(out);
    }
  }
}

bool Metaspace::contains(const void* ptr) {
  if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
    return true;
  }
  return contains_non_shared(ptr);
}

bool Metaspace::contains_non_shared(const void* ptr) {
  if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
    return true;
  }

  return get_space_list(NonClassType)->contains(ptr);
}

void Metaspace::verify() {
  vsm()->verify();
  if (using_class_space()) {
    class_vsm()->verify();
  }
}

void Metaspace::dump(outputStream* const out) const {
  out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
  vsm()->dump(out);
  if (using_class_space()) {
    out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
    class_vsm()->dump(out);
  }
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

class TestMetaspaceAuxTest : AllStatic {
 public:
  static void test_reserved() {
    size_t reserved = MetaspaceAux::reserved_bytes();

    assert(reserved > 0, "assert");

    size_t committed = MetaspaceAux::committed_bytes();
    assert(committed <= reserved, "assert");

    size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
    assert(reserved_metadata > 0, "assert");
    assert(reserved_metadata <= reserved, "assert");

    if (UseCompressedClassPointers) {
      size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
      assert(reserved_class > 0, "assert");
      assert(reserved_class < reserved, "assert");
    }
  }

  static void test_committed() {
    size_t committed = MetaspaceAux::committed_bytes();

    assert(committed > 0, "assert");

    size_t reserved = MetaspaceAux::reserved_bytes();
    assert(committed <= reserved, "assert");

    size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
    assert(committed_metadata > 0, "assert");
    assert(committed_metadata <= committed, "assert");

    if (UseCompressedClassPointers) {
      size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType);
      assert(committed_class > 0, "assert");
      assert(committed_class < committed, "assert");
    }
  }

  static void test_virtual_space_list_large_chunk() {
    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    // Use a size larger than VirtualSpaceSize (256k) and add one page so the
    // request is _not_ vm_allocation_granularity aligned on Windows.
    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
    large_size += (os::vm_page_size()/BytesPerWord);
    vs_list->get_new_chunk(large_size, 0);
  }

  static void test() {
    test_reserved();
    test_committed();
    test_virtual_space_list_large_chunk();
  }
};

void TestMetaspaceAux_test() {
  TestMetaspaceAuxTest::test();
}

class TestVirtualSpaceNodeTest {
  static void chunk_up(size_t words_left, size_t& num_medium_chunks,
                       size_t& num_small_chunks,
                       size_t& num_specialized_chunks) {
    num_medium_chunks = words_left / MediumChunk;
    words_left = words_left % MediumChunk;

    num_small_chunks = words_left / SmallChunk;
    words_left = words_left % SmallChunk;
    // How many specialized chunks can we get?
    num_specialized_chunks = words_left / SpecializedChunk;
    assert(words_left % SpecializedChunk == 0, "should be nothing left");
  }

 public:
  static void test() {
    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    const size_t vsn_test_size_words = MediumChunk * 4;
    const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;

    // The chunk sizes must be multiples of each other, or this will fail.
    STATIC_ASSERT(MediumChunk % SmallChunk == 0);
    STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);

    { // No committed memory in VSN
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.retire(&cm);
      assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
    }

    { // All of VSN is committed, half is used by chunks
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
      vsn.get_chunk_vs(MediumChunk);
      vsn.get_chunk_vs(MediumChunk);
      vsn.retire(&cm);
      assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
      assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
    }

    const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
    // This doesn't work for systems with vm_page_size >= 16K.
    if (page_chunks < MediumChunk) {
      // 4 pages of VSN is committed, some is used by chunks
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);

      vsn.initialize();
      vsn.expand_by(page_chunks, page_chunks);
      vsn.get_chunk_vs(SmallChunk);
      vsn.get_chunk_vs(SpecializedChunk);
      vsn.retire(&cm);

      // committed - used = words left to retire
      const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;

      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);

      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
    }

    { // Half of VSN is committed, a humongous chunk is used
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
      vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
      vsn.retire(&cm);

      const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);

      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
    }

  }

#define assert_is_available_positive(word_size) \
  assert(vsn.is_available(word_size), \
         #word_size ": " PTR_FORMAT " bytes were not available in " \
         "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
         (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));

#define assert_is_available_negative(word_size) \
  assert(!vsn.is_available(word_size), \
         #word_size ": " PTR_FORMAT " bytes should not be available in " \
         "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
         (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));

  static void test_is_available_positive() {
    // Reserve some memory.
    VirtualSpaceNode vsn(os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Check that is_available accepts the committed size.
    assert_is_available_positive(commit_word_size);

    // Check that is_available accepts half the committed size.
    size_t expand_word_size = commit_word_size / 2;
    assert_is_available_positive(expand_word_size);
  }

  static void test_is_available_negative() {
    // Reserve some memory.
    VirtualSpaceNode vsn(os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Check that is_available doesn't accept a too large size.
    size_t two_times_commit_word_size = commit_word_size * 2;
    assert_is_available_negative(two_times_commit_word_size);
  }

  static void test_is_available_overflow() {
    // Reserve some memory.
    VirtualSpaceNode vsn(os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Calculate a size that will overflow the virtual space size.
    void* virtual_space_max = (void*)(uintptr_t)-1;
    size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
    size_t overflow_size = bottom_to_max + BytesPerWord;
    size_t overflow_word_size = overflow_size / BytesPerWord;

    // Check that is_available can handle the overflow.
    assert_is_available_negative(overflow_word_size);
  }

  static void test_is_available() {
    TestVirtualSpaceNodeTest::test_is_available_positive();
    TestVirtualSpaceNodeTest::test_is_available_negative();
    TestVirtualSpaceNodeTest::test_is_available_overflow();
  }
};

void TestVirtualSpaceNode_test() {
  TestVirtualSpaceNodeTest::test();
  TestVirtualSpaceNodeTest::test_is_available();
}

// The following test is placed here instead of a gtest / unittest file
// because the ChunkManager class is only available in this file.
void ChunkManager_test_list_index() {
  ChunkManager manager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);

  // Test a previous bug where a query for a humongous class metachunk
  // incorrectly matched the non-class medium metachunk size.
  {
    assert(MediumChunk > ClassMediumChunk, "Precondition for test");

    ChunkIndex index = manager.list_index(MediumChunk);

    assert(index == HumongousIndex,
           "Requested size is larger than ClassMediumChunk,"
           " so should return HumongousIndex. Got index: %d", (int)index);
  }

  // Check the specified sizes as well.
  {
    ChunkIndex index = manager.list_index(ClassSpecializedChunk);
    assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
  }
  {
    ChunkIndex index = manager.list_index(ClassSmallChunk);
    assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
  }
  {
    ChunkIndex index = manager.list_index(ClassMediumChunk);
    assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
  }
  {
    ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
    assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index);
  }
}

#endif // !PRODUCT

#ifdef ASSERT

// ChunkManagerReturnTest stresses taking/returning chunks from the ChunkManager. It takes and
// returns chunks from/to the ChunkManager while keeping track of the expected ChunkManager
// content.
class ChunkManagerReturnTestImpl {

  VirtualSpaceNode _vsn;
  ChunkManager _cm;

  // The expected content of the chunk manager.
  unsigned _chunks_in_chunkmanager;
  size_t _words_in_chunkmanager;

  // A fixed size pool of chunks. Chunks may be in the chunk manager (free) or not (in use).
  static const int num_chunks = 256;
  Metachunk* _pool[num_chunks];

  // Helper, return a random position into the chunk pool.
  static int get_random_position() {
    return os::random() % num_chunks;
  }

  // Asserts that ChunkManager counters match expectations.
  void assert_counters() {
    assert(_vsn.container_count() == num_chunks - _chunks_in_chunkmanager, "vsn counter mismatch.");
    assert(_cm.free_chunks_count() == _chunks_in_chunkmanager, "cm counter mismatch.");
    assert(_cm.free_chunks_total_words() == _words_in_chunkmanager, "cm counter mismatch.");
  }

  // Get a random chunk size. Equal chance to get spec/med/small chunk size or
  // a humongous chunk size. The latter itself is random in the range of [med+spec..4*med).
  size_t get_random_chunk_size() {
    const size_t sizes[] = { SpecializedChunk, SmallChunk, MediumChunk };
    const int rand = os::random() % 4;
    if (rand < 3) {
      return sizes[rand];
    } else {
      // Note: this affects the max. size of space (see _vsn initialization in ctor).
      return align_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk);
    }
  }

  // Starting at pool index <start>+1, find the next chunk tagged as either free or in use, depending
  // on <is_free>. Search wraps. Returns its position, or -1 if no matching chunk was found.
  int next_matching_chunk(int start, bool is_free) const {
    assert(start >= 0 && start < num_chunks, "invalid parameter");
    int pos = start;
    do {
      if (++pos == num_chunks) {
        pos = 0;
      }
      if (_pool[pos]->is_tagged_free() == is_free) {
        return pos;
      }
    } while (pos != start);
    return -1;
  }

  // A structure to keep information about a chunk list, including which
  // chunks are part of this list. This is needed to keep information about a chunk list
  // we will return to the ChunkManager, because the original list will be destroyed.
  struct AChunkList {
    Metachunk* head;
    Metachunk* all[num_chunks];
    size_t size;
    int num;
    ChunkIndex index;
  };

  // Assemble, from the in-use chunks (not in the chunk manager) in the pool,
  // a random chunk list of max. length <list_size> of chunks with the same
  // ChunkIndex (chunk size).
  // Returns false if the list cannot be assembled. The list is returned in the <out>
  // structure. The returned list may be smaller than <list_size>.
  bool assemble_random_chunklist(AChunkList* out, int list_size) {
    // Choose a random in-use chunk from the pool...
    const int headpos = next_matching_chunk(get_random_position(), false);
    if (headpos == -1) {
      return false;
    }
    Metachunk* const head = _pool[headpos];
    out->all[0] = head;
    assert(head->is_tagged_free() == false, "Chunk state mismatch");
    // ...then go from there, chaining up to list_size - 1 other
    // in-use chunks of the same index.
    const ChunkIndex index = _cm.list_index(head->word_size());
    int num_added = 1;
    size_t size_added = head->word_size();
    int pos = headpos;
    Metachunk* tail = head;
    do {
      pos = next_matching_chunk(pos, false);
      if (pos != headpos) {
        Metachunk* c = _pool[pos];
        assert(c->is_tagged_free() == false, "Chunk state mismatch");
        if (index == _cm.list_index(c->word_size())) {
          tail->set_next(c);
          c->set_prev(tail);
          tail = c;
          out->all[num_added] = c;
          num_added ++;
          size_added += c->word_size();
        }
      }
    } while (num_added < list_size && pos != headpos);
    out->head = head;
    out->index = index;
    out->size = size_added;
    out->num = num_added;
    return true;
  }

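  // Illustrative note (not part of the VM): the assembled list resembles what
  // a SpaceManager hands back when its class loader dies: a doubly-linked run
  // of same-index chunks whose head is passed to return_chunk_list(). The
  // <all> array keeps an independent copy of the membership because returning
  // the list to the ChunkManager splices it apart, after which head->next()
  // can no longer be used to revisit the original elements.
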
  // Take a single random chunk from the ChunkManager.
  bool take_single_random_chunk_from_chunkmanager() {
    assert_counters();
    _cm.locked_verify();
    int pos = next_matching_chunk(get_random_position(), true);
    if (pos == -1) {
      return false;
    }
    Metachunk* c = _pool[pos];
    assert(c->is_tagged_free(), "Chunk state mismatch");
    // Note: instead of using ChunkManager::remove_chunk on this one chunk, we call
    // ChunkManager::free_chunks_get() with this chunk's word size. We really want
    // to exercise ChunkManager::free_chunks_get() because that one gets called for
    // normal chunk allocation.
    Metachunk* c2 = _cm.free_chunks_get(c->word_size());
    assert(c2 != NULL, "Unexpected.");
    assert(!c2->is_tagged_free(), "Chunk state mismatch");
    assert(c2->next() == NULL && c2->prev() == NULL, "Chunk should be outside of a list.");
    _chunks_in_chunkmanager --;
    _words_in_chunkmanager -= c->word_size();
    assert_counters();
    _cm.locked_verify();
    return true;
  }

  // Returns a single random chunk to the chunk manager. Returns false if that
  // was not possible (all chunks are already in the chunk manager).
  bool return_single_random_chunk_to_chunkmanager() {
    assert_counters();
    _cm.locked_verify();
    int pos = next_matching_chunk(get_random_position(), false);
    if (pos == -1) {
      return false;
    }
    Metachunk* c = _pool[pos];
    assert(c->is_tagged_free() == false, "wrong chunk information");
    _cm.return_single_chunk(_cm.list_index(c->word_size()), c);
    _chunks_in_chunkmanager ++;
    _words_in_chunkmanager += c->word_size();
    assert(c->is_tagged_free() == true, "wrong chunk information");
    assert_counters();
    _cm.locked_verify();
    return true;
  }

  // Return a random chunk list to the chunk manager. Returns the length of the
  // returned list.
  int return_random_chunk_list_to_chunkmanager(int list_size) {
    assert_counters();
    _cm.locked_verify();
    AChunkList aChunkList;
    if (!assemble_random_chunklist(&aChunkList, list_size)) {
      return 0;
    }
    // Before the chunks are returned, they should be tagged as in use.
    for (int i = 0; i < aChunkList.num; i ++) {
      assert(!aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
    }
    _cm.return_chunk_list(aChunkList.index, aChunkList.head);
    _chunks_in_chunkmanager += aChunkList.num;
    _words_in_chunkmanager += aChunkList.size;
    // After all chunks are returned, check that they are now tagged free.
    for (int i = 0; i < aChunkList.num; i ++) {
      assert(aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
    }
    assert_counters();
    _cm.locked_verify();
    return aChunkList.num;
  }

 public:

  ChunkManagerReturnTestImpl()
    : _vsn(align_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
    , _cm(SpecializedChunk, SmallChunk, MediumChunk)
    , _chunks_in_chunkmanager(0)
    , _words_in_chunkmanager(0)
  {
    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    // Allocate virtual space and allocate random chunks. Keep these chunks in the _pool. These chunks
    // are "in use", because they have not yet been added to any chunk manager.
    _vsn.initialize();
    _vsn.expand_by(_vsn.reserved_words(), _vsn.reserved_words());
    for (int i = 0; i < num_chunks; i ++) {
      const size_t size = get_random_chunk_size();
      _pool[i] = _vsn.get_chunk_vs(size);
      assert(_pool[i] != NULL, "allocation failed");
    }
    assert_counters();
    _cm.locked_verify();
  }

  // Test entry point.
  // Return some chunks to the chunk manager (return phase). Take some chunks out (take phase). Repeat.
  // Chunks are chosen randomly. The number of chunks to return or take is also chosen randomly, but
  // affected by the <phase_length_factor> argument: a factor of 0.0 will cause the test to quickly
  // alternate between returning and taking, whereas a factor of 1.0 will take/return all chunks
  // from/to the chunk manager, thereby emptying or filling it completely.
  void do_test(float phase_length_factor) {
    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    assert_counters();
    // Execute n operations, each operation being the move of a single chunk to/from the chunk manager.
    const int num_max_ops = num_chunks * 100;
    int num_ops = num_max_ops;
    const int average_phase_length = (int)(phase_length_factor * num_chunks);
    int num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
    bool return_phase = true;
    while (num_ops > 0) {
      int chunks_moved = 0;
      if (return_phase) {
        // Randomly switch between returning a single chunk or a random-length chunk list.
        if (os::random() % 2 == 0) {
          if (return_single_random_chunk_to_chunkmanager()) {
            chunks_moved = 1;
          }
        } else {
          const int list_length = MAX2(1, (os::random() % num_ops_until_switch));
          chunks_moved = return_random_chunk_list_to_chunkmanager(list_length);
        }
      } else {
        // Breathe out.
        if (take_single_random_chunk_from_chunkmanager()) {
          chunks_moved = 1;
        }
      }
      num_ops -= chunks_moved;
      num_ops_until_switch -= chunks_moved;
      if (chunks_moved == 0 || num_ops_until_switch <= 0) {
        return_phase = !return_phase;
        num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
      }
    }
  }
};

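// Illustrative driver (not part of the VM): the hooks below are meant to be
// called from a test harness roughly like this, with the opaque void* hiding
// ChunkManagerReturnTestImpl from code that cannot see this file's types:
//
//   void* t = setup_chunkmanager_returntests();
//   run_chunkmanager_returntests(t, 0.0f);   // rapid return/take alternation
//   run_chunkmanager_returntests(t, 1.0f);   // full drain/fill cycles
//   teardown_chunkmanager_returntests(t);
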
void* setup_chunkmanager_returntests() {
  ChunkManagerReturnTestImpl* p = new ChunkManagerReturnTestImpl();
  return p;
}

void teardown_chunkmanager_returntests(void* p) {
  delete (ChunkManagerReturnTestImpl*) p;
}

void run_chunkmanager_returntests(void* p, float phase_length) {
  ChunkManagerReturnTestImpl* test = (ChunkManagerReturnTestImpl*) p;
  test->do_test(phase_length);
}

// The following test is placed here instead of a gtest / unittest file
// because the ChunkManager class is only available in this file.
class SpaceManagerTest : AllStatic {
  friend void SpaceManager_test_adjust_initial_chunk_size();

  static void test_adjust_initial_chunk_size(bool is_class) {
    const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
    const size_t normal   = SpaceManager::small_chunk_size(is_class);
    const size_t medium   = SpaceManager::medium_chunk_size(is_class);

#define test_adjust_initial_chunk_size(value, expected, is_class_value) \
    do {                                                                \
      size_t v = value;                                                 \
      size_t e = expected;                                              \
      assert(SpaceManager::adjust_initial_chunk_size(v, (is_class_value)) == e, \
             "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, v);      \
    } while (0)

    // Smallest (specialized)
    test_adjust_initial_chunk_size(1,            smallest, is_class);
    test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
    test_adjust_initial_chunk_size(smallest,     smallest, is_class);

    // Small
    test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
    test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
    test_adjust_initial_chunk_size(normal,       normal, is_class);

    // Medium
    test_adjust_initial_chunk_size(normal + 1, medium, is_class);
    test_adjust_initial_chunk_size(medium - 1, medium, is_class);
    test_adjust_initial_chunk_size(medium,     medium, is_class);

    // Humongous
    test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);

#undef test_adjust_initial_chunk_size
  }

  static void test_adjust_initial_chunk_size() {
    test_adjust_initial_chunk_size(false);
    test_adjust_initial_chunk_size(true);
  }
};

void SpaceManager_test_adjust_initial_chunk_size() {
  SpaceManagerTest::test_adjust_initial_chunk_size();
}

#endif // ASSERT