/*
 * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

// Helper, returns a descriptive name for the given index.
static const char* chunk_size_name(ChunkIndex index) {
  switch (index) {
    case SpecializedIndex:
      return "specialized";
    case SmallIndex:
      return "small";
    case MediumIndex:
      return "medium";
    case HumongousIndex:
      return "humongous";
    default:
      return "Invalid index";
  }
}

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};
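
// Illustrative note (not in the original sources): the values above are word
// counts, so the byte sizes depend on the platform word size. Assuming 8-byte
// words (64-bit), they work out to:
//
//   SpecializedChunk = 128 words   -> 1 KB    ClassSpecializedChunk -> 1 KB
//   SmallChunk       = 512 words   -> 4 KB    ClassSmallChunk       -> 2 KB
//   MediumChunk      = 8 K words   -> 64 KB   ClassMediumChunk      -> 32 KB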

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bound");
  return (ChunkIndex) (i+1);
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free list of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  // Return non-humongous chunk list by its index.
  ChunkList* free_chunks(ChunkIndex index);

  // Returns non-humongous chunk list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Returns the humongous chunk dictionary.
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  // Size, in metaspace words, of all chunks managed by this ChunkManager
  size_t _free_chunks_total;
  // Number of chunks in this ChunkManager
  size_t _free_chunks_count;

  // Update counters after a chunk was added or removed.
  void account_for_added_chunk(const Metachunk* c);
  void account_for_removed_chunk(const Metachunk* c);

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // Add or return a chunk to the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  ChunkIndex list_index(size_t size);

  // Map a given index to the chunk size.
  size_t size_by_index(ChunkIndex index);

  // Take a chunk from the ChunkManager. The chunk is expected to be in
  // the chunk manager (the freelist if non-humongous, the dictionary if
  // humongous).
  void remove_chunk(Metachunk* chunk);

  // Return a single chunk of type index to the ChunkManager.
  void return_single_chunk(ChunkIndex index, Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunk_list(ChunkIndex index, Metachunk* chunk);

  // Total of the space in the free chunks list
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks list
  size_t free_chunks_count();

  // Remove from a list by size.  Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);

#define index_bounds_check(index)                                         \
  assert(index == SpecializedIndex ||                                     \
         index == SmallIndex ||                                           \
         index == MediumIndex ||                                          \
         index == HumongousIndex, "Bad index: %d", (int) index)

  size_t num_free_chunks(ChunkIndex index) const {
    index_bounds_check(index);

    if (index == HumongousIndex) {
      return _humongous_dictionary.total_free_blocks();
    }

    ssize_t count = _free_chunks[index].count();
    return count == -1 ? 0 : (size_t) count;
  }

  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
    index_bounds_check(index);

    size_t word_size = 0;
    if (index == HumongousIndex) {
      word_size = _humongous_dictionary.total_size();
    } else {
      const size_t size_per_chunk_in_words = _free_chunks[index].size();
      word_size = size_per_chunk_in_words * num_free_chunks(index);
    }

    return word_size * BytesPerWord;
  }

  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
                                         num_free_chunks(SmallIndex),
                                         num_free_chunks(MediumIndex),
                                         num_free_chunks(HumongousIndex),
                                         size_free_chunks_in_bytes(SpecializedIndex),
                                         size_free_chunks_in_bytes(SmallIndex),
                                         size_free_chunks_in_bytes(MediumIndex),
                                         size_free_chunks_in_bytes(HumongousIndex));
  }

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;
};

class SmallBlocks : public CHeapObj<mtClass> {
  const static uint _small_block_max_size = sizeof(TreeChunk<Metablock, FreeList<Metablock> >)/HeapWordSize;
  const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;

 private:
  FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];

  FreeList<Metablock>& list_at(size_t word_size) {
    assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
    return _small_lists[word_size - _small_block_min_size];
  }

 public:
  SmallBlocks() {
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      _small_lists[k].set_size(i);
    }
  }

  size_t total_size() const {
    size_t result = 0;
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      result = result + _small_lists[k].count() * _small_lists[k].size();
    }
    return result;
  }

  static uint small_block_max_size() { return _small_block_max_size; }
  static uint small_block_min_size() { return _small_block_min_size; }

  MetaWord* get_block(size_t word_size) {
    if (list_at(word_size).count() > 0) {
      MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
      return new_block;
    } else {
      return NULL;
    }
  }
  void return_block(Metablock* free_chunk, size_t word_size) {
    list_at(word_size).return_chunk_at_head(free_chunk, false);
    assert(list_at(word_size).count() > 0, "Should have a chunk");
  }

  void print_on(outputStream* st) const {
    st->print_cr("SmallBlocks:");
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
    }
  }
};
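
// Illustrative note (not in the original sources): SmallBlocks keeps one free
// list per block size, indexed by (word_size - _small_block_min_size). The
// bounds are platform-dependent because they are derived from sizeof(). With
// hypothetical values _small_block_min_size == 3 and _small_block_max_size
// == 12, requests would map like this:
//
//   word_size 3  -> _small_lists[0]   (that list's set_size() is 3)
//   word_size 7  -> _small_lists[4]   (set_size() is 7)
//   word_size 11 -> _small_lists[8]   (set_size() is 11)
//
// Requests of _small_block_max_size words or more never reach SmallBlocks;
// BlockFreelist::get_block() below routes them to the dictionary instead.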

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist : public CHeapObj<mtClass> {
  BlockTreeDictionary* const _dictionary;
  SmallBlocks* _small_blocks;

  // Only allocate and split from freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }
  SmallBlocks* small_blocks() {
    if (_small_blocks == NULL) {
      _small_blocks = new SmallBlocks();
    }
    return _small_blocks;
  }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get and return a block to the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() const {
    size_t result = dictionary()->total_size();
    if (_small_blocks != NULL) {
      result = result + _small_blocks->total_size();
    }
    return result;
  }

  static size_t min_dictionary_size() { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
  void print_on(outputStream* st) const;
};
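
// Illustrative sketch (not in the original sources) of the WasteMultiplier
// policy used by BlockFreelist::get_block() further down: a free block found
// in the dictionary is only used if it is at most four times the requested
// size; otherwise it is put back to avoid wasting most of it. A standalone
// model of the decision, with plain word counts:
//
//   static bool block_is_usable(size_t block_words, size_t request_words) {
//     const int WasteMultiplier = 4;   // same constant as above
//     return block_words <= (size_t)WasteMultiplier * request_words;
//   }
//
// For a 10-word request, a 30-word block is used (and its 20-word tail is
// returned to the freelist), while a 50-word block is put back and the
// caller falls through to allocating from the current chunk.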

// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;
 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // address of next available space in _virtual_space;
  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uintx container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to Virtualspace
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always adds up because all the chunk sizes are multiples of
  // the smallest chunk size.
  void retire(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};
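
// Illustrative note (not in the original sources): retire() carves the
// remaining committed space into free chunks from largest to smallest, and
// the "always adds up" comment above holds because every chunk size is a
// multiple of the specialized (smallest) size. For example, with the
// non-class sizes defined earlier, 8832 leftover words would be retired as
//
//   1 x MediumChunk (8192) + 1 x SmallChunk (512) + 1 x SpecializedChunk (128)
//
// leaving exactly 0 words, which is what the assert at the end of retire()
// checks.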

#define assert_is_aligned(value, alignment)                  \
  assert(is_aligned((value), (alignment)),                   \
         SIZE_FORMAT_HEX " is not aligned to "               \
         SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))

// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// byte_size is the size of the associated virtualspace.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_aligned(bytes, Metaspace::reserve_alignment());

#if INCLUDE_CDS
  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = align_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get a mmap region anywhere if the SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    if (!_rs.is_reserved()) {
      vm_exit_during_initialization("Unable to allocate memory for shared space",
        err_msg(SIZE_FORMAT " bytes.", bytes));
    }
    MetaspaceShared::initialize_shared_rs(&_rs);
  } else
#endif
  {
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uintx VirtualSpaceNode::container_count_slow() {
  uintx count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

  // Chunk up the unused committed space in the current
  // virtual space and add the chunks to the free list.
  void retire_current_virtual_space();

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t chunk_word_size,
                           size_t suggested_commit_granularity);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  bool contains(const void* ptr);

  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};
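
// Illustrative note (not in the original sources): VirtualSpaceListIterator
// is the standard way this file walks the node list; the same pattern is used
// by the destructor, contains() and print_on() further down:
//
//   VirtualSpaceListIterator iter(virtual_space_list());
//   while (iter.repeat()) {
//     VirtualSpaceNode* node = iter.get_next();
//     // ... visit node ...
//   }
//
// Note that purge() deliberately avoids the iterator because it unlinks
// nodes while walking the list.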

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;

// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Maximum number of small chunks to allocate to a SpaceManager
  static uint const _small_chunk_limit;

  // Sum of all space in allocated chunks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist* _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const { return _block_freelists; }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
  static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
  static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }

  static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }

  // Accessors
  bool is_class() const { return _mdtype == Metaspace::ClassType; }

  size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
  size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
  size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }

  size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }

  size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks
  // by the given size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager.  That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in-use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Adjust the initial chunk size to match one of the fixed chunk list sizes,
  // or return the unadjusted size if the requested size is humongous.
  static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
  size_t adjust_initial_chunk_size(size_t requested) const;

  // Get the initial chunk size for this metaspace type.
  size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t chunk_word_size);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);
  // Allocates a block from a small chunk
  MetaWord* get_small_chunk_and_allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size, compute
  // the chunk size to use (for expanding space for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  // This adjusts the size given to be greater than the minimum allocation size in
  // words for data in metaspace.  Essentially the minimum size is currently 3 words.
  size_t get_allocation_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
};
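
// Illustrative note (not in the original sources): with 8-byte words, a
// 1-word (8-byte) request is first padded up to MAX2(8, sizeof(Metablock))
// bytes and then aligned to Metachunk::object_alignment(), which per the
// comment above currently yields the 3-word minimum. A 10-word request
// already exceeds the minimum and is only rounded up to the alignment. The
// exact numbers depend on sizeof(Metablock) and the alignment on the
// platform, so treat these figures as an example rather than a guarantee.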

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag,
            Monitor::_safepoint_check_never);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         "Inconsistency in container_count _container_count " UINTX_FORMAT
         " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}

BlockFreelist::~BlockFreelist() {
  delete _dictionary;
  if (_small_blocks != NULL) {
    delete _small_blocks;
  }
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");

  Metablock* free_chunk = ::new (p) Metablock(word_size);
  if (word_size < SmallBlocks::small_block_max_size()) {
    small_blocks()->return_block(free_chunk, word_size);
  } else {
    dictionary()->return_chunk(free_chunk);
  }
  log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
            SIZE_FORMAT, p2i(free_chunk), word_size);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");

  // Try small_blocks first.
  if (word_size < SmallBlocks::small_block_max_size()) {
    // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
    // this space manager.
    MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
    if (new_block != NULL) {
      log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
                p2i(new_block), word_size);
      return new_block;
    }
  }

  if (word_size < BlockFreelist::min_dictionary_size()) {
    // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block = dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= SmallBlocks::small_block_min_size()) {
    return_block(new_block + word_size, unused);
  }

  log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
            p2i(new_block), word_size);
  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  dictionary()->print_free_lists(st);
  if (_small_blocks != NULL) {
    _small_blocks->print_on(st);
  }
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
      "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    LogTarget(Debug, gc, metaspace, freelist) lt;
    if (lt.is_enabled()) {
      LogStream ls(lt);
      ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(&ls);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}
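
// Illustrative note (not in the original sources): take_from_committed() is
// a plain bump-pointer allocation over the committed part of the node. A
// minimal model of the pointer arithmetic, ignoring the availability check
// and the Metachunk placement-new:
//
//   MetaWord* chunk_limit = top();   // bottom of the new chunk
//   inc_top(chunk_word_size);        // _top += chunk_word_size
//   // the chunk now owns [chunk_limit, chunk_limit + chunk_word_size)
//
// is_available() above guarantees that the new top never passes end().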

// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned only the middle alignment of the VirtualSpace is used.
  assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                                             Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
        "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                 (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
           "Reserved start was not set properly " PTR_FORMAT
           " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
           "Reserved size was not set properly " SIZE_FORMAT
           " != " SIZE_FORMAT, reserved()->word_size(),
           _rs.size() / BytesPerWord);
  }

  return result;
}
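
// Illustrative note (not in the original sources): expand_by() commits
// MIN2(preferred_bytes, uncommitted) once min_bytes is known to fit. For
// example, with 1 MB still uncommitted, a request of (min = 256 KB,
// preferred = 4 MB) commits 1 MB; a request of (min = 2 MB, preferred =
// 4 MB) fails up front because min_bytes exceeds what is left to commit.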

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
           "[" PTR_FORMAT ", " PTR_FORMAT ", "
           PTR_FORMAT ", " PTR_FORMAT ")",
           p2i(vs), capacity / K,
           capacity == 0 ? 0 : used * 100 / capacity,
           p2i(bottom()), p2i(top()), p2i(end()),
           p2i(vs->high_boundary()));
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                        \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
         "Too much committed memory. Committed: " SIZE_FORMAT \
         " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
         MetaspaceAux::committed_bytes(), MaxMetaspaceSize);

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk has been removed from the chunks free list, update counters.
  account_for_removed_chunk(chunk);
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    DEBUG_ONLY(vsl->verify_container_count();)
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except for at
// unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(SpaceManager::expand_lock());

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  DEBUG_ONLY(verify_container_count();)
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->size_by_index(index);

    while (free_words_in_vs() >= chunk_size) {
      Metachunk* chunk = get_chunk_vs(chunk_size);
      assert(chunk != NULL, "allocation should have been successful");

      chunk_manager->return_single_chunk(index, chunk);
    }
    DEBUG_ONLY(verify_container_count();)
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
        "Reserved memory size differs from requested memory size");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  LogTarget(Trace, gc, metaspace) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    VirtualSpaceNode* vsl = current_virtual_space();
    ResourceMark rm;
    vsl->print_on(&ls);
  }
}

bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
  assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
          "The new VirtualSpace was pre-committed, so it"
          " should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size       = align_up(chunk_word_size,              Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(chunk_word_size);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    node->print_on(st);
  }
}

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_aligned(delta, Metaspace::commit_alignment());

  return delta;
}
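
// Illustrative note (not in the original sources): with hypothetical values
// MinMetaspaceExpansion = 256 KB and MaxMetaspaceExpansion = 4 MB (the
// actual flag defaults are platform-scaled), delta_capacity_until_GC()
// behaves as follows, ignoring commit alignment:
//
//   bytes = 100 KB  -> delta = 256 KB          (small request, minimum step)
//   bytes = 1 MB    -> delta = 4 MB            (medium request, large step)
//   bytes = 10 MB   -> delta = 10 MB + 256 KB  (large request, size plus some)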

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  size_t capacity_until_GC = (size_t) _capacity_until_GC;
  size_t new_value = capacity_until_GC + v;

  if (new_value < capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_down(max_uintx, Metaspace::commit_alignment());
  }

  intptr_t expected = (intptr_t) capacity_until_GC;
  intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);

  if (expected != actual) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = capacity_until_GC;
  }
  return true;
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
}
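
// Illustrative note (not in the original sources): because
// inc_capacity_until_GC() uses a single compare-and-swap, it returns false
// when another thread raced the update, and a caller that still needs the
// expansion would retry along the lines of:
//
//   size_t new_cap, old_cap;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, &new_cap, &old_cap)) {
//     // reload state and retry, or give up and trigger a GC
//   }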

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
         capacity_until_gc, committed_bytes);

  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);

  return left_to_commit / BytesPerWord;
}
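
// Illustrative note (not in the original sources): allowed_expansion()
// returns the smaller of the two remaining budgets, in words. For example,
// with committed_bytes = 100 MB, capacity_until_GC = 120 MB and
// MaxMetaspaceSize = 150 MB, left_until_GC is 20 MB, left_until_max is
// 50 MB, and the result is 20 MB / BytesPerWord words.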
1629     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1630     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
1631     // Don't expand unless it's significant
1632     if (expand_bytes >= MinMetaspaceExpansion) {
1633       size_t new_capacity_until_GC = 0;
1634       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1635       assert(succeeded, "Should always successfully increment HWM when at safepoint");
1636 
1637       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1638                                                new_capacity_until_GC,
1639                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
1640       log_trace(gc, metaspace)("    expanding: minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM: %6.1fKB",
1641                                minimum_desired_capacity / (double) K,
1642                                expand_bytes / (double) K,
1643                                MinMetaspaceExpansion / (double) K,
1644                                new_capacity_until_GC / (double) K);
1645     }
1646     return;
1647   }
1648 
1649   // No expansion, now see if we want to shrink
1650   // We would never want to shrink more than this
1651   assert(capacity_until_GC >= minimum_desired_capacity,
1652          SIZE_FORMAT " >= " SIZE_FORMAT,
1653          capacity_until_GC, minimum_desired_capacity);
1654   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1655 
1656   // Should shrinking be considered?
1657   if (MaxMetaspaceFreeRatio < 100) {
1658     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1659     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1660     const double max_tmp = used_after_gc / minimum_used_percentage;
1661     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1662     maximum_desired_capacity = MAX2(maximum_desired_capacity,
1663                                     MetaspaceSize);
1664     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
1665                              maximum_free_percentage, minimum_used_percentage);
1666     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
1667                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
1668 
1669     assert(minimum_desired_capacity <= maximum_desired_capacity,
1670            "sanity check");
1671 
1672     if (capacity_until_GC > maximum_desired_capacity) {
1673       // Capacity too large, compute shrinking size
1674       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1675       // We don't want to shrink all the way back to initSize if people call
1676       // System.gc(), because some programs do that between "phases" and then
1677       // we'd just have to grow the heap up again for the next phase.  So we
1678       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1679       // on the third call, and 100% by the fourth call.  But if we recompute
1680       // size without shrinking, it goes back to 0%.
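      // Illustrative numbers: a given eligible shrink of 80 MB would be
      // damped to 0 MB on a first call, 8 MB on a second, 32 MB on a third,
      // and applied in full from the fourth call on; since shrink_bytes is
      // recomputed every time, the actual amounts differ once shrinking
      // has started.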
1681 shrink_bytes = shrink_bytes / 100 * current_shrink_factor; 1682 1683 shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment()); 1684 1685 assert(shrink_bytes <= max_shrink_bytes, 1686 "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, 1687 shrink_bytes, max_shrink_bytes); 1688 if (current_shrink_factor == 0) { 1689 _shrink_factor = 10; 1690 } else { 1691 _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100); 1692 } 1693 log_trace(gc, metaspace)(" shrinking: initThreshold: %.1fK maximum_desired_capacity: %.1fK", 1694 MetaspaceSize / (double) K, maximum_desired_capacity / (double) K); 1695 log_trace(gc, metaspace)(" shrink_bytes: %.1fK current_shrink_factor: %d new shrink factor: %d MinMetaspaceExpansion: %.1fK", 1696 shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K); 1697 } 1698 } 1699 1700 // Don't shrink unless it's significant 1701 if (shrink_bytes >= MinMetaspaceExpansion && 1702 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { 1703 size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes); 1704 Metaspace::tracer()->report_gc_threshold(capacity_until_GC, 1705 new_capacity_until_GC, 1706 MetaspaceGCThresholdUpdater::ComputeNewSize); 1707 } 1708 } 1709 1710 // Metadebug methods 1711 1712 void Metadebug::init_allocation_fail_alot_count() { 1713 if (MetadataAllocationFailALot) { 1714 _allocation_fail_alot_count = 1715 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0)); 1716 } 1717 } 1718 1719 #ifdef ASSERT 1720 bool Metadebug::test_metadata_failure() { 1721 if (MetadataAllocationFailALot && 1722 Threads::is_vm_complete()) { 1723 if (_allocation_fail_alot_count > 0) { 1724 _allocation_fail_alot_count--; 1725 } else { 1726 log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot"); 1727 init_allocation_fail_alot_count(); 1728 return true; 1729 } 1730 } 1731 return false; 1732 } 1733 #endif 1734 1735 // ChunkManager methods 1736 1737 size_t ChunkManager::free_chunks_total_words() { 1738 return _free_chunks_total; 1739 } 1740 1741 size_t ChunkManager::free_chunks_total_bytes() { 1742 return free_chunks_total_words() * BytesPerWord; 1743 } 1744 1745 // Update internal accounting after a chunk was added 1746 void ChunkManager::account_for_added_chunk(const Metachunk* c) { 1747 assert_lock_strong(SpaceManager::expand_lock()); 1748 _free_chunks_count ++; 1749 _free_chunks_total += c->word_size(); 1750 } 1751 1752 // Update internal accounting after a chunk was removed 1753 void ChunkManager::account_for_removed_chunk(const Metachunk* c) { 1754 assert_lock_strong(SpaceManager::expand_lock()); 1755 assert(_free_chunks_count >= 1, 1756 "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count); 1757 assert(_free_chunks_total >= c->word_size(), 1758 "ChunkManager::_free_chunks_total: about to go negative" 1759 "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size()); 1760 _free_chunks_count --; 1761 _free_chunks_total -= c->word_size(); 1762 } 1763 1764 size_t ChunkManager::free_chunks_count() { 1765 #ifdef ASSERT 1766 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) { 1767 MutexLockerEx cl(SpaceManager::expand_lock(), 1768 Mutex::_no_safepoint_check_flag); 1769 // This lock is only needed in debug because the verification 1770 // of the _free_chunks_totals walks the list of free chunks 1771 
    slow_locked_verify_free_chunks_count();
1772   }
1773 #endif
1774   return _free_chunks_count;
1775 }
1776 
1777 ChunkIndex ChunkManager::list_index(size_t size) {
1778   if (size_by_index(SpecializedIndex) == size) {
1779     return SpecializedIndex;
1780   }
1781   if (size_by_index(SmallIndex) == size) {
1782     return SmallIndex;
1783   }
1784   const size_t med_size = size_by_index(MediumIndex);
1785   if (med_size == size) {
1786     return MediumIndex;
1787   }
1788 
1789   assert(size > med_size, "Not a humongous chunk");
1790   return HumongousIndex;
1791 }
1792 
1793 size_t ChunkManager::size_by_index(ChunkIndex index) {
1794   index_bounds_check(index);
1795   assert(index != HumongousIndex, "Do not call for humongous chunks.");
1796   return free_chunks(index)->size();
1797 }
1798 
1799 void ChunkManager::locked_verify_free_chunks_total() {
1800   assert_lock_strong(SpaceManager::expand_lock());
1801   assert(sum_free_chunks() == _free_chunks_total,
1802          "_free_chunks_total " SIZE_FORMAT " is not the"
1803          " same as sum " SIZE_FORMAT, _free_chunks_total,
1804          sum_free_chunks());
1805 }
1806 
1807 void ChunkManager::verify_free_chunks_total() {
1808   MutexLockerEx cl(SpaceManager::expand_lock(),
1809                    Mutex::_no_safepoint_check_flag);
1810   locked_verify_free_chunks_total();
1811 }
1812 
1813 void ChunkManager::locked_verify_free_chunks_count() {
1814   assert_lock_strong(SpaceManager::expand_lock());
1815   assert(sum_free_chunks_count() == _free_chunks_count,
1816          "_free_chunks_count " SIZE_FORMAT " is not the"
1817          " same as sum " SIZE_FORMAT, _free_chunks_count,
1818          sum_free_chunks_count());
1819 }
1820 
1821 void ChunkManager::verify_free_chunks_count() {
1822 #ifdef ASSERT
1823   MutexLockerEx cl(SpaceManager::expand_lock(),
1824                    Mutex::_no_safepoint_check_flag);
1825   locked_verify_free_chunks_count();
1826 #endif
1827 }
1828 
1829 void ChunkManager::verify() {
1830   MutexLockerEx cl(SpaceManager::expand_lock(),
1831                    Mutex::_no_safepoint_check_flag);
1832   locked_verify();
1833 }
1834 
1835 void ChunkManager::locked_verify() {
1836   locked_verify_free_chunks_count();
1837   locked_verify_free_chunks_total();
1838 }
1839 
1840 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1841   assert_lock_strong(SpaceManager::expand_lock());
1842   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1843                _free_chunks_total, _free_chunks_count);
1844 }
1845 
1846 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1847   assert_lock_strong(SpaceManager::expand_lock());
1848   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1849                sum_free_chunks(), sum_free_chunks_count());
1850 }
1851 
1852 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1853   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
1854          "Bad index: %d", (int)index);
1855 
1856   return &_free_chunks[index];
1857 }
1858 
1859 // These methods, which sum the free chunk lists, are used by printing
1860 // methods that run in product builds.
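// Both walks below take the expand lock and are O(number of free chunks);
// the incrementally maintained _free_chunks_total / _free_chunks_count
// counters exist so that hot paths can avoid this cost.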
1861 size_t ChunkManager::sum_free_chunks() {
1862   assert_lock_strong(SpaceManager::expand_lock());
1863   size_t result = 0;
1864   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1865     ChunkList* list = free_chunks(i);
1866 
1867     if (list == NULL) {
1868       continue;
1869     }
1870 
1871     result = result + list->count() * list->size();
1872   }
1873   result = result + humongous_dictionary()->total_size();
1874   return result;
1875 }
1876 
1877 size_t ChunkManager::sum_free_chunks_count() {
1878   assert_lock_strong(SpaceManager::expand_lock());
1879   size_t count = 0;
1880   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1881     ChunkList* list = free_chunks(i);
1882     if (list == NULL) {
1883       continue;
1884     }
1885     count = count + list->count();
1886   }
1887   count = count + humongous_dictionary()->total_free_blocks();
1888   return count;
1889 }
1890 
1891 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1892   ChunkIndex index = list_index(word_size);
1893   assert(index < HumongousIndex, "No humongous list");
1894   return free_chunks(index);
1895 }
1896 
1897 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1898   assert_lock_strong(SpaceManager::expand_lock());
1899 
1900   slow_locked_verify();
1901 
1902   Metachunk* chunk = NULL;
1903   if (list_index(word_size) != HumongousIndex) {
1904     ChunkList* free_list = find_free_chunks_list(word_size);
1905     assert(free_list != NULL, "Sanity check");
1906 
1907     chunk = free_list->head();
1908 
1909     if (chunk == NULL) {
1910       return NULL;
1911     }
1912 
1913     // Remove the chunk as the head of the list.
1914     free_list->remove_chunk(chunk);
1915 
1916     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1917                                        p2i(free_list), p2i(chunk), chunk->word_size());
1918   } else {
1919     chunk = humongous_dictionary()->get_chunk(
1920       word_size,
1921       FreeBlockDictionary<Metachunk>::atLeast);
1922 
1923     if (chunk == NULL) {
1924       return NULL;
1925     }
1926 
1927     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
1928                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
1929   }
1930 
1931   // Chunk has been removed from the chunk manager; update counters.
1932   account_for_removed_chunk(chunk);
1933 
1934   // Remove it from the links to this freelist
1935   chunk->set_next(NULL);
1936   chunk->set_prev(NULL);
1937 #ifdef ASSERT
1938   // Chunk is no longer on any freelist. Setting to false makes container_count_slow()
1939   // work.
1940 chunk->set_is_tagged_free(false); 1941 #endif 1942 chunk->container()->inc_container_count(); 1943 1944 slow_locked_verify(); 1945 return chunk; 1946 } 1947 1948 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) { 1949 assert_lock_strong(SpaceManager::expand_lock()); 1950 slow_locked_verify(); 1951 1952 // Take from the beginning of the list 1953 Metachunk* chunk = free_chunks_get(word_size); 1954 if (chunk == NULL) { 1955 return NULL; 1956 } 1957 1958 assert((word_size <= chunk->word_size()) || 1959 (list_index(chunk->word_size()) == HumongousIndex), 1960 "Non-humongous variable sized chunk"); 1961 LogTarget(Debug, gc, metaspace, freelist) lt; 1962 if (lt.is_enabled()) { 1963 size_t list_count; 1964 if (list_index(word_size) < HumongousIndex) { 1965 ChunkList* list = find_free_chunks_list(word_size); 1966 list_count = list->count(); 1967 } else { 1968 list_count = humongous_dictionary()->total_count(); 1969 } 1970 LogStream ls(lt); 1971 ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ", 1972 p2i(this), p2i(chunk), chunk->word_size(), list_count); 1973 ResourceMark rm; 1974 locked_print_free_chunks(&ls); 1975 } 1976 1977 return chunk; 1978 } 1979 1980 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) { 1981 assert_lock_strong(SpaceManager::expand_lock()); 1982 assert(chunk != NULL, "Expected chunk."); 1983 assert(chunk->container() != NULL, "Container should have been set."); 1984 assert(chunk->is_tagged_free() == false, "Chunk should be in use."); 1985 index_bounds_check(index); 1986 1987 // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not 1988 // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary 1989 // keeps tree node pointers in the chunk payload area which mangle will overwrite. 1990 NOT_PRODUCT(chunk->mangle(badMetaWordVal);) 1991 1992 if (index != HumongousIndex) { 1993 // Return non-humongous chunk to freelist. 1994 ChunkList* list = free_chunks(index); 1995 assert(list->size() == chunk->word_size(), "Wrong chunk type."); 1996 list->return_chunk_at_head(chunk); 1997 log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.", 1998 chunk_size_name(index), p2i(chunk)); 1999 } else { 2000 // Return humongous chunk to dictionary. 2001 assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type."); 2002 assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0, 2003 "Humongous chunk has wrong alignment."); 2004 _humongous_dictionary.return_chunk(chunk); 2005 log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.", 2006 chunk_size_name(index), p2i(chunk), chunk->word_size()); 2007 } 2008 chunk->container()->dec_container_count(); 2009 DEBUG_ONLY(chunk->set_is_tagged_free(true);) 2010 2011 // Chunk has been added; update counters. 
2012   account_for_added_chunk(chunk);
2013 
2014 }
2015 
2016 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
2017   index_bounds_check(index);
2018   if (chunks == NULL) {
2019     return;
2020   }
2021   LogTarget(Trace, gc, metaspace, freelist) log;
2022   if (log.is_enabled()) { // tracing
2023     log.print("returning list of %s chunks...", chunk_size_name(index));
2024   }
2025   unsigned num_chunks_returned = 0;
2026   size_t size_chunks_returned = 0;
2027   Metachunk* cur = chunks;
2028   while (cur != NULL) {
2029     // Capture the next link before it is changed
2030     // by the call to return_chunk_at_head();
2031     Metachunk* next = cur->next();
2032     if (log.is_enabled()) { // tracing
2033       num_chunks_returned ++;
2034       size_chunks_returned += cur->word_size();
2035     }
2036     return_single_chunk(index, cur);
2037     cur = next;
2038   }
2039   if (log.is_enabled()) { // tracing
2040     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
2041               num_chunks_returned, chunk_size_name(index), size_chunks_returned);
2042     if (index != HumongousIndex) {
2043       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->count());
2044     } else {
2045       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
2046     }
2047   }
2048 }
2049 
2050 void ChunkManager::print_on(outputStream* out) const {
2051   const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics(out);
2052 }
2053 
2054 // SpaceManager methods
2055 
2056 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
2057   size_t chunk_sizes[] = {
2058     specialized_chunk_size(is_class_space),
2059     small_chunk_size(is_class_space),
2060     medium_chunk_size(is_class_space)
2061   };
2062 
2063   // Adjust up to one of the fixed chunk sizes ...
2064   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
2065     if (requested <= chunk_sizes[i]) {
2066       return chunk_sizes[i];
2067     }
2068   }
2069 
2070   // ... or return the size as a humongous chunk.
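  // Illustrative example: on the class side (ClassSpecializedChunk = 128,
  // ClassSmallChunk = 256, ClassMediumChunk = 4 * K words), a request for
  // 200 words is adjusted up to 256 above, while a request for 5000 words
  // exceeds every fixed size and is returned unchanged below.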
2071   return requested;
2072 }
2073 
2074 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
2075   return adjust_initial_chunk_size(requested, is_class());
2076 }
2077 
2078 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
2079   size_t requested;
2080 
2081   if (is_class()) {
2082     switch (type) {
2083     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
2084     case Metaspace::ROMetaspaceType:         requested = ClassSpecializedChunk; break;
2085     case Metaspace::ReadWriteMetaspaceType:  requested = ClassSpecializedChunk; break;
2086     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
2087     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
2088     default:                                 requested = ClassSmallChunk; break;
2089     }
2090   } else {
2091     switch (type) {
2092     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
2093     case Metaspace::ROMetaspaceType:         requested = SharedReadOnlySize / wordSize; break;
2094     case Metaspace::ReadWriteMetaspaceType:  requested = SharedReadWriteSize / wordSize; break;
2095     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
2096     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
2097     default:                                 requested = SmallChunk; break;
2098     }
2099   }
2100 
2101   // Adjust to one of the fixed chunk sizes (unless humongous)
2102   const size_t adjusted = adjust_initial_chunk_size(requested);
2103 
2104   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
2105          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
2106 
2107   return adjusted;
2108 }
2109 
2110 size_t SpaceManager::sum_free_in_chunks_in_use() const {
2111   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2112   size_t free = 0;
2113   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2114     Metachunk* chunk = chunks_in_use(i);
2115     while (chunk != NULL) {
2116       free += chunk->free_word_size();
2117       chunk = chunk->next();
2118     }
2119   }
2120   return free;
2121 }
2122 
2123 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
2124   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2125   size_t result = 0;
2126   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2127     result += sum_waste_in_chunks_in_use(i);
2128   }
2129 
2130   return result;
2131 }
2132 
2133 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
2134   size_t result = 0;
2135   Metachunk* chunk = chunks_in_use(index);
2136   // Count the free space in all the chunks but not the
2137   // current chunk from which allocations are still being done.
2138   while (chunk != NULL) {
2139     if (chunk != current_chunk()) {
2140       result += chunk->free_word_size();
2141     }
2142     chunk = chunk->next();
2143   }
2144   return result;
2145 }
2146 
2147 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
2148   // For CMS use allocated_chunks_words(), which does not need the
2149   // Metaspace lock.  For the other collectors sum over the
2150   // chunks-in-use lists.  The sum is the definitive answer, but it is
2151   // too expensive to compute in the product, so allocated_chunks_words()
2152   // is used there instead.  Keeping both methods allows debug code
2153   // (e.g. the SpaceManager destructor) to check that
2154   // allocated_chunks_words() returns the same value as
2155   // sum_capacity_in_chunks_in_use().
2156   if (UseConcMarkSweepGC) {
2157     return allocated_chunks_words();
2158   } else {
2159     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2160     size_t sum = 0;
2161     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2162       Metachunk* chunk = chunks_in_use(i);
2163       while (chunk != NULL) {
2164         sum += chunk->word_size();
2165         chunk = chunk->next();
2166       }
2167     }
2168     return sum;
2169   }
2170 }
2171 
2172 size_t SpaceManager::sum_count_in_chunks_in_use() {
2173   size_t count = 0;
2174   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2175     count = count + sum_count_in_chunks_in_use(i);
2176   }
2177 
2178   return count;
2179 }
2180 
2181 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
2182   size_t count = 0;
2183   Metachunk* chunk = chunks_in_use(i);
2184   while (chunk != NULL) {
2185     count++;
2186     chunk = chunk->next();
2187   }
2188   return count;
2189 }
2190 
2191 
2192 size_t SpaceManager::sum_used_in_chunks_in_use() const {
2193   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2194   size_t used = 0;
2195   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2196     Metachunk* chunk = chunks_in_use(i);
2197     while (chunk != NULL) {
2198       used += chunk->used_word_size();
2199       chunk = chunk->next();
2200     }
2201   }
2202   return used;
2203 }
2204 
2205 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
2206 
2207   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2208     Metachunk* chunk = chunks_in_use(i);
2209     st->print("SpaceManager: %s " PTR_FORMAT,
2210               chunk_size_name(i), p2i(chunk));
2211     if (chunk != NULL) {
2212       st->print_cr(" free " SIZE_FORMAT,
2213                    chunk->free_word_size());
2214     } else {
2215       st->cr();
2216     }
2217   }
2218 
2219   chunk_manager()->locked_print_free_chunks(st);
2220   chunk_manager()->locked_print_sum_free_chunks(st);
2221 }
2222 
2223 size_t SpaceManager::calc_chunk_size(size_t word_size) {
2224 
2225   // Decide between a small chunk and a medium chunk.  Up to
2226   // _small_chunk_limit small chunks can be allocated.
2227   // After that a medium chunk is preferred.
2228   size_t chunk_word_size;
2229   if (chunks_in_use(MediumIndex) == NULL &&
2230       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2231     chunk_word_size = (size_t) small_chunk_size();
2232     if (word_size + Metachunk::overhead() > small_chunk_size()) {
2233       chunk_word_size = medium_chunk_size();
2234     }
2235   } else {
2236     chunk_word_size = medium_chunk_size();
2237   }
2238 
2239   // Might still need a humongous chunk.  Enforce
2240   // humongous allocation sizes to be aligned up to
2241   // the smallest chunk size.
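  // Illustrative example, assuming smallest_chunk_size() == SpecializedChunk
  // (128 words) and ignoring Metachunk::overhead(): a 10000-word request is
  // aligned up to 10112 words, which then wins the MAX2 below over the
  // small/medium choice made above.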
2242 size_t if_humongous_sized_chunk = 2243 align_up(word_size + Metachunk::overhead(), 2244 smallest_chunk_size()); 2245 chunk_word_size = 2246 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk); 2247 2248 assert(!SpaceManager::is_humongous(word_size) || 2249 chunk_word_size == if_humongous_sized_chunk, 2250 "Size calculation is wrong, word_size " SIZE_FORMAT 2251 " chunk_word_size " SIZE_FORMAT, 2252 word_size, chunk_word_size); 2253 Log(gc, metaspace, alloc) log; 2254 if (log.is_debug() && SpaceManager::is_humongous(word_size)) { 2255 log.debug("Metadata humongous allocation:"); 2256 log.debug(" word_size " PTR_FORMAT, word_size); 2257 log.debug(" chunk_word_size " PTR_FORMAT, chunk_word_size); 2258 log.debug(" chunk overhead " PTR_FORMAT, Metachunk::overhead()); 2259 } 2260 return chunk_word_size; 2261 } 2262 2263 void SpaceManager::track_metaspace_memory_usage() { 2264 if (is_init_completed()) { 2265 if (is_class()) { 2266 MemoryService::track_compressed_class_memory_usage(); 2267 } 2268 MemoryService::track_metaspace_memory_usage(); 2269 } 2270 } 2271 2272 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) { 2273 assert(vs_list()->current_virtual_space() != NULL, 2274 "Should have been set"); 2275 assert(current_chunk() == NULL || 2276 current_chunk()->allocate(word_size) == NULL, 2277 "Don't need to expand"); 2278 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 2279 2280 if (log_is_enabled(Trace, gc, metaspace, freelist)) { 2281 size_t words_left = 0; 2282 size_t words_used = 0; 2283 if (current_chunk() != NULL) { 2284 words_left = current_chunk()->free_word_size(); 2285 words_used = current_chunk()->used_word_size(); 2286 } 2287 log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left", 2288 word_size, words_used, words_left); 2289 } 2290 2291 // Get another chunk 2292 size_t chunk_word_size = calc_chunk_size(word_size); 2293 Metachunk* next = get_new_chunk(chunk_word_size); 2294 2295 MetaWord* mem = NULL; 2296 2297 // If a chunk was available, add it to the in-use chunk list 2298 // and do an allocation from it. 2299 if (next != NULL) { 2300 // Add to this manager's list of chunks in use. 2301 add_chunk(next, false); 2302 mem = next->allocate(word_size); 2303 } 2304 2305 // Track metaspace memory usage statistic. 2306 track_metaspace_memory_usage(); 2307 2308 return mem; 2309 } 2310 2311 void SpaceManager::print_on(outputStream* st) const { 2312 2313 for (ChunkIndex i = ZeroIndex; 2314 i < NumberOfInUseLists ; 2315 i = next_chunk_index(i) ) { 2316 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT, 2317 p2i(chunks_in_use(i)), 2318 chunks_in_use(i) == NULL ? 
                 0 : chunks_in_use(i)->word_size());
2319   }
2320   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2321                " Humongous " SIZE_FORMAT,
2322                sum_waste_in_chunks_in_use(SmallIndex),
2323                sum_waste_in_chunks_in_use(MediumIndex),
2324                sum_waste_in_chunks_in_use(HumongousIndex));
2325   // block free lists
2326   if (block_freelists() != NULL) {
2327     st->print_cr("total in block free lists " SIZE_FORMAT,
2328                  block_freelists()->total_size());
2329   }
2330 }
2331 
2332 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2333                            Mutex* lock) :
2334   _mdtype(mdtype),
2335   _allocated_blocks_words(0),
2336   _allocated_chunks_words(0),
2337   _allocated_chunks_count(0),
2338   _block_freelists(NULL),
2339   _lock(lock)
2340 {
2341   initialize();
2342 }
2343 
2344 void SpaceManager::inc_size_metrics(size_t words) {
2345   assert_lock_strong(SpaceManager::expand_lock());
2346   // Total of allocated Metachunks and allocated Metachunks count
2347   // for each SpaceManager
2348   _allocated_chunks_words = _allocated_chunks_words + words;
2349   _allocated_chunks_count++;
2350   // Global total of capacity in allocated Metachunks
2351   MetaspaceAux::inc_capacity(mdtype(), words);
2352   // Global total of allocated Metablocks.
2353   // used_words_slow() includes the overhead in each
2354   // Metachunk so include it in the used when the
2355   // Metachunk is first added (so only added once per
2356   // Metachunk).
2357   MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2358 }
2359 
2360 void SpaceManager::inc_used_metrics(size_t words) {
2361   // Add to the per SpaceManager total
2362   Atomic::add_ptr(words, &_allocated_blocks_words);
2363   // Add to the global total
2364   MetaspaceAux::inc_used(mdtype(), words);
2365 }
2366 
2367 void SpaceManager::dec_total_from_size_metrics() {
2368   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2369   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2370   // Also deduct the overhead per Metachunk
2371   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2372 }
2373 
2374 void SpaceManager::initialize() {
2375   Metadebug::init_allocation_fail_alot_count();
2376   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2377     _chunks_in_use[i] = NULL;
2378   }
2379   _current_chunk = NULL;
2380   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
2381 }
2382 
2383 SpaceManager::~SpaceManager() {
2384   // This takes this->_lock, which can't be done while holding expand_lock()
2385   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2386          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2387          " allocated_chunks_words() " SIZE_FORMAT,
2388          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2389 
2390   MutexLockerEx fcl(SpaceManager::expand_lock(),
2391                     Mutex::_no_safepoint_check_flag);
2392 
2393   chunk_manager()->slow_locked_verify();
2394 
2395   dec_total_from_size_metrics();
2396 
2397   Log(gc, metaspace, freelist) log;
2398   if (log.is_trace()) {
2399     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
2400     ResourceMark rm;
2401     LogStream ls(log.trace());
2402     locked_print_chunks_in_use_on(&ls);
2403     if (block_freelists() != NULL) {
2404       block_freelists()->print_on(&ls);
2405     }
2406   }
2407 
2408   // Add all the chunks in use by this space manager
2409   // to the global list of free chunks.
2410 
2411   // Follow each list of chunks-in-use and add them to the
2412   // free lists.  Each list is NULL terminated.
2413 
2414   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
2415     Metachunk* chunks = chunks_in_use(i);
2416     chunk_manager()->return_chunk_list(i, chunks);
2417     set_chunks_in_use(i, NULL);
2418   }
2419 
2420   chunk_manager()->slow_locked_verify();
2421 
2422   if (_block_freelists != NULL) {
2423     delete _block_freelists;
2424   }
2425 }
2426 
2427 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2428   assert_lock_strong(_lock);
2429   // Allocations and deallocations are in raw_word_size
2430   size_t raw_word_size = get_allocation_word_size(word_size);
2431   // Lazily create a block_freelist
2432   if (block_freelists() == NULL) {
2433     _block_freelists = new BlockFreelist();
2434   }
2435   block_freelists()->return_block(p, raw_word_size);
2436 }
2437 
2438 // Adds a chunk to the list of chunks in use.
2439 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2440 
2441   assert(new_chunk != NULL, "Should not be NULL");
2442   assert(new_chunk->next() == NULL, "Should not be on a list");
2443 
2444   new_chunk->reset_empty();
2445 
2446   // Find the correct list and set the current
2447   // chunk for that list.
2448   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
2449 
2450   if (index != HumongousIndex) {
2451     retire_current_chunk();
2452     set_current_chunk(new_chunk);
2453     new_chunk->set_next(chunks_in_use(index));
2454     set_chunks_in_use(index, new_chunk);
2455   } else {
2456     // For null class loader data and DumpSharedSpaces, the first chunk isn't
2457     // small, so chunks_in_use(SmallIndex) will be null.  Link this first
2458     // chunk as the current chunk.
2459     if (make_current) {
2460       // Set as the current chunk but otherwise treat as a humongous chunk.
2461       set_current_chunk(new_chunk);
2462     }
2463     // Link at head.  The _current_chunk only points to a humongous chunk for
2464     // the null class loader metaspace (class and data virtual space managers),
2465     // and since new humongous chunks are linked at the head, it will never
2466     // point into the tail of the humongous chunks list.
2467 new_chunk->set_next(chunks_in_use(HumongousIndex)); 2468 set_chunks_in_use(HumongousIndex, new_chunk); 2469 2470 assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency"); 2471 } 2472 2473 // Add to the running sum of capacity 2474 inc_size_metrics(new_chunk->word_size()); 2475 2476 assert(new_chunk->is_empty(), "Not ready for reuse"); 2477 Log(gc, metaspace, freelist) log; 2478 if (log.is_trace()) { 2479 log.trace("SpaceManager::add_chunk: " SIZE_FORMAT ") ", sum_count_in_chunks_in_use()); 2480 ResourceMark rm; 2481 LogStream ls(log.trace()); 2482 new_chunk->print_on(&ls); 2483 chunk_manager()->locked_print_free_chunks(&ls); 2484 } 2485 } 2486 2487 void SpaceManager::retire_current_chunk() { 2488 if (current_chunk() != NULL) { 2489 size_t remaining_words = current_chunk()->free_word_size(); 2490 if (remaining_words >= BlockFreelist::min_dictionary_size()) { 2491 MetaWord* ptr = current_chunk()->allocate(remaining_words); 2492 deallocate(ptr, remaining_words); 2493 inc_used_metrics(remaining_words); 2494 } 2495 } 2496 } 2497 2498 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) { 2499 // Get a chunk from the chunk freelist 2500 Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size); 2501 2502 if (next == NULL) { 2503 next = vs_list()->get_new_chunk(chunk_word_size, 2504 medium_chunk_bunch()); 2505 } 2506 2507 Log(gc, metaspace, alloc) log; 2508 if (log.is_debug() && next != NULL && 2509 SpaceManager::is_humongous(next->word_size())) { 2510 log.debug(" new humongous chunk word size " PTR_FORMAT, next->word_size()); 2511 } 2512 2513 return next; 2514 } 2515 2516 /* 2517 * The policy is to allocate up to _small_chunk_limit small chunks 2518 * after which only medium chunks are allocated. This is done to 2519 * reduce fragmentation. In some cases, this can result in a lot 2520 * of small chunks being allocated to the point where it's not 2521 * possible to expand. If this happens, there may be no medium chunks 2522 * available and OOME would be thrown. Instead of doing that, 2523 * if the allocation request size fits in a small chunk, an attempt 2524 * will be made to allocate a small chunk. 2525 */ 2526 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) { 2527 size_t raw_word_size = get_allocation_word_size(word_size); 2528 2529 if (raw_word_size + Metachunk::overhead() > small_chunk_size()) { 2530 return NULL; 2531 } 2532 2533 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 2534 MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag); 2535 2536 Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size()); 2537 2538 MetaWord* mem = NULL; 2539 2540 if (chunk != NULL) { 2541 // Add chunk to the in-use chunk list and do an allocation from it. 2542 // Add to this manager's list of chunks in use. 2543 add_chunk(chunk, false); 2544 mem = chunk->allocate(raw_word_size); 2545 2546 inc_used_metrics(raw_word_size); 2547 2548 // Track metaspace memory usage statistic. 2549 track_metaspace_memory_usage(); 2550 } 2551 2552 return mem; 2553 } 2554 2555 MetaWord* SpaceManager::allocate(size_t word_size) { 2556 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 2557 size_t raw_word_size = get_allocation_word_size(word_size); 2558 BlockFreelist* fl = block_freelists(); 2559 MetaWord* p = NULL; 2560 // Allocation from the dictionary is expensive in the sense that 2561 // the dictionary has to be searched for a size. Don't allocate 2562 // from the dictionary until it starts to get fat. 
Is this
2563   // a reasonable policy?  Maybe a skinny dictionary is fast enough
2564   // for allocations.  Do some profiling.  JJJ
2565   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
2566     p = fl->get_block(raw_word_size);
2567   }
2568   if (p == NULL) {
2569     p = allocate_work(raw_word_size);
2570   }
2571 
2572   return p;
2573 }
2574 
2575 // Returns the address of space allocated for "word_size".
2576 // This method does not know about blocks (Metablocks)
2577 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2578   assert_lock_strong(_lock);
2579 #ifdef ASSERT
2580   if (Metadebug::test_metadata_failure()) {
2581     return NULL;
2582   }
2583 #endif
2584   // Is there space in the current chunk?
2585   MetaWord* result = NULL;
2586 
2587   // For DumpSharedSpaces, only allocate out of the current chunk which is
2588   // never null because we gave it the size we wanted.  Caller reports out
2589   // of memory if this returns null.
2590   if (DumpSharedSpaces) {
2591     assert(current_chunk() != NULL, "should never happen");
2592     inc_used_metrics(word_size);
2593     return current_chunk()->allocate(word_size); // caller handles null result
2594   }
2595 
2596   if (current_chunk() != NULL) {
2597     result = current_chunk()->allocate(word_size);
2598   }
2599 
2600   if (result == NULL) {
2601     result = grow_and_allocate(word_size);
2602   }
2603 
2604   if (result != NULL) {
2605     inc_used_metrics(word_size);
2606     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2607            "Head of the list is being allocated");
2608   }
2609 
2610   return result;
2611 }
2612 
2613 void SpaceManager::verify() {
2614   // If there are blocks in the dictionary, then
2615   // verification of chunks does not work since
2616   // being in the dictionary alters a chunk.
2617   if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
2618     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2619       Metachunk* curr = chunks_in_use(i);
2620       while (curr != NULL) {
2621         curr->verify();
2622         verify_chunk_size(curr);
2623         curr = curr->next();
2624       }
2625     }
2626   }
2627 }
2628 
2629 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2630   assert(is_humongous(chunk->word_size()) ||
2631          chunk->word_size() == medium_chunk_size() ||
2632          chunk->word_size() == small_chunk_size() ||
2633          chunk->word_size() == specialized_chunk_size(),
2634          "Chunk size is wrong");
2635   return;
2636 }
2637 
2638 #ifdef ASSERT
2639 void SpaceManager::verify_allocated_blocks_words() {
2640   // Verification is only guaranteed at a safepoint.
2641   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2642          "Verification can fail if the application is running");
2643   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2644          "allocation total is not consistent " SIZE_FORMAT
2645          " vs " SIZE_FORMAT,
2646          allocated_blocks_words(), sum_used_in_chunks_in_use());
2647 }
2648 
2649 #endif
2650 
2651 void SpaceManager::dump(outputStream* const out) const {
2652   size_t curr_total = 0;
2653   size_t waste = 0;
2654   uint i = 0;
2655   size_t used = 0;
2656   size_t capacity = 0;
2657 
2658   // Add up statistics for all chunks in this SpaceManager.
2659   for (ChunkIndex index = ZeroIndex;
2660        index < NumberOfInUseLists;
2661        index = next_chunk_index(index)) {
2662     for (Metachunk* curr = chunks_in_use(index);
2663          curr != NULL;
2664          curr = curr->next()) {
2665       out->print("%d) ", i++);
2666       curr->print_on(out);
2667       curr_total += curr->word_size();
2668       used += curr->used_word_size();
2669       capacity += curr->word_size();
2670       waste += curr->free_word_size() + curr->overhead();
2671     }
2672   }
2673 
2674   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2675     if (block_freelists() != NULL) block_freelists()->print_on(out);
2676   }
2677 
2678   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2679   // Free space isn't wasted.
2680   waste -= free;
2681 
2682   out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT
2683                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2684                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2685 }
2686 
2687 // MetaspaceAux
2688 
2689 
2690 size_t MetaspaceAux::_capacity_words[] = {0, 0};
2691 size_t MetaspaceAux::_used_words[] = {0, 0};
2692 
2693 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2694   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2695   return list == NULL ? 0 : list->free_bytes();
2696 }
2697 
2698 size_t MetaspaceAux::free_bytes() {
2699   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2700 }
2701 
2702 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2703   assert_lock_strong(SpaceManager::expand_lock());
2704   assert(words <= capacity_words(mdtype),
2705          "About to decrement below 0: words " SIZE_FORMAT
2706          " is greater than _capacity_words[%u] " SIZE_FORMAT,
2707          words, mdtype, capacity_words(mdtype));
2708   _capacity_words[mdtype] -= words;
2709 }
2710 
2711 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2712   assert_lock_strong(SpaceManager::expand_lock());
2713   // Needs to be atomic
2714   _capacity_words[mdtype] += words;
2715 }
2716 
2717 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2718   assert(words <= used_words(mdtype),
2719          "About to decrement below 0: words " SIZE_FORMAT
2720          " is greater than _used_words[%u] " SIZE_FORMAT,
2721          words, mdtype, used_words(mdtype));
2722   // For CMS, deallocation of the Metaspaces occurs during the
2723   // sweep, which is a concurrent phase.  Protection by the expand_lock()
2724   // is not enough since allocation is on a per Metaspace basis
2725   // and protected by the Metaspace lock.
2726   jlong minus_words = (jlong) - (jlong) words;
2727   Atomic::add_ptr(minus_words, &_used_words[mdtype]);
2728 }
2729 
2730 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2731   // _used_words tracks allocations for
2732   // each piece of metadata.  Those allocations are
2733   // generally done concurrently by different application
2734   // threads so must be done atomically.
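  // (No lock is taken here; Atomic::add_ptr supplies the required
  // atomicity, mirroring the lock-free decrement in dec_used() above.)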
2735 Atomic::add_ptr(words, &_used_words[mdtype]); 2736 } 2737 2738 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) { 2739 size_t used = 0; 2740 ClassLoaderDataGraphMetaspaceIterator iter; 2741 while (iter.repeat()) { 2742 Metaspace* msp = iter.get_next(); 2743 // Sum allocated_blocks_words for each metaspace 2744 if (msp != NULL) { 2745 used += msp->used_words_slow(mdtype); 2746 } 2747 } 2748 return used * BytesPerWord; 2749 } 2750 2751 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) { 2752 size_t free = 0; 2753 ClassLoaderDataGraphMetaspaceIterator iter; 2754 while (iter.repeat()) { 2755 Metaspace* msp = iter.get_next(); 2756 if (msp != NULL) { 2757 free += msp->free_words_slow(mdtype); 2758 } 2759 } 2760 return free * BytesPerWord; 2761 } 2762 2763 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) { 2764 if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) { 2765 return 0; 2766 } 2767 // Don't count the space in the freelists. That space will be 2768 // added to the capacity calculation as needed. 2769 size_t capacity = 0; 2770 ClassLoaderDataGraphMetaspaceIterator iter; 2771 while (iter.repeat()) { 2772 Metaspace* msp = iter.get_next(); 2773 if (msp != NULL) { 2774 capacity += msp->capacity_words_slow(mdtype); 2775 } 2776 } 2777 return capacity * BytesPerWord; 2778 } 2779 2780 size_t MetaspaceAux::capacity_bytes_slow() { 2781 #ifdef PRODUCT 2782 // Use capacity_bytes() in PRODUCT instead of this function. 2783 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT"); 2784 #endif 2785 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType); 2786 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType); 2787 assert(capacity_bytes() == class_capacity + non_class_capacity, 2788 "bad accounting: capacity_bytes() " SIZE_FORMAT 2789 " class_capacity + non_class_capacity " SIZE_FORMAT 2790 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT, 2791 capacity_bytes(), class_capacity + non_class_capacity, 2792 class_capacity, non_class_capacity); 2793 2794 return class_capacity + non_class_capacity; 2795 } 2796 2797 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) { 2798 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2799 return list == NULL ? 0 : list->reserved_bytes(); 2800 } 2801 2802 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) { 2803 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2804 return list == NULL ? 
0 : list->committed_bytes(); 2805 } 2806 2807 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); } 2808 2809 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) { 2810 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype); 2811 if (chunk_manager == NULL) { 2812 return 0; 2813 } 2814 chunk_manager->slow_verify(); 2815 return chunk_manager->free_chunks_total_words(); 2816 } 2817 2818 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) { 2819 return free_chunks_total_words(mdtype) * BytesPerWord; 2820 } 2821 2822 size_t MetaspaceAux::free_chunks_total_words() { 2823 return free_chunks_total_words(Metaspace::ClassType) + 2824 free_chunks_total_words(Metaspace::NonClassType); 2825 } 2826 2827 size_t MetaspaceAux::free_chunks_total_bytes() { 2828 return free_chunks_total_words() * BytesPerWord; 2829 } 2830 2831 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) { 2832 return Metaspace::get_chunk_manager(mdtype) != NULL; 2833 } 2834 2835 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) { 2836 if (!has_chunk_free_list(mdtype)) { 2837 return MetaspaceChunkFreeListSummary(); 2838 } 2839 2840 const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype); 2841 return cm->chunk_free_list_summary(); 2842 } 2843 2844 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) { 2845 log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)", 2846 prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K); 2847 } 2848 2849 void MetaspaceAux::print_on(outputStream* out) { 2850 Metaspace::MetadataType nct = Metaspace::NonClassType; 2851 2852 out->print_cr(" Metaspace " 2853 "used " SIZE_FORMAT "K, " 2854 "capacity " SIZE_FORMAT "K, " 2855 "committed " SIZE_FORMAT "K, " 2856 "reserved " SIZE_FORMAT "K", 2857 used_bytes()/K, 2858 capacity_bytes()/K, 2859 committed_bytes()/K, 2860 reserved_bytes()/K); 2861 2862 if (Metaspace::using_class_space()) { 2863 Metaspace::MetadataType ct = Metaspace::ClassType; 2864 out->print_cr(" class space " 2865 "used " SIZE_FORMAT "K, " 2866 "capacity " SIZE_FORMAT "K, " 2867 "committed " SIZE_FORMAT "K, " 2868 "reserved " SIZE_FORMAT "K", 2869 used_bytes(ct)/K, 2870 capacity_bytes(ct)/K, 2871 committed_bytes(ct)/K, 2872 reserved_bytes(ct)/K); 2873 } 2874 } 2875 2876 // Print information for class space and data space separately. 2877 // This is almost the same as above. 
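// Unlike the variant above, this one relies on the *_slow() methods, which
// walk the ClassLoaderDataGraph, so it is intended for diagnostics rather
// than for frequently executed paths.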
2878 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) { 2879 size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype); 2880 size_t capacity_bytes = capacity_bytes_slow(mdtype); 2881 size_t used_bytes = used_bytes_slow(mdtype); 2882 size_t free_bytes = free_bytes_slow(mdtype); 2883 size_t used_and_free = used_bytes + free_bytes + 2884 free_chunks_capacity_bytes; 2885 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT 2886 "K + unused in chunks " SIZE_FORMAT "K + " 2887 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT 2888 "K capacity in allocated chunks " SIZE_FORMAT "K", 2889 used_bytes / K, 2890 free_bytes / K, 2891 free_chunks_capacity_bytes / K, 2892 used_and_free / K, 2893 capacity_bytes / K); 2894 // Accounting can only be correct if we got the values during a safepoint 2895 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong"); 2896 } 2897 2898 // Print total fragmentation for class metaspaces 2899 void MetaspaceAux::print_class_waste(outputStream* out) { 2900 assert(Metaspace::using_class_space(), "class metaspace not used"); 2901 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0; 2902 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0; 2903 ClassLoaderDataGraphMetaspaceIterator iter; 2904 while (iter.repeat()) { 2905 Metaspace* msp = iter.get_next(); 2906 if (msp != NULL) { 2907 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2908 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2909 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2910 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex); 2911 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2912 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex); 2913 cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2914 } 2915 } 2916 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2917 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2918 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2919 "large count " SIZE_FORMAT, 2920 cls_specialized_count, cls_specialized_waste, 2921 cls_small_count, cls_small_waste, 2922 cls_medium_count, cls_medium_waste, cls_humongous_count); 2923 } 2924 2925 // Print total fragmentation for data and class metaspaces separately 2926 void MetaspaceAux::print_waste(outputStream* out) { 2927 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0; 2928 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0; 2929 2930 ClassLoaderDataGraphMetaspaceIterator iter; 2931 while (iter.repeat()) { 2932 Metaspace* msp = iter.get_next(); 2933 if (msp != NULL) { 2934 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2935 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2936 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2937 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex); 2938 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2939 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex); 2940 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2941 } 2942 } 2943 out->print_cr("Total fragmentation waste (words) doesn't count 
free space"); 2944 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2945 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2946 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2947 "large count " SIZE_FORMAT, 2948 specialized_count, specialized_waste, small_count, 2949 small_waste, medium_count, medium_waste, humongous_count); 2950 if (Metaspace::using_class_space()) { 2951 print_class_waste(out); 2952 } 2953 } 2954 2955 // Dump global metaspace things from the end of ClassLoaderDataGraph 2956 void MetaspaceAux::dump(outputStream* out) { 2957 out->print_cr("All Metaspace:"); 2958 out->print("data space: "); print_on(out, Metaspace::NonClassType); 2959 out->print("class space: "); print_on(out, Metaspace::ClassType); 2960 print_waste(out); 2961 } 2962 2963 void MetaspaceAux::verify_free_chunks() { 2964 Metaspace::chunk_manager_metadata()->verify(); 2965 if (Metaspace::using_class_space()) { 2966 Metaspace::chunk_manager_class()->verify(); 2967 } 2968 } 2969 2970 void MetaspaceAux::verify_capacity() { 2971 #ifdef ASSERT 2972 size_t running_sum_capacity_bytes = capacity_bytes(); 2973 // For purposes of the running sum of capacity, verify against capacity 2974 size_t capacity_in_use_bytes = capacity_bytes_slow(); 2975 assert(running_sum_capacity_bytes == capacity_in_use_bytes, 2976 "capacity_words() * BytesPerWord " SIZE_FORMAT 2977 " capacity_bytes_slow()" SIZE_FORMAT, 2978 running_sum_capacity_bytes, capacity_in_use_bytes); 2979 for (Metaspace::MetadataType i = Metaspace::ClassType; 2980 i < Metaspace:: MetadataTypeCount; 2981 i = (Metaspace::MetadataType)(i + 1)) { 2982 size_t capacity_in_use_bytes = capacity_bytes_slow(i); 2983 assert(capacity_bytes(i) == capacity_in_use_bytes, 2984 "capacity_bytes(%u) " SIZE_FORMAT 2985 " capacity_bytes_slow(%u)" SIZE_FORMAT, 2986 i, capacity_bytes(i), i, capacity_in_use_bytes); 2987 } 2988 #endif 2989 } 2990 2991 void MetaspaceAux::verify_used() { 2992 #ifdef ASSERT 2993 size_t running_sum_used_bytes = used_bytes(); 2994 // For purposes of the running sum of used, verify against used 2995 size_t used_in_use_bytes = used_bytes_slow(); 2996 assert(used_bytes() == used_in_use_bytes, 2997 "used_bytes() " SIZE_FORMAT 2998 " used_bytes_slow()" SIZE_FORMAT, 2999 used_bytes(), used_in_use_bytes); 3000 for (Metaspace::MetadataType i = Metaspace::ClassType; 3001 i < Metaspace:: MetadataTypeCount; 3002 i = (Metaspace::MetadataType)(i + 1)) { 3003 size_t used_in_use_bytes = used_bytes_slow(i); 3004 assert(used_bytes(i) == used_in_use_bytes, 3005 "used_bytes(%u) " SIZE_FORMAT 3006 " used_bytes_slow(%u)" SIZE_FORMAT, 3007 i, used_bytes(i), i, used_in_use_bytes); 3008 } 3009 #endif 3010 } 3011 3012 void MetaspaceAux::verify_metrics() { 3013 verify_capacity(); 3014 verify_used(); 3015 } 3016 3017 3018 // Metaspace methods 3019 3020 size_t Metaspace::_first_chunk_word_size = 0; 3021 size_t Metaspace::_first_class_chunk_word_size = 0; 3022 3023 size_t Metaspace::_commit_alignment = 0; 3024 size_t Metaspace::_reserve_alignment = 0; 3025 3026 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) { 3027 initialize(lock, type); 3028 } 3029 3030 Metaspace::~Metaspace() { 3031 delete _vsm; 3032 if (using_class_space()) { 3033 delete _class_vsm; 3034 } 3035 } 3036 3037 VirtualSpaceList* Metaspace::_space_list = NULL; 3038 VirtualSpaceList* Metaspace::_class_space_list = NULL; 3039 3040 ChunkManager* Metaspace::_chunk_manager_metadata = NULL; 3041 ChunkManager* Metaspace::_chunk_manager_class = NULL; 3042 3043 #define VIRTUALSPACEMULTIPLIER 2 3044 3045 #ifdef _LP64 3046 
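// The 4G boundary: the largest range that a 32-bit narrow klass pointer can
// address without a scaling shift (shift == 0).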
static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1); 3047 3048 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) { 3049 // Figure out the narrow_klass_base and the narrow_klass_shift. The 3050 // narrow_klass_base is the lower of the metaspace base and the cds base 3051 // (if cds is enabled). The narrow_klass_shift depends on the distance 3052 // between the lower base and higher address. 3053 address lower_base; 3054 address higher_address; 3055 #if INCLUDE_CDS 3056 if (UseSharedSpaces) { 3057 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 3058 (address)(metaspace_base + compressed_class_space_size())); 3059 lower_base = MIN2(metaspace_base, cds_base); 3060 } else 3061 #endif 3062 { 3063 higher_address = metaspace_base + compressed_class_space_size(); 3064 lower_base = metaspace_base; 3065 3066 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes; 3067 // If compressed class space fits in lower 32G, we don't need a base. 3068 if (higher_address <= (address)klass_encoding_max) { 3069 lower_base = 0; // Effectively lower base is zero. 3070 } 3071 } 3072 3073 Universe::set_narrow_klass_base(lower_base); 3074 3075 if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) { 3076 Universe::set_narrow_klass_shift(0); 3077 } else { 3078 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces"); 3079 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes); 3080 } 3081 AOTLoader::set_narrow_klass_shift(); 3082 } 3083 3084 #if INCLUDE_CDS 3085 // Return TRUE if the specified metaspace_base and cds_base are close enough 3086 // to work with compressed klass pointers. 3087 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) { 3088 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS"); 3089 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 3090 address lower_base = MIN2((address)metaspace_base, cds_base); 3091 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 3092 (address)(metaspace_base + compressed_class_space_size())); 3093 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax); 3094 } 3095 #endif 3096 3097 // Try to allocate the metaspace at the requested addr. 3098 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) { 3099 assert(using_class_space(), "called improperly"); 3100 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 3101 assert(compressed_class_space_size() < KlassEncodingMetaspaceMax, 3102 "Metaspace size is too big"); 3103 assert_is_aligned(requested_addr, _reserve_alignment); 3104 assert_is_aligned(cds_base, _reserve_alignment); 3105 assert_is_aligned(compressed_class_space_size(), _reserve_alignment); 3106 3107 // Don't use large pages for the class space. 3108 bool large_pages = false; 3109 3110 #if !(defined(AARCH64) || defined(AIX)) 3111 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(), 3112 _reserve_alignment, 3113 large_pages, 3114 requested_addr); 3115 #else // AARCH64 3116 ReservedSpace metaspace_rs; 3117 3118 // Our compressed klass pointers may fit nicely into the lower 32 3119 // bits. 3120 if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) { 3121 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3122 _reserve_alignment, 3123 large_pages, 3124 requested_addr); 3125 } 3126 3127 if (! 
metaspace_rs.is_reserved()) { 3128 // Aarch64: Try to align metaspace so that we can decode a compressed 3129 // klass with a single MOVK instruction. We can do this iff the 3130 // compressed class base is a multiple of 4G. 3131 // Aix: Search for a place where we can find memory. If we need to load 3132 // the base, 4G alignment is helpful, too. 3133 size_t increment = AARCH64_ONLY(4*)G; 3134 for (char *a = align_up(requested_addr, increment); 3135 a < (char*)(1024*G); 3136 a += increment) { 3137 if (a == (char *)(32*G)) { 3138 // Go faster from here on. Zero-based is no longer possible. 3139 increment = 4*G; 3140 } 3141 3142 #if INCLUDE_CDS 3143 if (UseSharedSpaces 3144 && ! can_use_cds_with_metaspace_addr(a, cds_base)) { 3145 // We failed to find an aligned base that will reach. Fall 3146 // back to using our requested addr. 3147 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3148 _reserve_alignment, 3149 large_pages, 3150 requested_addr); 3151 break; 3152 } 3153 #endif 3154 3155 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3156 _reserve_alignment, 3157 large_pages, 3158 a); 3159 if (metaspace_rs.is_reserved()) 3160 break; 3161 } 3162 } 3163 3164 #endif // AARCH64 3165 3166 if (!metaspace_rs.is_reserved()) { 3167 #if INCLUDE_CDS 3168 if (UseSharedSpaces) { 3169 size_t increment = align_up(1*G, _reserve_alignment); 3170 3171 // Keep trying to allocate the metaspace, increasing the requested_addr 3172 // by 1GB each time, until we reach an address that will no longer allow 3173 // use of CDS with compressed klass pointers. 3174 char *addr = requested_addr; 3175 while (!metaspace_rs.is_reserved() && (addr + increment > addr) && 3176 can_use_cds_with_metaspace_addr(addr + increment, cds_base)) { 3177 addr = addr + increment; 3178 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3179 _reserve_alignment, large_pages, addr); 3180 } 3181 } 3182 #endif 3183 // If no successful allocation then try to allocate the space anywhere. If 3184 // that fails then OOM doom. At this point we cannot try allocating the 3185 // metaspace as if UseCompressedClassPointers is off because too much 3186 // initialization has happened that depends on UseCompressedClassPointers. 3187 // So, UseCompressedClassPointers cannot be turned off at this point. 3188 if (!metaspace_rs.is_reserved()) { 3189 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3190 _reserve_alignment, large_pages); 3191 if (!metaspace_rs.is_reserved()) { 3192 vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes", 3193 compressed_class_space_size())); 3194 } 3195 } 3196 } 3197 3198 // If we got here then the metaspace got allocated. 3199 MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass); 3200 3201 #if INCLUDE_CDS 3202 // Verify that we can use shared spaces. Otherwise, turn off CDS. 3203 if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) { 3204 FileMapInfo::stop_sharing_and_unmap( 3205 "Could not allocate metaspace at a compatible address"); 3206 } 3207 #endif 3208 set_narrow_klass_base_and_shift((address)metaspace_rs.base(), 3209 UseSharedSpaces ? 
(address)cds_base : 0); 3210 3211 initialize_class_space(metaspace_rs); 3212 3213 LogTarget(Trace, gc, metaspace) lt; 3214 if (lt.is_enabled()) { 3215 ResourceMark rm; 3216 LogStream ls(lt); 3217 print_compressed_class_space(&ls, requested_addr); 3218 } 3219 } 3220 3221 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) { 3222 st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d", 3223 p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift()); 3224 if (_class_space_list != NULL) { 3225 address base = (address)_class_space_list->current_virtual_space()->bottom(); 3226 st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT, 3227 compressed_class_space_size(), p2i(base)); 3228 if (requested_addr != 0) { 3229 st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr)); 3230 } 3231 st->cr(); 3232 } 3233 } 3234 3235 // For UseCompressedClassPointers the class space is reserved above the top of 3236 // the Java heap. The argument passed in is at the base of the compressed space. 3237 void Metaspace::initialize_class_space(ReservedSpace rs) { 3238 // The reserved space size may be bigger because of alignment, esp with UseLargePages 3239 assert(rs.size() >= CompressedClassSpaceSize, 3240 SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize); 3241 assert(using_class_space(), "Must be using class space"); 3242 _class_space_list = new VirtualSpaceList(rs); 3243 _chunk_manager_class = new ChunkManager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk); 3244 3245 if (!_class_space_list->initialization_succeeded()) { 3246 vm_exit_during_initialization("Failed to setup compressed class space virtual space list."); 3247 } 3248 } 3249 3250 #endif 3251 3252 void Metaspace::ergo_initialize() { 3253 if (DumpSharedSpaces) { 3254 // Using large pages when dumping the shared archive is currently not implemented. 3255 FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false); 3256 } 3257 3258 size_t page_size = os::vm_page_size(); 3259 if (UseLargePages && UseLargePagesInMetaspace) { 3260 page_size = os::large_page_size(); 3261 } 3262 3263 _commit_alignment = page_size; 3264 _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity()); 3265 3266 // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will 3267 // override if MaxMetaspaceSize was set on the command line or not. 3268 // This information is needed later to conform to the specification of the 3269 // java.lang.management.MemoryUsage API. 3270 // 3271 // Ideally, we would be able to set the default value of MaxMetaspaceSize in 3272 // globals.hpp to the aligned value, but this is not possible, since the 3273 // alignment depends on other flags being parsed. 
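  // Worked example (added note, not from the original comments): with a 4K
  // page size and a 64K allocation granularity (typical on Windows x64),
  // _commit_alignment is 4K and _reserve_alignment is 64K, so the flag values
  // below are rounded down to 4K resp. 64K boundaries.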
  MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);

  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);

  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");

  MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
  MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);

  CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
  set_compressed_class_space_size(CompressedClassSpaceSize);
}

void Metaspace::global_initialize() {
  MetaspaceGC::initialize();

  // Initialize the alignment for shared spaces.
  int max_alignment = os::vm_allocation_granularity();
  size_t cds_total = 0;

  MetaspaceShared::set_max_alignment(max_alignment);

  if (DumpSharedSpaces) {
#if INCLUDE_CDS
    MetaspaceShared::estimate_regions_size();

    SharedReadOnlySize  = align_up(SharedReadOnlySize,  max_alignment);
    SharedReadWriteSize = align_up(SharedReadWriteSize, max_alignment);
    SharedMiscDataSize  = align_up(SharedMiscDataSize,  max_alignment);
    SharedMiscCodeSize  = align_up(SharedMiscCodeSize,  max_alignment);

    // Initialize with the sum of the shared space sizes. The read-only
    // and read-write metaspace chunks will be allocated out of this; the
    // remainder is the misc code and data chunks.
    cds_total = FileMapInfo::shared_spaces_size();
    cds_total = align_up(cds_total, _reserve_alignment);
    _space_list = new VirtualSpaceList(cds_total/wordSize);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

    if (!_space_list->initialization_succeeded()) {
      vm_exit_during_initialization("Unable to dump shared archive.", NULL);
    }

#ifdef _LP64
    if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
      vm_exit_during_initialization("Unable to dump shared archive.",
          err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
                  SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
                  "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(),
                  cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
    }

    // Set the compressed klass pointer base so that decoding of these pointers works
    // properly when creating the shared archive.
    assert(UseCompressedOops && UseCompressedClassPointers,
           "UseCompressedOops and UseCompressedClassPointers must be set");
    Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
    log_develop_trace(gc, metaspace)("Setting narrow_klass_base to Address: " PTR_FORMAT,
                                     p2i(_space_list->current_virtual_space()->bottom()));

    Universe::set_narrow_klass_shift(0);
#endif // _LP64
#endif // INCLUDE_CDS
  } else {
#if INCLUDE_CDS
    if (UseSharedSpaces) {
      // If using shared space, open the file that contains the shared space
      // and map in the memory before initializing the rest of metaspace (so
      // the addresses don't conflict).
      address cds_address = NULL;
      FileMapInfo* mapinfo = new FileMapInfo();

      // Open the shared archive file, read and validate the header.
      // If initialization fails, shared spaces [UseSharedSpaces] are
      // disabled and the file is closed.
      // Also map in the spaces now.
      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
        cds_total = FileMapInfo::shared_spaces_size();
        cds_address = (address)mapinfo->header()->region_addr(0);
#ifdef _LP64
        if (using_class_space()) {
          char* cds_end = (char*)(cds_address + cds_total);
          cds_end = align_up(cds_end, _reserve_alignment);
          // If UseCompressedClassPointers is set then allocate the metaspace area
          // above the heap and above the CDS area (if it exists).
          allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
          // Map the shared string space after compressed pointers
          // because it relies on the compressed class pointer setup to work.
          mapinfo->map_string_regions();
        }
#endif // _LP64
      } else {
        assert(!mapinfo->is_open() && !UseSharedSpaces,
               "archive file not closed or shared spaces not disabled.");
      }
    }
#endif // INCLUDE_CDS

#ifdef _LP64
    if (!UseSharedSpaces && using_class_space()) {
      char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
      allocate_metaspace_compressed_klass_ptrs(base, 0);
    }
#endif // _LP64

    // Initialize these before initializing the VirtualSpaceList
    _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
    _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
    // Make the first class chunk bigger than a medium chunk so it's not put
    // on the medium chunk list. The next chunk will be small and progress
    // from there. This size was determined by running -version.
    _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
                                        (CompressedClassSpaceSize/BytesPerWord)*2);
    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
    // Arbitrarily set the initial virtual space to a multiple
    // of the boot class loader size.
    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
    word_size = align_up(word_size, Metaspace::reserve_alignment_words());

    // Initialize the list of virtual spaces.
    _space_list = new VirtualSpaceList(word_size);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

    if (!_space_list->initialization_succeeded()) {
      vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
    }
  }

  _tracer = new MetaspaceTracer();
}

void Metaspace::post_initialize() {
  MetaspaceGC::post_initialize();
}

void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) {
  Metachunk* chunk = get_initialization_chunk(type, mdtype);
  if (chunk != NULL) {
    // Add to this manager's list of chunks in use and make it the current_chunk().
3417 get_space_manager(mdtype)->add_chunk(chunk, true); 3418 } 3419 } 3420 3421 Metachunk* Metaspace::get_initialization_chunk(MetaspaceType type, MetadataType mdtype) { 3422 size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type); 3423 3424 // Get a chunk from the chunk freelist 3425 Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size); 3426 3427 if (chunk == NULL) { 3428 chunk = get_space_list(mdtype)->get_new_chunk(chunk_word_size, 3429 get_space_manager(mdtype)->medium_chunk_bunch()); 3430 } 3431 3432 // For dumping shared archive, report error if allocation has failed. 3433 if (DumpSharedSpaces && chunk == NULL) { 3434 report_insufficient_metaspace(MetaspaceAux::committed_bytes() + chunk_word_size * BytesPerWord); 3435 } 3436 3437 return chunk; 3438 } 3439 3440 void Metaspace::verify_global_initialization() { 3441 assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized"); 3442 assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized"); 3443 3444 if (using_class_space()) { 3445 assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized"); 3446 assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized"); 3447 } 3448 } 3449 3450 void Metaspace::initialize(Mutex* lock, MetaspaceType type) { 3451 verify_global_initialization(); 3452 3453 // Allocate SpaceManager for metadata objects. 3454 _vsm = new SpaceManager(NonClassType, lock); 3455 3456 if (using_class_space()) { 3457 // Allocate SpaceManager for classes. 3458 _class_vsm = new SpaceManager(ClassType, lock); 3459 } 3460 3461 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 3462 3463 // Allocate chunk for metadata objects 3464 initialize_first_chunk(type, NonClassType); 3465 3466 // Allocate chunk for class metadata objects 3467 if (using_class_space()) { 3468 initialize_first_chunk(type, ClassType); 3469 } 3470 3471 _alloc_record_head = NULL; 3472 _alloc_record_tail = NULL; 3473 } 3474 3475 size_t Metaspace::align_word_size_up(size_t word_size) { 3476 size_t byte_size = word_size * wordSize; 3477 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize; 3478 } 3479 3480 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) { 3481 // DumpSharedSpaces doesn't use class metadata area (yet) 3482 // Also, don't use class_vsm() unless UseCompressedClassPointers is true. 3483 if (is_class_space_allocation(mdtype)) { 3484 return class_vsm()->allocate(word_size); 3485 } else { 3486 return vsm()->allocate(word_size); 3487 } 3488 } 3489 3490 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) { 3491 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord); 3492 assert(delta_bytes > 0, "Must be"); 3493 3494 size_t before = 0; 3495 size_t after = 0; 3496 MetaWord* res; 3497 bool incremented; 3498 3499 // Each thread increments the HWM at most once. Even if the thread fails to increment 3500 // the HWM, an allocation is still attempted. This is because another thread must then 3501 // have incremented the HWM and therefore the allocation might still succeed. 
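  // Illustrative interleaving (added note, not from the original comments):
  // suppose threads T1 and T2 both fail an allocation. T1 wins the
  // inc_capacity_until_GC() race and T2 loses (incremented == false for T2),
  // but the loop below still retries the allocation for T2, which may now
  // succeed under the threshold T1 just raised.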
  do {
    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
    res = allocate(word_size, mdtype);
  } while (!incremented && res == NULL);

  if (incremented) {
    tracer()->report_gc_threshold(before, after,
                                  MetaspaceGCThresholdUpdater::ExpandAndAllocate);
    log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
  }

  return res;
}

// Space allocated in the Metaspace. This may
// span several metadata virtual spaces.
char* Metaspace::bottom() const {
  assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
  return (char*)vsm()->current_chunk()->bottom();
}

size_t Metaspace::used_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
  }
}

size_t Metaspace::free_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_free_in_chunks_in_use();
  }
}

// Space capacity in the Metaspace. It includes
// space in the list of chunks from which allocations
// have been made. It does not include space in the global
// freelist or space available in the dictionary, since
// that is already counted in some chunk.
size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_capacity_in_chunks_in_use();
  }
}

size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
  return used_words_slow(mdtype) * BytesPerWord;
}

size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
  return capacity_words_slow(mdtype) * BytesPerWord;
}

size_t Metaspace::allocated_blocks_bytes() const {
  return vsm()->allocated_blocks_bytes() +
         (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
}

size_t Metaspace::allocated_chunks_bytes() const {
  return vsm()->allocated_chunks_bytes() +
         (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
}

void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  assert(!SafepointSynchronize::is_at_safepoint()
         || Thread::current()->is_VM_thread(), "should be the VM thread");

  if (DumpSharedSpaces && log_is_enabled(Info, cds)) {
    record_deallocation(ptr, vsm()->get_allocation_word_size(word_size));
  }

  MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);

  if (is_class && using_class_space()) {
    class_vsm()->deallocate(ptr, word_size);
  } else {
    vsm()->deallocate(ptr, word_size);
  }
}


MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              bool read_only, MetaspaceObj::Type type, TRAPS) {
  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return NULL;  // caller does a CHECK_NULL too
  }

  assert(loader_data != NULL, "Should never pass around a NULL loader_data. 
" 3596 "ClassLoaderData::the_null_class_loader_data() should have been used."); 3597 3598 // Allocate in metaspaces without taking out a lock, because it deadlocks 3599 // with the SymbolTable_lock. Dumping is single threaded for now. We'll have 3600 // to revisit this for application class data sharing. 3601 if (DumpSharedSpaces) { 3602 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity"); 3603 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace(); 3604 MetaWord* result = space->allocate(word_size, NonClassType); 3605 if (result == NULL) { 3606 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite); 3607 } 3608 if (log_is_enabled(Info, cds)) { 3609 space->record_allocation(result, type, space->vsm()->get_allocation_word_size(word_size)); 3610 } 3611 3612 // Zero initialize. 3613 Copy::fill_to_words((HeapWord*)result, word_size, 0); 3614 3615 return result; 3616 } 3617 3618 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType; 3619 3620 // Try to allocate metadata. 3621 MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); 3622 3623 if (result == NULL) { 3624 tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype); 3625 3626 // Allocation failed. 3627 if (is_init_completed()) { 3628 // Only start a GC if the bootstrapping has completed. 3629 3630 // Try to clean out some memory and retry. 3631 result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation( 3632 loader_data, word_size, mdtype); 3633 } 3634 } 3635 3636 if (result == NULL) { 3637 SpaceManager* sm; 3638 if (is_class_space_allocation(mdtype)) { 3639 sm = loader_data->metaspace_non_null()->class_vsm(); 3640 } else { 3641 sm = loader_data->metaspace_non_null()->vsm(); 3642 } 3643 3644 result = sm->get_small_chunk_and_allocate(word_size); 3645 3646 if (result == NULL) { 3647 report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL); 3648 } 3649 } 3650 3651 // Zero initialize. 3652 Copy::fill_to_words((HeapWord*)result, word_size, 0); 3653 3654 return result; 3655 } 3656 3657 size_t Metaspace::class_chunk_size(size_t word_size) { 3658 assert(using_class_space(), "Has to use class space"); 3659 return class_vsm()->calc_chunk_size(word_size); 3660 } 3661 3662 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) { 3663 tracer()->report_metadata_oom(loader_data, word_size, type, mdtype); 3664 3665 // If result is still null, we are out of memory. 3666 Log(gc, metaspace, freelist) log; 3667 if (log.is_info()) { 3668 log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT, 3669 is_class_space_allocation(mdtype) ? "class" : "data", word_size); 3670 ResourceMark rm; 3671 LogStream ls(log.info()); 3672 if (loader_data->metaspace_or_null() != NULL) { 3673 loader_data->dump(&ls); 3674 } 3675 MetaspaceAux::dump(&ls); 3676 } 3677 3678 bool out_of_compressed_class_space = false; 3679 if (is_class_space_allocation(mdtype)) { 3680 Metaspace* metaspace = loader_data->metaspace_non_null(); 3681 out_of_compressed_class_space = 3682 MetaspaceAux::committed_bytes(Metaspace::ClassType) + 3683 (metaspace->class_chunk_size(word_size) * BytesPerWord) > 3684 CompressedClassSpaceSize; 3685 } 3686 3687 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support 3688 const char* space_string = out_of_compressed_class_space ? 
3689 "Compressed class space" : "Metaspace"; 3690 3691 report_java_out_of_memory(space_string); 3692 3693 if (JvmtiExport::should_post_resource_exhausted()) { 3694 JvmtiExport::post_resource_exhausted( 3695 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, 3696 space_string); 3697 } 3698 3699 if (!is_init_completed()) { 3700 vm_exit_during_initialization("OutOfMemoryError", space_string); 3701 } 3702 3703 if (out_of_compressed_class_space) { 3704 THROW_OOP(Universe::out_of_memory_error_class_metaspace()); 3705 } else { 3706 THROW_OOP(Universe::out_of_memory_error_metaspace()); 3707 } 3708 } 3709 3710 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) { 3711 switch (mdtype) { 3712 case Metaspace::ClassType: return "Class"; 3713 case Metaspace::NonClassType: return "Metadata"; 3714 default: 3715 assert(false, "Got bad mdtype: %d", (int) mdtype); 3716 return NULL; 3717 } 3718 } 3719 3720 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) { 3721 assert(DumpSharedSpaces, "sanity"); 3722 3723 int byte_size = (int)word_size * wordSize; 3724 AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size); 3725 3726 if (_alloc_record_head == NULL) { 3727 _alloc_record_head = _alloc_record_tail = rec; 3728 } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) { 3729 _alloc_record_tail->_next = rec; 3730 _alloc_record_tail = rec; 3731 } else { 3732 // slow linear search, but this doesn't happen that often, and only when dumping 3733 for (AllocRecord *old = _alloc_record_head; old; old = old->_next) { 3734 if (old->_ptr == ptr) { 3735 assert(old->_type == MetaspaceObj::DeallocatedType, "sanity"); 3736 int remain_bytes = old->_byte_size - byte_size; 3737 assert(remain_bytes >= 0, "sanity"); 3738 old->_type = type; 3739 3740 if (remain_bytes == 0) { 3741 delete(rec); 3742 } else { 3743 address remain_ptr = address(ptr) + byte_size; 3744 rec->_ptr = remain_ptr; 3745 rec->_byte_size = remain_bytes; 3746 rec->_type = MetaspaceObj::DeallocatedType; 3747 rec->_next = old->_next; 3748 old->_byte_size = byte_size; 3749 old->_next = rec; 3750 } 3751 return; 3752 } 3753 } 3754 assert(0, "reallocating a freed pointer that was not recorded"); 3755 } 3756 } 3757 3758 void Metaspace::record_deallocation(void* ptr, size_t word_size) { 3759 assert(DumpSharedSpaces, "sanity"); 3760 3761 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) { 3762 if (rec->_ptr == ptr) { 3763 assert(rec->_byte_size == (int)word_size * wordSize, "sanity"); 3764 rec->_type = MetaspaceObj::DeallocatedType; 3765 return; 3766 } 3767 } 3768 3769 assert(0, "deallocating a pointer that was not recorded"); 3770 } 3771 3772 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) { 3773 assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces"); 3774 3775 address last_addr = (address)bottom(); 3776 3777 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) { 3778 address ptr = rec->_ptr; 3779 if (last_addr < ptr) { 3780 closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr); 3781 } 3782 closure->doit(ptr, rec->_type, rec->_byte_size); 3783 last_addr = ptr + rec->_byte_size; 3784 } 3785 3786 address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType); 3787 if (last_addr < top) { 3788 closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr); 3789 } 3790 } 3791 3792 void Metaspace::purge(MetadataType mdtype) { 3793 get_space_list(mdtype)->purge(get_chunk_manager(mdtype)); 3794 } 3795 3796 
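// Convenience wrapper: purge both metadata types under the expand lock.
// Presumably (judging from VirtualSpaceList::purge taking the matching
// ChunkManager in the call just above) this discards completely empty
// virtual space nodes and removes their chunks from the free lists.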
void Metaspace::purge() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  purge(NonClassType);
  if (using_class_space()) {
    purge(ClassType);
  }
}

void Metaspace::print_on(outputStream* out) const {
  // Print both class virtual space counts and metaspace.
  if (Verbose) {
    vsm()->print_on(out);
    if (using_class_space()) {
      class_vsm()->print_on(out);
    }
  }
}

bool Metaspace::contains(const void* ptr) {
  if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
    return true;
  }
  return contains_non_shared(ptr);
}

bool Metaspace::contains_non_shared(const void* ptr) {
  if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
    return true;
  }

  return get_space_list(NonClassType)->contains(ptr);
}

void Metaspace::verify() {
  vsm()->verify();
  if (using_class_space()) {
    class_vsm()->verify();
  }
}

void Metaspace::dump(outputStream* const out) const {
  out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
  vsm()->dump(out);
  if (using_class_space()) {
    out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
    class_vsm()->dump(out);
  }
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

class TestMetaspaceAuxTest : AllStatic {
 public:
  static void test_reserved() {
    size_t reserved = MetaspaceAux::reserved_bytes();

    assert(reserved > 0, "assert");

    size_t committed = MetaspaceAux::committed_bytes();
    assert(committed <= reserved, "assert");

    size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
    assert(reserved_metadata > 0, "assert");
    assert(reserved_metadata <= reserved, "assert");

    if (UseCompressedClassPointers) {
      size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
      assert(reserved_class > 0, "assert");
      assert(reserved_class < reserved, "assert");
    }
  }

  static void test_committed() {
    size_t committed = MetaspaceAux::committed_bytes();

    assert(committed > 0, "assert");

    size_t reserved = MetaspaceAux::reserved_bytes();
    assert(committed <= reserved, "assert");

    size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
    assert(committed_metadata > 0, "assert");
    assert(committed_metadata <= committed, "assert");

    if (UseCompressedClassPointers) {
      size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType);
      assert(committed_class > 0, "assert");
      assert(committed_class < committed, "assert");
    }
  }

  static void test_virtual_space_list_large_chunk() {
    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    // Use a size larger than VirtualSpaceSize (256k), and add one page so that
    // the size is _not_ vm_allocation_granularity aligned on Windows.
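    // (Added arithmetic check: with 4K pages and 8-byte words, the initializer
    //  plus the following += yield 2*256K + 2*512 words, i.e. 4 MB + 8 KB in
    //  bytes, which is indeed not aligned to the 64K Windows granularity.)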
    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
    large_size += (os::vm_page_size()/BytesPerWord);
    vs_list->get_new_chunk(large_size, 0);
  }

  static void test() {
    test_reserved();
    test_committed();
    test_virtual_space_list_large_chunk();
  }
};

void TestMetaspaceAux_test() {
  TestMetaspaceAuxTest::test();
}

class TestVirtualSpaceNodeTest {
  static void chunk_up(size_t words_left, size_t& num_medium_chunks,
                       size_t& num_small_chunks,
                       size_t& num_specialized_chunks) {
    num_medium_chunks = words_left / MediumChunk;
    words_left = words_left % MediumChunk;

    num_small_chunks = words_left / SmallChunk;
    words_left = words_left % SmallChunk;
    // how many specialized chunks can we get?
    num_specialized_chunks = words_left / SpecializedChunk;
    assert(words_left % SpecializedChunk == 0, "should be nothing left");
  }

 public:
  static void test() {
    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    const size_t vsn_test_size_words = MediumChunk * 4;
    const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;

    // The chunk sizes must be multiples of each other, or this will fail
    STATIC_ASSERT(MediumChunk % SmallChunk == 0);
    STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);

    { // No committed memory in VSN
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.retire(&cm);
      assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
    }

    { // All of VSN is committed, half is used by chunks
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
      vsn.get_chunk_vs(MediumChunk);
      vsn.get_chunk_vs(MediumChunk);
      vsn.retire(&cm);
      assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
      assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
    }

    const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
    // This doesn't work for systems with vm_page_size >= 16K.
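    // (Added check: with 16K pages and 8-byte words, page_chunks is
    //  4 * 16384 / 8 == 8192 == MediumChunk, so the guard below skips the test.)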
    if (page_chunks < MediumChunk) {
      // 4 pages of the VSN are committed, some are used by chunks
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);

      vsn.initialize();
      vsn.expand_by(page_chunks, page_chunks);
      vsn.get_chunk_vs(SmallChunk);
      vsn.get_chunk_vs(SpecializedChunk);
      vsn.retire(&cm);

      // committed - used = words left to retire
      const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;

      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);

      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
    }

    { // Half of VSN is committed, a humongous chunk is used
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
      vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
      vsn.retire(&cm);

      const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);

      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
    }

  }

#define assert_is_available_positive(word_size) \
  assert(vsn.is_available(word_size), \
         #word_size ": " PTR_FORMAT " bytes were not available in " \
         "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
         (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));

#define assert_is_available_negative(word_size) \
  assert(!vsn.is_available(word_size), \
         #word_size ": " PTR_FORMAT " bytes should not be available in " \
         "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
         (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));

  static void test_is_available_positive() {
    // Reserve some memory.
    VirtualSpaceNode vsn(os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Check that is_available accepts the committed size.
    assert_is_available_positive(commit_word_size);

    // Check that is_available accepts half the committed size.
    size_t expand_word_size = commit_word_size / 2;
    assert_is_available_positive(expand_word_size);
  }

  static void test_is_available_negative() {
    // Reserve some memory.
    VirtualSpaceNode vsn(os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Check that is_available doesn't accept too large a size.
    size_t two_times_commit_word_size = commit_word_size * 2;
    assert_is_available_negative(two_times_commit_word_size);
  }

  static void test_is_available_overflow() {
    // Reserve some memory.
    VirtualSpaceNode vsn(os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Calculate a size that will overflow the virtual space size.
    void* virtual_space_max = (void*)(uintptr_t)-1;
    size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
    size_t overflow_size = bottom_to_max + BytesPerWord;
    size_t overflow_word_size = overflow_size / BytesPerWord;

    // Check that is_available can handle the overflow.
    assert_is_available_negative(overflow_word_size);
  }

  static void test_is_available() {
    TestVirtualSpaceNodeTest::test_is_available_positive();
    TestVirtualSpaceNodeTest::test_is_available_negative();
    TestVirtualSpaceNodeTest::test_is_available_overflow();
  }
};

void TestVirtualSpaceNode_test() {
  TestVirtualSpaceNodeTest::test();
  TestVirtualSpaceNodeTest::test_is_available();
}

// The following test is placed here instead of a gtest / unittest file
// because the ChunkManager class is only available in this file.
void ChunkManager_test_list_index() {
  ChunkManager manager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);

  // Test a previous bug where a query for a humongous class metachunk
  // incorrectly matched the non-class medium metachunk size.
  {
    assert(MediumChunk > ClassMediumChunk, "Precondition for test");

    ChunkIndex index = manager.list_index(MediumChunk);

    assert(index == HumongousIndex,
           "Requested size is larger than ClassMediumChunk,"
           " so should return HumongousIndex. Got index: %d", (int)index);
  }

  // Check the specified sizes as well.
  {
    ChunkIndex index = manager.list_index(ClassSpecializedChunk);
    assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
  }
  {
    ChunkIndex index = manager.list_index(ClassSmallChunk);
    assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
  }
  {
    ChunkIndex index = manager.list_index(ClassMediumChunk);
    assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
  }
  {
    ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
    assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index);
  }
}

#endif // !PRODUCT

#ifdef ASSERT

// ChunkManagerReturnTest stresses taking/returning chunks from the ChunkManager. It takes and
// returns chunks from/to the ChunkManager while keeping track of the expected ChunkManager
// content.
class ChunkManagerReturnTestImpl {

  VirtualSpaceNode _vsn;
  ChunkManager _cm;

  // The expected content of the chunk manager.
  unsigned _chunks_in_chunkmanager;
  size_t _words_in_chunkmanager;

  // A fixed size pool of chunks. Chunks may be in the chunk manager (free) or not (in use).
  static const int num_chunks = 256;
  Metachunk* _pool[num_chunks];

  // Helper, return a random position into the chunk pool.
  static int get_random_position() {
    return os::random() % num_chunks;
  }

  // Asserts that ChunkManager counters match expectations.
  void assert_counters() {
    assert(_vsn.container_count() == num_chunks - _chunks_in_chunkmanager, "vsn counter mismatch.");
    assert(_cm.free_chunks_count() == _chunks_in_chunkmanager, "cm counter mismatch.");
    assert(_cm.free_chunks_total_words() == _words_in_chunkmanager, "cm counter mismatch.");
  }

  // Get a random chunk size. Equal chance to get spec/med/small chunk size or
  // a humongous chunk size. The latter is itself random in the range of [med+spec..4*med).
  size_t get_random_chunk_size() {
    const size_t sizes [] = { SpecializedChunk, SmallChunk, MediumChunk };
    const int rand = os::random() % 4;
    if (rand < 3) {
      return sizes[rand];
    } else {
      // Note: this affects the max. size of space (see _vsn initialization in ctor).
      return align_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk);
    }
  }

  // Starting at pool index <start>+1, find the next chunk tagged as either free or in use, depending
  // on <is_free>. Search wraps. Returns its position, or -1 if no matching chunk was found.
  int next_matching_chunk(int start, bool is_free) const {
    assert(start >= 0 && start < num_chunks, "invalid parameter");
    int pos = start;
    do {
      if (++pos == num_chunks) {
        pos = 0;
      }
      if (_pool[pos]->is_tagged_free() == is_free) {
        return pos;
      }
    } while (pos != start);
    return -1;
  }

  // A structure to keep information about a chunk list, including which chunks
  // are part of it. This is needed because the original list is destroyed when
  // we return it to the ChunkManager.
  struct AChunkList {
    Metachunk* head;
    Metachunk* all[num_chunks];
    size_t size;
    int num;
    ChunkIndex index;
  };

  // Assemble, from the in-use chunks (not in the chunk manager) in the pool,
  // a random chunk list of max. length <list_size> of chunks with the same
  // ChunkIndex (chunk size).
  // Returns false if the list cannot be assembled. The list is returned in the
  // <out> structure. The returned list may be smaller than <list_size>.
  bool assemble_random_chunklist(AChunkList* out, int list_size) {
    // Choose a random in-use chunk from the pool...
    const int headpos = next_matching_chunk(get_random_position(), false);
    if (headpos == -1) {
      return false;
    }
    Metachunk* const head = _pool[headpos];
    out->all[0] = head;
    assert(head->is_tagged_free() == false, "Chunk state mismatch");
    // ...then go from there, chaining up to list_size - 1 other in-use chunks
    // of the same index.
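    // (All list members must share one ChunkIndex because return_chunk_list()
    //  files the whole list under a single index; hence the list_index()
    //  filter in the loop below.)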
    const ChunkIndex index = _cm.list_index(head->word_size());
    int num_added = 1;
    size_t size_added = head->word_size();
    int pos = headpos;
    Metachunk* tail = head;
    do {
      pos = next_matching_chunk(pos, false);
      if (pos != headpos) {
        Metachunk* c = _pool[pos];
        assert(c->is_tagged_free() == false, "Chunk state mismatch");
        if (index == _cm.list_index(c->word_size())) {
          tail->set_next(c);
          c->set_prev(tail);
          tail = c;
          out->all[num_added] = c;
          num_added ++;
          size_added += c->word_size();
        }
      }
    } while (num_added < list_size && pos != headpos);
    out->head = head;
    out->index = index;
    out->size = size_added;
    out->num = num_added;
    return true;
  }

  // Take a single random chunk from the ChunkManager.
  bool take_single_random_chunk_from_chunkmanager() {
    assert_counters();
    _cm.locked_verify();
    int pos = next_matching_chunk(get_random_position(), true);
    if (pos == -1) {
      return false;
    }
    Metachunk* c = _pool[pos];
    assert(c->is_tagged_free(), "Chunk state mismatch");
    // Note: instead of using ChunkManager::remove_chunk on this one chunk, we call
    // ChunkManager::free_chunks_get() with this chunk's word size. We really want
    // to exercise ChunkManager::free_chunks_get() because that one gets called for
    // normal chunk allocation.
    Metachunk* c2 = _cm.free_chunks_get(c->word_size());
    assert(c2 != NULL, "Unexpected.");
    assert(!c2->is_tagged_free(), "Chunk state mismatch");
    assert(c2->next() == NULL && c2->prev() == NULL, "Chunk should be outside of a list.");
    _chunks_in_chunkmanager --;
    _words_in_chunkmanager -= c->word_size();
    assert_counters();
    _cm.locked_verify();
    return true;
  }

  // Returns a single random chunk to the chunk manager. Returns false if that
  // was not possible (all chunks are already in the chunk manager).
  bool return_single_random_chunk_to_chunkmanager() {
    assert_counters();
    _cm.locked_verify();
    int pos = next_matching_chunk(get_random_position(), false);
    if (pos == -1) {
      return false;
    }
    Metachunk* c = _pool[pos];
    assert(c->is_tagged_free() == false, "wrong chunk information");
    _cm.return_single_chunk(_cm.list_index(c->word_size()), c);
    _chunks_in_chunkmanager ++;
    _words_in_chunkmanager += c->word_size();
    assert(c->is_tagged_free() == true, "wrong chunk information");
    assert_counters();
    _cm.locked_verify();
    return true;
  }

  // Return a random chunk list to the chunk manager. Returns the length of the
  // returned list.
  int return_random_chunk_list_to_chunkmanager(int list_size) {
    assert_counters();
    _cm.locked_verify();
    AChunkList aChunkList;
    if (!assemble_random_chunklist(&aChunkList, list_size)) {
      return 0;
    }
    // Before the chunks are returned, they should still be tagged as in use.
    for (int i = 0; i < aChunkList.num; i ++) {
      assert(!aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
    }
    _cm.return_chunk_list(aChunkList.index, aChunkList.head);
    _chunks_in_chunkmanager += aChunkList.num;
    _words_in_chunkmanager += aChunkList.size;
    // After all chunks are returned, check that they are now tagged free.
    for (int i = 0; i < aChunkList.num; i ++) {
      assert(aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
    }
    assert_counters();
    _cm.locked_verify();
    return aChunkList.num;
  }

 public:

  ChunkManagerReturnTestImpl()
    : _vsn(align_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
    , _cm(SpecializedChunk, SmallChunk, MediumChunk)
    , _chunks_in_chunkmanager(0)
    , _words_in_chunkmanager(0)
  {
    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    // Allocate virtual space and allocate random chunks. Keep these chunks in the _pool. These chunks are
    // "in use", because they have not yet been added to any chunk manager.
    _vsn.initialize();
    _vsn.expand_by(_vsn.reserved_words(), _vsn.reserved_words());
    for (int i = 0; i < num_chunks; i ++) {
      const size_t size = get_random_chunk_size();
      _pool[i] = _vsn.get_chunk_vs(size);
      assert(_pool[i] != NULL, "allocation failed");
    }
    assert_counters();
    _cm.locked_verify();
  }

  // Test entry point.
  // Return some chunks to the chunk manager (return phase). Take some chunks out (take phase). Repeat.
  // Chunks are chosen randomly. The number of chunks to return or take is also chosen randomly, but
  // is affected by the <phase_length_factor> argument: a factor of 0.0 will cause the test to quickly
  // alternate between returning and taking, whereas a factor of 1.0 will take/return all chunks from/to
  // the chunk manager, thereby emptying or filling it completely.
  void do_test(float phase_length_factor) {
    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    assert_counters();
    // Execute n operations, each operation being the move of a single chunk to or from the chunk manager.
    const int num_max_ops = num_chunks * 100;
    int num_ops = num_max_ops;
    const int average_phase_length = (int)(phase_length_factor * num_chunks);
    int num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
    bool return_phase = true;
    while (num_ops > 0) {
      int chunks_moved = 0;
      if (return_phase) {
        // Randomly switch between returning a single chunk or a random length chunk list.
        if (os::random() % 2 == 0) {
          if (return_single_random_chunk_to_chunkmanager()) {
            chunks_moved = 1;
          }
        } else {
          const int list_length = MAX2(1, (os::random() % num_ops_until_switch));
          chunks_moved = return_random_chunk_list_to_chunkmanager(list_length);
        }
      } else {
        // Breathe out.
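        // Take phase: move a single chunk per step out of the chunk manager.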
4347 if (take_single_random_chunk_from_chunkmanager()) { 4348 chunks_moved = 1; 4349 } 4350 } 4351 num_ops -= chunks_moved; 4352 num_ops_until_switch -= chunks_moved; 4353 if (chunks_moved == 0 || num_ops_until_switch <= 0) { 4354 return_phase = !return_phase; 4355 num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4)); 4356 } 4357 } 4358 } 4359 }; 4360 4361 void* setup_chunkmanager_returntests() { 4362 ChunkManagerReturnTestImpl* p = new ChunkManagerReturnTestImpl(); 4363 return p; 4364 } 4365 4366 void teardown_chunkmanager_returntests(void* p) { 4367 delete (ChunkManagerReturnTestImpl*) p; 4368 } 4369 4370 void run_chunkmanager_returntests(void* p, float phase_length) { 4371 ChunkManagerReturnTestImpl* test = (ChunkManagerReturnTestImpl*) p; 4372 test->do_test(phase_length); 4373 } 4374 4375 // The following test is placed here instead of a gtest / unittest file 4376 // because the ChunkManager class is only available in this file. 4377 class SpaceManagerTest : AllStatic { 4378 friend void SpaceManager_test_adjust_initial_chunk_size(); 4379 4380 static void test_adjust_initial_chunk_size(bool is_class) { 4381 const size_t smallest = SpaceManager::smallest_chunk_size(is_class); 4382 const size_t normal = SpaceManager::small_chunk_size(is_class); 4383 const size_t medium = SpaceManager::medium_chunk_size(is_class); 4384 4385 #define test_adjust_initial_chunk_size(value, expected, is_class_value) \ 4386 do { \ 4387 size_t v = value; \ 4388 size_t e = expected; \ 4389 assert(SpaceManager::adjust_initial_chunk_size(v, (is_class_value)) == e, \ 4390 "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, v); \ 4391 } while (0) 4392 4393 // Smallest (specialized) 4394 test_adjust_initial_chunk_size(1, smallest, is_class); 4395 test_adjust_initial_chunk_size(smallest - 1, smallest, is_class); 4396 test_adjust_initial_chunk_size(smallest, smallest, is_class); 4397 4398 // Small 4399 test_adjust_initial_chunk_size(smallest + 1, normal, is_class); 4400 test_adjust_initial_chunk_size(normal - 1, normal, is_class); 4401 test_adjust_initial_chunk_size(normal, normal, is_class); 4402 4403 // Medium 4404 test_adjust_initial_chunk_size(normal + 1, medium, is_class); 4405 test_adjust_initial_chunk_size(medium - 1, medium, is_class); 4406 test_adjust_initial_chunk_size(medium, medium, is_class); 4407 4408 // Humongous 4409 test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class); 4410 4411 #undef test_adjust_initial_chunk_size 4412 } 4413 4414 static void test_adjust_initial_chunk_size() { 4415 test_adjust_initial_chunk_size(false); 4416 test_adjust_initial_chunk_size(true); 4417 } 4418 }; 4419 4420 void SpaceManager_test_adjust_initial_chunk_size() { 4421 SpaceManagerTest::test_adjust_initial_chunk_size(); 4422 } 4423 4424 #endif // ASSERT
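
// Note: the setup/run/teardown hooks above are expected to be driven from a
// test outside this file; a minimal sketch of a caller (illustrative only,
// assuming access to these declarations):
//
//   void* t = setup_chunkmanager_returntests();
//   run_chunkmanager_returntests(t, 0.0f);  // rapidly alternate phases
//   run_chunkmanager_returntests(t, 1.0f);  // fully drain/fill the manager
//   teardown_chunkmanager_returntests(t);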