/*
 * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

DEBUG_ONLY(bool Metaspace::_frozen = false;)

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

// Helper, returns a descriptive name for the given index.
static const char* chunk_size_name(ChunkIndex index) {
  switch (index) {
    case SpecializedIndex:
      return "specialized";
    case SmallIndex:
      return "small";
    case MediumIndex:
      return "medium";
    case HumongousIndex:
      return "humongous";
    default:
      return "Invalid index";
  }
}

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};
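// Worked sizes (illustrative): with 8-byte words on a 64-bit VM the
// non-class chunk sizes above come to 1 KB (specialized), 4 KB (small)
// and 64 KB (medium); the class-space variants come to 1 KB, 2 KB and
// 32 KB. Any request larger than a medium chunk is treated as humongous.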
static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bound");
  return (ChunkIndex) (i+1);
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free list of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  // Return non-humongous chunk list by its index.
  ChunkList* free_chunks(ChunkIndex index);

  // Returns non-humongous chunk list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Returns the humongous chunk dictionary.
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  // Size, in metaspace words, of all chunks managed by this ChunkManager
  size_t _free_chunks_total;
  // Number of chunks in this ChunkManager
  size_t _free_chunks_count;

  // Update counters after a chunk was added or removed.
  void account_for_added_chunk(const Metachunk* c);
  void account_for_removed_chunk(const Metachunk* c);

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

  struct ChunkManagerStatistics {
    size_t num_by_type[NumberOfFreeLists];
    size_t single_size_by_type[NumberOfFreeLists];
    size_t total_size_by_type[NumberOfFreeLists];
    size_t num_humongous_chunks;
    size_t total_size_humongous_chunks;
  };

  void locked_get_statistics(ChunkManagerStatistics* stat) const;
  void get_statistics(ChunkManagerStatistics* stat) const;
  static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out);

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // Add or delete (return) a chunk to/from the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  ChunkIndex list_index(size_t size);

  // Map a given index to the chunk size.
  size_t size_by_index(ChunkIndex index) const;

  // Take a chunk from the ChunkManager. The chunk is expected to be in
  // the chunk manager (the freelist if non-humongous, the dictionary if
  // humongous).
  void remove_chunk(Metachunk* chunk);

  // Return a single chunk of type index to the ChunkManager.
  void return_single_chunk(ChunkIndex index, Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunk_list(ChunkIndex index, Metachunk* chunk);

  // Total of the space in the free chunks list
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks list
  size_t free_chunks_count();

  // Remove from a list by size.  Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);

#define index_bounds_check(index)                                         \
  assert(index == SpecializedIndex ||                                     \
         index == SmallIndex ||                                           \
         index == MediumIndex ||                                          \
         index == HumongousIndex, "Bad index: %d", (int) index)

  size_t num_free_chunks(ChunkIndex index) const {
    index_bounds_check(index);

    if (index == HumongousIndex) {
      return _humongous_dictionary.total_free_blocks();
    }

    ssize_t count = _free_chunks[index].count();
    return count == -1 ? 0 : (size_t) count;
  }

  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
    index_bounds_check(index);

    size_t word_size = 0;
    if (index == HumongousIndex) {
      word_size = _humongous_dictionary.total_size();
    } else {
      const size_t size_per_chunk_in_words = _free_chunks[index].size();
      word_size = size_per_chunk_in_words * num_free_chunks(index);
    }

    return word_size * BytesPerWord;
  }

  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
                                         num_free_chunks(SmallIndex),
                                         num_free_chunks(MediumIndex),
                                         num_free_chunks(HumongousIndex),
                                         size_free_chunks_in_bytes(SpecializedIndex),
                                         size_free_chunks_in_bytes(SmallIndex),
                                         size_free_chunks_in_bytes(MediumIndex),
                                         size_free_chunks_in_bytes(HumongousIndex));
  }

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;

  // Prints composition for both non-class and (if available)
  // class chunk manager.
  static void print_all_chunkmanagers(outputStream* out);
};
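// Illustrative sketch (not part of the build): how a ChunkManager for the
// non-class metaspace would be set up and queried through the public
// interface above, using the chunk size constants defined earlier. The
// actual call sites elsewhere in this file may differ in detail.
#if 0
  ChunkManager* cm = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
  // list_index() maps a chunk word size back to its free list; a size that
  // matches none of the three fixed sizes is humongous.
  assert(cm->list_index(SmallChunk) == SmallIndex, "fixed-size chunk");
  assert(cm->list_index(2 * MediumChunk) == HumongousIndex, "oversized chunk");
  size_t free_bytes = cm->free_chunks_total_bytes(); // free lists + dictionary
#endif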
class SmallBlocks : public CHeapObj<mtClass> {
  const static uint _small_block_max_size = sizeof(TreeChunk<Metablock, FreeList<Metablock> >)/HeapWordSize;
  const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;

 private:
  FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];

  FreeList<Metablock>& list_at(size_t word_size) {
    assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
    return _small_lists[word_size - _small_block_min_size];
  }

 public:
  SmallBlocks() {
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      _small_lists[k].set_size(i);
    }
  }

  size_t total_size() const {
    size_t result = 0;
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      result = result + _small_lists[k].count() * _small_lists[k].size();
    }
    return result;
  }

  static uint small_block_max_size() { return _small_block_max_size; }
  static uint small_block_min_size() { return _small_block_min_size; }

  MetaWord* get_block(size_t word_size) {
    if (list_at(word_size).count() > 0) {
      MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
      return new_block;
    } else {
      return NULL;
    }
  }
  void return_block(Metablock* free_chunk, size_t word_size) {
    list_at(word_size).return_chunk_at_head(free_chunk, false);
    assert(list_at(word_size).count() > 0, "Should have a chunk");
  }

  void print_on(outputStream* st) const {
    st->print_cr("SmallBlocks:");
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
    }
  }
};

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist : public CHeapObj<mtClass> {
  BlockTreeDictionary* const _dictionary;
  SmallBlocks* _small_blocks;

  // Only allocate and split from freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }
  SmallBlocks* small_blocks() {
    if (_small_blocks == NULL) {
      _small_blocks = new SmallBlocks();
    }
    return _small_blocks;
  }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get and return a block to the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() const {
    size_t result = dictionary()->total_size();
    if (_small_blocks != NULL) {
      result = result + _small_blocks->total_size();
    }
    return result;
  }

  static size_t min_dictionary_size() { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
  void print_on(outputStream* st) const;
};
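// Routing example (illustrative) for the BlockFreelist above: a freed block
// whose word size is below small_block_max_size() goes to the per-size
// SmallBlocks freelists; anything at or above that bound goes to the
// BlockTreeDictionary. get_block() searches in the same order: the exact-size
// SmallBlocks list first, then (if the request is at least
// min_dictionary_size()) the dictionary. The implementations appear later
// in this file.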
// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // The whole region reserved for this VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;

 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  // Set the address of the next available space in _virtual_space.
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uintx container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to VirtualSpace.
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always adds up because all the chunk sizes are multiples of
  // the smallest chunk size.
  void retire(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};
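// Illustrative sketch (not part of the build): the life of a node. A node
// reserves its memory on construction, commits on demand via expand_by(),
// and hands out chunks by bumping _top. The sizes here are hypothetical.
#if 0
  VirtualSpaceNode* node = new VirtualSpaceNode(8 * M);    // reserve 8 MB
  if (node->initialize()) {                                // set up the VirtualSpace
    node->expand_by(Metaspace::commit_alignment_words(),   // commit at least one granule
                    Metaspace::commit_alignment_words());
    Metachunk* chunk = node->take_from_committed(SpecializedChunk);
  }
#endif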
#define assert_is_aligned(value, alignment)                  \
  assert(is_aligned((value), (alignment)),                   \
         SIZE_FORMAT_HEX " is not aligned to "               \
         SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))

// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// byte_size is the size of the associated virtualspace.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_aligned(bytes, Metaspace::reserve_alignment());
  bool large_pages = should_commit_large_pages_when_reserving(bytes);
  _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uintx VirtualSpaceNode::container_count_slow() {
  uintx count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif
// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

  // Chunk up the unused committed space in the current
  // virtual space and add the chunks to the free list.
  void retire_current_virtual_space();

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t chunk_word_size,
                           size_t suggested_commit_granularity);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  bool contains(const void* ptr);

  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};
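// Illustrative sketch (not part of the build): how the nested iterator is
// used from inside a VirtualSpaceList member function when the list is
// stable, mirroring what contains() and print_on() later in this file do.
#if 0
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    node->print_on(st);   // 'st' is some outputStream*
  }
#endif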
class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;

// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Maximum number of small chunks to allocate to a SpaceManager
  static uint const _small_chunk_limit;

  // Sum of all space in allocated chunks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist* _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const { return _block_freelists; }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
  static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
  static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }

  static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }

  // Accessors
  bool is_class() const { return _mdtype == Metaspace::ClassType; }

  size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
  size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
  size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }

  size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }

  size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
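  // Worked example (illustrative): for the non-class metaspace,
  // medium_chunk_bunch() = MediumChunk * MediumChunkMultiple
  //                      = 8 K words * 4 = 32 K words,
  // i.e. 256 KB with 8-byte words; for the class space it is
  // 4 K words * 4 = 16 K words (128 KB).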
  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks
  // by the given size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager.  That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in-use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Adjust the initial chunk size to match one of the fixed chunk list sizes,
  // or return the unadjusted size if the requested size is humongous.
  static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
  size_t adjust_initial_chunk_size(size_t requested) const;

  // Get the initial chunk size for this metaspace type.
  size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t chunk_word_size);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);
  // Allocates a block from a small chunk
  MetaWord* get_small_chunk_and_allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // return the chunk size needed (for expanding space for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  // This adjusts the size given to be greater than the minimum allocation size in
  // words for data in metaspace.  Essentially the minimum size is currently 3 words.
  size_t get_allocation_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
};
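// Worked example (illustrative) for get_allocation_word_size() above,
// assuming a 64-bit VM with 8-byte words, sizeof(Metablock) == 24 and an
// 8-byte Metachunk::object_alignment():
//   word_size = 1  ->  byte_size = 8
//   raw_bytes_size = MAX2(8, 24) = 24, already 8-byte aligned
//   raw_word_size  = 24 / 8 = 3 words, the 3-word minimum noted above.
// A request of 5 words (40 bytes) passes through unchanged.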
uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag,
            Monitor::_safepoint_check_never);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         "Inconsistency in container_count _container_count " UINTX_FORMAT
         " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}

BlockFreelist::~BlockFreelist() {
  delete _dictionary;
  if (_small_blocks != NULL) {
    delete _small_blocks;
  }
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");

  Metablock* free_chunk = ::new (p) Metablock(word_size);
  if (word_size < SmallBlocks::small_block_max_size()) {
    small_blocks()->return_block(free_chunk, word_size);
  } else {
    dictionary()->return_chunk(free_chunk);
  }
  log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
            SIZE_FORMAT, p2i(free_chunk), word_size);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");

  // Try small_blocks first.
  if (word_size < SmallBlocks::small_block_max_size()) {
    // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
    // this space manager.
    MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
    if (new_block != NULL) {
      log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
                p2i(new_block), word_size);
      return new_block;
    }
  }

  if (word_size < BlockFreelist::min_dictionary_size()) {
    // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block = dictionary()->get_chunk(word_size);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= SmallBlocks::small_block_min_size()) {
    return_block(new_block + word_size, unused);
  }

  log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
            p2i(new_block), word_size);
  return new_block;
}
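// Worked example (illustrative sizes) for get_block() above: a request for
// 10 words that finds a 50-word block in the dictionary rejects it, since
// 50 > WasteMultiplier * 10 = 40; the block goes back to the freelist and
// NULL is returned. A 30-word block would be accepted: the caller gets its
// 10 words and the 20-word remainder is returned to the freelist as a new
// block (it is above small_block_min_size, so it is not dark matter).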
void BlockFreelist::print_on(outputStream* st) const {
  dictionary()->print_free_lists(st);
  if (_small_blocks != NULL) {
    _small_blocks->print_on(st);
  }
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
         "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    LogTarget(Debug, gc, metaspace, freelist) lt;
    if (lt.is_enabled()) {
      LogStream ls(lt);
      ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(&ls);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}
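// Worked example (illustrative) for expand_by() above: with 2 M of the
// reservation still uncommitted, a call with min_bytes = 512 K and
// preferred_bytes = 4 M commits MIN2(4 M, 2 M) = 2 M. If only 256 K were
// uncommitted, the call would return false without committing anything.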
Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned only the middle alignment of the VirtualSpace is used.
  assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                                             Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
           "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                           (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
           "Reserved start was not set properly " PTR_FORMAT
           " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
           "Reserved size was not set properly " SIZE_FORMAT
           " != " SIZE_FORMAT, reserved()->word_size(),
           _rs.size() / BytesPerWord);
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
               "[" PTR_FORMAT ", " PTR_FORMAT ", "
               PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(vs), capacity / K,
               capacity == 0 ? 0 : used * 100 / capacity,
               p2i(bottom()), p2i(top()), p2i(end()),
               p2i(vs->high_boundary()));
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                        \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
         "Too much committed memory. Committed: " SIZE_FORMAT \
         " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
         MetaspaceAux::committed_bytes(), MaxMetaspaceSize);

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk has been removed from the chunks free list, update counters.
  account_for_removed_chunk(chunk);
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    DEBUG_ONLY(vsl->verify_container_count();)
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except for at
// unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(SpaceManager::expand_lock());

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  DEBUG_ONLY(verify_container_count();)
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->size_by_index(index);

    while (free_words_in_vs() >= chunk_size) {
      Metachunk* chunk = get_chunk_vs(chunk_size);
      assert(chunk != NULL, "allocation should have been successful");

      chunk_manager->return_single_chunk(index, chunk);
    }
    DEBUG_ONLY(verify_container_count();)
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}
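// Worked example (illustrative) for retire() above, for the non-class
// metaspace: a node with 9216 committed-but-unused words is carved into
// one 8192-word medium chunk and then two 512-word small chunks, leaving
// 0 free words; each chunk is handed to the ChunkManager's freelists.
// This is why the final assert can demand an empty node: every chunk size
// is a multiple of the smallest one.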
VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return current_virtual_space()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
           "Reserved memory size differs from requested memory size");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  LogTarget(Trace, gc, metaspace) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    VirtualSpaceNode* vsl = current_virtual_space();
    ResourceMark rm;
    vsl->print_on(&ls);
  }
}

bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_aligned(min_words, Metaspace::commit_alignment_words());
  assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
             "The new VirtualSpace was pre-committed, so it "
             "should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size       = align_up(chunk_word_size,              Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(chunk_word_size);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    node->print_on(st);
  }
}

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_aligned(delta, Metaspace::commit_alignment());

  return delta;
}
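// Worked example (illustrative) for delta_capacity_until_GC() above,
// assuming typical defaults of MinMetaspaceExpansion = 256 K and
// MaxMetaspaceExpansion = 4 M, and a hypothetical 64 K commit alignment:
//   bytes = 100 K -> aligned to 128 K, at or below the minimum -> delta = 256 K
//   bytes =   1 M -> between min and max                       -> delta =   4 M
//   bytes =  10 M -> above the maximum                         -> delta = 10 M + 256 K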
size_t MetaspaceGC::capacity_until_GC() {
  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  size_t capacity_until_GC = (size_t) _capacity_until_GC;
  size_t new_value = capacity_until_GC + v;

  if (new_value < capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_down(max_uintx, Metaspace::commit_alignment());
  }

  intptr_t expected = (intptr_t) capacity_until_GC;
  intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);

  if (expected != actual) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = capacity_until_GC;
  }
  return true;
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    return false;
  }

  return true;
}
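// Worked example (illustrative) for can_expand() above: with
// -XX:MaxMetaspaceSize=256m and 255 M already committed, a request for
// 256 K words (2 M bytes on a 64-bit VM) fails the limit check, since
// 255 M + 2 M > 256 M. With MaxMetaspaceSize left at its effectively
// unlimited default, this check always passes.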
size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
         capacity_until_gc, committed_bytes);

  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);

  return left_to_commit / BytesPerWord;
}
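// Worked example (illustrative) for allowed_expansion() above: with
// committed_bytes = 40 M, capacity_until_GC = 64 M and an effective
// MaxMetaspaceSize of 256 M:
//   left_until_max = 216 M, left_until_GC = 24 M
//   -> left_to_commit = MIN2(24 M, 216 M) = 24 M,
//      i.e. 3 M words with 8-byte words.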
1620     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1621     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
1622     // Don't expand unless it's significant
1623     if (expand_bytes >= MinMetaspaceExpansion) {
1624       size_t new_capacity_until_GC = 0;
1625       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1626       assert(succeeded, "Should always successfully increment HWM when at safepoint");
1627
1628       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1629                                                new_capacity_until_GC,
1630                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
1631       log_trace(gc, metaspace)(" expanding: minimum_desired_capacity: %6.1fKB expand_bytes: %6.1fKB MinMetaspaceExpansion: %6.1fKB new metaspace HWM: %6.1fKB",
1632                                minimum_desired_capacity / (double) K,
1633                                expand_bytes / (double) K,
1634                                MinMetaspaceExpansion / (double) K,
1635                                new_capacity_until_GC / (double) K);
1636     }
1637     return;
1638   }
1639
1640   // No expansion, now see if we want to shrink
1641   // We would never want to shrink more than this
1642   assert(capacity_until_GC >= minimum_desired_capacity,
1643          SIZE_FORMAT " >= " SIZE_FORMAT,
1644          capacity_until_GC, minimum_desired_capacity);
1645   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1646
1647   // Should shrinking be considered?
1648   if (MaxMetaspaceFreeRatio < 100) {
1649     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1650     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1651     const double max_tmp = used_after_gc / minimum_used_percentage;
1652     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1653     maximum_desired_capacity = MAX2(maximum_desired_capacity,
1654                                     MetaspaceSize);
1655     log_trace(gc, metaspace)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f",
1656                              maximum_free_percentage, minimum_used_percentage);
1657     log_trace(gc, metaspace)(" minimum_desired_capacity: %6.1fKB maximum_desired_capacity: %6.1fKB",
1658                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
1659
1660     assert(minimum_desired_capacity <= maximum_desired_capacity,
1661            "sanity check");
1662
1663     if (capacity_until_GC > maximum_desired_capacity) {
1664       // Capacity too large, compute shrinking size
1665       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1666       // We don't want to shrink all the way back to initSize if people call
1667       // System.gc(), because some programs do that between "phases" and then
1668       // we'd just have to grow the metaspace up again for the next phase. So we
1669       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1670       // on the third call, and 100% by the fourth call. But if we recompute
1671       // size without shrinking, it goes back to 0%.
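      // Damping example (illustrative): if each recomputation finds 40M of
      // excess, successive calls shrink by 0M (factor 0), then 4M (10%),
      // then 16M (40%), and finally the whole remaining excess (100%).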
1672 shrink_bytes = shrink_bytes / 100 * current_shrink_factor; 1673 1674 shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment()); 1675 1676 assert(shrink_bytes <= max_shrink_bytes, 1677 "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, 1678 shrink_bytes, max_shrink_bytes); 1679 if (current_shrink_factor == 0) { 1680 _shrink_factor = 10; 1681 } else { 1682 _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100); 1683 } 1684 log_trace(gc, metaspace)(" shrinking: initThreshold: %.1fK maximum_desired_capacity: %.1fK", 1685 MetaspaceSize / (double) K, maximum_desired_capacity / (double) K); 1686 log_trace(gc, metaspace)(" shrink_bytes: %.1fK current_shrink_factor: %d new shrink factor: %d MinMetaspaceExpansion: %.1fK", 1687 shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K); 1688 } 1689 } 1690 1691 // Don't shrink unless it's significant 1692 if (shrink_bytes >= MinMetaspaceExpansion && 1693 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { 1694 size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes); 1695 Metaspace::tracer()->report_gc_threshold(capacity_until_GC, 1696 new_capacity_until_GC, 1697 MetaspaceGCThresholdUpdater::ComputeNewSize); 1698 } 1699 } 1700 1701 // Metadebug methods 1702 1703 void Metadebug::init_allocation_fail_alot_count() { 1704 if (MetadataAllocationFailALot) { 1705 _allocation_fail_alot_count = 1706 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0)); 1707 } 1708 } 1709 1710 #ifdef ASSERT 1711 bool Metadebug::test_metadata_failure() { 1712 if (MetadataAllocationFailALot && 1713 Threads::is_vm_complete()) { 1714 if (_allocation_fail_alot_count > 0) { 1715 _allocation_fail_alot_count--; 1716 } else { 1717 log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot"); 1718 init_allocation_fail_alot_count(); 1719 return true; 1720 } 1721 } 1722 return false; 1723 } 1724 #endif 1725 1726 // ChunkManager methods 1727 1728 size_t ChunkManager::free_chunks_total_words() { 1729 return _free_chunks_total; 1730 } 1731 1732 size_t ChunkManager::free_chunks_total_bytes() { 1733 return free_chunks_total_words() * BytesPerWord; 1734 } 1735 1736 // Update internal accounting after a chunk was added 1737 void ChunkManager::account_for_added_chunk(const Metachunk* c) { 1738 assert_lock_strong(SpaceManager::expand_lock()); 1739 _free_chunks_count ++; 1740 _free_chunks_total += c->word_size(); 1741 } 1742 1743 // Update internal accounting after a chunk was removed 1744 void ChunkManager::account_for_removed_chunk(const Metachunk* c) { 1745 assert_lock_strong(SpaceManager::expand_lock()); 1746 assert(_free_chunks_count >= 1, 1747 "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count); 1748 assert(_free_chunks_total >= c->word_size(), 1749 "ChunkManager::_free_chunks_total: about to go negative" 1750 "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size()); 1751 _free_chunks_count --; 1752 _free_chunks_total -= c->word_size(); 1753 } 1754 1755 size_t ChunkManager::free_chunks_count() { 1756 #ifdef ASSERT 1757 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) { 1758 MutexLockerEx cl(SpaceManager::expand_lock(), 1759 Mutex::_no_safepoint_check_flag); 1760 // This lock is only needed in debug because the verification 1761 // of the _free_chunks_totals walks the list of free chunks 1762 
slow_locked_verify_free_chunks_count(); 1763 } 1764 #endif 1765 return _free_chunks_count; 1766 } 1767 1768 ChunkIndex ChunkManager::list_index(size_t size) { 1769 if (size_by_index(SpecializedIndex) == size) { 1770 return SpecializedIndex; 1771 } 1772 if (size_by_index(SmallIndex) == size) { 1773 return SmallIndex; 1774 } 1775 const size_t med_size = size_by_index(MediumIndex); 1776 if (med_size == size) { 1777 return MediumIndex; 1778 } 1779 1780 assert(size > med_size, "Not a humongous chunk"); 1781 return HumongousIndex; 1782 } 1783 1784 size_t ChunkManager::size_by_index(ChunkIndex index) const { 1785 index_bounds_check(index); 1786 assert(index != HumongousIndex, "Do not call for humongous chunks."); 1787 return _free_chunks[index].size(); 1788 } 1789 1790 void ChunkManager::locked_verify_free_chunks_total() { 1791 assert_lock_strong(SpaceManager::expand_lock()); 1792 assert(sum_free_chunks() == _free_chunks_total, 1793 "_free_chunks_total " SIZE_FORMAT " is not the" 1794 " same as sum " SIZE_FORMAT, _free_chunks_total, 1795 sum_free_chunks()); 1796 } 1797 1798 void ChunkManager::verify_free_chunks_total() { 1799 MutexLockerEx cl(SpaceManager::expand_lock(), 1800 Mutex::_no_safepoint_check_flag); 1801 locked_verify_free_chunks_total(); 1802 } 1803 1804 void ChunkManager::locked_verify_free_chunks_count() { 1805 assert_lock_strong(SpaceManager::expand_lock()); 1806 assert(sum_free_chunks_count() == _free_chunks_count, 1807 "_free_chunks_count " SIZE_FORMAT " is not the" 1808 " same as sum " SIZE_FORMAT, _free_chunks_count, 1809 sum_free_chunks_count()); 1810 } 1811 1812 void ChunkManager::verify_free_chunks_count() { 1813 #ifdef ASSERT 1814 MutexLockerEx cl(SpaceManager::expand_lock(), 1815 Mutex::_no_safepoint_check_flag); 1816 locked_verify_free_chunks_count(); 1817 #endif 1818 } 1819 1820 void ChunkManager::verify() { 1821 MutexLockerEx cl(SpaceManager::expand_lock(), 1822 Mutex::_no_safepoint_check_flag); 1823 locked_verify(); 1824 } 1825 1826 void ChunkManager::locked_verify() { 1827 locked_verify_free_chunks_count(); 1828 locked_verify_free_chunks_total(); 1829 } 1830 1831 void ChunkManager::locked_print_free_chunks(outputStream* st) { 1832 assert_lock_strong(SpaceManager::expand_lock()); 1833 st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, 1834 _free_chunks_total, _free_chunks_count); 1835 } 1836 1837 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) { 1838 assert_lock_strong(SpaceManager::expand_lock()); 1839 st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, 1840 sum_free_chunks(), sum_free_chunks_count()); 1841 } 1842 1843 ChunkList* ChunkManager::free_chunks(ChunkIndex index) { 1844 assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex, 1845 "Bad index: %d", (int)index); 1846 1847 return &_free_chunks[index]; 1848 } 1849 1850 // These methods that sum the free chunk lists are used in printing 1851 // methods that are used in product builds. 
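// In effect (sketch of the accounting below, everything in metaspace words):
//   sum_free_chunks()       = sum over the non-humongous lists of count * size
//                             + humongous_dictionary()->total_size()
//   sum_free_chunks_count() = sum over the non-humongous lists of count
//                             + humongous_dictionary()->total_free_blocks()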
1852 size_t ChunkManager::sum_free_chunks() {
1853   assert_lock_strong(SpaceManager::expand_lock());
1854   size_t result = 0;
1855   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1856     ChunkList* list = free_chunks(i);
1857
1858     if (list == NULL) {
1859       continue;
1860     }
1861
1862     result = result + list->count() * list->size();
1863   }
1864   result = result + humongous_dictionary()->total_size();
1865   return result;
1866 }
1867
1868 size_t ChunkManager::sum_free_chunks_count() {
1869   assert_lock_strong(SpaceManager::expand_lock());
1870   size_t count = 0;
1871   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1872     ChunkList* list = free_chunks(i);
1873     if (list == NULL) {
1874       continue;
1875     }
1876     count = count + list->count();
1877   }
1878   count = count + humongous_dictionary()->total_free_blocks();
1879   return count;
1880 }
1881
1882 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1883   ChunkIndex index = list_index(word_size);
1884   assert(index < HumongousIndex, "No humongous list");
1885   return free_chunks(index);
1886 }
1887
1888 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1889   assert_lock_strong(SpaceManager::expand_lock());
1890
1891   slow_locked_verify();
1892
1893   Metachunk* chunk = NULL;
1894   if (list_index(word_size) != HumongousIndex) {
1895     ChunkList* free_list = find_free_chunks_list(word_size);
1896     assert(free_list != NULL, "Sanity check");
1897
1898     chunk = free_list->head();
1899
1900     if (chunk == NULL) {
1901       return NULL;
1902     }
1903
1904     // Remove the chunk as the head of the list.
1905     free_list->remove_chunk(chunk);
1906
1907     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1908                                        p2i(free_list), p2i(chunk), chunk->word_size());
1909   } else {
1910     chunk = humongous_dictionary()->get_chunk(word_size);
1911
1912     if (chunk == NULL) {
1913       return NULL;
1914     }
1915
1916     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
1917                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
1918   }
1919
1920   // Chunk has been removed from the chunk manager; update counters.
1921   account_for_removed_chunk(chunk);
1922
1923   // Remove it from the links to this freelist
1924   chunk->set_next(NULL);
1925   chunk->set_prev(NULL);
1926 #ifdef ASSERT
1927   // Chunk is no longer on any freelist. Setting to false makes container_count_slow()
1928   // work.
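  // (The is_tagged_free flag is the per-chunk mirror of the freelist accounting
  // done by account_for_added_chunk()/account_for_removed_chunk() above.)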
1929 chunk->set_is_tagged_free(false); 1930 #endif 1931 chunk->container()->inc_container_count(); 1932 1933 slow_locked_verify(); 1934 return chunk; 1935 } 1936 1937 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) { 1938 assert_lock_strong(SpaceManager::expand_lock()); 1939 slow_locked_verify(); 1940 1941 // Take from the beginning of the list 1942 Metachunk* chunk = free_chunks_get(word_size); 1943 if (chunk == NULL) { 1944 return NULL; 1945 } 1946 1947 assert((word_size <= chunk->word_size()) || 1948 (list_index(chunk->word_size()) == HumongousIndex), 1949 "Non-humongous variable sized chunk"); 1950 LogTarget(Debug, gc, metaspace, freelist) lt; 1951 if (lt.is_enabled()) { 1952 size_t list_count; 1953 if (list_index(word_size) < HumongousIndex) { 1954 ChunkList* list = find_free_chunks_list(word_size); 1955 list_count = list->count(); 1956 } else { 1957 list_count = humongous_dictionary()->total_count(); 1958 } 1959 LogStream ls(lt); 1960 ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ", 1961 p2i(this), p2i(chunk), chunk->word_size(), list_count); 1962 ResourceMark rm; 1963 locked_print_free_chunks(&ls); 1964 } 1965 1966 return chunk; 1967 } 1968 1969 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) { 1970 assert_lock_strong(SpaceManager::expand_lock()); 1971 assert(chunk != NULL, "Expected chunk."); 1972 assert(chunk->container() != NULL, "Container should have been set."); 1973 assert(chunk->is_tagged_free() == false, "Chunk should be in use."); 1974 index_bounds_check(index); 1975 1976 // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not 1977 // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary 1978 // keeps tree node pointers in the chunk payload area which mangle will overwrite. 1979 NOT_PRODUCT(chunk->mangle(badMetaWordVal);) 1980 1981 if (index != HumongousIndex) { 1982 // Return non-humongous chunk to freelist. 1983 ChunkList* list = free_chunks(index); 1984 assert(list->size() == chunk->word_size(), "Wrong chunk type."); 1985 list->return_chunk_at_head(chunk); 1986 log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.", 1987 chunk_size_name(index), p2i(chunk)); 1988 } else { 1989 // Return humongous chunk to dictionary. 1990 assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type."); 1991 assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0, 1992 "Humongous chunk has wrong alignment."); 1993 _humongous_dictionary.return_chunk(chunk); 1994 log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.", 1995 chunk_size_name(index), p2i(chunk), chunk->word_size()); 1996 } 1997 chunk->container()->dec_container_count(); 1998 DEBUG_ONLY(chunk->set_is_tagged_free(true);) 1999 2000 // Chunk has been added; update counters. 
2001   account_for_added_chunk(chunk);
2002
2003 }
2004
2005 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
2006   index_bounds_check(index);
2007   if (chunks == NULL) {
2008     return;
2009   }
2010   LogTarget(Trace, gc, metaspace, freelist) log;
2011   if (log.is_enabled()) { // tracing
2012     log.print("returning list of %s chunks...", chunk_size_name(index));
2013   }
2014   unsigned num_chunks_returned = 0;
2015   size_t size_chunks_returned = 0;
2016   Metachunk* cur = chunks;
2017   while (cur != NULL) {
2018     // Capture the next link before it is changed
2019     // by the call to return_chunk_at_head();
2020     Metachunk* next = cur->next();
2021     if (log.is_enabled()) { // tracing
2022       num_chunks_returned++;
2023       size_chunks_returned += cur->word_size();
2024     }
2025     return_single_chunk(index, cur);
2026     cur = next;
2027   }
2028   if (log.is_enabled()) { // tracing
2029     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
2030               num_chunks_returned, chunk_size_name(index), size_chunks_returned);
2031     if (index != HumongousIndex) {
2032       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
2033     } else {
2034       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
2035     }
2036   }
2037 }
2038
2039 void ChunkManager::print_on(outputStream* out) const {
2040   _humongous_dictionary.report_statistics(out);
2041 }
2042
2043 void ChunkManager::locked_get_statistics(ChunkManagerStatistics* stat) const {
2044   assert_lock_strong(SpaceManager::expand_lock());
2045   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2046     stat->num_by_type[i] = num_free_chunks(i);
2047     stat->single_size_by_type[i] = size_by_index(i);
2048     stat->total_size_by_type[i] = size_free_chunks_in_bytes(i);
2049   }
2050   stat->num_humongous_chunks = num_free_chunks(HumongousIndex);
2051   stat->total_size_humongous_chunks = size_free_chunks_in_bytes(HumongousIndex);
2052 }
2053
2054 void ChunkManager::get_statistics(ChunkManagerStatistics* stat) const {
2055   MutexLockerEx cl(SpaceManager::expand_lock(),
2056                    Mutex::_no_safepoint_check_flag);
2057   locked_get_statistics(stat);
2058 }
2059
2060 void ChunkManager::print_statistics(const ChunkManagerStatistics* stat, outputStream* out) {
2061   size_t total = 0;
2062   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2063     out->print_cr(" " SIZE_FORMAT " %s (" SIZE_FORMAT " bytes) chunks, total " SIZE_FORMAT " bytes",
2064                   stat->num_by_type[i], chunk_size_name(i),
2065                   stat->single_size_by_type[i],
2066                   stat->total_size_by_type[i]);
2067     total += stat->total_size_by_type[i];
2068   }
2069   out->print_cr(" " SIZE_FORMAT " humongous chunks, total " SIZE_FORMAT " bytes",
2070                 stat->num_humongous_chunks, stat->total_size_humongous_chunks);
2071   total += stat->total_size_humongous_chunks;
2072   out->print_cr(" total size: " SIZE_FORMAT ".", total);
2073 }
2074
2075 void ChunkManager::print_all_chunkmanagers(outputStream* out) {
2076   // Note: keep lock protection only around retrieving the statistics;
2077   // keep printing out of lock protection.
2078   ChunkManagerStatistics stat;
2079   out->print_cr("Chunkmanager (non-class):");
2080   const ChunkManager* const non_class_cm = Metaspace::chunk_manager_metadata();
2081   if (non_class_cm != NULL) {
2082     non_class_cm->get_statistics(&stat);
2083     ChunkManager::print_statistics(&stat, out);
2084   } else {
2085     out->print_cr("unavailable.");
2086   }
2087   out->print_cr("Chunkmanager (class):");
2088   const ChunkManager* const class_cm = Metaspace::chunk_manager_class();
2089   if (class_cm != NULL) {
2090     class_cm->get_statistics(&stat);
2091     ChunkManager::print_statistics(&stat, out);
2092   } else {
2093     out->print_cr("unavailable.");
2094   }
2095 }
2096
2097 // SpaceManager methods
2098
2099 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
2100   size_t chunk_sizes[] = {
2101     specialized_chunk_size(is_class_space),
2102     small_chunk_size(is_class_space),
2103     medium_chunk_size(is_class_space)
2104   };
2105
2106   // Adjust up to one of the fixed chunk sizes ...
2107   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
2108     if (requested <= chunk_sizes[i]) {
2109       return chunk_sizes[i];
2110     }
2111   }
2112
2113   // ... or return the size as a humongous chunk.
2114   return requested;
2115 }
2116
2117 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
2118   return adjust_initial_chunk_size(requested, is_class());
2119 }
2120
2121 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
2122   size_t requested;
2123
2124   if (is_class()) {
2125     switch (type) {
2126     case Metaspace::BootMetaspaceType: requested = Metaspace::first_class_chunk_word_size(); break;
2127     case Metaspace::AnonymousMetaspaceType: requested = ClassSpecializedChunk; break;
2128     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
2129     default: requested = ClassSmallChunk; break;
2130     }
2131   } else {
2132     switch (type) {
2133     case Metaspace::BootMetaspaceType: requested = Metaspace::first_chunk_word_size(); break;
2134     case Metaspace::AnonymousMetaspaceType: requested = SpecializedChunk; break;
2135     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
2136     default: requested = SmallChunk; break;
2137     }
2138   }
2139
2140   // Adjust to one of the fixed chunk sizes (unless humongous)
2141   const size_t adjusted = adjust_initial_chunk_size(requested);
2142
2143   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
2144          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
2145
2146   return adjusted;
2147 }
2148
2149 size_t SpaceManager::sum_free_in_chunks_in_use() const {
2150   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2151   size_t free = 0;
2152   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2153     Metachunk* chunk = chunks_in_use(i);
2154     while (chunk != NULL) {
2155       free += chunk->free_word_size();
2156       chunk = chunk->next();
2157     }
2158   }
2159   return free;
2160 }
2161
2162 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
2163   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2164   size_t result = 0;
2165   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2166     result += sum_waste_in_chunks_in_use(i);
2167   }
2168
2169   return result;
2170 }
2171
2172 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
2173   size_t result = 0;
2174   Metachunk* chunk = chunks_in_use(index);
2175   // Count the free space in all the chunks but not the
2176   // current chunk from which allocations are still being done.
2177   while (chunk != NULL) {
2178     if (chunk != current_chunk()) {
2179       result += chunk->free_word_size();
2180     }
2181     chunk = chunk->next();
2182   }
2183   return result;
2184 }
2185
2186 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
2187   // For CMS use "allocated_chunks_words()" which does not need the
2188   // Metaspace lock. For the other collectors sum over the
2189   // lists. Use both methods as a check that "allocated_chunks_words()"
2190   // is correct. That is, sum_capacity_in_chunks_in_use() is too expensive
2191   // to use in product builds, so allocated_chunks_words() should be used there,
2192   // but we still allow checking that allocated_chunks_words() returns the same
2193   // value as sum_capacity_in_chunks_in_use(), which is the definitive
2194   // answer.
2195   if (UseConcMarkSweepGC) {
2196     return allocated_chunks_words();
2197   } else {
2198     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2199     size_t sum = 0;
2200     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2201       Metachunk* chunk = chunks_in_use(i);
2202       while (chunk != NULL) {
2203         sum += chunk->word_size();
2204         chunk = chunk->next();
2205       }
2206     }
2207     return sum;
2208   }
2209 }
2210
2211 size_t SpaceManager::sum_count_in_chunks_in_use() {
2212   size_t count = 0;
2213   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2214     count = count + sum_count_in_chunks_in_use(i);
2215   }
2216
2217   return count;
2218 }
2219
2220 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
2221   size_t count = 0;
2222   Metachunk* chunk = chunks_in_use(i);
2223   while (chunk != NULL) {
2224     count++;
2225     chunk = chunk->next();
2226   }
2227   return count;
2228 }
2229
2230
2231 size_t SpaceManager::sum_used_in_chunks_in_use() const {
2232   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2233   size_t used = 0;
2234   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2235     Metachunk* chunk = chunks_in_use(i);
2236     while (chunk != NULL) {
2237       used += chunk->used_word_size();
2238       chunk = chunk->next();
2239     }
2240   }
2241   return used;
2242 }
2243
2244 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
2245
2246   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2247     Metachunk* chunk = chunks_in_use(i);
2248     st->print("SpaceManager: %s " PTR_FORMAT,
2249               chunk_size_name(i), p2i(chunk));
2250     if (chunk != NULL) {
2251       st->print_cr(" free " SIZE_FORMAT,
2252                    chunk->free_word_size());
2253     } else {
2254       st->cr();
2255     }
2256   }
2257
2258   chunk_manager()->locked_print_free_chunks(st);
2259   chunk_manager()->locked_print_sum_free_chunks(st);
2260 }
2261
2262 size_t SpaceManager::calc_chunk_size(size_t word_size) {
2263
2264   // Decide between a small chunk and a medium chunk. Up to
2265   // _small_chunk_limit small chunks can be allocated.
2266   // After that a medium chunk is preferred.
2267   size_t chunk_word_size;
2268   if (chunks_in_use(MediumIndex) == NULL &&
2269       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2270     chunk_word_size = (size_t) small_chunk_size();
2271     if (word_size + Metachunk::overhead() > small_chunk_size()) {
2272       chunk_word_size = medium_chunk_size();
2273     }
2274   } else {
2275     chunk_word_size = medium_chunk_size();
2276   }
2277
2278   // Might still need a humongous chunk. Enforce
2279   // humongous allocation sizes to be aligned up to
2280   // the smallest chunk size.
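  // Example (illustrative): a word_size just above medium_chunk_size() yields
  // chunk_word_size = align_up(word_size + Metachunk::overhead(),
  // smallest_chunk_size()), i.e. a humongous chunk.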
2281 size_t if_humongous_sized_chunk = 2282 align_up(word_size + Metachunk::overhead(), 2283 smallest_chunk_size()); 2284 chunk_word_size = 2285 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk); 2286 2287 assert(!SpaceManager::is_humongous(word_size) || 2288 chunk_word_size == if_humongous_sized_chunk, 2289 "Size calculation is wrong, word_size " SIZE_FORMAT 2290 " chunk_word_size " SIZE_FORMAT, 2291 word_size, chunk_word_size); 2292 Log(gc, metaspace, alloc) log; 2293 if (log.is_debug() && SpaceManager::is_humongous(word_size)) { 2294 log.debug("Metadata humongous allocation:"); 2295 log.debug(" word_size " PTR_FORMAT, word_size); 2296 log.debug(" chunk_word_size " PTR_FORMAT, chunk_word_size); 2297 log.debug(" chunk overhead " PTR_FORMAT, Metachunk::overhead()); 2298 } 2299 return chunk_word_size; 2300 } 2301 2302 void SpaceManager::track_metaspace_memory_usage() { 2303 if (is_init_completed()) { 2304 if (is_class()) { 2305 MemoryService::track_compressed_class_memory_usage(); 2306 } 2307 MemoryService::track_metaspace_memory_usage(); 2308 } 2309 } 2310 2311 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) { 2312 assert(vs_list()->current_virtual_space() != NULL, 2313 "Should have been set"); 2314 assert(current_chunk() == NULL || 2315 current_chunk()->allocate(word_size) == NULL, 2316 "Don't need to expand"); 2317 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 2318 2319 if (log_is_enabled(Trace, gc, metaspace, freelist)) { 2320 size_t words_left = 0; 2321 size_t words_used = 0; 2322 if (current_chunk() != NULL) { 2323 words_left = current_chunk()->free_word_size(); 2324 words_used = current_chunk()->used_word_size(); 2325 } 2326 log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left", 2327 word_size, words_used, words_left); 2328 } 2329 2330 // Get another chunk 2331 size_t chunk_word_size = calc_chunk_size(word_size); 2332 Metachunk* next = get_new_chunk(chunk_word_size); 2333 2334 MetaWord* mem = NULL; 2335 2336 // If a chunk was available, add it to the in-use chunk list 2337 // and do an allocation from it. 2338 if (next != NULL) { 2339 // Add to this manager's list of chunks in use. 2340 add_chunk(next, false); 2341 mem = next->allocate(word_size); 2342 } 2343 2344 // Track metaspace memory usage statistic. 2345 track_metaspace_memory_usage(); 2346 2347 return mem; 2348 } 2349 2350 void SpaceManager::print_on(outputStream* st) const { 2351 2352 for (ChunkIndex i = ZeroIndex; 2353 i < NumberOfInUseLists ; 2354 i = next_chunk_index(i) ) { 2355 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT, 2356 p2i(chunks_in_use(i)), 2357 chunks_in_use(i) == NULL ? 
                  0 : chunks_in_use(i)->word_size());
2358   }
2359   st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2360                " Humongous " SIZE_FORMAT,
2361                sum_waste_in_chunks_in_use(SmallIndex),
2362                sum_waste_in_chunks_in_use(MediumIndex),
2363                sum_waste_in_chunks_in_use(HumongousIndex));
2364   // block free lists
2365   if (block_freelists() != NULL) {
2366     st->print_cr("total in block free lists " SIZE_FORMAT,
2367                  block_freelists()->total_size());
2368   }
2369 }
2370
2371 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2372                            Mutex* lock) :
2373   _mdtype(mdtype),
2374   _allocated_blocks_words(0),
2375   _allocated_chunks_words(0),
2376   _allocated_chunks_count(0),
2377   _block_freelists(NULL),
2378   _lock(lock)
2379 {
2380   initialize();
2381 }
2382
2383 void SpaceManager::inc_size_metrics(size_t words) {
2384   assert_lock_strong(SpaceManager::expand_lock());
2385   // Total of allocated Metachunks and allocated Metachunks count
2386   // for each SpaceManager
2387   _allocated_chunks_words = _allocated_chunks_words + words;
2388   _allocated_chunks_count++;
2389   // Global total of capacity in allocated Metachunks
2390   MetaspaceAux::inc_capacity(mdtype(), words);
2391   // Global total of allocated Metablocks.
2392   // used_words_slow() includes the overhead in each
2393   // Metachunk so include it in the used when the
2394   // Metachunk is first added (so only added once per
2395   // Metachunk).
2396   MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2397 }
2398
2399 void SpaceManager::inc_used_metrics(size_t words) {
2400   // Add to the per SpaceManager total
2401   Atomic::add_ptr(words, &_allocated_blocks_words);
2402   // Add to the global total
2403   MetaspaceAux::inc_used(mdtype(), words);
2404 }
2405
2406 void SpaceManager::dec_total_from_size_metrics() {
2407   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2408   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2409   // Also deduct the overhead per Metachunk
2410   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2411 }
2412
2413 void SpaceManager::initialize() {
2414   Metadebug::init_allocation_fail_alot_count();
2415   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2416     _chunks_in_use[i] = NULL;
2417   }
2418   _current_chunk = NULL;
2419   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
2420 }
2421
2422 SpaceManager::~SpaceManager() {
2423   // This takes this->_lock, which can't be done while holding expand_lock()
2424   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2425          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2426          " allocated_chunks_words() " SIZE_FORMAT,
2427          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2428
2429   MutexLockerEx fcl(SpaceManager::expand_lock(),
2430                     Mutex::_no_safepoint_check_flag);
2431
2432   chunk_manager()->slow_locked_verify();
2433
2434   dec_total_from_size_metrics();
2435
2436   Log(gc, metaspace, freelist) log;
2437   if (log.is_trace()) {
2438     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
2439     ResourceMark rm;
2440     LogStream ls(log.trace());
2441     locked_print_chunks_in_use_on(&ls);
2442     if (block_freelists() != NULL) {
2443       block_freelists()->print_on(&ls);
2444     }
2445   }
2446
2447   // Add all the chunks in use by this space manager
2448   // to the global list of free chunks.
2449
2450   // Follow each list of chunks-in-use and add them to the
2451   // free lists. Each list is NULL terminated.
2452
2453   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
2454     Metachunk* chunks = chunks_in_use(i);
2455     chunk_manager()->return_chunk_list(i, chunks);
2456     set_chunks_in_use(i, NULL);
2457   }
2458
2459   chunk_manager()->slow_locked_verify();
2460
2461   if (_block_freelists != NULL) {
2462     delete _block_freelists;
2463   }
2464 }
2465
2466 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2467   assert_lock_strong(_lock);
2468   // Allocations and deallocations are in raw_word_size
2469   size_t raw_word_size = get_allocation_word_size(word_size);
2470   // Lazily create a block_freelist
2471   if (block_freelists() == NULL) {
2472     _block_freelists = new BlockFreelist();
2473   }
2474   block_freelists()->return_block(p, raw_word_size);
2475 }
2476
2477 // Adds a chunk to the list of chunks in use.
2478 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2479
2480   assert(new_chunk != NULL, "Should not be NULL");
2481   assert(new_chunk->next() == NULL, "Should not be on a list");
2482
2483   new_chunk->reset_empty();
2484
2485   // Find the correct list and set the current
2486   // chunk for that list.
2487   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
2488
2489   if (index != HumongousIndex) {
2490     retire_current_chunk();
2491     set_current_chunk(new_chunk);
2492     new_chunk->set_next(chunks_in_use(index));
2493     set_chunks_in_use(index, new_chunk);
2494   } else {
2495     // For the null class loader data and for DumpSharedSpaces, the first chunk
2496     // isn't small, so the small list will be null. Link this first chunk as
2497     // the current chunk.
2498     if (make_current) {
2499       // Set as the current chunk but otherwise treat as a humongous chunk.
2500       set_current_chunk(new_chunk);
2501     }
2502     // Link at head. The _current_chunk only points to a humongous chunk for
2503     // the null class loader metaspace (class and data virtual space managers),
2504     // and chunks are linked at the head, so _current_chunk will not point
2505     // to the tail of the humongous chunks list.
2506 new_chunk->set_next(chunks_in_use(HumongousIndex)); 2507 set_chunks_in_use(HumongousIndex, new_chunk); 2508 2509 assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency"); 2510 } 2511 2512 // Add to the running sum of capacity 2513 inc_size_metrics(new_chunk->word_size()); 2514 2515 assert(new_chunk->is_empty(), "Not ready for reuse"); 2516 Log(gc, metaspace, freelist) log; 2517 if (log.is_trace()) { 2518 log.trace("SpaceManager::add_chunk: " SIZE_FORMAT ") ", sum_count_in_chunks_in_use()); 2519 ResourceMark rm; 2520 LogStream ls(log.trace()); 2521 new_chunk->print_on(&ls); 2522 chunk_manager()->locked_print_free_chunks(&ls); 2523 } 2524 } 2525 2526 void SpaceManager::retire_current_chunk() { 2527 if (current_chunk() != NULL) { 2528 size_t remaining_words = current_chunk()->free_word_size(); 2529 if (remaining_words >= BlockFreelist::min_dictionary_size()) { 2530 MetaWord* ptr = current_chunk()->allocate(remaining_words); 2531 deallocate(ptr, remaining_words); 2532 inc_used_metrics(remaining_words); 2533 } 2534 } 2535 } 2536 2537 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) { 2538 // Get a chunk from the chunk freelist 2539 Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size); 2540 2541 if (next == NULL) { 2542 next = vs_list()->get_new_chunk(chunk_word_size, 2543 medium_chunk_bunch()); 2544 } 2545 2546 Log(gc, metaspace, alloc) log; 2547 if (log.is_debug() && next != NULL && 2548 SpaceManager::is_humongous(next->word_size())) { 2549 log.debug(" new humongous chunk word size " PTR_FORMAT, next->word_size()); 2550 } 2551 2552 return next; 2553 } 2554 2555 /* 2556 * The policy is to allocate up to _small_chunk_limit small chunks 2557 * after which only medium chunks are allocated. This is done to 2558 * reduce fragmentation. In some cases, this can result in a lot 2559 * of small chunks being allocated to the point where it's not 2560 * possible to expand. If this happens, there may be no medium chunks 2561 * available and OOME would be thrown. Instead of doing that, 2562 * if the allocation request size fits in a small chunk, an attempt 2563 * will be made to allocate a small chunk. 2564 */ 2565 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) { 2566 size_t raw_word_size = get_allocation_word_size(word_size); 2567 2568 if (raw_word_size + Metachunk::overhead() > small_chunk_size()) { 2569 return NULL; 2570 } 2571 2572 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 2573 MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag); 2574 2575 Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size()); 2576 2577 MetaWord* mem = NULL; 2578 2579 if (chunk != NULL) { 2580 // Add chunk to the in-use chunk list and do an allocation from it. 2581 // Add to this manager's list of chunks in use. 2582 add_chunk(chunk, false); 2583 mem = chunk->allocate(raw_word_size); 2584 2585 inc_used_metrics(raw_word_size); 2586 2587 // Track metaspace memory usage statistic. 2588 track_metaspace_memory_usage(); 2589 } 2590 2591 return mem; 2592 } 2593 2594 MetaWord* SpaceManager::allocate(size_t word_size) { 2595 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 2596 size_t raw_word_size = get_allocation_word_size(word_size); 2597 BlockFreelist* fl = block_freelists(); 2598 MetaWord* p = NULL; 2599 // Allocation from the dictionary is expensive in the sense that 2600 // the dictionary has to be searched for a size. Don't allocate 2601 // from the dictionary until it starts to get fat. 
Is this
2602   // a reasonable policy? Maybe a skinny dictionary is fast enough
2603   // for allocations. Do some profiling. JJJ
2604   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
2605     p = fl->get_block(raw_word_size);
2606   }
2607   if (p == NULL) {
2608     p = allocate_work(raw_word_size);
2609   }
2610
2611   return p;
2612 }
2613
2614 // Returns the address of space allocated for "word_size".
2615 // This method does not know about blocks (Metablocks).
2616 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2617   assert_lock_strong(_lock);
2618 #ifdef ASSERT
2619   if (Metadebug::test_metadata_failure()) {
2620     return NULL;
2621   }
2622 #endif
2623   // Is there space in the current chunk?
2624   MetaWord* result = NULL;
2625
2626   if (current_chunk() != NULL) {
2627     result = current_chunk()->allocate(word_size);
2628   }
2629
2630   if (result == NULL) {
2631     result = grow_and_allocate(word_size);
2632   }
2633
2634   if (result != NULL) {
2635     inc_used_metrics(word_size);
2636     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2637            "Head of the list is being allocated");
2638   }
2639
2640   return result;
2641 }
2642
2643 void SpaceManager::verify() {
2644   // If there are blocks in the dictionary, then
2645   // verification of chunks does not work since
2646   // being in the dictionary alters a chunk.
2647   if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
2648     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2649       Metachunk* curr = chunks_in_use(i);
2650       while (curr != NULL) {
2651         curr->verify();
2652         verify_chunk_size(curr);
2653         curr = curr->next();
2654       }
2655     }
2656   }
2657 }
2658
2659 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2660   assert(is_humongous(chunk->word_size()) ||
2661          chunk->word_size() == medium_chunk_size() ||
2662          chunk->word_size() == small_chunk_size() ||
2663          chunk->word_size() == specialized_chunk_size(),
2664          "Chunk size is wrong");
2665   return;
2666 }
2667
2668 #ifdef ASSERT
2669 void SpaceManager::verify_allocated_blocks_words() {
2670   // Verification is only guaranteed at a safepoint.
2671   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2672          "Verification can fail if the application is running");
2673   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2674          "allocation total is not consistent " SIZE_FORMAT
2675          " vs " SIZE_FORMAT,
2676          allocated_blocks_words(), sum_used_in_chunks_in_use());
2677 }
2678
2679 #endif
2680
2681 void SpaceManager::dump(outputStream* const out) const {
2682   size_t curr_total = 0;
2683   size_t waste = 0;
2684   uint i = 0;
2685   size_t used = 0;
2686   size_t capacity = 0;
2687
2688   // Add up statistics for all chunks in this SpaceManager.
2689   for (ChunkIndex index = ZeroIndex;
2690        index < NumberOfInUseLists;
2691        index = next_chunk_index(index)) {
2692     for (Metachunk* curr = chunks_in_use(index);
2693          curr != NULL;
2694          curr = curr->next()) {
2695       out->print("%d) ", i++);
2696       curr->print_on(out);
2697       curr_total += curr->word_size();
2698       used += curr->used_word_size();
2699       capacity += curr->word_size();
2700       waste += curr->free_word_size() + curr->overhead();
2701     }
2702   }
2703
2704   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2705     if (block_freelists() != NULL) block_freelists()->print_on(out);
2706   }
2707
2708   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2709   // Free space isn't wasted.
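  // (The loop above counted the current chunk's remaining free space as waste,
  // so it is backed out here.)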
2710 waste -= free; 2711 2712 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT 2713 " free " SIZE_FORMAT " capacity " SIZE_FORMAT 2714 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste); 2715 } 2716 2717 // MetaspaceAux 2718 2719 2720 size_t MetaspaceAux::_capacity_words[] = {0, 0}; 2721 volatile size_t MetaspaceAux::_used_words[] = {0, 0}; 2722 2723 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) { 2724 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2725 return list == NULL ? 0 : list->free_bytes(); 2726 } 2727 2728 size_t MetaspaceAux::free_bytes() { 2729 return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType); 2730 } 2731 2732 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) { 2733 assert_lock_strong(SpaceManager::expand_lock()); 2734 assert(words <= capacity_words(mdtype), 2735 "About to decrement below 0: words " SIZE_FORMAT 2736 " is greater than _capacity_words[%u] " SIZE_FORMAT, 2737 words, mdtype, capacity_words(mdtype)); 2738 _capacity_words[mdtype] -= words; 2739 } 2740 2741 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) { 2742 assert_lock_strong(SpaceManager::expand_lock()); 2743 // Needs to be atomic 2744 _capacity_words[mdtype] += words; 2745 } 2746 2747 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) { 2748 assert(words <= used_words(mdtype), 2749 "About to decrement below 0: words " SIZE_FORMAT 2750 " is greater than _used_words[%u] " SIZE_FORMAT, 2751 words, mdtype, used_words(mdtype)); 2752 // For CMS deallocation of the Metaspaces occurs during the 2753 // sweep which is a concurrent phase. Protection by the expand_lock() 2754 // is not enough since allocation is on a per Metaspace basis 2755 // and protected by the Metaspace lock. 2756 jlong minus_words = (jlong) - (jlong) words; 2757 Atomic::add_ptr(minus_words, &_used_words[mdtype]); 2758 } 2759 2760 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) { 2761 // _used_words tracks allocations for 2762 // each piece of metadata. Those allocations are 2763 // generally done concurrently by different application 2764 // threads so must be done atomically. 2765 Atomic::add_ptr(words, &_used_words[mdtype]); 2766 } 2767 2768 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) { 2769 size_t used = 0; 2770 ClassLoaderDataGraphMetaspaceIterator iter; 2771 while (iter.repeat()) { 2772 Metaspace* msp = iter.get_next(); 2773 // Sum allocated_blocks_words for each metaspace 2774 if (msp != NULL) { 2775 used += msp->used_words_slow(mdtype); 2776 } 2777 } 2778 return used * BytesPerWord; 2779 } 2780 2781 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) { 2782 size_t free = 0; 2783 ClassLoaderDataGraphMetaspaceIterator iter; 2784 while (iter.repeat()) { 2785 Metaspace* msp = iter.get_next(); 2786 if (msp != NULL) { 2787 free += msp->free_words_slow(mdtype); 2788 } 2789 } 2790 return free * BytesPerWord; 2791 } 2792 2793 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) { 2794 if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) { 2795 return 0; 2796 } 2797 // Don't count the space in the freelists. That space will be 2798 // added to the capacity calculation as needed. 
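  // (For example, a chunk sitting in a ChunkManager freelist still counts
  // toward committed_bytes(), but not toward the capacity summed here.)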
2799 size_t capacity = 0; 2800 ClassLoaderDataGraphMetaspaceIterator iter; 2801 while (iter.repeat()) { 2802 Metaspace* msp = iter.get_next(); 2803 if (msp != NULL) { 2804 capacity += msp->capacity_words_slow(mdtype); 2805 } 2806 } 2807 return capacity * BytesPerWord; 2808 } 2809 2810 size_t MetaspaceAux::capacity_bytes_slow() { 2811 #ifdef PRODUCT 2812 // Use capacity_bytes() in PRODUCT instead of this function. 2813 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT"); 2814 #endif 2815 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType); 2816 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType); 2817 assert(capacity_bytes() == class_capacity + non_class_capacity, 2818 "bad accounting: capacity_bytes() " SIZE_FORMAT 2819 " class_capacity + non_class_capacity " SIZE_FORMAT 2820 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT, 2821 capacity_bytes(), class_capacity + non_class_capacity, 2822 class_capacity, non_class_capacity); 2823 2824 return class_capacity + non_class_capacity; 2825 } 2826 2827 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) { 2828 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2829 return list == NULL ? 0 : list->reserved_bytes(); 2830 } 2831 2832 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) { 2833 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2834 return list == NULL ? 0 : list->committed_bytes(); 2835 } 2836 2837 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); } 2838 2839 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) { 2840 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype); 2841 if (chunk_manager == NULL) { 2842 return 0; 2843 } 2844 chunk_manager->slow_verify(); 2845 return chunk_manager->free_chunks_total_words(); 2846 } 2847 2848 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) { 2849 return free_chunks_total_words(mdtype) * BytesPerWord; 2850 } 2851 2852 size_t MetaspaceAux::free_chunks_total_words() { 2853 return free_chunks_total_words(Metaspace::ClassType) + 2854 free_chunks_total_words(Metaspace::NonClassType); 2855 } 2856 2857 size_t MetaspaceAux::free_chunks_total_bytes() { 2858 return free_chunks_total_words() * BytesPerWord; 2859 } 2860 2861 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) { 2862 return Metaspace::get_chunk_manager(mdtype) != NULL; 2863 } 2864 2865 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) { 2866 if (!has_chunk_free_list(mdtype)) { 2867 return MetaspaceChunkFreeListSummary(); 2868 } 2869 2870 const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype); 2871 return cm->chunk_free_list_summary(); 2872 } 2873 2874 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) { 2875 log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)", 2876 prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K); 2877 } 2878 2879 void MetaspaceAux::print_on(outputStream* out) { 2880 Metaspace::MetadataType nct = Metaspace::NonClassType; 2881 2882 out->print_cr(" Metaspace " 2883 "used " SIZE_FORMAT "K, " 2884 "capacity " SIZE_FORMAT "K, " 2885 "committed " SIZE_FORMAT "K, " 2886 "reserved " SIZE_FORMAT "K", 2887 used_bytes()/K, 2888 capacity_bytes()/K, 2889 committed_bytes()/K, 2890 reserved_bytes()/K); 2891 2892 if (Metaspace::using_class_space()) { 2893 Metaspace::MetadataType 
ct = Metaspace::ClassType; 2894 out->print_cr(" class space " 2895 "used " SIZE_FORMAT "K, " 2896 "capacity " SIZE_FORMAT "K, " 2897 "committed " SIZE_FORMAT "K, " 2898 "reserved " SIZE_FORMAT "K", 2899 used_bytes(ct)/K, 2900 capacity_bytes(ct)/K, 2901 committed_bytes(ct)/K, 2902 reserved_bytes(ct)/K); 2903 } 2904 } 2905 2906 // Print information for class space and data space separately. 2907 // This is almost the same as above. 2908 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) { 2909 size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype); 2910 size_t capacity_bytes = capacity_bytes_slow(mdtype); 2911 size_t used_bytes = used_bytes_slow(mdtype); 2912 size_t free_bytes = free_bytes_slow(mdtype); 2913 size_t used_and_free = used_bytes + free_bytes + 2914 free_chunks_capacity_bytes; 2915 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT 2916 "K + unused in chunks " SIZE_FORMAT "K + " 2917 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT 2918 "K capacity in allocated chunks " SIZE_FORMAT "K", 2919 used_bytes / K, 2920 free_bytes / K, 2921 free_chunks_capacity_bytes / K, 2922 used_and_free / K, 2923 capacity_bytes / K); 2924 // Accounting can only be correct if we got the values during a safepoint 2925 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong"); 2926 } 2927 2928 // Print total fragmentation for class metaspaces 2929 void MetaspaceAux::print_class_waste(outputStream* out) { 2930 assert(Metaspace::using_class_space(), "class metaspace not used"); 2931 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0; 2932 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0; 2933 ClassLoaderDataGraphMetaspaceIterator iter; 2934 while (iter.repeat()) { 2935 Metaspace* msp = iter.get_next(); 2936 if (msp != NULL) { 2937 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2938 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2939 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2940 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex); 2941 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2942 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex); 2943 cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2944 } 2945 } 2946 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2947 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2948 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2949 "large count " SIZE_FORMAT, 2950 cls_specialized_count, cls_specialized_waste, 2951 cls_small_count, cls_small_waste, 2952 cls_medium_count, cls_medium_waste, cls_humongous_count); 2953 } 2954 2955 // Print total fragmentation for data and class metaspaces separately 2956 void MetaspaceAux::print_waste(outputStream* out) { 2957 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0; 2958 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0; 2959 2960 ClassLoaderDataGraphMetaspaceIterator iter; 2961 while (iter.repeat()) { 2962 Metaspace* msp = iter.get_next(); 2963 if (msp != NULL) { 2964 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2965 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2966 small_waste += 
msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2967 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex); 2968 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2969 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex); 2970 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2971 } 2972 } 2973 out->print_cr("Total fragmentation waste (words) doesn't count free space"); 2974 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2975 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2976 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2977 "large count " SIZE_FORMAT, 2978 specialized_count, specialized_waste, small_count, 2979 small_waste, medium_count, medium_waste, humongous_count); 2980 if (Metaspace::using_class_space()) { 2981 print_class_waste(out); 2982 } 2983 } 2984 2985 // Dump global metaspace things from the end of ClassLoaderDataGraph 2986 void MetaspaceAux::dump(outputStream* out) { 2987 out->print_cr("All Metaspace:"); 2988 out->print("data space: "); print_on(out, Metaspace::NonClassType); 2989 out->print("class space: "); print_on(out, Metaspace::ClassType); 2990 print_waste(out); 2991 } 2992 2993 void MetaspaceAux::verify_free_chunks() { 2994 Metaspace::chunk_manager_metadata()->verify(); 2995 if (Metaspace::using_class_space()) { 2996 Metaspace::chunk_manager_class()->verify(); 2997 } 2998 } 2999 3000 void MetaspaceAux::verify_capacity() { 3001 #ifdef ASSERT 3002 size_t running_sum_capacity_bytes = capacity_bytes(); 3003 // For purposes of the running sum of capacity, verify against capacity 3004 size_t capacity_in_use_bytes = capacity_bytes_slow(); 3005 assert(running_sum_capacity_bytes == capacity_in_use_bytes, 3006 "capacity_words() * BytesPerWord " SIZE_FORMAT 3007 " capacity_bytes_slow()" SIZE_FORMAT, 3008 running_sum_capacity_bytes, capacity_in_use_bytes); 3009 for (Metaspace::MetadataType i = Metaspace::ClassType; 3010 i < Metaspace:: MetadataTypeCount; 3011 i = (Metaspace::MetadataType)(i + 1)) { 3012 size_t capacity_in_use_bytes = capacity_bytes_slow(i); 3013 assert(capacity_bytes(i) == capacity_in_use_bytes, 3014 "capacity_bytes(%u) " SIZE_FORMAT 3015 " capacity_bytes_slow(%u)" SIZE_FORMAT, 3016 i, capacity_bytes(i), i, capacity_in_use_bytes); 3017 } 3018 #endif 3019 } 3020 3021 void MetaspaceAux::verify_used() { 3022 #ifdef ASSERT 3023 size_t running_sum_used_bytes = used_bytes(); 3024 // For purposes of the running sum of used, verify against used 3025 size_t used_in_use_bytes = used_bytes_slow(); 3026 assert(used_bytes() == used_in_use_bytes, 3027 "used_bytes() " SIZE_FORMAT 3028 " used_bytes_slow()" SIZE_FORMAT, 3029 used_bytes(), used_in_use_bytes); 3030 for (Metaspace::MetadataType i = Metaspace::ClassType; 3031 i < Metaspace:: MetadataTypeCount; 3032 i = (Metaspace::MetadataType)(i + 1)) { 3033 size_t used_in_use_bytes = used_bytes_slow(i); 3034 assert(used_bytes(i) == used_in_use_bytes, 3035 "used_bytes(%u) " SIZE_FORMAT 3036 " used_bytes_slow(%u)" SIZE_FORMAT, 3037 i, used_bytes(i), i, used_in_use_bytes); 3038 } 3039 #endif 3040 } 3041 3042 void MetaspaceAux::verify_metrics() { 3043 verify_capacity(); 3044 verify_used(); 3045 } 3046 3047 3048 // Metaspace methods 3049 3050 size_t Metaspace::_first_chunk_word_size = 0; 3051 size_t Metaspace::_first_class_chunk_word_size = 0; 3052 3053 size_t Metaspace::_commit_alignment = 0; 3054 size_t Metaspace::_reserve_alignment = 0; 3055 3056 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) { 3057 initialize(lock, type); 3058 } 3059 
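// Illustrative note (not VM code; names assumed from surrounding code): a
// Metaspace is created per class loader, e.g.
//   Metaspace* ms = new Metaspace(cld->metaspace_lock(), Metaspace::StandardMetaspaceType);
// and deleting it below tears down its SpaceManagers, which return all of
// their chunks to the global ChunkManager.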
3060 Metaspace::~Metaspace() {
3061   delete _vsm;
3062   if (using_class_space()) {
3063     delete _class_vsm;
3064   }
3065 }
3066
3067 VirtualSpaceList* Metaspace::_space_list = NULL;
3068 VirtualSpaceList* Metaspace::_class_space_list = NULL;
3069
3070 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
3071 ChunkManager* Metaspace::_chunk_manager_class = NULL;
3072
3073 #define VIRTUALSPACEMULTIPLIER 2
3074
3075 #ifdef _LP64
3076 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
3077
3078 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
3079   assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
3080   // Figure out the narrow_klass_base and the narrow_klass_shift. The
3081   // narrow_klass_base is the lower of the metaspace base and the cds base
3082   // (if cds is enabled). The narrow_klass_shift depends on the distance
3083   // between the lower base and the higher address.
3084   address lower_base;
3085   address higher_address;
3086 #if INCLUDE_CDS
3087   if (UseSharedSpaces) {
3088     higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
3089                           (address)(metaspace_base + compressed_class_space_size()));
3090     lower_base = MIN2(metaspace_base, cds_base);
3091   } else
3092 #endif
3093   {
3094     higher_address = metaspace_base + compressed_class_space_size();
3095     lower_base = metaspace_base;
3096
3097     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
3098     // If compressed class space fits in lower 32G, we don't need a base.
3099     if (higher_address <= (address)klass_encoding_max) {
3100       lower_base = 0; // Effectively lower base is zero.
3101     }
3102   }
3103
3104   Universe::set_narrow_klass_base(lower_base);
3105
3106   // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
3107   // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
3108   // how the dump time narrow_klass_shift is set. Although CDS can also work
3109   // in zero-shift mode, it uses LogKlassAlignmentInBytes for the klass shift
3110   // to be consistent with AOT, so archived java heap objects can be used
3111   // at the same time as AOT code.
3112   if (!UseSharedSpaces
3113       && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
3114     Universe::set_narrow_klass_shift(0);
3115   } else {
3116     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
3117   }
3118   AOTLoader::set_narrow_klass_shift();
3119 }
3120
3121 #if INCLUDE_CDS
3122 // Return TRUE if the specified metaspace_base and cds_base are close enough
3123 // to work with compressed klass pointers.
3124 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
3125   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
3126   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3127   address lower_base = MIN2((address)metaspace_base, cds_base);
3128   address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
3129                                 (address)(metaspace_base + compressed_class_space_size()));
3130   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
3131 }
3132 #endif
3133
3134 // Try to allocate the metaspace at the requested addr.
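// The fallbacks below, in outline: (1) reserve at requested_addr; (2) on
// AARCH64 and AIX, probe upward at 4G-aligned addresses; (3) with CDS, step
// requested_addr up 1G at a time while the result remains CDS-compatible;
// (4) reserve anywhere; (5) failing all of that, exit during initialization.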
3135 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
3136   assert(!DumpSharedSpaces, "compressed klass space is allocated by MetaspaceShared class.");
3137   assert(using_class_space(), "called improperly");
3138   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3139   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
3140          "Metaspace size is too big");
3141   assert_is_aligned(requested_addr, _reserve_alignment);
3142   assert_is_aligned(cds_base, _reserve_alignment);
3143   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
3144 
3145   // Don't use large pages for the class space.
3146   bool large_pages = false;
3147 
3148 #if !(defined(AARCH64) || defined(AIX))
3149   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
3150                                              _reserve_alignment,
3151                                              large_pages,
3152                                              requested_addr);
3153 #else // AARCH64 || AIX
3154   ReservedSpace metaspace_rs;
3155 
3156   // Our compressed klass pointers may fit nicely into the lower 32
3157   // bits.
3158   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
3159     metaspace_rs = ReservedSpace(compressed_class_space_size(),
3160                                  _reserve_alignment,
3161                                  large_pages,
3162                                  requested_addr);
3163   }
3164 
3165   if (!metaspace_rs.is_reserved()) {
3166     // Aarch64: Try to align metaspace so that we can decode a compressed
3167     // klass with a single MOVK instruction. We can do this iff the
3168     // compressed class base is a multiple of 4G.
3169     // Aix: Search for a place where we can find memory. If we need to load
3170     // the base, 4G alignment is helpful, too.
3171     size_t increment = AARCH64_ONLY(4*)G;
3172     for (char *a = align_up(requested_addr, increment);
3173          a < (char*)(1024*G);
3174          a += increment) {
3175       if (a == (char *)(32*G)) {
3176         // Go faster from here on. Zero-based is no longer possible.
3177         increment = 4*G;
3178       }
3179 
3180 #if INCLUDE_CDS
3181       if (UseSharedSpaces
3182           && !can_use_cds_with_metaspace_addr(a, cds_base)) {
3183         // We failed to find an aligned base that will reach. Fall
3184         // back to using our requested addr.
3185         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3186                                      _reserve_alignment,
3187                                      large_pages,
3188                                      requested_addr);
3189         break;
3190       }
3191 #endif
3192 
3193       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3194                                    _reserve_alignment,
3195                                    large_pages,
3196                                    a);
3197       if (metaspace_rs.is_reserved())
3198         break;
3199     }
3200   }
3201 
3202 #endif // AARCH64 || AIX
3203 
3204   if (!metaspace_rs.is_reserved()) {
3205 #if INCLUDE_CDS
3206     if (UseSharedSpaces) {
3207       size_t increment = align_up(1*G, _reserve_alignment);
3208 
3209       // Keep trying to allocate the metaspace, increasing the requested_addr
3210       // by 1GB each time, until we reach an address that will no longer allow
3211       // use of CDS with compressed klass pointers.
3212       char *addr = requested_addr;
3213       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3214              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3215         addr = addr + increment;
3216         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3217                                      _reserve_alignment, large_pages, addr);
3218       }
3219     }
3220 #endif
3221     // If no successful allocation then try to allocate the space anywhere. If
3222     // that fails then OOM doom. At this point we cannot try allocating the
3223     // metaspace as if UseCompressedClassPointers is off because too much
3224     // initialization has happened that depends on UseCompressedClassPointers.
3225     // So, UseCompressedClassPointers cannot be turned off at this point.
3226     if (!metaspace_rs.is_reserved()) {
3227       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3228                                    _reserve_alignment, large_pages);
3229       if (!metaspace_rs.is_reserved()) {
3230         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
3231                                               compressed_class_space_size()));
3232       }
3233     }
3234   }
3235 
3236   // If we got here then the metaspace got allocated.
3237   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3238 
3239 #if INCLUDE_CDS
3240   // Verify that we can use shared spaces. Otherwise, turn off CDS.
3241   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3242     FileMapInfo::stop_sharing_and_unmap(
3243         "Could not allocate metaspace at a compatible address");
3244   }
3245 #endif
3246   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3247                                   UseSharedSpaces ? (address)cds_base : 0);
3248 
3249   initialize_class_space(metaspace_rs);
3250 
3251   LogTarget(Trace, gc, metaspace) lt;
3252   if (lt.is_enabled()) {
3253     ResourceMark rm;
3254     LogStream ls(lt);
3255     print_compressed_class_space(&ls, requested_addr);
3256   }
3257 }
3258 
3259 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3260   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3261                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3262   if (_class_space_list != NULL) {
3263     address base = (address)_class_space_list->current_virtual_space()->bottom();
3264     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3265               compressed_class_space_size(), p2i(base));
3266     if (requested_addr != 0) {
3267       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3268     }
3269     st->cr();
3270   }
3271 }
3272 
3273 // For UseCompressedClassPointers the class space is reserved above the top of
3274 // the Java heap. The argument passed in is at the base of the compressed space.
3275 void Metaspace::initialize_class_space(ReservedSpace rs) {
3276   // The reserved space size may be bigger because of alignment, especially with UseLargePages.
3277   assert(rs.size() >= CompressedClassSpaceSize,
3278          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
3279   assert(using_class_space(), "Must be using class space");
3280   _class_space_list = new VirtualSpaceList(rs);
3281   _chunk_manager_class = new ChunkManager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3282 
3283   if (!_class_space_list->initialization_succeeded()) {
3284     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3285   }
3286 }
3287 
3288 #endif // _LP64
3289 
3290 void Metaspace::ergo_initialize() {
3291   if (DumpSharedSpaces) {
3292     // Using large pages when dumping the shared archive is currently not implemented.
3293     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3294   }
3295 
3296   size_t page_size = os::vm_page_size();
3297   if (UseLargePages && UseLargePagesInMetaspace) {
3298     page_size = os::large_page_size();
3299   }
3300 
3301   _commit_alignment  = page_size;
3302   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
3303 
3304   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
3305   // override if MaxMetaspaceSize was set on the command line or not.
3306   // This information is needed later to conform to the specification of the
3307   // java.lang.management.MemoryUsage API.
3308   //
3309   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3310   // globals.hpp to the aligned value, but this is not possible, since the
3311   // alignment depends on other flags being parsed.
3312   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3313 
3314   if (MetaspaceSize > MaxMetaspaceSize) {
3315     MetaspaceSize = MaxMetaspaceSize;
3316   }
3317 
3318   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
3319 
3320   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3321 
3322   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3323   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3324 
3325   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3326   set_compressed_class_space_size(CompressedClassSpaceSize);
3327 }
3328 
3329 void Metaspace::global_initialize() {
3330   MetaspaceGC::initialize();
3331 
3332 #if INCLUDE_CDS
3333   if (DumpSharedSpaces) {
3334     MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
3335   } else if (UseSharedSpaces) {
3336     // If any of the archived spaces fails to map, UseSharedSpaces
3337     // is reset to false. Fall through to the
3338     // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
3339     // metaspace.
3340     MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
3341   }
3342 
3343   if (!DumpSharedSpaces && !UseSharedSpaces)
3344 #endif // INCLUDE_CDS
3345   {
3346 #ifdef _LP64
3347     if (using_class_space()) {
3348       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3349       allocate_metaspace_compressed_klass_ptrs(base, 0);
3350     }
3351 #endif // _LP64
3352   }
3353 
3354   // Initialize these before initializing the VirtualSpaceList
3355   _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3356   _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3357   // Make the first class chunk bigger than a medium chunk so it's not put
3358   // on the medium chunk list. The next chunk will be small and progress
3359   // from there. This size was calculated by running -version.
3360   _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3361                                       (CompressedClassSpaceSize/BytesPerWord)*2);
3362   _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3363   // Arbitrarily set the initial virtual space to a multiple
3364   // of the boot class loader size.
3365   size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3366   word_size = align_up(word_size, Metaspace::reserve_alignment_words());
3367 
3368   // Initialize the list of virtual spaces.
3369   _space_list = new VirtualSpaceList(word_size);
3370   _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3371 
3372   if (!_space_list->initialization_succeeded()) {
3373     vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3374   }
3375 
3376   _tracer = new MetaspaceTracer();
3377 }
3378 
3379 void Metaspace::post_initialize() {
3380   MetaspaceGC::post_initialize();
3381 }
3382 
3383 void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) {
3384   Metachunk* chunk = get_initialization_chunk(type, mdtype);
3385   if (chunk != NULL) {
3386     // Add to this manager's list of chunks in use and current_chunk().
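    // (The second argument of add_chunk() makes the new chunk the space
    // manager's current allocation chunk; see SpaceManager::add_chunk().)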
3387     get_space_manager(mdtype)->add_chunk(chunk, true);
3388   }
3389 }
3390 
3391 Metachunk* Metaspace::get_initialization_chunk(MetaspaceType type, MetadataType mdtype) {
3392   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
3393 
3394   // Get a chunk from the chunk freelist
3395   Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3396 
3397   if (chunk == NULL) {
3398     chunk = get_space_list(mdtype)->get_new_chunk(chunk_word_size,
3399                                                   get_space_manager(mdtype)->medium_chunk_bunch());
3400   }
3401 
3402   return chunk;
3403 }
3404 
3405 void Metaspace::verify_global_initialization() {
3406   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
3407   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
3408 
3409   if (using_class_space()) {
3410     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
3411     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
3412   }
3413 }
3414 
3415 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3416   verify_global_initialization();
3417 
3418   // Allocate SpaceManager for metadata objects.
3419   _vsm = new SpaceManager(NonClassType, lock);
3420 
3421   if (using_class_space()) {
3422     // Allocate SpaceManager for classes.
3423     _class_vsm = new SpaceManager(ClassType, lock);
3424   }
3425 
3426   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3427 
3428   // Allocate chunk for metadata objects
3429   initialize_first_chunk(type, NonClassType);
3430 
3431   // Allocate chunk for class metadata objects
3432   if (using_class_space()) {
3433     initialize_first_chunk(type, ClassType);
3434   }
3435 }
3436 
3437 size_t Metaspace::align_word_size_up(size_t word_size) {
3438   size_t byte_size = word_size * wordSize;
3439   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3440 }
3441 
3442 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3443   assert(!_frozen, "sanity");
3444   // Don't use class_vsm() unless UseCompressedClassPointers is true.
3445   if (is_class_space_allocation(mdtype)) {
3446     return class_vsm()->allocate(word_size);
3447   } else {
3448     return vsm()->allocate(word_size);
3449   }
3450 }
3451 
3452 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3453   assert(!_frozen, "sanity");
3454   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3455   assert(delta_bytes > 0, "Must be");
3456 
3457   size_t before = 0;
3458   size_t after  = 0;
3459   MetaWord* res;
3460   bool incremented;
3461 
3462   // Each thread increments the HWM at most once. Even if the thread fails to increment
3463   // the HWM, an allocation is still attempted. This is because another thread must then
3464   // have incremented the HWM and therefore the allocation might still succeed.
3465   do {
3466     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
3467     res = allocate(word_size, mdtype);
3468   } while (!incremented && res == NULL);
3469 
3470   if (incremented) {
3471     tracer()->report_gc_threshold(before, after,
3472                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3473     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
3474   }
3475 
3476   return res;
3477 }
3478 
3479 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3480   if (mdtype == ClassType) {
3481     return using_class_space() ?
           class_vsm()->sum_used_in_chunks_in_use() : 0;
3482   } else {
3483     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3484   }
3485 }
3486 
3487 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3488   assert(!_frozen, "sanity");
3489   if (mdtype == ClassType) {
3490     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3491   } else {
3492     return vsm()->sum_free_in_chunks_in_use();
3493   }
3494 }
3495 
3496 // Space capacity in the Metaspace. It includes
3497 // space in the lists of chunks from which allocations
3498 // have been made. It does not include space in the global freelist,
3499 // nor the space available in the block dictionary, since that space
3500 // is already counted in some chunk.
3501 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3502   if (mdtype == ClassType) {
3503     return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3504   } else {
3505     return vsm()->sum_capacity_in_chunks_in_use();
3506   }
3507 }
3508 
3509 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3510   return used_words_slow(mdtype) * BytesPerWord;
3511 }
3512 
3513 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3514   return capacity_words_slow(mdtype) * BytesPerWord;
3515 }
3516 
3517 size_t Metaspace::allocated_blocks_bytes() const {
3518   return vsm()->allocated_blocks_bytes() +
3519          (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
3520 }
3521 
3522 size_t Metaspace::allocated_chunks_bytes() const {
3523   return vsm()->allocated_chunks_bytes() +
3524          (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
3525 }
3526 
3527 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3528   assert(!_frozen, "sanity");
3529   assert(!SafepointSynchronize::is_at_safepoint()
3530          || Thread::current()->is_VM_thread(), "should be the VM thread");
3531 
3532   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3533 
3534   if (is_class && using_class_space()) {
3535     class_vsm()->deallocate(ptr, word_size);
3536   } else {
3537     vsm()->deallocate(ptr, word_size);
3538   }
3539 }
3540 
3541 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3542                               MetaspaceObj::Type type, TRAPS) {
3543   assert(!_frozen, "sanity");
3544   if (HAS_PENDING_EXCEPTION) {
3545     assert(false, "Should not allocate with exception pending");
3546     return NULL;  // caller does a CHECK_NULL too
3547   }
3548 
3549   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3550          "ClassLoaderData::the_null_class_loader_data() should have been used.");
3551 
3552   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3553 
3554   // Try to allocate metadata.
3555   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3556 
3557   if (result == NULL) {
3558     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
3559 
3560     // Allocation failed.
3561     if (is_init_completed()) {
3562       // Only start a GC if the bootstrapping has completed.
3563 
3564       // Try to clean out some memory and retry.
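      // (satisfy_failed_metadata_allocation() may run a GC that unloads
      // classes; metaspace freed that way can satisfy the retried request.)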
3565       result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3566           loader_data, word_size, mdtype);
3567     }
3568   }
3569 
3570   if (result == NULL) {
3571     SpaceManager* sm;
3572     if (is_class_space_allocation(mdtype)) {
3573       sm = loader_data->metaspace_non_null()->class_vsm();
3574     } else {
3575       sm = loader_data->metaspace_non_null()->vsm();
3576     }
3577 
3578     result = sm->get_small_chunk_and_allocate(word_size);
3579 
3580     if (result == NULL) {
3581       report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3582     }
3583   }
3584 
3585   // Zero initialize.
3586   Copy::fill_to_words((HeapWord*)result, word_size, 0);
3587 
3588   return result;
3589 }
3590 
3591 size_t Metaspace::class_chunk_size(size_t word_size) {
3592   assert(using_class_space(), "Has to use class space");
3593   return class_vsm()->calc_chunk_size(word_size);
3594 }
3595 
3596 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3597   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3598 
3599   // If result is still null, we are out of memory.
3600   Log(gc, metaspace, freelist) log;
3601   if (log.is_info()) {
3602     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
3603              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
3604     ResourceMark rm;
3605     if (log.is_debug()) {
3606       if (loader_data->metaspace_or_null() != NULL) {
3607         LogStream ls(log.debug());
3608         loader_data->dump(&ls);
3609       }
3610     }
3611     LogStream ls(log.info());
3612     MetaspaceAux::dump(&ls);
3613     ChunkManager::print_all_chunkmanagers(&ls);
3614   }
3615 
3616   bool out_of_compressed_class_space = false;
3617   if (is_class_space_allocation(mdtype)) {
3618     Metaspace* metaspace = loader_data->metaspace_non_null();
3619     out_of_compressed_class_space =
3620       MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3621       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3622       CompressedClassSpaceSize;
3623   }
3624 
3625   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3626   const char* space_string = out_of_compressed_class_space ?
3627     "Compressed class space" : "Metaspace";
3628 
3629   report_java_out_of_memory(space_string);
3630 
3631   if (JvmtiExport::should_post_resource_exhausted()) {
3632     JvmtiExport::post_resource_exhausted(
3633         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3634         space_string);
3635   }
3636 
3637   if (!is_init_completed()) {
3638     vm_exit_during_initialization("OutOfMemoryError", space_string);
3639   }
3640 
3641   if (out_of_compressed_class_space) {
3642     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3643   } else {
3644     THROW_OOP(Universe::out_of_memory_error_metaspace());
3645   }
3646 }
3647 
3648 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
3649   switch (mdtype) {
3650     case Metaspace::ClassType: return "Class";
3651     case Metaspace::NonClassType: return "Metadata";
3652     default:
3653       assert(false, "Got bad mdtype: %d", (int) mdtype);
3654       return NULL;
3655   }
3656 }
3657 
3658 void Metaspace::purge(MetadataType mdtype) {
3659   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3660 }
3661 
3662 void Metaspace::purge() {
3663   MutexLockerEx cl(SpaceManager::expand_lock(),
3664                    Mutex::_no_safepoint_check_flag);
3665   purge(NonClassType);
3666   if (using_class_space()) {
3667     purge(ClassType);
3668   }
3669 }
3670 
3671 void Metaspace::print_on(outputStream* out) const {
3672   // Print both class virtual space counts and metaspace.
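  // (Output is produced only when the Verbose flag is set; otherwise this
  // method is a no-op.)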
3673   if (Verbose) {
3674     vsm()->print_on(out);
3675     if (using_class_space()) {
3676       class_vsm()->print_on(out);
3677     }
3678   }
3679 }
3680 
3681 bool Metaspace::contains(const void* ptr) {
3682   if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
3683     return true;
3684   }
3685   return contains_non_shared(ptr);
3686 }
3687 
3688 bool Metaspace::contains_non_shared(const void* ptr) {
3689   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
3690     return true;
3691   }
3692 
3693   return get_space_list(NonClassType)->contains(ptr);
3694 }
3695 
3696 void Metaspace::verify() {
3697   vsm()->verify();
3698   if (using_class_space()) {
3699     class_vsm()->verify();
3700   }
3701 }
3702 
3703 void Metaspace::dump(outputStream* const out) const {
3704   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
3705   vsm()->dump(out);
3706   if (using_class_space()) {
3707     out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
3708     class_vsm()->dump(out);
3709   }
3710 }
3711 
3712 /////////////// Unit tests ///////////////
3713 
3714 #ifndef PRODUCT
3715 
3716 class TestMetaspaceAuxTest : AllStatic {
3717  public:
3718   static void test_reserved() {
3719     size_t reserved = MetaspaceAux::reserved_bytes();
3720 
3721     assert(reserved > 0, "assert");
3722 
3723     size_t committed = MetaspaceAux::committed_bytes();
3724     assert(committed <= reserved, "assert");
3725 
3726     size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3727     assert(reserved_metadata > 0, "assert");
3728     assert(reserved_metadata <= reserved, "assert");
3729 
3730     if (UseCompressedClassPointers) {
3731       size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3732       assert(reserved_class > 0, "assert");
3733       assert(reserved_class < reserved, "assert");
3734     }
3735   }
3736 
3737   static void test_committed() {
3738     size_t committed = MetaspaceAux::committed_bytes();
3739 
3740     assert(committed > 0, "assert");
3741 
3742     size_t reserved = MetaspaceAux::reserved_bytes();
3743     assert(committed <= reserved, "assert");
3744 
3745     size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3746     assert(committed_metadata > 0, "assert");
3747     assert(committed_metadata <= committed, "assert");
3748 
3749     if (UseCompressedClassPointers) {
3750       size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3751       assert(committed_class > 0, "assert");
3752       assert(committed_class < committed, "assert");
3753     }
3754   }
3755 
3756   static void test_virtual_space_list_large_chunk() {
3757     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3758     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3759     // Use a size larger than VirtualSpaceSize (256k), and add one page to make
3760     // it _not_ be vm_allocation_granularity aligned on Windows.
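    // (Note: sizes in this test are in words; 2*256*K is 512K words, well
    // above VirtualSpaceSize, and the extra vm_page_size()/BytesPerWord words
    // keep the request from being allocation-granularity aligned.)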
3761     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3762     large_size += (os::vm_page_size()/BytesPerWord);
3763     vs_list->get_new_chunk(large_size, 0);
3764   }
3765 
3766   static void test() {
3767     test_reserved();
3768     test_committed();
3769     test_virtual_space_list_large_chunk();
3770   }
3771 };
3772 
3773 void TestMetaspaceAux_test() {
3774   TestMetaspaceAuxTest::test();
3775 }
3776 
3777 class TestVirtualSpaceNodeTest {
3778   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3779                        size_t& num_small_chunks,
3780                        size_t& num_specialized_chunks) {
3781     num_medium_chunks = words_left / MediumChunk;
3782     words_left = words_left % MediumChunk;
3783 
3784     num_small_chunks = words_left / SmallChunk;
3785     words_left = words_left % SmallChunk;
3786     // how many specialized chunks can we get?
3787     num_specialized_chunks = words_left / SpecializedChunk;
3788     assert(words_left % SpecializedChunk == 0, "should be nothing left");
3789   }
3790 
3791  public:
3792   static void test() {
3793     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3794     const size_t vsn_test_size_words = MediumChunk * 4;
3795     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3796 
3797     // The chunk sizes must be multiples of each other, or this will fail
3798     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3799     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3800 
3801     { // No committed memory in VSN
3802       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3803       VirtualSpaceNode vsn(vsn_test_size_bytes);
3804       vsn.initialize();
3805       vsn.retire(&cm);
3806       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3807     }
3808 
3809     { // All of VSN is committed, half is used by chunks
3810       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3811       VirtualSpaceNode vsn(vsn_test_size_bytes);
3812       vsn.initialize();
3813       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3814       vsn.get_chunk_vs(MediumChunk);
3815       vsn.get_chunk_vs(MediumChunk);
3816       vsn.retire(&cm);
3817       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3818       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3819     }
3820 
3821     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3822     // This doesn't work for systems with vm_page_size >= 16K.
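    // (For example, with 8-byte words a 16K page gives
    // page_chunks == 4 * 16K / 8 == 8K words, which equals MediumChunk,
    // so the guard below skips the sub-test.)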
3823     if (page_chunks < MediumChunk) {
3824       // 4 pages of VSN is committed, some is used by chunks
3825       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3826       VirtualSpaceNode vsn(vsn_test_size_bytes);
3827 
3828       vsn.initialize();
3829       vsn.expand_by(page_chunks, page_chunks);
3830       vsn.get_chunk_vs(SmallChunk);
3831       vsn.get_chunk_vs(SpecializedChunk);
3832       vsn.retire(&cm);
3833 
3834       // committed - used = words left to retire
3835       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3836 
3837       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3838       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3839 
3840       assert(num_medium_chunks == 0, "should not get any medium chunks");
3841       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for the computed number of chunks");
3842       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3843     }
3844 
3845     { // Half of VSN is committed, a humongous chunk is used
3846       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3847       VirtualSpaceNode vsn(vsn_test_size_bytes);
3848       vsn.initialize();
3849       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3850       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3851       vsn.retire(&cm);
3852 
3853       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3854       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3855       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3856 
3857       assert(num_medium_chunks == 0, "should not get any medium chunks");
3858       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for the computed number of chunks");
3859       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3860     }
3861 
3862   }
3863 
3864 #define assert_is_available_positive(word_size) \
3865   assert(vsn.is_available(word_size), \
3866          #word_size ": " PTR_FORMAT " bytes were not available in " \
3867          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3868          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
3869 
3870 #define assert_is_available_negative(word_size) \
3871   assert(!vsn.is_available(word_size), \
3872          #word_size ": " PTR_FORMAT " bytes should not be available in " \
3873          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3874          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
3875 
3876   static void test_is_available_positive() {
3877     // Reserve some memory.
3878     VirtualSpaceNode vsn(os::vm_allocation_granularity());
3879     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3880 
3881     // Commit some memory.
3882     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3883     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3884     assert(expanded, "Failed to commit");
3885 
3886     // Check that is_available accepts the committed size.
3887     assert_is_available_positive(commit_word_size);
3888 
3889     // Check that is_available accepts half the committed size.
3890     size_t expand_word_size = commit_word_size / 2;
3891     assert_is_available_positive(expand_word_size);
3892   }
3893 
3894   static void test_is_available_negative() {
3895     // Reserve some memory.
3896     VirtualSpaceNode vsn(os::vm_allocation_granularity());
3897     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3898 
3899     // Commit some memory.
3900     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3901     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3902     assert(expanded, "Failed to commit");
3903 
3904     // Check that is_available doesn't accept a size that is too large.
3905     size_t two_times_commit_word_size = commit_word_size * 2;
3906     assert_is_available_negative(two_times_commit_word_size);
3907   }
3908 
3909   static void test_is_available_overflow() {
3910     // Reserve some memory.
3911     VirtualSpaceNode vsn(os::vm_allocation_granularity());
3912     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3913 
3914     // Commit some memory.
3915     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3916     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3917     assert(expanded, "Failed to commit");
3918 
3919     // Calculate a size that will overflow the virtual space size.
3920     void* virtual_space_max = (void*)(uintptr_t)-1;
3921     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
3922     size_t overflow_size = bottom_to_max + BytesPerWord;
3923     size_t overflow_word_size = overflow_size / BytesPerWord;
3924 
3925     // Check that is_available can handle the overflow.
3926     assert_is_available_negative(overflow_word_size);
3927   }
3928 
3929   static void test_is_available() {
3930     TestVirtualSpaceNodeTest::test_is_available_positive();
3931     TestVirtualSpaceNodeTest::test_is_available_negative();
3932     TestVirtualSpaceNodeTest::test_is_available_overflow();
3933   }
3934 };
3935 
3936 void TestVirtualSpaceNode_test() {
3937   TestVirtualSpaceNodeTest::test();
3938   TestVirtualSpaceNodeTest::test_is_available();
3939 }
3940 
3941 // The following test is placed here instead of a gtest / unittest file
3942 // because the ChunkManager class is only available in this file.
3943 void ChunkManager_test_list_index() {
3944   ChunkManager manager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3945 
3946   // Test a previous bug where a query for a humongous class metachunk
3947   // incorrectly matched the non-class medium metachunk size.
3948   {
3949     assert(MediumChunk > ClassMediumChunk, "Precondition for test");
3950 
3951     ChunkIndex index = manager.list_index(MediumChunk);
3952 
3953     assert(index == HumongousIndex,
3954            "Requested size is larger than ClassMediumChunk,"
3955            " so should return HumongousIndex. Got index: %d", (int)index);
3956   }
3957 
3958   // Check the specified sizes as well.
3959   {
3960     ChunkIndex index = manager.list_index(ClassSpecializedChunk);
3961     assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
3962   }
3963   {
3964     ChunkIndex index = manager.list_index(ClassSmallChunk);
3965     assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
3966   }
3967   {
3968     ChunkIndex index = manager.list_index(ClassMediumChunk);
3969     assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
3970   }
3971   {
3972     ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
3973     assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index);
3974   }
3975 }
3976 
3977 #endif // !PRODUCT
3978 
3979 #ifdef ASSERT
3980 
3981 // ChunkManagerReturnTest stresses taking/returning chunks from the ChunkManager. It takes and
3982 // returns chunks from/to the ChunkManager while keeping track of the expected ChunkManager
3983 // content.
3984 class ChunkManagerReturnTestImpl {
3985 
3986   VirtualSpaceNode _vsn;
3987   ChunkManager _cm;
3988 
3989   // The expected content of the chunk manager.
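  // (These shadow counters are maintained by the test itself on every take
  // and return, and assert_counters() below compares them against the live
  // ChunkManager and VirtualSpaceNode counters.)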
3990   unsigned _chunks_in_chunkmanager;
3991   size_t _words_in_chunkmanager;
3992 
3993   // A fixed size pool of chunks. Chunks may be in the chunk manager (free) or not (in use).
3994   static const int num_chunks = 256;
3995   Metachunk* _pool[num_chunks];
3996 
3997   // Helper, return a random position into the chunk pool.
3998   static int get_random_position() {
3999     return os::random() % num_chunks;
4000   }
4001 
4002   // Asserts that ChunkManager counters match expectations.
4003   void assert_counters() {
4004     assert(_vsn.container_count() == num_chunks - _chunks_in_chunkmanager, "vsn counter mismatch.");
4005     assert(_cm.free_chunks_count() == _chunks_in_chunkmanager, "cm counter mismatch.");
4006     assert(_cm.free_chunks_total_words() == _words_in_chunkmanager, "cm counter mismatch.");
4007   }
4008 
4009   // Get a random chunk size. Equal chance to get spec/med/small chunk size or
4010   // a humongous chunk size. The latter itself is random in the range of [med+spec..5*med].
4011   size_t get_random_chunk_size() {
4012     const size_t sizes [] = { SpecializedChunk, SmallChunk, MediumChunk };
4013     const int rand = os::random() % 4;
4014     if (rand < 3) {
4015       return sizes[rand];
4016     } else {
4017       // Note: this affects the max. size of space (see _vsn initialization in ctor).
4018       return align_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk);
4019     }
4020   }
4021 
4022   // Starting at pool index <start>+1, find the next chunk tagged as either free or in use, depending
4023   // on <is_free>. Search wraps. Returns its position, or -1 if no matching chunk was found.
4024   int next_matching_chunk(int start, bool is_free) const {
4025     assert(start >= 0 && start < num_chunks, "invalid parameter");
4026     int pos = start;
4027     do {
4028       if (++pos == num_chunks) {
4029         pos = 0;
4030       }
4031       if (_pool[pos]->is_tagged_free() == is_free) {
4032         return pos;
4033       }
4034     } while (pos != start);
4035     return -1;
4036   }
4037 
4038   // A structure to keep information about a chunk list including which
4039   // chunks are part of this list. This is needed to keep information about a chunk list
4040   // we will return to the ChunkManager, because the original list will be destroyed.
4041   struct AChunkList {
4042     Metachunk* head;
4043     Metachunk* all[num_chunks];
4044     size_t size;
4045     int num;
4046     ChunkIndex index;
4047   };
4048 
4049   // Assemble, from the in-use chunks (not in the chunk manager) in the pool,
4050   // a random chunk list of max. length <list_size> of chunks with the same
4051   // ChunkIndex (chunk size).
4052   // Returns false if list cannot be assembled. List is returned in the <out>
4053   // structure. Returned list may be smaller than <list_size>.
4054   bool assemble_random_chunklist(AChunkList* out, int list_size) {
4055     // Choose a random in-use chunk from the pool...
4056     const int headpos = next_matching_chunk(get_random_position(), false);
4057     if (headpos == -1) {
4058       return false;
4059     }
4060     Metachunk* const head = _pool[headpos];
4061     out->all[0] = head;
4062     assert(head->is_tagged_free() == false, "Chunk state mismatch");
4063     // ..then go from there, chain it up with up to list_size - 1 number of other
4064     // in-use chunks of the same index.
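    // (The chunks are chained through their next/prev pointers into the
    // doubly linked list shape that ChunkManager::return_chunk_list() expects.)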
4065     const ChunkIndex index = _cm.list_index(head->word_size());
4066     int num_added = 1;
4067     size_t size_added = head->word_size();
4068     int pos = headpos;
4069     Metachunk* tail = head;
4070     do {
4071       pos = next_matching_chunk(pos, false);
4072       if (pos != headpos) {
4073         Metachunk* c = _pool[pos];
4074         assert(c->is_tagged_free() == false, "Chunk state mismatch");
4075         if (index == _cm.list_index(c->word_size())) {
4076           tail->set_next(c);
4077           c->set_prev(tail);
4078           tail = c;
4079           out->all[num_added] = c;
4080           num_added ++;
4081           size_added += c->word_size();
4082         }
4083       }
4084     } while (num_added < list_size && pos != headpos);
4085     out->head = head;
4086     out->index = index;
4087     out->size = size_added;
4088     out->num = num_added;
4089     return true;
4090   }
4091 
4092   // Take a single random chunk from the ChunkManager.
4093   bool take_single_random_chunk_from_chunkmanager() {
4094     assert_counters();
4095     _cm.locked_verify();
4096     int pos = next_matching_chunk(get_random_position(), true);
4097     if (pos == -1) {
4098       return false;
4099     }
4100     Metachunk* c = _pool[pos];
4101     assert(c->is_tagged_free(), "Chunk state mismatch");
4102     // Note: instead of using ChunkManager::remove_chunk on this one chunk, we call
4103     // ChunkManager::free_chunks_get() with this chunk's word size. We really want
4104     // to exercise ChunkManager::free_chunks_get() because that one gets called for
4105     // normal chunk allocation.
4106     Metachunk* c2 = _cm.free_chunks_get(c->word_size());
4107     assert(c2 != NULL, "Unexpected.");
4108     assert(!c2->is_tagged_free(), "Chunk state mismatch");
4109     assert(c2->next() == NULL && c2->prev() == NULL, "Chunk should be outside of a list.");
4110     _chunks_in_chunkmanager --;
4111     _words_in_chunkmanager -= c->word_size();
4112     assert_counters();
4113     _cm.locked_verify();
4114     return true;
4115   }
4116 
4117   // Returns a single random chunk to the chunk manager. Returns false if that
4118   // was not possible (all chunks are already in the chunk manager).
4119   bool return_single_random_chunk_to_chunkmanager() {
4120     assert_counters();
4121     _cm.locked_verify();
4122     int pos = next_matching_chunk(get_random_position(), false);
4123     if (pos == -1) {
4124       return false;
4125     }
4126     Metachunk* c = _pool[pos];
4127     assert(c->is_tagged_free() == false, "wrong chunk information");
4128     _cm.return_single_chunk(_cm.list_index(c->word_size()), c);
4129     _chunks_in_chunkmanager ++;
4130     _words_in_chunkmanager += c->word_size();
4131     assert(c->is_tagged_free() == true, "wrong chunk information");
4132     assert_counters();
4133     _cm.locked_verify();
4134     return true;
4135   }
4136 
4137   // Return a random chunk list to the chunk manager. Returns the length of the
4138   // returned list.
4139   int return_random_chunk_list_to_chunkmanager(int list_size) {
4140     assert_counters();
4141     _cm.locked_verify();
4142     AChunkList aChunkList;
4143     if (!assemble_random_chunklist(&aChunkList, list_size)) {
4144       return 0;
4145     }
4146     // Before the chunks are returned, they should still be tagged as in use.
4147     for (int i = 0; i < aChunkList.num; i ++) {
4148       assert(!aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4149     }
4150     _cm.return_chunk_list(aChunkList.index, aChunkList.head);
4151     _chunks_in_chunkmanager += aChunkList.num;
4152     _words_in_chunkmanager += aChunkList.size;
4153     // After all chunks are returned, check that they are now tagged free.
4154     for (int i = 0; i < aChunkList.num; i ++) {
4155       assert(aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4156     }
4157     assert_counters();
4158     _cm.locked_verify();
4159     return aChunkList.num;
4160   }
4161 
4162  public:
4163 
4164   ChunkManagerReturnTestImpl()
4165     : _vsn(align_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
4166     , _cm(SpecializedChunk, SmallChunk, MediumChunk)
4167     , _chunks_in_chunkmanager(0)
4168     , _words_in_chunkmanager(0)
4169   {
4170     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4171     // Allocate virtual space and allocate random chunks. Keep these chunks in the _pool. These chunks are
4172     // "in use", because not yet added to any chunk manager.
4173     _vsn.initialize();
4174     _vsn.expand_by(_vsn.reserved_words(), _vsn.reserved_words());
4175     for (int i = 0; i < num_chunks; i ++) {
4176       const size_t size = get_random_chunk_size();
4177       _pool[i] = _vsn.get_chunk_vs(size);
4178       assert(_pool[i] != NULL, "allocation failed");
4179     }
4180     assert_counters();
4181     _cm.locked_verify();
4182   }
4183 
4184   // Test entry point.
4185   // Return some chunks to the chunk manager (return phase). Take some chunks out (take phase). Repeat.
4186   // Chunks are chosen randomly. The number of chunks to return or take is chosen randomly, but affected
4187   // by the <phase_length_factor> argument: a factor of 0.0 will cause the test to quickly alternate between
4188   // returning and taking, whereas a factor of 1.0 will take/return all chunks from/to the
4189   // chunk manager, thereby emptying or filling it completely.
4190   void do_test(float phase_length_factor) {
4191     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4192     assert_counters();
4193     // Execute n operations, each operation being the move of a single chunk to/from the chunk manager.
4194     const int num_max_ops = num_chunks * 100;
4195     int num_ops = num_max_ops;
4196     const int average_phase_length = (int)(phase_length_factor * num_chunks);
4197     int num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
4198     bool return_phase = true;
4199     while (num_ops > 0) {
4200       int chunks_moved = 0;
4201       if (return_phase) {
4202         // Randomly switch between returning a single chunk or a random length chunk list.
4203         if (os::random() % 2 == 0) {
4204           if (return_single_random_chunk_to_chunkmanager()) {
4205             chunks_moved = 1;
4206           }
4207         } else {
4208           const int list_length = MAX2(1, (os::random() % num_ops_until_switch));
4209           chunks_moved = return_random_chunk_list_to_chunkmanager(list_length);
4210         }
4211       } else {
4212         // Breathe out.
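        // (The take phase pulls chunks back out of the chunk manager one at
        // a time; bulk operations exist only on the return side, via
        // return_random_chunk_list_to_chunkmanager().)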
4213         if (take_single_random_chunk_from_chunkmanager()) {
4214           chunks_moved = 1;
4215         }
4216       }
4217       num_ops -= chunks_moved;
4218       num_ops_until_switch -= chunks_moved;
4219       if (chunks_moved == 0 || num_ops_until_switch <= 0) {
4220         return_phase = !return_phase;
4221         num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
4222       }
4223     }
4224   }
4225 };
4226 
4227 void* setup_chunkmanager_returntests() {
4228   ChunkManagerReturnTestImpl* p = new ChunkManagerReturnTestImpl();
4229   return p;
4230 }
4231 
4232 void teardown_chunkmanager_returntests(void* p) {
4233   delete (ChunkManagerReturnTestImpl*) p;
4234 }
4235 
4236 void run_chunkmanager_returntests(void* p, float phase_length) {
4237   ChunkManagerReturnTestImpl* test = (ChunkManagerReturnTestImpl*) p;
4238   test->do_test(phase_length);
4239 }
4240 
4241 // The following test is placed here instead of a gtest / unittest file
4242 // because the SpaceManager class is only available in this file.
4243 class SpaceManagerTest : AllStatic {
4244   friend void SpaceManager_test_adjust_initial_chunk_size();
4245 
4246   static void test_adjust_initial_chunk_size(bool is_class) {
4247     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
4248     const size_t normal   = SpaceManager::small_chunk_size(is_class);
4249     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
4250 
4251 #define test_adjust_initial_chunk_size(value, expected, is_class_value)             \
4252     do {                                                                            \
4253       size_t v = value;                                                             \
4254       size_t e = expected;                                                          \
4255       size_t result = SpaceManager::adjust_initial_chunk_size(v, (is_class_value)); \
4256       assert(result == e, "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, result); \
4257     } while (0)
4258 
4259     // Smallest (specialized)
4260     test_adjust_initial_chunk_size(1,            smallest, is_class);
4261     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
4262     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
4263 
4264     // Small
4265     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
4266     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
4267     test_adjust_initial_chunk_size(normal,       normal, is_class);
4268 
4269     // Medium
4270     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
4271     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
4272     test_adjust_initial_chunk_size(medium,     medium, is_class);
4273 
4274     // Humongous
4275     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
4276 
4277 #undef test_adjust_initial_chunk_size
4278   }
4279 
4280   static void test_adjust_initial_chunk_size() {
4281     test_adjust_initial_chunk_size(false);
4282     test_adjust_initial_chunk_size(true);
4283   }
4284 };
4285 
4286 void SpaceManager_test_adjust_initial_chunk_size() {
4287   SpaceManagerTest::test_adjust_initial_chunk_size();
4288 }
4289 
4290 #endif // ASSERT