/*
 * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bound");
  return (ChunkIndex) (i+1);
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
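//
// Illustrative flow (hypothetical call sites, shown for orientation only;
// chunk sizes are in words, see the ChunkSizes enum above):
//   ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
//   Metachunk* c = cm.chunk_freelist_allocate(SmallChunk); // take a 512-word chunk
//   cm.return_chunks(SmallIndex, c);                       // give it back later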
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free list of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  //   HumongousChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Total size and count of the free chunks in all lists of this ChunkManager
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  // Return the fixed chunk size for the given list index
  size_t list_chunk_size(ChunkIndex index) const;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
             _free_chunks_total > 0,
             "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // Allocate (remove) a chunk from the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  ChunkIndex list_index(size_t size) const;

  // Remove the chunk from its freelist.  It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks lists
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks lists
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Remove from a list by size.  Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);

#define index_bounds_check(index)                \
  assert(index == SpecializedIndex ||            \
         index == SmallIndex ||                  \
         index == MediumIndex ||                 \
         index == HumongousIndex, "Bad index: %d", (int) index)

  size_t num_free_chunks(ChunkIndex index) const {
    index_bounds_check(index);

    if (index == HumongousIndex) {
      return _humongous_dictionary.total_free_blocks();
    }

    ssize_t count = _free_chunks[index].count();
    return count == -1 ?
        0 : (size_t) count;
  }

  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
    index_bounds_check(index);

    size_t word_size = 0;
    if (index == HumongousIndex) {
      word_size = _humongous_dictionary.total_size();
    } else {
      const size_t size_per_chunk_in_words = _free_chunks[index].size();
      word_size = size_per_chunk_in_words * num_free_chunks(index);
    }

    return word_size * BytesPerWord;
  }

  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
                                         num_free_chunks(SmallIndex),
                                         num_free_chunks(MediumIndex),
                                         num_free_chunks(HumongousIndex),
                                         size_free_chunks_in_bytes(SpecializedIndex),
                                         size_free_chunks_in_bytes(SmallIndex),
                                         size_free_chunks_in_bytes(MediumIndex),
                                         size_free_chunks_in_bytes(HumongousIndex));
  }

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;
};

void ChunkManager_test_list_index() {
  ChunkManager manager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);

  // Test previous bug where a query for a humongous class metachunk,
  // incorrectly matched the non-class medium metachunk size.
  {
    assert(MediumChunk > ClassMediumChunk, "Precondition for test");

    ChunkIndex index = manager.list_index(MediumChunk);

    assert(index == HumongousIndex,
           "Requested size is larger than ClassMediumChunk,"
           " so should return HumongousIndex. Got index: %d", (int)index);
  }

  // Check the specified sizes as well.
  {
    ChunkIndex index = manager.list_index(ClassSpecializedChunk);
    assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
  }
  {
    ChunkIndex index = manager.list_index(ClassSmallChunk);
    assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
  }
  {
    ChunkIndex index = manager.list_index(ClassMediumChunk);
    assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
  }
  {
    ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
    assert(index == HumongousIndex, "Wrong index returned."
           " Got index: %d", (int)index);
  }
}

class SmallBlocks : public CHeapObj<mtClass> {
  const static uint _small_block_max_size = sizeof(TreeChunk<Metablock, FreeList<Metablock> >)/HeapWordSize;
  const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;

 private:
  FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];

  FreeList<Metablock>& list_at(size_t word_size) {
    assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
    return _small_lists[word_size - _small_block_min_size];
  }

 public:
  SmallBlocks() {
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      _small_lists[k].set_size(i);
    }
  }

  size_t total_size() const {
    size_t result = 0;
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      result = result + _small_lists[k].count() * _small_lists[k].size();
    }
    return result;
  }

  static uint small_block_max_size() { return _small_block_max_size; }
  static uint small_block_min_size() { return _small_block_min_size; }

  MetaWord* get_block(size_t word_size) {
    if (list_at(word_size).count() > 0) {
      MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
      return new_block;
    } else {
      return NULL;
    }
  }
  void return_block(Metablock* free_chunk, size_t word_size) {
    list_at(word_size).return_chunk_at_head(free_chunk, false);
    assert(list_at(word_size).count() > 0, "Should have a chunk");
  }

  void print_on(outputStream* st) const {
    st->print_cr("SmallBlocks:");
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
    }
  }
};

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist : public CHeapObj<mtClass> {
  BlockTreeDictionary* const _dictionary;
  SmallBlocks* _small_blocks;

  // Only allocate and split from freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }
  SmallBlocks* small_blocks() {
    if (_small_blocks == NULL) {
      _small_blocks = new SmallBlocks();
    }
    return _small_blocks;
  }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get a block from, or return a block to, the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() const {
    size_t result = dictionary()->total_size();
    if (_small_blocks != NULL) {
      result = result + _small_blocks->total_size();
    }
    return result;
  }

  static size_t min_dictionary_size() { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
  void print_on(outputStream* st) const;
};

// A VirtualSpaceList node.
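//
// Layout of a node (a sketch derived from the accessors below):
//
//   bottom()            top()                  end()
//     |--- used chunks ---|--- committed free ---|--- uncommitted reserved ---|
//
//   used_words_in_vs()     == top() - bottom()
//   free_words_in_vs()     == end() - top()
//   capacity_words_in_vs() == end() - bottom()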
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;

 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uintx container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to VirtualSpace
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always adds up because all the chunk sizes are multiples of
  // the smallest chunk size.
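  // For example, 1664 free words left in a non-class node are retired as
  // three SmallChunks (3 * 512 words) plus one SpecializedChunk (128 words).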
  void retire(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

#define assert_is_ptr_aligned(ptr, alignment) \
  assert(is_ptr_aligned(ptr, alignment),      \
         PTR_FORMAT " is not aligned to "     \
         SIZE_FORMAT, p2i(ptr), alignment)

#define assert_is_size_aligned(size, alignment) \
  assert(is_size_aligned(size, alignment),      \
         SIZE_FORMAT " is not aligned to "      \
         SIZE_FORMAT, size, alignment)


// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// byte_size is the size of the associated virtualspace.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

#if INCLUDE_CDS
  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // a configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get a mmap region anywhere if the SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    MetaspaceShared::initialize_shared_rs(&_rs);
  } else
#endif
  {
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uintx VirtualSpaceNode::container_count_slow() {
  uintx count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

  // Chunk up the unused committed space in the current
  // virtual space and add the chunks to the free list.
  void retire_current_virtual_space();

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  bool contains(const void* ptr);

  // Unlink empty VirtualSpaceNodes and free them.
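  // (Called only at a safepoint; the definition below asserts this.)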
  void purge(ChunkManager* chunk_manager);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;

// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Maximum number of small chunks to allocate to a SpaceManager
  static uint const _small_chunk_limit;

  // Sum of all space in allocated chunks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist* _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const { return _block_freelists; }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  bool is_class() { return _mdtype == Metaspace::ClassType; }

  // Accessors
  size_t specialized_chunk_size() { return (size_t) is_class() ? ClassSpecializedChunk : SpecializedChunk; }
  size_t small_chunk_size()       { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
  size_t medium_chunk_size()      { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
  size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }

  size_t smallest_chunk_size()    { return specialized_chunk_size(); }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks
  // by the given size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager.  That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in-use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);
  // Allocates a block from a small chunk
  MetaWord* get_small_chunk_and_allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // compute the chunk size to return (for expanding space for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  // This adjusts the size given to be greater than the minimum allocation size in
  // words for data in metaspace.  Essentially the minimum size is currently 3 words.
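  // For example, assuming a 64-bit VM where sizeof(Metablock) is 24 bytes
  // (3 words) and Metachunk::object_alignment() is 8 bytes:
  //   get_allocation_word_size(1) -> MAX2(8, 24)  = 24 bytes -> 3 words
  //   get_allocation_word_size(5) -> MAX2(40, 24) = 40 bytes -> 5 words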
  size_t get_allocation_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag,
            Monitor::_safepoint_check_never);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         "Inconsistency in container_count _container_count " UINTX_FORMAT
         " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}

BlockFreelist::~BlockFreelist() {
  delete _dictionary;
  if (_small_blocks != NULL) {
    delete _small_blocks;
  }
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");

  Metablock* free_chunk = ::new (p) Metablock(word_size);
  if (word_size < SmallBlocks::small_block_max_size()) {
    small_blocks()->return_block(free_chunk, word_size);
  } else {
    dictionary()->return_chunk(free_chunk);
  }
  log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
            SIZE_FORMAT, p2i(free_chunk), word_size);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");

  // Try small_blocks first.
  if (word_size < SmallBlocks::small_block_max_size()) {
    // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
    // this space manager.
    MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
    if (new_block != NULL) {
      log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
                p2i(new_block), word_size);
      return new_block;
    }
  }

  if (word_size < BlockFreelist::min_dictionary_size()) {
    // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= SmallBlocks::small_block_min_size()) {
    return_block(new_block + word_size, unused);
  }

  log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
            p2i(new_block), word_size);
  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  dictionary()->print_free_lists(st);
  if (_small_blocks != NULL) {
    _small_blocks->print_on(st);
  }
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
         "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    Log(gc, metaspace, freelist) log;
    log.debug("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
    // Dump some information about the virtual space that is nearly full
    ResourceMark rm;
    print_on(log.debug_stream());
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment().  If both base and size are
  // aligned only the middle alignment of the VirtualSpace is used.
  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed.  Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                                             Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
           "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                           (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
           "Reserved start was not set properly " PTR_FORMAT
           " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
           "Reserved size was not set properly " SIZE_FORMAT
           " != " SIZE_FORMAT, reserved()->word_size(),
           _rs.size() / BytesPerWord);
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr(" space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
               "[" PTR_FORMAT ", " PTR_FORMAT ", "
               PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(vs), capacity / K,
               capacity == 0 ?
                   0 : used * 100 / capacity,
               p2i(bottom()), p2i(top()), p2i(end()),
               p2i(vs->high_boundary()));
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                        \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
         "Too much committed memory. Committed: " SIZE_FORMAT \
         " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
         MetaspaceAux::committed_bytes(), MaxMetaspaceSize);

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->word_size());
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    DEBUG_ONLY(vsl->verify_container_count();)
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except for at
// unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(SpaceManager::expand_lock());

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  DEBUG_ONLY(verify_container_count();)
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->free_chunks(index)->size();

    while (free_words_in_vs() >= chunk_size) {
      Metachunk* chunk = get_chunk_vs(chunk_size);
      assert(chunk != NULL, "allocation should have been successful");

      chunk_manager->return_chunks(index, chunk);
      chunk_manager->inc_free_chunks_total(chunk_size);
    }
    DEBUG_ONLY(verify_container_count();)
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
    _is_class(false),
    _virtual_space_list(NULL),
    _current_virtual_space(NULL),
    _reserved_words(0),
    _committed_words(0),
    _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
    _is_class(true),
    _virtual_space_list(NULL),
    _current_virtual_space(NULL),
    _reserved_words(0),
    _committed_words(0),
    _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
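// (With VirtualSpaceSize = 256 K words, each new non-class virtual space
// reserves at least 2 MB on a 64-bit VM; see expand_by() below, which rounds
// the request up to the reserve alignment.)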
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
           "Reserved memory size differs from requested memory size");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (log_is_enabled(Trace, gc, metaspace)) {
    Log(gc, metaspace) log;
    VirtualSpaceNode* vsl = current_virtual_space();
    ResourceMark rm;
    vsl->print_on(log.trace_stream());
  }
}

bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_size_aligned(min_words, Metaspace::commit_alignment_words());
  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
             "The new VirtualSpace was pre-committed, so it"
             " should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_size_up(medium_chunk_bunch, Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    node->print_on(st);
  }
}

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GCs.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_size_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  size_t capacity_until_GC = (size_t) _capacity_until_GC;
  size_t new_value = capacity_until_GC + v;

  if (new_value < capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
  }

  intptr_t expected = (intptr_t) capacity_until_GC;
  intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);

  if (expected != actual) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = capacity_until_GC;
  }
  return true;
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
         capacity_until_gc, committed_bytes);

  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);

  return left_to_commit / BytesPerWord;
}

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary.  Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceAux::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
  log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
                           minimum_free_percentage, maximum_used_percentage);
  log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);


  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
1642 size_t expand_bytes = minimum_desired_capacity - capacity_until_GC; 1643 expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment()); 1644 // Don't expand unless it's significant 1645 if (expand_bytes >= MinMetaspaceExpansion) { 1646 size_t new_capacity_until_GC = 0; 1647 bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC); 1648 assert(succeeded, "Should always successfully increment HWM when at safepoint"); 1649 1650 Metaspace::tracer()->report_gc_threshold(capacity_until_GC, 1651 new_capacity_until_GC, 1652 MetaspaceGCThresholdUpdater::ComputeNewSize); 1653 log_trace(gc, metaspace)(" expanding: minimum_desired_capacity: %6.1fKB expand_bytes: %6.1fKB MinMetaspaceExpansion: %6.1fKB new metaspace HWM: %6.1fKB", 1654 minimum_desired_capacity / (double) K, 1655 expand_bytes / (double) K, 1656 MinMetaspaceExpansion / (double) K, 1657 new_capacity_until_GC / (double) K); 1658 } 1659 return; 1660 } 1661 1662 // No expansion, now see if we want to shrink 1663 // We would never want to shrink more than this 1664 assert(capacity_until_GC >= minimum_desired_capacity, 1665 SIZE_FORMAT " >= " SIZE_FORMAT, 1666 capacity_until_GC, minimum_desired_capacity); 1667 size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity; 1668 1669 // Should shrinking be considered? 1670 if (MaxMetaspaceFreeRatio < 100) { 1671 const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0; 1672 const double minimum_used_percentage = 1.0 - maximum_free_percentage; 1673 const double max_tmp = used_after_gc / minimum_used_percentage; 1674 size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx)); 1675 maximum_desired_capacity = MAX2(maximum_desired_capacity, 1676 MetaspaceSize); 1677 log_trace(gc, metaspace)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f", 1678 maximum_free_percentage, minimum_used_percentage); 1679 log_trace(gc, metaspace)(" minimum_desired_capacity: %6.1fKB maximum_desired_capacity: %6.1fKB", 1680 minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K); 1681 1682 assert(minimum_desired_capacity <= maximum_desired_capacity, 1683 "sanity check"); 1684 1685 if (capacity_until_GC > maximum_desired_capacity) { 1686 // Capacity too large, compute shrinking size 1687 shrink_bytes = capacity_until_GC - maximum_desired_capacity; 1688 // We don't want to shrink all the way back to initSize if people call 1689 // System.gc(), because some programs do that between "phases" and then 1690 // we'd just have to grow the heap up again for the next phase. So we 1691 // damp the shrinking: 0% on the first call, 10% on the second call, 40% 1692 // on the third call, and 100% by the fourth call. But if we recompute 1693 // size without shrinking, it goes back to 0%.
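// Damping sketch (illustrative numbers): with shrink_bytes = 20M, four
// consecutive shrink decisions release about 0M, 2M, 8M and finally the
// full 20M as current_shrink_factor steps through 0, 10, 40 and 100.
// Note the integer division below: shrink_bytes is divided by 100 first,
// so very small values truncate to zero.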
1694 shrink_bytes = shrink_bytes / 100 * current_shrink_factor; 1695 1696 shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment()); 1697 1698 assert(shrink_bytes <= max_shrink_bytes, 1699 "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, 1700 shrink_bytes, max_shrink_bytes); 1701 if (current_shrink_factor == 0) { 1702 _shrink_factor = 10; 1703 } else { 1704 _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100); 1705 } 1706 log_trace(gc, metaspace)(" shrinking: initThreshold: %.1fK maximum_desired_capacity: %.1fK", 1707 MetaspaceSize / (double) K, maximum_desired_capacity / (double) K); 1708 log_trace(gc, metaspace)(" shrink_bytes: %.1fK current_shrink_factor: %d new shrink factor: %d MinMetaspaceExpansion: %.1fK", 1709 shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K); 1710 } 1711 } 1712 1713 // Don't shrink unless it's significant 1714 if (shrink_bytes >= MinMetaspaceExpansion && 1715 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { 1716 size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes); 1717 Metaspace::tracer()->report_gc_threshold(capacity_until_GC, 1718 new_capacity_until_GC, 1719 MetaspaceGCThresholdUpdater::ComputeNewSize); 1720 } 1721 } 1722 1723 // Metadebug methods 1724 1725 void Metadebug::init_allocation_fail_alot_count() { 1726 if (MetadataAllocationFailALot) { 1727 _allocation_fail_alot_count = 1728 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0)); 1729 } 1730 } 1731 1732 #ifdef ASSERT 1733 bool Metadebug::test_metadata_failure() { 1734 if (MetadataAllocationFailALot && 1735 Threads::is_vm_complete()) { 1736 if (_allocation_fail_alot_count > 0) { 1737 _allocation_fail_alot_count--; 1738 } else { 1739 log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot"); 1740 init_allocation_fail_alot_count(); 1741 return true; 1742 } 1743 } 1744 return false; 1745 } 1746 #endif 1747 1748 // ChunkManager methods 1749 1750 size_t ChunkManager::free_chunks_total_words() { 1751 return _free_chunks_total; 1752 } 1753 1754 size_t ChunkManager::free_chunks_total_bytes() { 1755 return free_chunks_total_words() * BytesPerWord; 1756 } 1757 1758 size_t ChunkManager::free_chunks_count() { 1759 #ifdef ASSERT 1760 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) { 1761 MutexLockerEx cl(SpaceManager::expand_lock(), 1762 Mutex::_no_safepoint_check_flag); 1763 // This lock is only needed in debug because the verification 1764 // of the _free_chunks_totals walks the list of free chunks 1765 slow_locked_verify_free_chunks_count(); 1766 } 1767 #endif 1768 return _free_chunks_count; 1769 } 1770 1771 void ChunkManager::locked_verify_free_chunks_total() { 1772 assert_lock_strong(SpaceManager::expand_lock()); 1773 assert(sum_free_chunks() == _free_chunks_total, 1774 "_free_chunks_total " SIZE_FORMAT " is not the" 1775 " same as sum " SIZE_FORMAT, _free_chunks_total, 1776 sum_free_chunks()); 1777 } 1778 1779 void ChunkManager::verify_free_chunks_total() { 1780 MutexLockerEx cl(SpaceManager::expand_lock(), 1781 Mutex::_no_safepoint_check_flag); 1782 locked_verify_free_chunks_total(); 1783 } 1784 1785 void ChunkManager::locked_verify_free_chunks_count() { 1786 assert_lock_strong(SpaceManager::expand_lock()); 1787 assert(sum_free_chunks_count() == _free_chunks_count, 1788 "_free_chunks_count " SIZE_FORMAT " is not the" 1789 " same as sum " SIZE_FORMAT, _free_chunks_count, 1790 sum_free_chunks_count()); 
1791 } 1792 1793 void ChunkManager::verify_free_chunks_count() { 1794 #ifdef ASSERT 1795 MutexLockerEx cl(SpaceManager::expand_lock(), 1796 Mutex::_no_safepoint_check_flag); 1797 locked_verify_free_chunks_count(); 1798 #endif 1799 } 1800 1801 void ChunkManager::verify() { 1802 MutexLockerEx cl(SpaceManager::expand_lock(), 1803 Mutex::_no_safepoint_check_flag); 1804 locked_verify(); 1805 } 1806 1807 void ChunkManager::locked_verify() { 1808 locked_verify_free_chunks_count(); 1809 locked_verify_free_chunks_total(); 1810 } 1811 1812 void ChunkManager::locked_print_free_chunks(outputStream* st) { 1813 assert_lock_strong(SpaceManager::expand_lock()); 1814 st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, 1815 _free_chunks_total, _free_chunks_count); 1816 } 1817 1818 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) { 1819 assert_lock_strong(SpaceManager::expand_lock()); 1820 st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, 1821 sum_free_chunks(), sum_free_chunks_count()); 1822 } 1823 ChunkList* ChunkManager::free_chunks(ChunkIndex index) { 1824 return &_free_chunks[index]; 1825 } 1826 1827 // These methods, which sum the free chunk lists, are used by printing 1828 // methods that run in product builds. 1829 size_t ChunkManager::sum_free_chunks() { 1830 assert_lock_strong(SpaceManager::expand_lock()); 1831 size_t result = 0; 1832 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { 1833 ChunkList* list = free_chunks(i); 1834 1835 if (list == NULL) { 1836 continue; 1837 } 1838 1839 result = result + list->count() * list->size(); 1840 } 1841 result = result + humongous_dictionary()->total_size(); 1842 return result; 1843 } 1844 1845 size_t ChunkManager::sum_free_chunks_count() { 1846 assert_lock_strong(SpaceManager::expand_lock()); 1847 size_t count = 0; 1848 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { 1849 ChunkList* list = free_chunks(i); 1850 if (list == NULL) { 1851 continue; 1852 } 1853 count = count + list->count(); 1854 } 1855 count = count + humongous_dictionary()->total_free_blocks(); 1856 return count; 1857 } 1858 1859 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) { 1860 ChunkIndex index = list_index(word_size); 1861 assert(index < HumongousIndex, "No humongous list"); 1862 return free_chunks(index); 1863 } 1864 1865 Metachunk* ChunkManager::free_chunks_get(size_t word_size) { 1866 assert_lock_strong(SpaceManager::expand_lock()); 1867 1868 slow_locked_verify(); 1869 1870 Metachunk* chunk = NULL; 1871 if (list_index(word_size) != HumongousIndex) { 1872 ChunkList* free_list = find_free_chunks_list(word_size); 1873 assert(free_list != NULL, "Sanity check"); 1874 1875 chunk = free_list->head(); 1876 1877 if (chunk == NULL) { 1878 return NULL; 1879 } 1880 1881 // Remove the chunk as the head of the list.
1882 free_list->remove_chunk(chunk); 1883 1884 log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT, 1885 p2i(free_list), p2i(chunk), chunk->word_size()); 1886 } else { 1887 chunk = humongous_dictionary()->get_chunk( 1888 word_size, 1889 FreeBlockDictionary<Metachunk>::atLeast); 1890 1891 if (chunk == NULL) { 1892 return NULL; 1893 } 1894 1895 log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT, 1896 chunk->word_size(), word_size, chunk->word_size() - word_size); 1897 } 1898 1899 // Chunk is being removed from the chunks free list. 1900 dec_free_chunks_total(chunk->word_size()); 1901 1902 // Remove it from the links to this freelist 1903 chunk->set_next(NULL); 1904 chunk->set_prev(NULL); 1905 #ifdef ASSERT 1906 // Chunk is no longer on any freelist. Setting to false makes container_count_slow() 1907 // work. 1908 chunk->set_is_tagged_free(false); 1909 #endif 1910 chunk->container()->inc_container_count(); 1911 1912 slow_locked_verify(); 1913 return chunk; 1914 } 1915 1916 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) { 1917 assert_lock_strong(SpaceManager::expand_lock()); 1918 slow_locked_verify(); 1919 1920 // Take from the beginning of the list 1921 Metachunk* chunk = free_chunks_get(word_size); 1922 if (chunk == NULL) { 1923 return NULL; 1924 } 1925 1926 assert((word_size <= chunk->word_size()) || 1927 (list_index(chunk->word_size()) == HumongousIndex), 1928 "Non-humongous variable sized chunk"); 1929 Log(gc, metaspace, freelist) log; 1930 if (log.is_debug()) { 1931 size_t list_count; 1932 if (list_index(word_size) < HumongousIndex) { 1933 ChunkList* list = find_free_chunks_list(word_size); 1934 list_count = list->count(); 1935 } else { 1936 list_count = humongous_dictionary()->total_count(); 1937 } 1938 log.debug("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ", 1939 p2i(this), p2i(chunk), chunk->word_size(), list_count); 1940 ResourceMark rm; 1941 locked_print_free_chunks(log.debug_stream()); 1942 } 1943 1944 return chunk; 1945 } 1946 1947 void ChunkManager::print_on(outputStream* out) const { 1948 const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics(out); 1949 } 1950 1951 // SpaceManager methods 1952 1953 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type, 1954 size_t* chunk_word_size, 1955 size_t* class_chunk_word_size) { 1956 switch (type) { 1957 case Metaspace::BootMetaspaceType: 1958 *chunk_word_size = Metaspace::first_chunk_word_size(); 1959 *class_chunk_word_size = Metaspace::first_class_chunk_word_size(); 1960 break; 1961 case Metaspace::ROMetaspaceType: 1962 *chunk_word_size = SharedReadOnlySize / wordSize; 1963 *class_chunk_word_size = ClassSpecializedChunk; 1964 break; 1965 case Metaspace::ReadWriteMetaspaceType: 1966 *chunk_word_size = SharedReadWriteSize / wordSize; 1967 *class_chunk_word_size = ClassSpecializedChunk; 1968 break; 1969 case Metaspace::AnonymousMetaspaceType: 1970 case Metaspace::ReflectionMetaspaceType: 1971 *chunk_word_size = SpecializedChunk; 1972 *class_chunk_word_size = ClassSpecializedChunk; 1973 break; 1974 default: 1975 *chunk_word_size = SmallChunk; 1976 *class_chunk_word_size = ClassSmallChunk; 1977 break; 1978 } 1979 assert(*chunk_word_size != 0 && *class_chunk_word_size != 0, 1980 "Initial chunk sizes bad: data " SIZE_FORMAT 1981 " class "
SIZE_FORMAT, 1982 *chunk_word_size, *class_chunk_word_size); 1983 } 1984 1985 size_t SpaceManager::sum_free_in_chunks_in_use() const { 1986 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 1987 size_t free = 0; 1988 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1989 Metachunk* chunk = chunks_in_use(i); 1990 while (chunk != NULL) { 1991 free += chunk->free_word_size(); 1992 chunk = chunk->next(); 1993 } 1994 } 1995 return free; 1996 } 1997 1998 size_t SpaceManager::sum_waste_in_chunks_in_use() const { 1999 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 2000 size_t result = 0; 2001 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2002 result += sum_waste_in_chunks_in_use(i); 2003 } 2004 2005 return result; 2006 } 2007 2008 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const { 2009 size_t result = 0; 2010 Metachunk* chunk = chunks_in_use(index); 2011 // Count the free space in all the chunks but not the 2012 // current chunk from which allocations are still being done. 2013 while (chunk != NULL) { 2014 if (chunk != current_chunk()) { 2015 result += chunk->free_word_size(); 2016 } 2017 chunk = chunk->next(); 2018 } 2019 return result; 2020 } 2021 2022 size_t SpaceManager::sum_capacity_in_chunks_in_use() const { 2023 // For CMS use "allocated_chunks_words()", which does not need the 2024 // Metaspace lock. For the other collectors sum over the 2025 // lists. Summing is too expensive to use in the product, so 2026 // allocated_chunks_words() should normally be used, but keeping 2027 // both paths allows checking that allocated_chunks_words() 2028 // returns the same value as 2029 // sum_capacity_in_chunks_in_use(), which is the definitive 2030 // answer.
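// (Sketch of the cross-check implied above, stated as an illustration
// rather than as an assert in this method: for non-CMS collectors the
// list walk below should yield exactly the value that
// allocated_chunks_words() maintains incrementally via inc_size_metrics().)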
2031 if (UseConcMarkSweepGC) { 2032 return allocated_chunks_words(); 2033 } else { 2034 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 2035 size_t sum = 0; 2036 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2037 Metachunk* chunk = chunks_in_use(i); 2038 while (chunk != NULL) { 2039 sum += chunk->word_size(); 2040 chunk = chunk->next(); 2041 } 2042 } 2043 return sum; 2044 } 2045 } 2046 2047 size_t SpaceManager::sum_count_in_chunks_in_use() { 2048 size_t count = 0; 2049 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2050 count = count + sum_count_in_chunks_in_use(i); 2051 } 2052 2053 return count; 2054 } 2055 2056 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) { 2057 size_t count = 0; 2058 Metachunk* chunk = chunks_in_use(i); 2059 while (chunk != NULL) { 2060 count++; 2061 chunk = chunk->next(); 2062 } 2063 return count; 2064 } 2065 2066 2067 size_t SpaceManager::sum_used_in_chunks_in_use() const { 2068 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 2069 size_t used = 0; 2070 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2071 Metachunk* chunk = chunks_in_use(i); 2072 while (chunk != NULL) { 2073 used += chunk->used_word_size(); 2074 chunk = chunk->next(); 2075 } 2076 } 2077 return used; 2078 } 2079 2080 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const { 2081 2082 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2083 Metachunk* chunk = chunks_in_use(i); 2084 st->print("SpaceManager: %s " PTR_FORMAT, 2085 chunk_size_name(i), p2i(chunk)); 2086 if (chunk != NULL) { 2087 st->print_cr(" free " SIZE_FORMAT, 2088 chunk->free_word_size()); 2089 } else { 2090 st->cr(); 2091 } 2092 } 2093 2094 chunk_manager()->locked_print_free_chunks(st); 2095 chunk_manager()->locked_print_sum_free_chunks(st); 2096 } 2097 2098 size_t SpaceManager::calc_chunk_size(size_t word_size) { 2099 2100 // Decide between a small chunk and a medium chunk. Up to 2101 // _small_chunk_limit small chunks can be allocated. 2102 // After that a medium chunk is preferred. 2103 size_t chunk_word_size; 2104 if (chunks_in_use(MediumIndex) == NULL && 2105 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) { 2106 chunk_word_size = (size_t) small_chunk_size(); 2107 if (word_size + Metachunk::overhead() > small_chunk_size()) { 2108 chunk_word_size = medium_chunk_size(); 2109 } 2110 } else { 2111 chunk_word_size = medium_chunk_size(); 2112 } 2113 2114 // Might still need a humongous chunk. Enforce 2115 // humongous allocation sizes to be aligned up to 2116 // the smallest chunk size.
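// Illustrative sizing example (numbers assumed for the sketch): with a
// medium chunk of 8K words, a request of 11000 words exceeds any fixed
// chunk size, so word_size plus Metachunk::overhead() is rounded up to a
// multiple of smallest_chunk_size() and served by a humongous chunk.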
2117 size_t if_humongous_sized_chunk = 2118 align_size_up(word_size + Metachunk::overhead(), 2119 smallest_chunk_size()); 2120 chunk_word_size = 2121 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk); 2122 2123 assert(!SpaceManager::is_humongous(word_size) || 2124 chunk_word_size == if_humongous_sized_chunk, 2125 "Size calculation is wrong, word_size " SIZE_FORMAT 2126 " chunk_word_size " SIZE_FORMAT, 2127 word_size, chunk_word_size); 2128 Log(gc, metaspace, alloc) log; 2129 if (log.is_debug() && SpaceManager::is_humongous(word_size)) { 2130 log.debug("Metadata humongous allocation:"); 2131 log.debug(" word_size " PTR_FORMAT, word_size); 2132 log.debug(" chunk_word_size " PTR_FORMAT, chunk_word_size); 2133 log.debug(" chunk overhead " PTR_FORMAT, Metachunk::overhead()); 2134 } 2135 return chunk_word_size; 2136 } 2137 2138 void SpaceManager::track_metaspace_memory_usage() { 2139 if (is_init_completed()) { 2140 if (is_class()) { 2141 MemoryService::track_compressed_class_memory_usage(); 2142 } 2143 MemoryService::track_metaspace_memory_usage(); 2144 } 2145 } 2146 2147 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) { 2148 assert(vs_list()->current_virtual_space() != NULL, 2149 "Should have been set"); 2150 assert(current_chunk() == NULL || 2151 current_chunk()->allocate(word_size) == NULL, 2152 "Don't need to expand"); 2153 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 2154 2155 if (log_is_enabled(Trace, gc, metaspace, freelist)) { 2156 size_t words_left = 0; 2157 size_t words_used = 0; 2158 if (current_chunk() != NULL) { 2159 words_left = current_chunk()->free_word_size(); 2160 words_used = current_chunk()->used_word_size(); 2161 } 2162 log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left", 2163 word_size, words_used, words_left); 2164 } 2165 2166 // Get another chunk 2167 size_t grow_chunks_by_words = calc_chunk_size(word_size); 2168 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words); 2169 2170 MetaWord* mem = NULL; 2171 2172 // If a chunk was available, add it to the in-use chunk list 2173 // and do an allocation from it. 2174 if (next != NULL) { 2175 // Add to this manager's list of chunks in use. 2176 add_chunk(next, false); 2177 mem = next->allocate(word_size); 2178 } 2179 2180 // Track metaspace memory usage statistic. 2181 track_metaspace_memory_usage(); 2182 2183 return mem; 2184 } 2185 2186 void SpaceManager::print_on(outputStream* st) const { 2187 2188 for (ChunkIndex i = ZeroIndex; 2189 i < NumberOfInUseLists ; 2190 i = next_chunk_index(i) ) { 2191 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT, 2192 p2i(chunks_in_use(i)), 2193 chunks_in_use(i) == NULL ? 
0 : chunks_in_use(i)->word_size()); 2194 } 2195 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT 2196 " Humongous " SIZE_FORMAT, 2197 sum_waste_in_chunks_in_use(SmallIndex), 2198 sum_waste_in_chunks_in_use(MediumIndex), 2199 sum_waste_in_chunks_in_use(HumongousIndex)); 2200 // block free lists 2201 if (block_freelists() != NULL) { 2202 st->print_cr("total in block free lists " SIZE_FORMAT, 2203 block_freelists()->total_size()); 2204 } 2205 } 2206 2207 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype, 2208 Mutex* lock) : 2209 _mdtype(mdtype), 2210 _allocated_blocks_words(0), 2211 _allocated_chunks_words(0), 2212 _allocated_chunks_count(0), 2213 _block_freelists(NULL), 2214 _lock(lock) 2215 { 2216 initialize(); 2217 } 2218 2219 void SpaceManager::inc_size_metrics(size_t words) { 2220 assert_lock_strong(SpaceManager::expand_lock()); 2221 // Total words in allocated Metachunks and count of allocated 2222 // Metachunks for each SpaceManager 2223 _allocated_chunks_words = _allocated_chunks_words + words; 2224 _allocated_chunks_count++; 2225 // Global total of capacity in allocated Metachunks 2226 MetaspaceAux::inc_capacity(mdtype(), words); 2227 // Global total of allocated Metablocks. 2228 // used_words_slow() includes the overhead of each 2229 // Metachunk, so add that overhead to the used total when the 2230 // Metachunk is first added (it is only added once per 2231 // Metachunk). 2232 MetaspaceAux::inc_used(mdtype(), Metachunk::overhead()); 2233 } 2234 2235 void SpaceManager::inc_used_metrics(size_t words) { 2236 // Add to the per SpaceManager total 2237 Atomic::add_ptr(words, &_allocated_blocks_words); 2238 // Add to the global total 2239 MetaspaceAux::inc_used(mdtype(), words); 2240 } 2241 2242 void SpaceManager::dec_total_from_size_metrics() { 2243 MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words()); 2244 MetaspaceAux::dec_used(mdtype(), allocated_blocks_words()); 2245 // Also deduct the overhead per Metachunk 2246 MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead()); 2247 } 2248 2249 void SpaceManager::initialize() { 2250 Metadebug::init_allocation_fail_alot_count(); 2251 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2252 _chunks_in_use[i] = NULL; 2253 } 2254 _current_chunk = NULL; 2255 log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this)); 2256 } 2257 2258 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) { 2259 if (chunks == NULL) { 2260 return; 2261 } 2262 ChunkList* list = free_chunks(index); 2263 assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes"); 2264 assert_lock_strong(SpaceManager::expand_lock()); 2265 Metachunk* cur = chunks; 2266 2267 // This returns chunks one at a time.
If a new 2268 // list class that is a base class 2269 // of FreeList is ever created, something like FreeList::prepend() 2270 // could be used in place of this loop. 2271 while (cur != NULL) { 2272 assert(cur->container() != NULL, "Container should have been set"); 2273 cur->container()->dec_container_count(); 2274 // Capture the next link before it is changed 2275 // by the call to return_chunk_at_head(). 2276 Metachunk* next = cur->next(); 2277 DEBUG_ONLY(cur->set_is_tagged_free(true);) 2278 NOT_PRODUCT(cur->mangle(badMetaWordVal);) 2279 list->return_chunk_at_head(cur); 2280 cur = next; 2281 } 2282 } 2283 2284 SpaceManager::~SpaceManager() { 2285 // This takes this->_lock, which can't be done while holding expand_lock() 2286 assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(), 2287 "sum_capacity_in_chunks_in_use() " SIZE_FORMAT 2288 " allocated_chunks_words() " SIZE_FORMAT, 2289 sum_capacity_in_chunks_in_use(), allocated_chunks_words()); 2290 2291 MutexLockerEx fcl(SpaceManager::expand_lock(), 2292 Mutex::_no_safepoint_check_flag); 2293 2294 chunk_manager()->slow_locked_verify(); 2295 2296 dec_total_from_size_metrics(); 2297 2298 Log(gc, metaspace, freelist) log; 2299 if (log.is_trace()) { 2300 log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this)); 2301 ResourceMark rm; 2302 locked_print_chunks_in_use_on(log.trace_stream()); 2303 if (block_freelists() != NULL) { 2304 block_freelists()->print_on(log.trace_stream()); 2305 } 2306 } 2307 2308 // Have to update before the chunks_in_use lists are emptied 2309 // below. 2310 chunk_manager()->inc_free_chunks_total(allocated_chunks_words(), 2311 sum_count_in_chunks_in_use()); 2312 2313 // Add all the chunks in use by this space manager 2314 // to the global list of free chunks. 2315 2316 // Follow each list of chunks-in-use and add them to the 2317 // free lists. Each list is NULL terminated. 2318 2319 for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) { 2320 log.trace("returned " SIZE_FORMAT " %s chunks to freelist", sum_count_in_chunks_in_use(i), chunk_size_name(i)); 2321 Metachunk* chunks = chunks_in_use(i); 2322 chunk_manager()->return_chunks(i, chunks); 2323 set_chunks_in_use(i, NULL); 2324 log.trace("updated freelist count " SSIZE_FORMAT " %s", chunk_manager()->free_chunks(i)->count(), chunk_size_name(i)); 2325 assert(i != HumongousIndex, "Humongous chunks are handled explicitly later"); 2326 } 2327 2328 // The medium chunk case may be optimized by passing the head and 2329 // tail of the medium chunk list to add_at_head(). The tail is often 2330 // the current chunk but there are probably exceptions. 2331 2332 // Humongous chunks 2333 log.trace("returned " SIZE_FORMAT " %s humongous chunks to dictionary", 2334 sum_count_in_chunks_in_use(HumongousIndex), chunk_size_name(HumongousIndex)); 2335 log.trace("Humongous chunk dictionary: "); 2336 // Humongous chunks are never the current chunk.
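// (Sketch of the walk below: each humongous chunk is unlinked from the
// in-use list, its container count is decremented, and the chunk is
// returned to the humongous dictionary, which is keyed by word size.)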
2337 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex); 2338 2339 while (humongous_chunks != NULL) { 2340 DEBUG_ONLY(humongous_chunks->set_is_tagged_free(true);) 2341 NOT_PRODUCT(humongous_chunks->mangle(badMetaWordVal);) 2342 log.trace(PTR_FORMAT " (" SIZE_FORMAT ") ", p2i(humongous_chunks), humongous_chunks->word_size()); 2343 assert(humongous_chunks->word_size() == (size_t) 2344 align_size_up(humongous_chunks->word_size(), 2345 smallest_chunk_size()), 2346 "Humongous chunk size is wrong: word size " SIZE_FORMAT 2347 " granularity " SIZE_FORMAT, 2348 humongous_chunks->word_size(), smallest_chunk_size()); 2349 Metachunk* next_humongous_chunks = humongous_chunks->next(); 2350 humongous_chunks->container()->dec_container_count(); 2351 chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks); 2352 humongous_chunks = next_humongous_chunks; 2353 } 2354 log.trace("updated dictionary count " SIZE_FORMAT " %s", chunk_manager()->humongous_dictionary()->total_count(), chunk_size_name(HumongousIndex)); 2355 chunk_manager()->slow_locked_verify(); 2356 2357 if (_block_freelists != NULL) { 2358 delete _block_freelists; 2359 } 2360 } 2361 2362 const char* SpaceManager::chunk_size_name(ChunkIndex index) const { 2363 switch (index) { 2364 case SpecializedIndex: 2365 return "Specialized"; 2366 case SmallIndex: 2367 return "Small"; 2368 case MediumIndex: 2369 return "Medium"; 2370 case HumongousIndex: 2371 return "Humongous"; 2372 default: 2373 return NULL; 2374 } 2375 } 2376 2377 size_t ChunkManager::list_chunk_size(ChunkIndex index) const { 2378 assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex, 2379 "Bad index: %d", (int)index); 2380 2381 return _free_chunks[index].size(); 2382 } 2383 2384 ChunkIndex ChunkManager::list_index(size_t size) const { 2385 if (list_chunk_size(SpecializedIndex) == size) { 2386 return SpecializedIndex; 2387 } 2388 if (list_chunk_size(SmallIndex) == size) { 2389 return SmallIndex; 2390 } 2391 if (list_chunk_size(MediumIndex) == size) { 2392 return MediumIndex; 2393 } 2394 2395 assert(size > list_chunk_size(MediumIndex), "Not a humongous chunk"); 2396 return HumongousIndex; 2397 } 2398 2399 void SpaceManager::deallocate(MetaWord* p, size_t word_size) { 2400 assert_lock_strong(_lock); 2401 // Allocations and deallocations are in raw_word_size 2402 size_t raw_word_size = get_allocation_word_size(word_size); 2403 // Lazily create a block_freelist 2404 if (block_freelists() == NULL) { 2405 _block_freelists = new BlockFreelist(); 2406 } 2407 block_freelists()->return_block(p, raw_word_size); 2408 } 2409 2410 // Adds a chunk to the list of chunks in use. 2411 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) { 2412 2413 assert(new_chunk != NULL, "Should not be NULL"); 2414 assert(new_chunk->next() == NULL, "Should not be on a list"); 2415 2416 new_chunk->reset_empty(); 2417 2418 // Find the correct list and set the current 2419 // chunk for that list. 2420 ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size()); 2421 2422 if (index != HumongousIndex) { 2423 retire_current_chunk(); 2424 set_current_chunk(new_chunk); 2425 new_chunk->set_next(chunks_in_use(index)); 2426 set_chunks_in_use(index, new_chunk); 2427 } else { 2428 // For null class loader data and DumpSharedSpaces, the first chunk isn't 2429 // small, so small will be null. Link this first chunk as the current 2430 // chunk. 2431 if (make_current) { 2432 // Set as the current chunk but otherwise treat as a humongous chunk.
2433 set_current_chunk(new_chunk); 2434 } 2435 // Link at head. The _current_chunk only points to a humongous chunk for 2436 // the null class loader metaspace (class and data virtual space managers), 2437 // so it will never point to the tail 2438 // of the humongous chunks list. 2439 new_chunk->set_next(chunks_in_use(HumongousIndex)); 2440 set_chunks_in_use(HumongousIndex, new_chunk); 2441 2442 assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency"); 2443 } 2444 2445 // Add to the running sum of capacity 2446 inc_size_metrics(new_chunk->word_size()); 2447 2448 assert(new_chunk->is_empty(), "Not ready for reuse"); 2449 Log(gc, metaspace, freelist) log; 2450 if (log.is_trace()) { 2451 log.trace("SpaceManager::add_chunk: " SIZE_FORMAT ") ", sum_count_in_chunks_in_use()); 2452 ResourceMark rm; 2453 outputStream* out = log.trace_stream(); 2454 new_chunk->print_on(out); 2455 chunk_manager()->locked_print_free_chunks(out); 2456 } 2457 } 2458 2459 void SpaceManager::retire_current_chunk() { 2460 if (current_chunk() != NULL) { 2461 size_t remaining_words = current_chunk()->free_word_size(); 2462 if (remaining_words >= BlockFreelist::min_dictionary_size()) { 2463 MetaWord* ptr = current_chunk()->allocate(remaining_words); 2464 deallocate(ptr, remaining_words); 2465 inc_used_metrics(remaining_words); 2466 } 2467 } 2468 } 2469 2470 Metachunk* SpaceManager::get_new_chunk(size_t word_size, 2471 size_t grow_chunks_by_words) { 2472 // Get a chunk from the chunk freelist 2473 Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words); 2474 2475 if (next == NULL) { 2476 next = vs_list()->get_new_chunk(word_size, 2477 grow_chunks_by_words, 2478 medium_chunk_bunch()); 2479 } 2480 2481 Log(gc, metaspace, alloc) log; 2482 if (log.is_debug() && next != NULL && 2483 SpaceManager::is_humongous(next->word_size())) { 2484 log.debug(" new humongous chunk word size " PTR_FORMAT, next->word_size()); 2485 } 2486 2487 return next; 2488 } 2489 2490 /* 2491 * The policy is to allocate up to _small_chunk_limit small chunks 2492 * after which only medium chunks are allocated. This is done to 2493 * reduce fragmentation. In some cases, this can result in a lot 2494 * of small chunks being allocated to the point where it's not 2495 * possible to expand. If this happens, there may be no medium chunks 2496 * available and OOME would be thrown. Instead of doing that, 2497 * if the allocation request size fits in a small chunk, an attempt 2498 * will be made to allocate a small chunk. 2499 */ 2500 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) { 2501 size_t raw_word_size = get_allocation_word_size(word_size); 2502 2503 if (raw_word_size + Metachunk::overhead() > small_chunk_size()) { 2504 return NULL; 2505 } 2506 2507 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 2508 MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag); 2509 2510 Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size()); 2511 2512 MetaWord* mem = NULL; 2513 2514 if (chunk != NULL) { 2515 // Add chunk to the in-use chunk list and do an allocation from it. 2516 // Add to this manager's list of chunks in use. 2517 add_chunk(chunk, false); 2518 mem = chunk->allocate(raw_word_size); 2519 2520 inc_used_metrics(raw_word_size); 2521 2522 // Track metaspace memory usage statistic.
2523 track_metaspace_memory_usage(); 2524 } 2525 2526 return mem; 2527 } 2528 2529 MetaWord* SpaceManager::allocate(size_t word_size) { 2530 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 2531 size_t raw_word_size = get_allocation_word_size(word_size); 2532 BlockFreelist* fl = block_freelists(); 2533 MetaWord* p = NULL; 2534 // Allocation from the dictionary is expensive in the sense that 2535 // the dictionary has to be searched for a size. Don't allocate 2536 // from the dictionary until it starts to get fat. Is this 2537 // a reasonable policy? Maybe a skinny dictionary is fast enough 2538 // for allocations. Do some profiling. JJJ 2539 if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) { 2540 p = fl->get_block(raw_word_size); 2541 } 2542 if (p == NULL) { 2543 p = allocate_work(raw_word_size); 2544 } 2545 2546 return p; 2547 } 2548 2549 // Returns the address of space allocated for "word_size". 2550 // This method does not know about blocks (Metablocks) 2551 MetaWord* SpaceManager::allocate_work(size_t word_size) { 2552 assert_lock_strong(_lock); 2553 #ifdef ASSERT 2554 if (Metadebug::test_metadata_failure()) { 2555 return NULL; 2556 } 2557 #endif 2558 // Is there space in the current chunk? 2559 MetaWord* result = NULL; 2560 2561 // For DumpSharedSpaces, only allocate out of the current chunk which is 2562 // never null because we gave it the size we wanted. Caller reports out 2563 // of memory if this returns null. 2564 if (DumpSharedSpaces) { 2565 assert(current_chunk() != NULL, "should never happen"); 2566 inc_used_metrics(word_size); 2567 return current_chunk()->allocate(word_size); // caller handles null result 2568 } 2569 2570 if (current_chunk() != NULL) { 2571 result = current_chunk()->allocate(word_size); 2572 } 2573 2574 if (result == NULL) { 2575 result = grow_and_allocate(word_size); 2576 } 2577 2578 if (result != NULL) { 2579 inc_used_metrics(word_size); 2580 assert(result != (MetaWord*) chunks_in_use(MediumIndex), 2581 "Head of the list is being allocated"); 2582 } 2583 2584 return result; 2585 } 2586 2587 void SpaceManager::verify() { 2588 // If there are blocks in the dictionary, then 2589 // verification of chunks does not work since 2590 // being in the dictionary alters a chunk. 2591 if (block_freelists() != NULL && block_freelists()->total_size() == 0) { 2592 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2593 Metachunk* curr = chunks_in_use(i); 2594 while (curr != NULL) { 2595 curr->verify(); 2596 verify_chunk_size(curr); 2597 curr = curr->next(); 2598 } 2599 } 2600 } 2601 } 2602 2603 void SpaceManager::verify_chunk_size(Metachunk* chunk) { 2604 assert(is_humongous(chunk->word_size()) || 2605 chunk->word_size() == medium_chunk_size() || 2606 chunk->word_size() == small_chunk_size() || 2607 chunk->word_size() == specialized_chunk_size(), 2608 "Chunk size is wrong"); 2609 return; 2610 } 2611 2612 #ifdef ASSERT 2613 void SpaceManager::verify_allocated_blocks_words() { 2614 // Verification is only guaranteed at a safepoint.
2615 assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(), 2616 "Verification can fail if the application is running"); 2617 assert(allocated_blocks_words() == sum_used_in_chunks_in_use(), 2618 "allocation total is not consistent " SIZE_FORMAT 2619 " vs " SIZE_FORMAT, 2620 allocated_blocks_words(), sum_used_in_chunks_in_use()); 2621 } 2622 2623 #endif 2624 2625 void SpaceManager::dump(outputStream* const out) const { 2626 size_t curr_total = 0; 2627 size_t waste = 0; 2628 uint i = 0; 2629 size_t used = 0; 2630 size_t capacity = 0; 2631 2632 // Add up statistics for all chunks in this SpaceManager. 2633 for (ChunkIndex index = ZeroIndex; 2634 index < NumberOfInUseLists; 2635 index = next_chunk_index(index)) { 2636 for (Metachunk* curr = chunks_in_use(index); 2637 curr != NULL; 2638 curr = curr->next()) { 2639 out->print("%d) ", i++); 2640 curr->print_on(out); 2641 curr_total += curr->word_size(); 2642 used += curr->used_word_size(); 2643 capacity += curr->word_size(); 2644 waste += curr->free_word_size() + curr->overhead(); 2645 } 2646 } 2647 2648 if (log_is_enabled(Trace, gc, metaspace, freelist)) { 2649 if (block_freelists() != NULL) block_freelists()->print_on(out); 2650 } 2651 2652 size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size(); 2653 // Free space isn't wasted. 2654 waste -= free; 2655 2656 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT 2657 " free " SIZE_FORMAT " capacity " SIZE_FORMAT 2658 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste); 2659 } 2660 2661 // MetaspaceAux 2662 2663 2664 size_t MetaspaceAux::_capacity_words[] = {0, 0}; 2665 size_t MetaspaceAux::_used_words[] = {0, 0}; 2666 2667 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) { 2668 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2669 return list == NULL ? 0 : list->free_bytes(); 2670 } 2671 2672 size_t MetaspaceAux::free_bytes() { 2673 return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType); 2674 } 2675 2676 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) { 2677 assert_lock_strong(SpaceManager::expand_lock()); 2678 assert(words <= capacity_words(mdtype), 2679 "About to decrement below 0: words " SIZE_FORMAT 2680 " is greater than _capacity_words[%u] " SIZE_FORMAT, 2681 words, mdtype, capacity_words(mdtype)); 2682 _capacity_words[mdtype] -= words; 2683 } 2684 2685 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) { 2686 assert_lock_strong(SpaceManager::expand_lock()); 2687 // Needs to be atomic 2688 _capacity_words[mdtype] += words; 2689 } 2690 2691 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) { 2692 assert(words <= used_words(mdtype), 2693 "About to decrement below 0: words " SIZE_FORMAT 2694 " is greater than _used_words[%u] " SIZE_FORMAT, 2695 words, mdtype, used_words(mdtype)); 2696 // For CMS deallocation of the Metaspaces occurs during the 2697 // sweep which is a concurrent phase. Protection by the expand_lock() 2698 // is not enough since allocation is on a per Metaspace basis 2699 // and protected by the Metaspace lock. 2700 jlong minus_words = (jlong) - (jlong) words; 2701 Atomic::add_ptr(minus_words, &_used_words[mdtype]); 2702 } 2703 2704 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) { 2705 // _used_words tracks allocations for 2706 // each piece of metadata.
Those allocations are 2707 // generally done concurrently by different application 2708 // threads so must be done atomically. 2709 Atomic::add_ptr(words, &_used_words[mdtype]); 2710 } 2711 2712 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) { 2713 size_t used = 0; 2714 ClassLoaderDataGraphMetaspaceIterator iter; 2715 while (iter.repeat()) { 2716 Metaspace* msp = iter.get_next(); 2717 // Sum allocated_blocks_words for each metaspace 2718 if (msp != NULL) { 2719 used += msp->used_words_slow(mdtype); 2720 } 2721 } 2722 return used * BytesPerWord; 2723 } 2724 2725 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) { 2726 size_t free = 0; 2727 ClassLoaderDataGraphMetaspaceIterator iter; 2728 while (iter.repeat()) { 2729 Metaspace* msp = iter.get_next(); 2730 if (msp != NULL) { 2731 free += msp->free_words_slow(mdtype); 2732 } 2733 } 2734 return free * BytesPerWord; 2735 } 2736 2737 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) { 2738 if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) { 2739 return 0; 2740 } 2741 // Don't count the space in the freelists. That space will be 2742 // added to the capacity calculation as needed. 2743 size_t capacity = 0; 2744 ClassLoaderDataGraphMetaspaceIterator iter; 2745 while (iter.repeat()) { 2746 Metaspace* msp = iter.get_next(); 2747 if (msp != NULL) { 2748 capacity += msp->capacity_words_slow(mdtype); 2749 } 2750 } 2751 return capacity * BytesPerWord; 2752 } 2753 2754 size_t MetaspaceAux::capacity_bytes_slow() { 2755 #ifdef PRODUCT 2756 // Use capacity_bytes() in PRODUCT instead of this function. 2757 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT"); 2758 #endif 2759 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType); 2760 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType); 2761 assert(capacity_bytes() == class_capacity + non_class_capacity, 2762 "bad accounting: capacity_bytes() " SIZE_FORMAT 2763 " class_capacity + non_class_capacity " SIZE_FORMAT 2764 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT, 2765 capacity_bytes(), class_capacity + non_class_capacity, 2766 class_capacity, non_class_capacity); 2767 2768 return class_capacity + non_class_capacity; 2769 } 2770 2771 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) { 2772 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2773 return list == NULL ? 0 : list->reserved_bytes(); 2774 } 2775 2776 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) { 2777 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2778 return list == NULL ? 
0 : list->committed_bytes(); 2779 } 2780 2781 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); } 2782 2783 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) { 2784 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype); 2785 if (chunk_manager == NULL) { 2786 return 0; 2787 } 2788 chunk_manager->slow_verify(); 2789 return chunk_manager->free_chunks_total_words(); 2790 } 2791 2792 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) { 2793 return free_chunks_total_words(mdtype) * BytesPerWord; 2794 } 2795 2796 size_t MetaspaceAux::free_chunks_total_words() { 2797 return free_chunks_total_words(Metaspace::ClassType) + 2798 free_chunks_total_words(Metaspace::NonClassType); 2799 } 2800 2801 size_t MetaspaceAux::free_chunks_total_bytes() { 2802 return free_chunks_total_words() * BytesPerWord; 2803 } 2804 2805 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) { 2806 return Metaspace::get_chunk_manager(mdtype) != NULL; 2807 } 2808 2809 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) { 2810 if (!has_chunk_free_list(mdtype)) { 2811 return MetaspaceChunkFreeListSummary(); 2812 } 2813 2814 const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype); 2815 return cm->chunk_free_list_summary(); 2816 } 2817 2818 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) { 2819 log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)", 2820 prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K); 2821 } 2822 2823 void MetaspaceAux::print_on(outputStream* out) { 2824 Metaspace::MetadataType nct = Metaspace::NonClassType; 2825 2826 out->print_cr(" Metaspace " 2827 "used " SIZE_FORMAT "K, " 2828 "capacity " SIZE_FORMAT "K, " 2829 "committed " SIZE_FORMAT "K, " 2830 "reserved " SIZE_FORMAT "K", 2831 used_bytes()/K, 2832 capacity_bytes()/K, 2833 committed_bytes()/K, 2834 reserved_bytes()/K); 2835 2836 if (Metaspace::using_class_space()) { 2837 Metaspace::MetadataType ct = Metaspace::ClassType; 2838 out->print_cr(" class space " 2839 "used " SIZE_FORMAT "K, " 2840 "capacity " SIZE_FORMAT "K, " 2841 "committed " SIZE_FORMAT "K, " 2842 "reserved " SIZE_FORMAT "K", 2843 used_bytes(ct)/K, 2844 capacity_bytes(ct)/K, 2845 committed_bytes(ct)/K, 2846 reserved_bytes(ct)/K); 2847 } 2848 } 2849 2850 // Print information for class space and data space separately. 2851 // This is almost the same as above. 
2852 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) { 2853 size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype); 2854 size_t capacity_bytes = capacity_bytes_slow(mdtype); 2855 size_t used_bytes = used_bytes_slow(mdtype); 2856 size_t free_bytes = free_bytes_slow(mdtype); 2857 size_t used_and_free = used_bytes + free_bytes + 2858 free_chunks_capacity_bytes; 2859 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT 2860 "K + unused in chunks " SIZE_FORMAT "K + " 2861 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT 2862 "K capacity in allocated chunks " SIZE_FORMAT "K", 2863 used_bytes / K, 2864 free_bytes / K, 2865 free_chunks_capacity_bytes / K, 2866 used_and_free / K, 2867 capacity_bytes / K); 2868 // Accounting can only be correct if we got the values during a safepoint 2869 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong"); 2870 } 2871 2872 // Print total fragmentation for class metaspaces 2873 void MetaspaceAux::print_class_waste(outputStream* out) { 2874 assert(Metaspace::using_class_space(), "class metaspace not used"); 2875 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0; 2876 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0; 2877 ClassLoaderDataGraphMetaspaceIterator iter; 2878 while (iter.repeat()) { 2879 Metaspace* msp = iter.get_next(); 2880 if (msp != NULL) { 2881 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2882 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2883 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2884 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex); 2885 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2886 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex); 2887 cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2888 } 2889 } 2890 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2891 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2892 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2893 "large count " SIZE_FORMAT, 2894 cls_specialized_count, cls_specialized_waste, 2895 cls_small_count, cls_small_waste, 2896 cls_medium_count, cls_medium_waste, cls_humongous_count); 2897 } 2898 2899 // Print total fragmentation for data and class metaspaces separately 2900 void MetaspaceAux::print_waste(outputStream* out) { 2901 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0; 2902 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0; 2903 2904 ClassLoaderDataGraphMetaspaceIterator iter; 2905 while (iter.repeat()) { 2906 Metaspace* msp = iter.get_next(); 2907 if (msp != NULL) { 2908 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2909 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2910 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2911 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex); 2912 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2913 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex); 2914 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2915 } 2916 } 2917 out->print_cr("Total fragmentation waste (words) doesn't count 
free space"); 2918 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2919 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2920 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2921 "large count " SIZE_FORMAT, 2922 specialized_count, specialized_waste, small_count, 2923 small_waste, medium_count, medium_waste, humongous_count); 2924 if (Metaspace::using_class_space()) { 2925 print_class_waste(out); 2926 } 2927 } 2928 2929 // Dump global metaspace things from the end of ClassLoaderDataGraph 2930 void MetaspaceAux::dump(outputStream* out) { 2931 out->print_cr("All Metaspace:"); 2932 out->print("data space: "); print_on(out, Metaspace::NonClassType); 2933 out->print("class space: "); print_on(out, Metaspace::ClassType); 2934 print_waste(out); 2935 } 2936 2937 void MetaspaceAux::verify_free_chunks() { 2938 Metaspace::chunk_manager_metadata()->verify(); 2939 if (Metaspace::using_class_space()) { 2940 Metaspace::chunk_manager_class()->verify(); 2941 } 2942 } 2943 2944 void MetaspaceAux::verify_capacity() { 2945 #ifdef ASSERT 2946 size_t running_sum_capacity_bytes = capacity_bytes(); 2947 // For purposes of the running sum of capacity, verify against capacity 2948 size_t capacity_in_use_bytes = capacity_bytes_slow(); 2949 assert(running_sum_capacity_bytes == capacity_in_use_bytes, 2950 "capacity_words() * BytesPerWord " SIZE_FORMAT 2951 " capacity_bytes_slow()" SIZE_FORMAT, 2952 running_sum_capacity_bytes, capacity_in_use_bytes); 2953 for (Metaspace::MetadataType i = Metaspace::ClassType; 2954 i < Metaspace:: MetadataTypeCount; 2955 i = (Metaspace::MetadataType)(i + 1)) { 2956 size_t capacity_in_use_bytes = capacity_bytes_slow(i); 2957 assert(capacity_bytes(i) == capacity_in_use_bytes, 2958 "capacity_bytes(%u) " SIZE_FORMAT 2959 " capacity_bytes_slow(%u)" SIZE_FORMAT, 2960 i, capacity_bytes(i), i, capacity_in_use_bytes); 2961 } 2962 #endif 2963 } 2964 2965 void MetaspaceAux::verify_used() { 2966 #ifdef ASSERT 2967 size_t running_sum_used_bytes = used_bytes(); 2968 // For purposes of the running sum of used, verify against used 2969 size_t used_in_use_bytes = used_bytes_slow(); 2970 assert(used_bytes() == used_in_use_bytes, 2971 "used_bytes() " SIZE_FORMAT 2972 " used_bytes_slow()" SIZE_FORMAT, 2973 used_bytes(), used_in_use_bytes); 2974 for (Metaspace::MetadataType i = Metaspace::ClassType; 2975 i < Metaspace:: MetadataTypeCount; 2976 i = (Metaspace::MetadataType)(i + 1)) { 2977 size_t used_in_use_bytes = used_bytes_slow(i); 2978 assert(used_bytes(i) == used_in_use_bytes, 2979 "used_bytes(%u) " SIZE_FORMAT 2980 " used_bytes_slow(%u)" SIZE_FORMAT, 2981 i, used_bytes(i), i, used_in_use_bytes); 2982 } 2983 #endif 2984 } 2985 2986 void MetaspaceAux::verify_metrics() { 2987 verify_capacity(); 2988 verify_used(); 2989 } 2990 2991 2992 // Metaspace methods 2993 2994 size_t Metaspace::_first_chunk_word_size = 0; 2995 size_t Metaspace::_first_class_chunk_word_size = 0; 2996 2997 size_t Metaspace::_commit_alignment = 0; 2998 size_t Metaspace::_reserve_alignment = 0; 2999 3000 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) { 3001 initialize(lock, type); 3002 } 3003 3004 Metaspace::~Metaspace() { 3005 delete _vsm; 3006 if (using_class_space()) { 3007 delete _class_vsm; 3008 } 3009 } 3010 3011 VirtualSpaceList* Metaspace::_space_list = NULL; 3012 VirtualSpaceList* Metaspace::_class_space_list = NULL; 3013 3014 ChunkManager* Metaspace::_chunk_manager_metadata = NULL; 3015 ChunkManager* Metaspace::_chunk_manager_class = NULL; 3016 3017 #define VIRTUALSPACEMULTIPLIER 2 3018 3019 #ifdef _LP64 3020 
static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1); 3021 3022 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) { 3023 // Figure out the narrow_klass_base and the narrow_klass_shift. The 3024 // narrow_klass_base is the lower of the metaspace base and the cds base 3025 // (if cds is enabled). The narrow_klass_shift depends on the distance 3026 // between the lower base and higher address. 3027 address lower_base; 3028 address higher_address; 3029 #if INCLUDE_CDS 3030 if (UseSharedSpaces) { 3031 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 3032 (address)(metaspace_base + compressed_class_space_size())); 3033 lower_base = MIN2(metaspace_base, cds_base); 3034 } else 3035 #endif 3036 { 3037 higher_address = metaspace_base + compressed_class_space_size(); 3038 lower_base = metaspace_base; 3039 3040 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes; 3041 // If compressed class space fits in lower 32G, we don't need a base. 3042 if (higher_address <= (address)klass_encoding_max) { 3043 lower_base = 0; // Effectively lower base is zero. 3044 } 3045 } 3046 3047 Universe::set_narrow_klass_base(lower_base); 3048 3049 if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) { 3050 Universe::set_narrow_klass_shift(0); 3051 } else { 3052 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces"); 3053 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes); 3054 } 3055 } 3056 3057 #if INCLUDE_CDS 3058 // Return TRUE if the specified metaspace_base and cds_base are close enough 3059 // to work with compressed klass pointers. 3060 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) { 3061 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS"); 3062 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 3063 address lower_base = MIN2((address)metaspace_base, cds_base); 3064 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 3065 (address)(metaspace_base + compressed_class_space_size())); 3066 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax); 3067 } 3068 #endif 3069 3070 // Try to allocate the metaspace at the requested addr. 3071 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) { 3072 assert(using_class_space(), "called improperly"); 3073 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 3074 assert(compressed_class_space_size() < KlassEncodingMetaspaceMax, 3075 "Metaspace size is too big"); 3076 assert_is_ptr_aligned(requested_addr, _reserve_alignment); 3077 assert_is_ptr_aligned(cds_base, _reserve_alignment); 3078 assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment); 3079 3080 // Don't use large pages for the class space. 3081 bool large_pages = false; 3082 3083 #if !(defined(AARCH64) || defined(AIX)) 3084 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(), 3085 _reserve_alignment, 3086 large_pages, 3087 requested_addr); 3088 #else // AARCH64 3089 ReservedSpace metaspace_rs; 3090 3091 // Our compressed klass pointers may fit nicely into the lower 32 3092 // bits. 3093 if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) { 3094 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3095 _reserve_alignment, 3096 large_pages, 3097 requested_addr); 3098 } 3099 3100 if (! 
metaspace_rs.is_reserved()) { 3101 // Aarch64: Try to align metaspace so that we can decode a compressed 3102 // klass with a single MOVK instruction. We can do this iff the 3103 // compressed class base is a multiple of 4G. 3104 // Aix: Search for a place where we can find memory. If we need to load 3105 // the base, 4G alignment is helpful, too. 3106 size_t increment = AARCH64_ONLY(4*)G; 3107 for (char *a = (char*)align_ptr_up(requested_addr, increment); 3108 a < (char*)(1024*G); 3109 a += increment) { 3110 if (a == (char *)(32*G)) { 3111 // Go faster from here on. Zero-based is no longer possible. 3112 increment = 4*G; 3113 } 3114 3115 #if INCLUDE_CDS 3116 if (UseSharedSpaces 3117 && ! can_use_cds_with_metaspace_addr(a, cds_base)) { 3118 // We failed to find an aligned base that will reach. Fall 3119 // back to using our requested addr. 3120 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3121 _reserve_alignment, 3122 large_pages, 3123 requested_addr); 3124 break; 3125 } 3126 #endif 3127 3128 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3129 _reserve_alignment, 3130 large_pages, 3131 a); 3132 if (metaspace_rs.is_reserved()) 3133 break; 3134 } 3135 } 3136 3137 #endif // AARCH64 3138 3139 if (!metaspace_rs.is_reserved()) { 3140 #if INCLUDE_CDS 3141 if (UseSharedSpaces) { 3142 size_t increment = align_size_up(1*G, _reserve_alignment); 3143 3144 // Keep trying to allocate the metaspace, increasing the requested_addr 3145 // by 1GB each time, until we reach an address that will no longer allow 3146 // use of CDS with compressed klass pointers. 3147 char *addr = requested_addr; 3148 while (!metaspace_rs.is_reserved() && (addr + increment > addr) && 3149 can_use_cds_with_metaspace_addr(addr + increment, cds_base)) { 3150 addr = addr + increment; 3151 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3152 _reserve_alignment, large_pages, addr); 3153 } 3154 } 3155 #endif 3156 // If no successful allocation then try to allocate the space anywhere. If 3157 // that fails then OOM doom. At this point we cannot try allocating the 3158 // metaspace as if UseCompressedClassPointers is off because too much 3159 // initialization has happened that depends on UseCompressedClassPointers. 3160 // So, UseCompressedClassPointers cannot be turned off at this point. 3161 if (!metaspace_rs.is_reserved()) { 3162 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3163 _reserve_alignment, large_pages); 3164 if (!metaspace_rs.is_reserved()) { 3165 vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes", 3166 compressed_class_space_size())); 3167 } 3168 } 3169 } 3170 3171 // If we got here then the metaspace got allocated. 3172 MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass); 3173 3174 #if INCLUDE_CDS 3175 // Verify that we can use shared spaces. Otherwise, turn off CDS. 3176 if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) { 3177 FileMapInfo::stop_sharing_and_unmap( 3178 "Could not allocate metaspace at a compatible address"); 3179 } 3180 #endif 3181 set_narrow_klass_base_and_shift((address)metaspace_rs.base(), 3182 UseSharedSpaces ? 

  if (!metaspace_rs.is_reserved()) {
#if INCLUDE_CDS
    if (UseSharedSpaces) {
      size_t increment = align_size_up(1*G, _reserve_alignment);

      // Keep trying to allocate the metaspace, increasing the requested_addr
      // by 1GB each time, until we reach an address that will no longer allow
      // use of CDS with compressed klass pointers.
      char *addr = requested_addr;
      while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
             can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
        addr = addr + increment;
        metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                     _reserve_alignment, large_pages, addr);
      }
    }
#endif
    // If no allocation succeeded so far, try to allocate the space anywhere.
    // If that also fails, we are genuinely out of memory.  At this point we
    // cannot try allocating the metaspace as if UseCompressedClassPointers
    // is off because too much initialization has happened that depends on
    // UseCompressedClassPointers.  So, UseCompressedClassPointers cannot be
    // turned off at this point.
    if (!metaspace_rs.is_reserved()) {
      metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                   _reserve_alignment, large_pages);
      if (!metaspace_rs.is_reserved()) {
        vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
                                              compressed_class_space_size()));
      }
    }
  }

  // If we got here then the metaspace got allocated.
  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);

#if INCLUDE_CDS
  // Verify that we can use shared spaces.  Otherwise, turn off CDS.
  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
    FileMapInfo::stop_sharing_and_unmap(
        "Could not allocate metaspace at a compatible address");
  }
#endif
  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
                                  UseSharedSpaces ? (address)cds_base : 0);

  initialize_class_space(metaspace_rs);

  if (log_is_enabled(Trace, gc, metaspace)) {
    Log(gc, metaspace) log;
    ResourceMark rm;
    print_compressed_class_space(log.trace_stream(), requested_addr);
  }
}

void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
  st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
               p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
  if (_class_space_list != NULL) {
    address base = (address)_class_space_list->current_virtual_space()->bottom();
    st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
              compressed_class_space_size(), p2i(base));
    if (requested_addr != 0) {
      st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
    }
    st->cr();
  }
}

// For UseCompressedClassPointers the class space is reserved above the top of
// the Java heap.  The argument passed in is at the base of the compressed space.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, esp with UseLargePages
  assert(rs.size() >= CompressedClassSpaceSize,
         SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
  assert(using_class_space(), "Must be using class space");
  _class_space_list = new VirtualSpaceList(rs);
  _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);

  if (!_class_space_list->initialization_succeeded()) {
    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
  }
}

#endif
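
// Example of the alignment ergonomics below (assumed platform values): with
// a 4K vm_page_size(), a 64K vm_allocation_granularity() (e.g. Windows), and
// large pages not used for metaspace, _commit_alignment becomes 4K and
// _reserve_alignment becomes MAX2(4K, 64K) == 64K; the metaspace size flags
// are then rounded down to those boundaries.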

void Metaspace::ergo_initialize() {
  if (DumpSharedSpaces) {
    // Using large pages when dumping the shared archive is currently not implemented.
    FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
  }

  size_t page_size = os::vm_page_size();
  if (UseLargePages && UseLargePagesInMetaspace) {
    page_size = os::large_page_size();
  }

  _commit_alignment  = page_size;
  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());

  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this would
  // override the record of whether MaxMetaspaceSize was set on the command
  // line or not.  This information is needed later to conform to the
  // specification of the java.lang.management.MemoryUsage API.
  //
  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
  // globals.hpp to the aligned value, but this is not possible, since the
  // alignment depends on other flags being parsed.
  MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);

  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);

  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");

  MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
  MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);

  CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
  set_compressed_class_space_size(CompressedClassSpaceSize);
}
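
// Overview of global_initialize() below (approximate; the exact layout is
// platform- and flag-dependent): at CDS dump time the metadata virtual space
// is sized from the shared region sizes and the narrow klass encoding is
// pinned to base == bottom of that space with shift 0.  At run time with
// CDS, the archive is mapped first and the class space is placed just above
// it; without CDS, the class space is placed just above the Java heap.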

void Metaspace::global_initialize() {
  MetaspaceGC::initialize();

  // Initialize the alignment for shared spaces.
  int max_alignment = os::vm_allocation_granularity();
  size_t cds_total = 0;

  MetaspaceShared::set_max_alignment(max_alignment);

  if (DumpSharedSpaces) {
#if INCLUDE_CDS
    MetaspaceShared::estimate_regions_size();

    SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
    SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
    SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);

    // Initialize with the sum of the shared space sizes.  The read-only
    // and read-write metaspace chunks will be allocated out of this and the
    // remainder is the misc code and data chunks.
    cds_total = FileMapInfo::shared_spaces_size();
    cds_total = align_size_up(cds_total, _reserve_alignment);
    _space_list = new VirtualSpaceList(cds_total/wordSize);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

    if (!_space_list->initialization_succeeded()) {
      vm_exit_during_initialization("Unable to dump shared archive.", NULL);
    }

#ifdef _LP64
    if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
      vm_exit_during_initialization("Unable to dump shared archive.",
          err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
                  SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
                  "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(),
                  cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
    }

    // Set the compressed klass pointer base so that decoding of these pointers works
    // properly when creating the shared archive.
    assert(UseCompressedOops && UseCompressedClassPointers,
           "UseCompressedOops and UseCompressedClassPointers must be set");
    Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
    log_develop_trace(gc, metaspace)("Setting narrow_klass_base to address: " PTR_FORMAT,
                                     p2i(_space_list->current_virtual_space()->bottom()));

    Universe::set_narrow_klass_shift(0);
#endif // _LP64
#endif // INCLUDE_CDS
  } else {
#if INCLUDE_CDS
    if (UseSharedSpaces) {
      // If using shared space, open the file that contains the shared space
      // and map in the memory before initializing the rest of metaspace (so
      // the addresses don't conflict).
      address cds_address = NULL;
      FileMapInfo* mapinfo = new FileMapInfo();

      // Open the shared archive file, read and validate the header.  If
      // initialization fails, shared spaces [UseSharedSpaces] are
      // disabled and the file is closed.
      // Map in the spaces now, too.
      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
        cds_total = FileMapInfo::shared_spaces_size();
        cds_address = (address)mapinfo->header()->region_addr(0);
#ifdef _LP64
        if (using_class_space()) {
          char* cds_end = (char*)(cds_address + cds_total);
          cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
          // If UseCompressedClassPointers is set then allocate the metaspace area
          // above the heap and above the CDS area (if it exists).
          allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
          // Map the shared string space after compressed pointers, because it
          // relies on the compressed class pointer encoding having been set up.
          mapinfo->map_string_regions();
        }
#endif // _LP64
      } else {
        assert(!mapinfo->is_open() && !UseSharedSpaces,
               "archive file not closed or shared spaces not disabled.");
      }
    }
#endif // INCLUDE_CDS

#ifdef _LP64
    if (!UseSharedSpaces && using_class_space()) {
      char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
      allocate_metaspace_compressed_klass_ptrs(base, 0);
    }
#endif // _LP64

    // Initialize these before initializing the VirtualSpaceList.
    _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
    _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
    // Make the first class chunk bigger than a medium chunk so it's not put
    // on the medium chunk list.  The next chunk will be small and progress
    // from there.  This size was determined by running with -version.
    _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
                                        (CompressedClassSpaceSize/BytesPerWord)*2);
    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
    // Arbitrarily set the initial virtual space to a multiple
    // of the boot class loader size.
    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
    word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
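
    // Worked example for the sizing above (hedged; assumes the common
    // defaults of InitialBootClassLoaderMetaspaceSize == 4M,
    // VIRTUALSPACEMULTIPLIER == 2 and an 8-byte word): _first_chunk_word_size
    // becomes 4M/8 == 512K words, so word_size works out to about 1M words,
    // i.e. roughly 8M, before being rounded up to the reserve alignment.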

    // Initialize the list of virtual spaces.
    _space_list = new VirtualSpaceList(word_size);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

    if (!_space_list->initialization_succeeded()) {
      vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
    }
  }

  _tracer = new MetaspaceTracer();
}

void Metaspace::post_initialize() {
  MetaspaceGC::post_initialize();
}

Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
                                               size_t chunk_word_size,
                                               size_t chunk_bunch) {
  // Get a chunk from the chunk freelist.
  Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
  if (chunk != NULL) {
    return chunk;
  }

  return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
}

void Metaspace::initialize(Mutex* lock, MetaspaceType type) {

  assert(space_list() != NULL,
         "Metadata VirtualSpaceList has not been initialized");
  assert(chunk_manager_metadata() != NULL,
         "Metadata ChunkManager has not been initialized");

  _vsm = new SpaceManager(NonClassType, lock);
  if (_vsm == NULL) {
    return;
  }
  size_t word_size;
  size_t class_word_size;
  vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);

  if (using_class_space()) {
    assert(class_space_list() != NULL,
           "Class VirtualSpaceList has not been initialized");
    assert(chunk_manager_class() != NULL,
           "Class ChunkManager has not been initialized");

    // Allocate a SpaceManager for classes.
    _class_vsm = new SpaceManager(ClassType, lock);
    if (_class_vsm == NULL) {
      return;
    }
  }

  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);

  // Allocate a chunk for metadata objects.
  Metachunk* new_chunk = get_initialization_chunk(NonClassType,
                                                  word_size,
                                                  vsm()->medium_chunk_bunch());
  // For dumping the shared archive, report an error if allocation has failed.
  if (DumpSharedSpaces && new_chunk == NULL) {
    report_insufficient_metaspace(MetaspaceAux::committed_bytes() + word_size * BytesPerWord);
  }
  assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
  if (new_chunk != NULL) {
    // Add to this manager's list of chunks in use and make it the current_chunk().
    vsm()->add_chunk(new_chunk, true);
  }

  // Allocate a chunk for class metadata objects.
  if (using_class_space()) {
    Metachunk* class_chunk = get_initialization_chunk(ClassType,
                                                      class_word_size,
                                                      class_vsm()->medium_chunk_bunch());
    if (class_chunk != NULL) {
      class_vsm()->add_chunk(class_chunk, true);
    } else {
      // For dumping the shared archive, report an error if allocation has failed.
      if (DumpSharedSpaces) {
        report_insufficient_metaspace(MetaspaceAux::committed_bytes() + class_word_size * BytesPerWord);
      }
    }
  }

  _alloc_record_head = NULL;
  _alloc_record_tail = NULL;
}
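
// Note on align_word_size_up() below (hedged): it rounds a word count up so
// that the corresponding byte size is allocation-aligned.  For example, with
// 8-byte words and a 64K allocation granularity, 100 words (800 bytes) would
// round up to 8192 words (64K).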

size_t Metaspace::align_word_size_up(size_t word_size) {
  size_t byte_size = word_size * wordSize;
  return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
}

MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
  // DumpSharedSpaces doesn't use class metadata area (yet).
  // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
  if (is_class_space_allocation(mdtype)) {
    return class_vsm()->allocate(word_size);
  } else {
    return vsm()->allocate(word_size);
  }
}

MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
  assert(delta_bytes > 0, "Must be");

  size_t before = 0;
  size_t after  = 0;
  MetaWord* res;
  bool incremented;

  // Each thread increments the HWM at most once.  Even if the thread fails to increment
  // the HWM, an allocation is still attempted.  This is because another thread must then
  // have incremented the HWM and therefore the allocation might still succeed.
  do {
    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
    res = allocate(word_size, mdtype);
  } while (!incremented && res == NULL);

  if (incremented) {
    tracer()->report_gc_threshold(before, after,
                                  MetaspaceGCThresholdUpdater::ExpandAndAllocate);
    log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
  }

  return res;
}
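
// Illustration of the retry loop above (a sketch): if two threads race,
// thread A may win the increment of the high-water mark (incremented ==
// true) while thread B loses it (incremented == false).  B still attempts
// the allocation, because A's increment may have created enough capacity;
// B only loops while both its increment and its allocation have failed.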

// Space allocated in the Metaspace.  This may
// be across several metadata virtual spaces.
char* Metaspace::bottom() const {
  assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
  return (char*)vsm()->current_chunk()->bottom();
}

size_t Metaspace::used_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
  }
}

size_t Metaspace::free_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_free_in_chunks_in_use();
  }
}

// Space capacity in the Metaspace.  It includes
// space in the list of chunks from which allocations
// have been made.  It does not include space in the global freelist or
// space available in the dictionary, since that
// is already counted in some chunk.
size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_capacity_in_chunks_in_use();
  }
}

size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
  return used_words_slow(mdtype) * BytesPerWord;
}

size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
  return capacity_words_slow(mdtype) * BytesPerWord;
}

size_t Metaspace::allocated_blocks_bytes() const {
  return vsm()->allocated_blocks_bytes() +
         (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
}

size_t Metaspace::allocated_chunks_bytes() const {
  return vsm()->allocated_chunks_bytes() +
         (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
}

void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  assert(!SafepointSynchronize::is_at_safepoint()
         || Thread::current()->is_VM_thread(), "should be the VM thread");

  if (DumpSharedSpaces && PrintSharedSpaces) {
    record_deallocation(ptr, vsm()->get_allocation_word_size(word_size));
  }

  MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);

  if (is_class && using_class_space()) {
    class_vsm()->deallocate(ptr, word_size);
  } else {
    vsm()->deallocate(ptr, word_size);
  }
}
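
// Summary of the allocation path below (a sketch): outside of CDS dumping,
// allocation first tries the loader's metaspace; on failure (and once
// bootstrapping is complete) it asks the collector policy to run a GC and
// retry; if that still fails it falls back to carving a small chunk
// directly, and only then reports a metadata OutOfMemoryError.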

MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              bool read_only, MetaspaceObj::Type type, TRAPS) {
  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return NULL;  // caller does a CHECK_NULL too
  }

  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
         "ClassLoaderData::the_null_class_loader_data() should have been used.");

  // Allocate in metaspaces without taking out a lock, because it deadlocks
  // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
  // to revisit this for application class data sharing.
  if (DumpSharedSpaces) {
    assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
    Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
    MetaWord* result = space->allocate(word_size, NonClassType);
    if (result == NULL) {
      report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
    }
    if (PrintSharedSpaces) {
      space->record_allocation(result, type, space->vsm()->get_allocation_word_size(word_size));
    }

    // Zero initialize.
    Copy::fill_to_words((HeapWord*)result, word_size, 0);

    return result;
  }

  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;

  // Try to allocate metadata.
  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result == NULL) {
    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);

    // Allocation failed.
    if (is_init_completed()) {
      // Only start a GC if the bootstrapping has completed.

      // Try to clean out some memory and retry.
      result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
          loader_data, word_size, mdtype);
    }
  }

  if (result == NULL) {
    SpaceManager* sm;
    if (is_class_space_allocation(mdtype)) {
      sm = loader_data->metaspace_non_null()->class_vsm();
    } else {
      sm = loader_data->metaspace_non_null()->vsm();
    }

    result = sm->get_small_chunk_and_allocate(word_size);

    if (result == NULL) {
      report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
    }
  }

  // Zero initialize.
  Copy::fill_to_words((HeapWord*)result, word_size, 0);

  return result;
}

size_t Metaspace::class_chunk_size(size_t word_size) {
  assert(using_class_space(), "Has to use class space");
  return class_vsm()->calc_chunk_size(word_size);
}

void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);

  // If result is still null, we are out of memory.
  Log(gc, metaspace, freelist) log;
  if (log.is_info()) {
    log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
             is_class_space_allocation(mdtype) ? "class" : "data", word_size);
    ResourceMark rm;
    outputStream* out = log.info_stream();
    if (loader_data->metaspace_or_null() != NULL) {
      loader_data->dump(out);
    }
    MetaspaceAux::dump(out);
  }

  bool out_of_compressed_class_space = false;
  if (is_class_space_allocation(mdtype)) {
    Metaspace* metaspace = loader_data->metaspace_non_null();
    out_of_compressed_class_space =
        MetaspaceAux::committed_bytes(Metaspace::ClassType) +
        (metaspace->class_chunk_size(word_size) * BytesPerWord) >
        CompressedClassSpaceSize;
  }

  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  const char* space_string = out_of_compressed_class_space ?
      "Compressed class space" : "Metaspace";

  report_java_out_of_memory(space_string);

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
        space_string);
  }

  if (!is_init_completed()) {
    vm_exit_during_initialization("OutOfMemoryError", space_string);
  }

  if (out_of_compressed_class_space) {
    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  } else {
    THROW_OOP(Universe::out_of_memory_error_metaspace());
  }
}

const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
  switch (mdtype) {
    case Metaspace::ClassType: return "Class";
    case Metaspace::NonClassType: return "Metadata";
    default:
      assert(false, "Got bad mdtype: %d", (int) mdtype);
      return NULL;
  }
}
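
// Example for record_allocation() below (hedged): records are kept in
// address order and the common case appends to the tail.  When a freed
// (DeallocatedType) record at ptr covering, say, 64 bytes is re-used by a
// 24-byte allocation, the old record is retyped and truncated to 24 bytes,
// and a new DeallocatedType record for the remaining 40 bytes at ptr+24 is
// linked in after it.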

void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
  assert(DumpSharedSpaces, "sanity");

  int byte_size = (int)word_size * wordSize;
  AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);

  if (_alloc_record_head == NULL) {
    _alloc_record_head = _alloc_record_tail = rec;
  } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
    _alloc_record_tail->_next = rec;
    _alloc_record_tail = rec;
  } else {
    // Slow linear search, but this doesn't happen that often, and only when dumping.
    for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
      if (old->_ptr == ptr) {
        assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
        int remain_bytes = old->_byte_size - byte_size;
        assert(remain_bytes >= 0, "sanity");
        old->_type = type;

        if (remain_bytes == 0) {
          delete(rec);
        } else {
          address remain_ptr = address(ptr) + byte_size;
          rec->_ptr = remain_ptr;
          rec->_byte_size = remain_bytes;
          rec->_type = MetaspaceObj::DeallocatedType;
          rec->_next = old->_next;
          old->_byte_size = byte_size;
          old->_next = rec;
        }
        return;
      }
    }
    assert(0, "reallocating a freed pointer that was not recorded");
  }
}

void Metaspace::record_deallocation(void* ptr, size_t word_size) {
  assert(DumpSharedSpaces, "sanity");

  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
    if (rec->_ptr == ptr) {
      assert(rec->_byte_size == (int)word_size * wordSize, "sanity");
      rec->_type = MetaspaceObj::DeallocatedType;
      return;
    }
  }

  assert(0, "deallocating a pointer that was not recorded");
}

void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
  assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");

  address last_addr = (address)bottom();

  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
    address ptr = rec->_ptr;
    if (last_addr < ptr) {
      closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
    }
    closure->doit(ptr, rec->_type, rec->_byte_size);
    last_addr = ptr + rec->_byte_size;
  }

  address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
  if (last_addr < top) {
    closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
  }
}
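
// Example for iterate() above (hedged; assumes bottom() == 0x100): with
// records covering [0x100, 0x120) and [0x140, 0x150), the closure sees the
// first record, then an UnknownType gap for [0x120, 0x140), then the second
// record, and finally an UnknownType tail up to the used top of the space.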

void Metaspace::purge(MetadataType mdtype) {
  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
}

void Metaspace::purge() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  purge(NonClassType);
  if (using_class_space()) {
    purge(ClassType);
  }
}

void Metaspace::print_on(outputStream* out) const {
  // Print both class virtual space counts and metaspace.
  if (Verbose) {
    vsm()->print_on(out);
    if (using_class_space()) {
      class_vsm()->print_on(out);
    }
  }
}

bool Metaspace::contains(const void* ptr) {
  if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
    return true;
  }

  if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
    return true;
  }

  return get_space_list(NonClassType)->contains(ptr);
}

void Metaspace::verify() {
  vsm()->verify();
  if (using_class_space()) {
    class_vsm()->verify();
  }
}

void Metaspace::dump(outputStream* const out) const {
  out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
  vsm()->dump(out);
  if (using_class_space()) {
    out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
    class_vsm()->dump(out);
  }
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

class TestMetaspaceAuxTest : AllStatic {
 public:
  static void test_reserved() {
    size_t reserved = MetaspaceAux::reserved_bytes();

    assert(reserved > 0, "assert");

    size_t committed = MetaspaceAux::committed_bytes();
    assert(committed <= reserved, "assert");

    size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
    assert(reserved_metadata > 0, "assert");
    assert(reserved_metadata <= reserved, "assert");

    if (UseCompressedClassPointers) {
      size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
      assert(reserved_class > 0, "assert");
      assert(reserved_class < reserved, "assert");
    }
  }

  static void test_committed() {
    size_t committed = MetaspaceAux::committed_bytes();

    assert(committed > 0, "assert");

    size_t reserved = MetaspaceAux::reserved_bytes();
    assert(committed <= reserved, "assert");

    size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
    assert(committed_metadata > 0, "assert");
    assert(committed_metadata <= committed, "assert");

    if (UseCompressedClassPointers) {
      size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType);
      assert(committed_class > 0, "assert");
      assert(committed_class < committed, "assert");
    }
  }
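
  // Note on the size math below (hedged): VirtualSpaceSize is 256K words, so
  // large_size starts at 2*256*K words plus one page worth of words, and a
  // second page worth is added on the following line; per the original
  // comment, the intent is a size that is deliberately not
  // vm_allocation_granularity-aligned on Windows.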
  static void test_virtual_space_list_large_chunk() {
    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    // A size larger than VirtualSpaceSize (256k), plus one page to make it
    // _not_ be vm_allocation_granularity-aligned on Windows.
    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
    large_size += (os::vm_page_size()/BytesPerWord);
    vs_list->get_new_chunk(large_size, large_size, 0);
  }

  static void test() {
    test_reserved();
    test_committed();
    test_virtual_space_list_large_chunk();
  }
};

void TestMetaspaceAux_test() {
  TestMetaspaceAuxTest::test();
}

class TestVirtualSpaceNodeTest {
  static void chunk_up(size_t words_left, size_t& num_medium_chunks,
                       size_t& num_small_chunks,
                       size_t& num_specialized_chunks) {
    num_medium_chunks = words_left / MediumChunk;
    words_left = words_left % MediumChunk;

    num_small_chunks = words_left / SmallChunk;
    words_left = words_left % SmallChunk;
    // How many specialized chunks can we get?
    num_specialized_chunks = words_left / SpecializedChunk;
    assert(words_left % SpecializedChunk == 0, "should be nothing left");
  }
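
  // Worked example for chunk_up() above (using the non-class sizes
  // MediumChunk == 8K, SmallChunk == 512 and SpecializedChunk == 128 words):
  // words_left == 2304 yields 0 medium chunks, 4 small chunks (2048 words)
  // and 2 specialized chunks (256 words), with nothing left over.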

 public:
  static void test() {
    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    const size_t vsn_test_size_words = MediumChunk * 4;
    const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;

    // The chunk sizes must be multiples of each other, or this will fail.
    STATIC_ASSERT(MediumChunk % SmallChunk == 0);
    STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);

    { // No committed memory in VSN
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.retire(&cm);
      assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
    }

    { // All of VSN is committed, half is used by chunks
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
      vsn.get_chunk_vs(MediumChunk);
      vsn.get_chunk_vs(MediumChunk);
      vsn.retire(&cm);
      assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
      assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
    }

    const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
    // This doesn't work for systems with vm_page_size >= 16K.
    if (page_chunks < MediumChunk) {
      // 4 pages of VSN is committed, some is used by chunks
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);

      vsn.initialize();
      vsn.expand_by(page_chunks, page_chunks);
      vsn.get_chunk_vs(SmallChunk);
      vsn.get_chunk_vs(SpecializedChunk);
      vsn.retire(&cm);

      // committed - used = words left to retire
      const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;

      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);

      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
    }

    { // Half of VSN is committed, a humongous chunk is used
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
      vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
      vsn.retire(&cm);

      const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);

      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
    }
  }

#define assert_is_available_positive(word_size) \
  assert(vsn.is_available(word_size), \
         #word_size ": " PTR_FORMAT " bytes were not available in " \
         "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
         (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));

#define assert_is_available_negative(word_size) \
  assert(!vsn.is_available(word_size), \
         #word_size ": " PTR_FORMAT " bytes should not be available in " \
         "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
         (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));

  static void test_is_available_positive() {
    // Reserve some memory.
    VirtualSpaceNode vsn(os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Check that is_available accepts the committed size.
    assert_is_available_positive(commit_word_size);

    // Check that is_available accepts half the committed size.
    size_t expand_word_size = commit_word_size / 2;
    assert_is_available_positive(expand_word_size);
  }

  static void test_is_available_negative() {
    // Reserve some memory.
    VirtualSpaceNode vsn(os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Check that is_available doesn't accept a too large size.
    size_t two_times_commit_word_size = commit_word_size * 2;
    assert_is_available_negative(two_times_commit_word_size);
  }
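
  // Note on the overflow test below (hedged): bottom_to_max is the byte
  // distance from the node's bottom to the top of the address space
  // (2^64 - 1 on a 64-bit VM), so asking for one word more than that must
  // make is_available() return false rather than wrap around.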
  static void test_is_available_overflow() {
    // Reserve some memory.
    VirtualSpaceNode vsn(os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Calculate a size that will overflow the virtual space size.
    void* virtual_space_max = (void*)(uintptr_t)-1;
    size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
    size_t overflow_size = bottom_to_max + BytesPerWord;
    size_t overflow_word_size = overflow_size / BytesPerWord;

    // Check that is_available can handle the overflow.
    assert_is_available_negative(overflow_word_size);
  }

  static void test_is_available() {
    TestVirtualSpaceNodeTest::test_is_available_positive();
    TestVirtualSpaceNodeTest::test_is_available_negative();
    TestVirtualSpaceNodeTest::test_is_available_overflow();
  }
};

void TestVirtualSpaceNode_test() {
  TestVirtualSpaceNodeTest::test();
  TestVirtualSpaceNodeTest::test_is_available();
}
#endif