/*
 * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/gcLocker.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to true to enable slow integrity checking of the free chunk lists.
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

// Used in declarations in SpaceManager and ChunkManager.
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bound");
  return (ChunkIndex) (i+1);
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;
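// Worked example (illustrative): the chunk sizes above are in words, so on
// a 64-bit VM (BytesPerWord == 8) a SpecializedChunk of 128 words is 1K
// bytes, a SmallChunk of 512 words is 4K bytes, and a MediumChunk of 8K
// words is 64K bytes. Requests larger than the medium size of their space
// are treated as humongous and are tracked in a dictionary rather than in
// one of the three fixed-size free lists.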
// Manages the global free lists of chunks.
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free lists of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  //   HumongousChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Running totals for the chunks in all free lists of this ChunkManager.
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
           _free_chunks_total > 0,
           "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // Remove (allocate) a chunk from the global freelist; chunks are
  // returned to it via return_chunks().
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  ChunkIndex list_index(size_t size);

  // Remove the chunk from its freelist. It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks lists.
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks lists.
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Remove from a list by size. Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);

#define index_bounds_check(index)                                         \
  assert(index == SpecializedIndex ||                                     \
         index == SmallIndex ||                                           \
         index == MediumIndex ||                                          \
         index == HumongousIndex, err_msg("Bad index: %d", (int) index))

  size_t num_free_chunks(ChunkIndex index) const {
    index_bounds_check(index);

    if (index == HumongousIndex) {
      return _humongous_dictionary.total_free_blocks();
    }

    ssize_t count = _free_chunks[index].count();
    return count == -1 ? 0 : (size_t) count;
  }
  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
    index_bounds_check(index);

    size_t word_size = 0;
    if (index == HumongousIndex) {
      word_size = _humongous_dictionary.total_size();
    } else {
      const size_t size_per_chunk_in_words = _free_chunks[index].size();
      word_size = size_per_chunk_in_words * num_free_chunks(index);
    }

    return word_size * BytesPerWord;
  }

  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
                                         num_free_chunks(SmallIndex),
                                         num_free_chunks(MediumIndex),
                                         num_free_chunks(HumongousIndex),
                                         size_free_chunks_in_bytes(SpecializedIndex),
                                         size_free_chunks_in_bytes(SmallIndex),
                                         size_free_chunks_in_bytes(MediumIndex),
                                         size_free_chunks_in_bytes(HumongousIndex));
  }

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;
};

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  BlockTreeDictionary* _dictionary;

  // Only allocate and split from the freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get a block from, and return a block to, the free list.
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() {
    if (dictionary() == NULL) {
      return 0;
    } else {
      return dictionary()->total_size();
    }
  }

  void print_on(outputStream* st) const;
};
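// Illustrative note on the WasteMultiplier policy above: a dictionary block
// satisfies a request only if the request is at least 1/4 of the block's
// size. E.g., a request for 100 words is not carved out of a 500-word block
// (500 > 4 * 100); get_block() puts the block back and returns NULL, so the
// caller allocates fresh space instead of fragmenting a large free block.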
// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // Total space reserved in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  MetaWord* _top;
  // Count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;

 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end()    const { return (MetaWord*) _virtual_space.high(); }

  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }

  // Address of the next available space in _virtual_space.
  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uint container_count_slow();
  void verify_container_count();
#endif

  // Used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // Get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space. Delegates
  // to VirtualSpace.
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);
  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always works out because all the chunk sizes are multiples of
  // the smallest chunk size.
  void retire(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

#define assert_is_ptr_aligned(ptr, alignment) \
  assert(is_ptr_aligned(ptr, alignment),      \
         err_msg(PTR_FORMAT " is not aligned to " \
                 SIZE_FORMAT, ptr, alignment))

#define assert_is_size_aligned(size, alignment) \
  assert(is_size_aligned(size, alignment),      \
         err_msg(SIZE_FORMAT " is not aligned to " \
                 SIZE_FORMAT, size, alignment))


// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// byte_size is the size of the associated virtualspace.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

#if INCLUDE_CDS
  // This allocates memory with mmap. For DumpSharedSpaces, try to reserve
  // at a configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get a mmap region anywhere if reserving at SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else
#endif
  {
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}
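// Illustrative note on the walk in purge() above: chunks are laid out
// back-to-back from bottom() to top(), so the chunk following a chunk of N
// words at address p starts at p + N MetaWords; no separate chunk index is
// needed to visit them all.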
#ifdef ASSERT
uint VirtualSpaceNode::container_count_slow() {
  uint count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists. Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // Virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space?
  bool _is_class;

  // Sums of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list. This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

  // Chunk up the unused committed space in the current
  // virtual space and add the chunks to the free list.
  void retire_current_virtual_space();

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t chunk_word_size,
                           size_t suggested_commit_granularity);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  bool contains(const void* ptr);
  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;

// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // Protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager. Allocations
  // are done from the current chunk. The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Maximum number of small chunks to allocate to a SpaceManager;
  // for the class space manager, small chunks are unlimited.
  static uint const _small_chunk_limit;

  // Sum of all space in allocated chunks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // Protects virtualspace and chunk expansions.
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
  static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
  static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }

  static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }

  // Accessors
  bool is_class() const { return _mdtype == Metaspace::ClassType; }

  size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
  size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
  size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }

  size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }

  size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size. This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums of Metablocks by the given
  // size. This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete this SpaceManager's portion of the running sums. That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Adjust the initial chunk size to match one of the fixed chunk list sizes,
  // or return the unadjusted size if the requested size is humongous.
  static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
  size_t adjust_initial_chunk_size(size_t requested) const;

  // Get the initial chunk size for this metaspace type.
  size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t chunk_word_size);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk.
  MetaWord* allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // compute the chunk size to request (for expanding space for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Report metaspace memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // Debugging support.
  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
  NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  size_t get_raw_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count: _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT,
                 _container_count, container_count_slow()));
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count: _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(NULL) {}

BlockFreelist::~BlockFreelist() {
  if (_dictionary != NULL) {
    if (Verbose && TraceMetadataChunkAllocation) {
      _dictionary->print_free_lists(gclog_or_tty);
    }
    delete _dictionary;
  }
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  Metablock* free_chunk = ::new (p) Metablock(word_size);
  if (dictionary() == NULL) {
    _dictionary = new BlockTreeDictionary();
  }
  dictionary()->return_chunk(free_chunk);
}
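// Note for get_block() below: requests smaller than
// TreeChunk<Metablock, FreeList<Metablock> >::min_size() cannot be held in
// the dictionary at all ("dark matter"); the tree node itself needs a few
// words for its links, so such small remainders are simply dropped rather
// than tracked.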
MetaWord* BlockFreelist::get_block(size_t word_size) {
  if (dictionary() == NULL) {
    return NULL;
  }

  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    // Dark matter. Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    return_block(new_block + word_size, unused);
  }

  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  if (dictionary() == NULL) {
    return;
  }
  dictionary()->print_free_lists(st);
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}
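// Illustrative arithmetic: pointer_delta(end(), top(), sizeof(MetaWord))
// measures the gap in words, not bytes. E.g., on a 64-bit VM, if end() and
// top() are 4096 bytes apart, free_words_in_vs() returns 512.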
// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging. Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
         "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    if (TraceMetadataChunkAllocation) {
      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(gclog_or_tty);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned only the middle alignment of the VirtualSpace is used.
  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                                             Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
           "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                           (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
           err_msg("Reserved start was not set properly " PTR_FORMAT
                   " != " PTR_FORMAT, reserved()->start(), _rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
           err_msg("Reserved size was not set properly " SIZE_FORMAT
                   " != " SIZE_FORMAT, reserved()->word_size(),
                   _rs.size() / BytesPerWord));
  }

  return result;
}
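// Illustrative note on initialize() above: for a ReservedSpace marked
// special() the whole reservation is already committed by the OS, so
// pre_committed_size equals _rs.size() and later expand_by() calls are not
// needed to back allocations from that node.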
void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
               "[" PTR_FORMAT ", " PTR_FORMAT ", "
               PTR_FORMAT ", " PTR_FORMAT ")",
               vs, capacity / K,
               capacity == 0 ? 0 : used * 100 / capacity,
               bottom(), top(), end(),
               vs->high_boundary());
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                                    \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,             \
         err_msg("Too much committed memory. Committed: " SIZE_FORMAT    \
                 " limit (MaxMetaspaceSize): " SIZE_FORMAT,               \
                 MetaspaceAux::committed_bytes(), MaxMetaspaceSize));

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->word_size());
}
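// Illustrative routing in remove_chunk() above: list_index() maps an exact
// chunk size to its fixed list (e.g., 512 words maps to SmallIndex for the
// non-class space), and any size that is not one of the three fixed sizes
// falls through to HumongousIndex and thus to the dictionary.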
// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count. Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except for at
// unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(SpaceManager::expand_lock());

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->free_chunks(index)->size();

    while (free_words_in_vs() >= chunk_size) {
      DEBUG_ONLY(verify_container_count();)
      Metachunk* chunk = get_chunk_vs(chunk_size);
      assert(chunk != NULL, "allocation should have been successful");

      chunk_manager->return_chunks(index, chunk);
      chunk_manager->inc_free_chunks_total(chunk_size);
      DEBUG_ONLY(verify_container_count();)
    }
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return current_virtual_space()->free_words_in_vs() * BytesPerWord;
}
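// Worked example for VirtualSpaceNode::retire() above (non-class space,
// illustrative): with 9K words of committed-but-unused space and chunk
// sizes of 8K (medium), 512 (small) and 128 (specialized) words, the loop
// carves one 8K-word medium chunk and then two 512-word small chunks,
// leaving 0 words (8K + 2 * 512 == 9K). The final assert holds because
// every chunk size is a multiple of the smallest one.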
// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
           "Reserved memory size differs from requested memory size");
    // Ensure lock-free iteration sees the fully initialized node.
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (TraceMetavirtualspaceAllocation && Verbose) {
    VirtualSpaceNode* vsl = current_virtual_space();
    vsl->print_on(gclog_or_tty);
  }
}
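// Note on publication order (illustrative): create_new_virtual_space()
// issues OrderAccess::storestore() before calling link_vs(), so a reader
// running VirtualSpaceList::contains() without the expand lock never
// observes a linked node whose fields are still uninitialized.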
bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_size_aligned(min_words, Metaspace::commit_alignment_words());
  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
             "The new VirtualSpace was pre-committed, so it "
             "should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size       = align_size_up(chunk_word_size,              Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_size_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(chunk_word_size);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  if (TraceMetadataChunkAllocation && Verbose) {
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* node = iter.get_next();
      node->print_on(st);
    }
  }
}

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed. If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata. With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion. The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces. The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GCs. New flags can be implemented
// if really needed. MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM. MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.
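// Worked example for delta_capacity_until_GC() below (illustrative, using
// the common defaults MinMetaspaceExpansion = 256K and
// MaxMetaspaceExpansion = 4M): a 100K request is rounded up to 256K, a 1M
// request becomes 4M, and an 8M request becomes 8M + 256K, each aligned to
// the commit granularity.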
// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon. If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_size_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  size_t capacity_until_GC = (size_t) _capacity_until_GC;
  size_t new_value = capacity_until_GC + v;

  if (new_value < capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
  }

  intptr_t expected = (intptr_t) capacity_until_GC;
  intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);

  if (expected != actual) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = capacity_until_GC;
  }
  return true;
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
}
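// Usage sketch for inc_capacity_until_GC() above (illustrative; not a call
// site in this file): the single compare-and-swap means a losing racer gets
// back false and must re-read the capacity and retry, e.g.:
//
//   size_t new_cap, old_cap;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, &new_cap, &old_cap)) {
//     // Another thread raced ahead; _capacity_until_GC has changed, retry.
//   }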
bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         err_msg("capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
                 capacity_until_gc, committed_bytes));

  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);

  return left_to_commit / BytesPerWord;
}

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceAux::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't let the desired capacity drop below the initial size (MetaspaceSize).
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);
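  // Worked example (illustrative): with used_after_gc = 60M and
  // MinMetaspaceFreeRatio = 40 (a common default), maximum_used_percentage
  // is 0.6, so minimum_desired_capacity = 60M / 0.6 = 100M; the HWM is kept
  // high enough that at least 40% of that capacity would be free.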
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                           "  minimum_free_percentage: %6.2f"
                           "  maximum_used_percentage: %6.2f",
                           minimum_free_percentage,
                           maximum_used_percentage);
    gclog_or_tty->print_cr("  "
                           "  used_after_gc       : %6.1fKB",
                           used_after_gc / (double) K);
  }


  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("    expanding:"
                               "  minimum_desired_capacity: %6.1fKB"
                               "  expand_bytes: %6.1fKB"
                               "  MinMetaspaceExpansion: %6.1fKB"
                               "  new metaspace HWM: %6.1fKB",
                               minimum_desired_capacity / (double) K,
                               expand_bytes / (double) K,
                               MinMetaspaceExpansion / (double) K,
                               new_capacity_until_GC / (double) K);
      }
    }
    return;
  }

  // No expansion, now see if we want to shrink.
  // We would never want to shrink more than this.
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
  assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
                                        max_shrink_bytes));

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  maximum_free_percentage: %6.2f"
                             "  minimum_used_percentage: %6.2f",
                             maximum_free_percentage,
                             minimum_used_percentage);
      gclog_or_tty->print_cr("  "
                             "  minimum_desired_capacity: %6.1fKB"
                             "  maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K,
                             maximum_desired_capacity / (double) K);
    }

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
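      // Worked example (illustrative): with used_after_gc = 60M and
      // MaxMetaspaceFreeRatio = 70 (a common default), minimum_used_percentage
      // is 0.3 and maximum_desired_capacity = 60M / 0.3 = 200M; only capacity
      // beyond 200M is a candidate for shrinking.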
1610 shrink_bytes = shrink_bytes / 100 * current_shrink_factor; 1611 1612 shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment()); 1613 1614 assert(shrink_bytes <= max_shrink_bytes, 1615 err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, 1616 shrink_bytes, max_shrink_bytes)); 1617 if (current_shrink_factor == 0) { 1618 _shrink_factor = 10; 1619 } else { 1620 _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100); 1621 } 1622 if (PrintGCDetails && Verbose) { 1623 gclog_or_tty->print_cr(" " 1624 " shrinking:" 1625 " initSize: %.1fK" 1626 " maximum_desired_capacity: %.1fK", 1627 MetaspaceSize / (double) K, 1628 maximum_desired_capacity / (double) K); 1629 gclog_or_tty->print_cr(" " 1630 " shrink_bytes: %.1fK" 1631 " current_shrink_factor: %d" 1632 " new shrink factor: %d" 1633 " MinMetaspaceExpansion: %.1fK", 1634 shrink_bytes / (double) K, 1635 current_shrink_factor, 1636 _shrink_factor, 1637 MinMetaspaceExpansion / (double) K); 1638 } 1639 } 1640 } 1641 1642 // Don't shrink unless it's significant 1643 if (shrink_bytes >= MinMetaspaceExpansion && 1644 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { 1645 size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes); 1646 Metaspace::tracer()->report_gc_threshold(capacity_until_GC, 1647 new_capacity_until_GC, 1648 MetaspaceGCThresholdUpdater::ComputeNewSize); 1649 } 1650 } 1651 1652 // Metadebug methods 1653 1654 void Metadebug::init_allocation_fail_alot_count() { 1655 if (MetadataAllocationFailALot) { 1656 _allocation_fail_alot_count = 1657 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0)); 1658 } 1659 } 1660 1661 #ifdef ASSERT 1662 bool Metadebug::test_metadata_failure() { 1663 if (MetadataAllocationFailALot && 1664 Threads::is_vm_complete()) { 1665 if (_allocation_fail_alot_count > 0) { 1666 _allocation_fail_alot_count--; 1667 } else { 1668 if (TraceMetadataChunkAllocation && Verbose) { 1669 gclog_or_tty->print_cr("Metadata allocation failing for " 1670 "MetadataAllocationFailALot"); 1671 } 1672 init_allocation_fail_alot_count(); 1673 return true; 1674 } 1675 } 1676 return false; 1677 } 1678 #endif 1679 1680 // ChunkManager methods 1681 1682 size_t ChunkManager::free_chunks_total_words() { 1683 return _free_chunks_total; 1684 } 1685 1686 size_t ChunkManager::free_chunks_total_bytes() { 1687 return free_chunks_total_words() * BytesPerWord; 1688 } 1689 1690 size_t ChunkManager::free_chunks_count() { 1691 #ifdef ASSERT 1692 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) { 1693 MutexLockerEx cl(SpaceManager::expand_lock(), 1694 Mutex::_no_safepoint_check_flag); 1695 // This lock is only needed in debug because the verification 1696 // of the _free_chunks_totals walks the list of free chunks 1697 slow_locked_verify_free_chunks_count(); 1698 } 1699 #endif 1700 return _free_chunks_count; 1701 } 1702 1703 void ChunkManager::locked_verify_free_chunks_total() { 1704 assert_lock_strong(SpaceManager::expand_lock()); 1705 assert(sum_free_chunks() == _free_chunks_total, 1706 err_msg("_free_chunks_total " SIZE_FORMAT " is not the" 1707 " same as sum " SIZE_FORMAT, _free_chunks_total, 1708 sum_free_chunks())); 1709 } 1710 1711 void ChunkManager::verify_free_chunks_total() { 1712 MutexLockerEx cl(SpaceManager::expand_lock(), 1713 Mutex::_no_safepoint_check_flag); 1714 locked_verify_free_chunks_total(); 1715 } 1716 1717 void ChunkManager::locked_verify_free_chunks_count() { 1718 assert_lock_strong(SpaceManager::expand_lock()); 1719 
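  // Recompute the chunk count by walking the free lists (and the humongous
  // dictionary) and compare it against the running _free_chunks_count total.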
assert(sum_free_chunks_count() == _free_chunks_count, 1720 err_msg("_free_chunks_count " SIZE_FORMAT " is not the" 1721 " same as sum " SIZE_FORMAT, _free_chunks_count, 1722 sum_free_chunks_count())); 1723 } 1724 1725 void ChunkManager::verify_free_chunks_count() { 1726 #ifdef ASSERT 1727 MutexLockerEx cl(SpaceManager::expand_lock(), 1728 Mutex::_no_safepoint_check_flag); 1729 locked_verify_free_chunks_count(); 1730 #endif 1731 } 1732 1733 void ChunkManager::verify() { 1734 MutexLockerEx cl(SpaceManager::expand_lock(), 1735 Mutex::_no_safepoint_check_flag); 1736 locked_verify(); 1737 } 1738 1739 void ChunkManager::locked_verify() { 1740 locked_verify_free_chunks_count(); 1741 locked_verify_free_chunks_total(); 1742 } 1743 1744 void ChunkManager::locked_print_free_chunks(outputStream* st) { 1745 assert_lock_strong(SpaceManager::expand_lock()); 1746 st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, 1747 _free_chunks_total, _free_chunks_count); 1748 } 1749 1750 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) { 1751 assert_lock_strong(SpaceManager::expand_lock()); 1752 st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, 1753 sum_free_chunks(), sum_free_chunks_count()); 1754 } 1755 1756 ChunkList* ChunkManager::free_chunks(ChunkIndex index) { 1757 assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex, 1758 err_msg("Bad index: %d", (int)index)); 1759 1760 return &_free_chunks[index]; 1761 } 1762 1763 // These methods that sum the free chunk lists are used in printing 1764 // methods that are used in product builds. 1765 size_t ChunkManager::sum_free_chunks() { 1766 assert_lock_strong(SpaceManager::expand_lock()); 1767 size_t result = 0; 1768 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { 1769 ChunkList* list = free_chunks(i); 1770 1771 if (list == NULL) { 1772 continue; 1773 } 1774 1775 result = result + list->count() * list->size(); 1776 } 1777 result = result + humongous_dictionary()->total_size(); 1778 return result; 1779 } 1780 1781 size_t ChunkManager::sum_free_chunks_count() { 1782 assert_lock_strong(SpaceManager::expand_lock()); 1783 size_t count = 0; 1784 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { 1785 ChunkList* list = free_chunks(i); 1786 if (list == NULL) { 1787 continue; 1788 } 1789 count = count + list->count(); 1790 } 1791 count = count + humongous_dictionary()->total_free_blocks(); 1792 return count; 1793 } 1794 1795 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) { 1796 ChunkIndex index = list_index(word_size); 1797 assert(index < HumongousIndex, "No humongous list"); 1798 return free_chunks(index); 1799 } 1800 1801 Metachunk* ChunkManager::free_chunks_get(size_t word_size) { 1802 assert_lock_strong(SpaceManager::expand_lock()); 1803 1804 slow_locked_verify(); 1805 1806 Metachunk* chunk = NULL; 1807 if (list_index(word_size) != HumongousIndex) { 1808 ChunkList* free_list = find_free_chunks_list(word_size); 1809 assert(free_list != NULL, "Sanity check"); 1810 1811 chunk = free_list->head(); 1812 1813 if (chunk == NULL) { 1814 return NULL; 1815 } 1816 1817 // Remove the chunk as the head of the list. 
1818     free_list->remove_chunk(chunk);
1819
1820     if (TraceMetadataChunkAllocation && Verbose) {
1821       gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1822                              PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1823                              free_list, chunk, chunk->word_size());
1824     }
1825   } else {
1826     chunk = humongous_dictionary()->get_chunk(
1827       word_size,
1828       FreeBlockDictionary<Metachunk>::atLeast);
1829
1830     if (chunk == NULL) {
1831       return NULL;
1832     }
1833
1834     if (TraceMetadataHumongousAllocation) {
1835       size_t waste = chunk->word_size() - word_size;
1836       gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1837                              SIZE_FORMAT " for requested size " SIZE_FORMAT
1838                              " waste " SIZE_FORMAT,
1839                              chunk->word_size(), word_size, waste);
1840     }
1841   }
1842
1843   // Chunk is being removed from the chunk free list.
1844   dec_free_chunks_total(chunk->word_size());
1845
1846   // Remove its links to this freelist
1847   chunk->set_next(NULL);
1848   chunk->set_prev(NULL);
1849 #ifdef ASSERT
1850   // Chunk is no longer on any freelist. Setting to false makes
1851   // container_count_slow() work.
1852   chunk->set_is_tagged_free(false);
1853 #endif
1854   chunk->container()->inc_container_count();
1855
1856   slow_locked_verify();
1857   return chunk;
1858 }
1859
1860 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1861   assert_lock_strong(SpaceManager::expand_lock());
1862   slow_locked_verify();
1863
1864   // Take from the beginning of the list
1865   Metachunk* chunk = free_chunks_get(word_size);
1866   if (chunk == NULL) {
1867     return NULL;
1868   }
1869
1870   assert((word_size <= chunk->word_size()) ||
1871          (list_index(chunk->word_size()) == HumongousIndex),
1872          "Non-humongous variable sized chunk");
1873   if (TraceMetadataChunkAllocation) {
1874     size_t list_count;
1875     if (list_index(word_size) < HumongousIndex) {
1876       ChunkList* list = find_free_chunks_list(word_size);
1877       list_count = list->count();
1878     } else {
1879       list_count = humongous_dictionary()->total_count();
1880     }
1881     gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1882                         PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1883                         this, chunk, chunk->word_size(), list_count);
1884     locked_print_free_chunks(gclog_or_tty);
1885   }
1886
1887   return chunk;
1888 }
1889
1890 void ChunkManager::print_on(outputStream* out) const {
1891   if (PrintFLSStatistics != 0) {
1892     const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
1893   }
1894 }
1895
1896 // SpaceManager methods
1897
1898 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
1899   size_t chunk_sizes[] = {
1900       specialized_chunk_size(is_class_space),
1901       small_chunk_size(is_class_space),
1902       medium_chunk_size(is_class_space)
1903   };
1904
1905   // Adjust up to one of the fixed chunk sizes ...
1906   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
1907     if (requested <= chunk_sizes[i]) {
1908       return chunk_sizes[i];
1909     }
1910   }
1911
1912   // ... or return the size as a humongous chunk.
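  // Illustrative (non-class space, using the ChunkSizes values): a request
  // of 300 words is rounded up to SmallChunk (512); a request of 10000 words
  // exceeds MediumChunk (8K) and falls through to the humongous case below.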
1913   return requested;
1914 }
1915
1916 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
1917   return adjust_initial_chunk_size(requested, is_class());
1918 }
1919
1920 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
1921   size_t requested;
1922
1923   if (is_class()) {
1924     switch (type) {
1925     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
1926     case Metaspace::ROMetaspaceType:         requested = ClassSpecializedChunk; break;
1927     case Metaspace::ReadWriteMetaspaceType:  requested = ClassSpecializedChunk; break;
1928     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
1929     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
1930     default:                                 requested = ClassSmallChunk; break;
1931     }
1932   } else {
1933     switch (type) {
1934     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
1935     case Metaspace::ROMetaspaceType:         requested = SharedReadOnlySize / wordSize; break;
1936     case Metaspace::ReadWriteMetaspaceType:  requested = SharedReadWriteSize / wordSize; break;
1937     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
1938     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
1939     default:                                 requested = SmallChunk; break;
1940     }
1941   }
1942
1943   // Adjust to one of the fixed chunk sizes (unless humongous)
1944   const size_t adjusted = adjust_initial_chunk_size(requested);
1945
1946   assert(adjusted != 0, err_msg("Incorrect initial chunk size. Requested: "
1947                                 SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted));
1948
1949   return adjusted;
1950 }
1951
1952 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1953   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1954   size_t free = 0;
1955   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1956     Metachunk* chunk = chunks_in_use(i);
1957     while (chunk != NULL) {
1958       free += chunk->free_word_size();
1959       chunk = chunk->next();
1960     }
1961   }
1962   return free;
1963 }
1964
1965 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1966   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1967   size_t result = 0;
1968   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1969     result += sum_waste_in_chunks_in_use(i);
1970   }
1971
1972   return result;
1973 }
1974
1975 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1976   size_t result = 0;
1977   Metachunk* chunk = chunks_in_use(index);
1978   // Count the free space in all the chunks but not in the
1979   // current chunk from which allocations are still being done.
1980   while (chunk != NULL) {
1981     if (chunk != current_chunk()) {
1982       result += chunk->free_word_size();
1983     }
1984     chunk = chunk->next();
1985   }
1986   return result;
1987 }
1988
1989 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1990   // For CMS use "allocated_chunks_words()", which does not need the
1991   // Metaspace lock. For the other collectors sum over the
1992   // lists. Using both methods serves as a check on "allocated_chunks_words()":
1993   // sum_capacity_in_chunks_in_use() is too expensive to use
1994   // in the product, so allocated_chunks_words() should be used instead,
1995   // but we allow for checking that allocated_chunks_words() returns the same
1996   // value as sum_capacity_in_chunks_in_use(), which is the definitive
1997   // answer.
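  // The cross-check between the two methods happens in ~SpaceManager(), which
  // asserts that sum_capacity_in_chunks_in_use() == allocated_chunks_words().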
1998   if (UseConcMarkSweepGC) {
1999     return allocated_chunks_words();
2000   } else {
2001     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2002     size_t sum = 0;
2003     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2004       Metachunk* chunk = chunks_in_use(i);
2005       while (chunk != NULL) {
2006         sum += chunk->word_size();
2007         chunk = chunk->next();
2008       }
2009     }
2010     return sum;
2011   }
2012 }
2013
2014 size_t SpaceManager::sum_count_in_chunks_in_use() {
2015   size_t count = 0;
2016   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2017     count = count + sum_count_in_chunks_in_use(i);
2018   }
2019
2020   return count;
2021 }
2022
2023 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
2024   size_t count = 0;
2025   Metachunk* chunk = chunks_in_use(i);
2026   while (chunk != NULL) {
2027     count++;
2028     chunk = chunk->next();
2029   }
2030   return count;
2031 }
2032
2033
2034 size_t SpaceManager::sum_used_in_chunks_in_use() const {
2035   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2036   size_t used = 0;
2037   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2038     Metachunk* chunk = chunks_in_use(i);
2039     while (chunk != NULL) {
2040       used += chunk->used_word_size();
2041       chunk = chunk->next();
2042     }
2043   }
2044   return used;
2045 }
2046
2047 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
2048
2049   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2050     Metachunk* chunk = chunks_in_use(i);
2051     st->print("SpaceManager: %s " PTR_FORMAT,
2052               chunk_size_name(i), chunk);
2053     if (chunk != NULL) {
2054       st->print_cr(" free " SIZE_FORMAT,
2055                    chunk->free_word_size());
2056     } else {
2057       st->cr();
2058     }
2059   }
2060
2061   chunk_manager()->locked_print_free_chunks(st);
2062   chunk_manager()->locked_print_sum_free_chunks(st);
2063 }
2064
2065 size_t SpaceManager::calc_chunk_size(size_t word_size) {
2066
2067   // Decide between a small chunk and a medium chunk. Up to
2068   // _small_chunk_limit small chunks can be allocated but
2069   // once a medium chunk has been allocated, no more small
2070   // chunks will be allocated.
2071   size_t chunk_word_size;
2072   if (chunks_in_use(MediumIndex) == NULL &&
2073       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2074     chunk_word_size = (size_t) small_chunk_size();
2075     if (word_size + Metachunk::overhead() > small_chunk_size()) {
2076       chunk_word_size = medium_chunk_size();
2077     }
2078   } else {
2079     chunk_word_size = medium_chunk_size();
2080   }
2081
2082   // Might still need a humongous chunk. Enforce
2083   // humongous allocation sizes to be aligned up to
2084   // the smallest chunk size.
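  // Illustrative (hypothetical numbers): with word_size + overhead == 5000
  // and smallest_chunk_size() == 128, if_humongous_sized_chunk below becomes
  // 5120, the next 128-word multiple.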
2085 size_t if_humongous_sized_chunk = 2086 align_size_up(word_size + Metachunk::overhead(), 2087 smallest_chunk_size()); 2088 chunk_word_size = 2089 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk); 2090 2091 assert(!SpaceManager::is_humongous(word_size) || 2092 chunk_word_size == if_humongous_sized_chunk, 2093 err_msg("Size calculation is wrong, word_size " SIZE_FORMAT 2094 " chunk_word_size " SIZE_FORMAT, 2095 word_size, chunk_word_size)); 2096 if (TraceMetadataHumongousAllocation && 2097 SpaceManager::is_humongous(word_size)) { 2098 gclog_or_tty->print_cr("Metadata humongous allocation:"); 2099 gclog_or_tty->print_cr(" word_size " PTR_FORMAT, word_size); 2100 gclog_or_tty->print_cr(" chunk_word_size " PTR_FORMAT, 2101 chunk_word_size); 2102 gclog_or_tty->print_cr(" chunk overhead " PTR_FORMAT, 2103 Metachunk::overhead()); 2104 } 2105 return chunk_word_size; 2106 } 2107 2108 void SpaceManager::track_metaspace_memory_usage() { 2109 if (is_init_completed()) { 2110 if (is_class()) { 2111 MemoryService::track_compressed_class_memory_usage(); 2112 } 2113 MemoryService::track_metaspace_memory_usage(); 2114 } 2115 } 2116 2117 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) { 2118 assert(vs_list()->current_virtual_space() != NULL, 2119 "Should have been set"); 2120 assert(current_chunk() == NULL || 2121 current_chunk()->allocate(word_size) == NULL, 2122 "Don't need to expand"); 2123 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 2124 2125 if (TraceMetadataChunkAllocation && Verbose) { 2126 size_t words_left = 0; 2127 size_t words_used = 0; 2128 if (current_chunk() != NULL) { 2129 words_left = current_chunk()->free_word_size(); 2130 words_used = current_chunk()->used_word_size(); 2131 } 2132 gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT 2133 " words " SIZE_FORMAT " words used " SIZE_FORMAT 2134 " words left", 2135 word_size, words_used, words_left); 2136 } 2137 2138 // Get another chunk out of the virtual space 2139 size_t chunk_word_size = calc_chunk_size(word_size); 2140 Metachunk* next = get_new_chunk(chunk_word_size); 2141 2142 MetaWord* mem = NULL; 2143 2144 // If a chunk was available, add it to the in-use chunk list 2145 // and do an allocation from it. 2146 if (next != NULL) { 2147 // Add to this manager's list of chunks in use. 2148 add_chunk(next, false); 2149 mem = next->allocate(word_size); 2150 } 2151 2152 // Track metaspace memory usage statistic. 2153 track_metaspace_memory_usage(); 2154 2155 return mem; 2156 } 2157 2158 void SpaceManager::print_on(outputStream* st) const { 2159 2160 for (ChunkIndex i = ZeroIndex; 2161 i < NumberOfInUseLists ; 2162 i = next_chunk_index(i) ) { 2163 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT, 2164 chunks_in_use(i), 2165 chunks_in_use(i) == NULL ? 
0 : chunks_in_use(i)->word_size()); 2166 } 2167 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT 2168 " Humongous " SIZE_FORMAT, 2169 sum_waste_in_chunks_in_use(SmallIndex), 2170 sum_waste_in_chunks_in_use(MediumIndex), 2171 sum_waste_in_chunks_in_use(HumongousIndex)); 2172 // block free lists 2173 if (block_freelists() != NULL) { 2174 st->print_cr("total in block free lists " SIZE_FORMAT, 2175 block_freelists()->total_size()); 2176 } 2177 } 2178 2179 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype, 2180 Mutex* lock) : 2181 _mdtype(mdtype), 2182 _allocated_blocks_words(0), 2183 _allocated_chunks_words(0), 2184 _allocated_chunks_count(0), 2185 _lock(lock) 2186 { 2187 initialize(); 2188 } 2189 2190 void SpaceManager::inc_size_metrics(size_t words) { 2191 assert_lock_strong(SpaceManager::expand_lock()); 2192 // Total of allocated Metachunks and allocated Metachunks count 2193 // for each SpaceManager 2194 _allocated_chunks_words = _allocated_chunks_words + words; 2195 _allocated_chunks_count++; 2196 // Global total of capacity in allocated Metachunks 2197 MetaspaceAux::inc_capacity(mdtype(), words); 2198 // Global total of allocated Metablocks. 2199 // used_words_slow() includes the overhead in each 2200 // Metachunk so include it in the used when the 2201 // Metachunk is first added (so only added once per 2202 // Metachunk). 2203 MetaspaceAux::inc_used(mdtype(), Metachunk::overhead()); 2204 } 2205 2206 void SpaceManager::inc_used_metrics(size_t words) { 2207 // Add to the per SpaceManager total 2208 Atomic::add_ptr(words, &_allocated_blocks_words); 2209 // Add to the global total 2210 MetaspaceAux::inc_used(mdtype(), words); 2211 } 2212 2213 void SpaceManager::dec_total_from_size_metrics() { 2214 MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words()); 2215 MetaspaceAux::dec_used(mdtype(), allocated_blocks_words()); 2216 // Also deduct the overhead per Metachunk 2217 MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead()); 2218 } 2219 2220 void SpaceManager::initialize() { 2221 Metadebug::init_allocation_fail_alot_count(); 2222 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2223 _chunks_in_use[i] = NULL; 2224 } 2225 _current_chunk = NULL; 2226 if (TraceMetadataChunkAllocation && Verbose) { 2227 gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this); 2228 } 2229 } 2230 2231 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) { 2232 if (chunks == NULL) { 2233 return; 2234 } 2235 ChunkList* list = free_chunks(index); 2236 assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes"); 2237 assert_lock_strong(SpaceManager::expand_lock()); 2238 Metachunk* cur = chunks; 2239 2240 // This returns chunks one at a time. 
// If a new
2241 // class could be created that is a base class
2242 // of FreeList, then something like FreeList::prepend()
2243 // could be used in place of this loop.
2244   while (cur != NULL) {
2245     assert(cur->container() != NULL, "Container should have been set");
2246     cur->container()->dec_container_count();
2247     // Capture the next link before it is changed
2248     // by the call to return_chunk_at_head();
2249     Metachunk* next = cur->next();
2250     DEBUG_ONLY(cur->set_is_tagged_free(true);)
2251     list->return_chunk_at_head(cur);
2252     cur = next;
2253   }
2254 }
2255
2256 SpaceManager::~SpaceManager() {
2257   // This call locks this->_lock, which can't be done while holding expand_lock()
2258   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2259          err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2260                  " allocated_chunks_words() " SIZE_FORMAT,
2261                  sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
2262
2263   MutexLockerEx fcl(SpaceManager::expand_lock(),
2264                     Mutex::_no_safepoint_check_flag);
2265
2266   chunk_manager()->slow_locked_verify();
2267
2268   dec_total_from_size_metrics();
2269
2270   if (TraceMetadataChunkAllocation && Verbose) {
2271     gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
2272     locked_print_chunks_in_use_on(gclog_or_tty);
2273   }
2274
2275   // Do not mangle freed Metachunks. The chunk size inside Metachunks
2276   // is used during the freeing of VirtualSpaceNodes.
2277
2278   // Have to update before the chunks_in_use lists are emptied
2279   // below.
2280   chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2281                                          sum_count_in_chunks_in_use());
2282
2283   // Add all the chunks in use by this space manager
2284   // to the global list of free chunks.
2285
2286   // Follow each list of chunks-in-use and add them to the
2287   // free lists. Each list is NULL terminated.
2288
2289   for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2290     if (TraceMetadataChunkAllocation && Verbose) {
2291       gclog_or_tty->print_cr("returned %d %s chunks to freelist",
2292                              sum_count_in_chunks_in_use(i),
2293                              chunk_size_name(i));
2294     }
2295     Metachunk* chunks = chunks_in_use(i);
2296     chunk_manager()->return_chunks(i, chunks);
2297     set_chunks_in_use(i, NULL);
2298     if (TraceMetadataChunkAllocation && Verbose) {
2299       gclog_or_tty->print_cr("updated freelist count %d %s",
2300                              chunk_manager()->free_chunks(i)->count(),
2301                              chunk_size_name(i));
2302     }
2303     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2304   }
2305
2306   // The medium chunk case may be optimized by passing the head and
2307   // tail of the medium chunk list to add_at_head(). The tail is often
2308   // the current chunk but there are probably exceptions.
2309
2310   // Humongous chunks
2311   if (TraceMetadataChunkAllocation && Verbose) {
2312     gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
2313                            sum_count_in_chunks_in_use(HumongousIndex),
2314                            chunk_size_name(HumongousIndex));
2315     gclog_or_tty->print("Humongous chunk dictionary: ");
2316   }
2317   // Humongous chunks are never the current chunk.
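  // Walk the humongous list: return each chunk to the humongous dictionary
  // and decrement its container's chunk count as it is released.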
2318   Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2319
2320   while (humongous_chunks != NULL) {
2321 #ifdef ASSERT
2322     humongous_chunks->set_is_tagged_free(true);
2323 #endif
2324     if (TraceMetadataChunkAllocation && Verbose) {
2325       gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2326                           humongous_chunks,
2327                           humongous_chunks->word_size());
2328     }
2329     assert(humongous_chunks->word_size() == (size_t)
2330            align_size_up(humongous_chunks->word_size(),
2331                          smallest_chunk_size()),
2332            err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2333                    " granularity %d",
2334                    humongous_chunks->word_size(), smallest_chunk_size()));
2335     Metachunk* next_humongous_chunks = humongous_chunks->next();
2336     humongous_chunks->container()->dec_container_count();
2337     chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2338     humongous_chunks = next_humongous_chunks;
2339   }
2340   if (TraceMetadataChunkAllocation && Verbose) {
2341     gclog_or_tty->cr();
2342     gclog_or_tty->print_cr("updated dictionary count %d %s",
2343                            chunk_manager()->humongous_dictionary()->total_count(),
2344                            chunk_size_name(HumongousIndex));
2345   }
2346   chunk_manager()->slow_locked_verify();
2347 }
2348
2349 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2350   switch (index) {
2351     case SpecializedIndex:
2352       return "Specialized";
2353     case SmallIndex:
2354       return "Small";
2355     case MediumIndex:
2356       return "Medium";
2357     case HumongousIndex:
2358       return "Humongous";
2359     default:
2360       return NULL;
2361   }
2362 }
2363
2364 ChunkIndex ChunkManager::list_index(size_t size) {
2365   if (free_chunks(SpecializedIndex)->size() == size) {
2366     return SpecializedIndex;
2367   }
2368   if (free_chunks(SmallIndex)->size() == size) {
2369     return SmallIndex;
2370   }
2371   if (free_chunks(MediumIndex)->size() == size) {
2372     return MediumIndex;
2373   }
2374
2375   assert(size > free_chunks(MediumIndex)->size(), "Not a humongous chunk");
2376   return HumongousIndex;
2377 }
2378
2379 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2380   assert_lock_strong(_lock);
2381   size_t raw_word_size = get_raw_word_size(word_size);
2382   size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size();
2383   assert(raw_word_size >= min_size,
2384          err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
2385   block_freelists()->return_block(p, raw_word_size);
2386 }
2387
2388 // Adds a chunk to the list of chunks in use.
2389 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2390
2391   assert(new_chunk != NULL, "Should not be NULL");
2392   assert(new_chunk->next() == NULL, "Should not be on a list");
2393
2394   new_chunk->reset_empty();
2395
2396   // Find the correct list and set the current
2397   // chunk for that list.
2398   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
2399
2400   if (index != HumongousIndex) {
2401     retire_current_chunk();
2402     set_current_chunk(new_chunk);
2403     new_chunk->set_next(chunks_in_use(index));
2404     set_chunks_in_use(index, new_chunk);
2405   } else {
2406     // For null class loader data and DumpSharedSpaces, the first chunk isn't
2407     // small, so small will be null. Link this first chunk as the current
2408     // chunk.
2409     if (make_current) {
2410       // Set as the current chunk but otherwise treat as a humongous chunk.
2411       set_current_chunk(new_chunk);
2412     }
2413     // Link at head. The _current_chunk only points to a humongous chunk for
2414     // the null class loader metaspace (class and data virtual space managers),
2415     // so it will not point to the tail
2416     // of the humongous chunks list.
2417     new_chunk->set_next(chunks_in_use(HumongousIndex));
2418     set_chunks_in_use(HumongousIndex, new_chunk);
2419
2420     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2421   }
2422
2423   // Add to the running sum of capacity
2424   inc_size_metrics(new_chunk->word_size());
2425
2426   assert(new_chunk->is_empty(), "Not ready for reuse");
2427   if (TraceMetadataChunkAllocation && Verbose) {
2428     gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
2429                         sum_count_in_chunks_in_use());
2430     new_chunk->print_on(gclog_or_tty);
2431     chunk_manager()->locked_print_free_chunks(gclog_or_tty);
2432   }
2433 }
2434
2435 void SpaceManager::retire_current_chunk() {
2436   if (current_chunk() != NULL) {
2437     size_t remaining_words = current_chunk()->free_word_size();
2438     if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
2439       block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2440       inc_used_metrics(remaining_words);
2441     }
2442   }
2443 }
2444
2445 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
2446   // Get a chunk from the chunk freelist
2447   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
2448
2449   if (next == NULL) {
2450     next = vs_list()->get_new_chunk(chunk_word_size,
2451                                     medium_chunk_bunch());
2452   }
2453
2454   if (TraceMetadataHumongousAllocation && next != NULL &&
2455       SpaceManager::is_humongous(next->word_size())) {
2456     gclog_or_tty->print_cr("  new humongous chunk word size "
2457                            PTR_FORMAT, next->word_size());
2458   }
2459
2460   return next;
2461 }
2462
2463 MetaWord* SpaceManager::allocate(size_t word_size) {
2464   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2465
2466   size_t raw_word_size = get_raw_word_size(word_size);
2467   BlockFreelist* fl = block_freelists();
2468   MetaWord* p = NULL;
2469   // Allocation from the dictionary is expensive in the sense that
2470   // the dictionary has to be searched for a size. Don't allocate
2471   // from the dictionary until it starts to get fat. Is this
2472   // a reasonable policy? Maybe a skinny dictionary is fast enough
2473   // for allocations. Do some profiling. JJJ
2474   if (fl->total_size() > allocation_from_dictionary_limit) {
2475     p = fl->get_block(raw_word_size);
2476   }
2477   if (p == NULL) {
2478     p = allocate_work(raw_word_size);
2479   }
2480
2481   return p;
2482 }
2483
2484 // Returns the address of space allocated for "word_size".
2485 // This method does not know about blocks (Metablocks)
2486 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2487   assert_lock_strong(_lock);
2488 #ifdef ASSERT
2489   if (Metadebug::test_metadata_failure()) {
2490     return NULL;
2491   }
2492 #endif
2493   // Is there space in the current chunk?
2494   MetaWord* result = NULL;
2495
2496   // For DumpSharedSpaces, only allocate out of the current chunk which is
2497   // never null because we gave it the size we wanted. Caller reports out
2498   // of memory if this returns null.
2499   if (DumpSharedSpaces) {
2500     assert(current_chunk() != NULL, "should never happen");
2501     inc_used_metrics(word_size);
2502     return current_chunk()->allocate(word_size); // caller handles null result
2503   }
2504
2505   if (current_chunk() != NULL) {
2506     result = current_chunk()->allocate(word_size);
2507   }
2508
2509   if (result == NULL) {
2510     result = grow_and_allocate(word_size);
2511   }
2512
2513   if (result != NULL) {
2514     inc_used_metrics(word_size);
2515     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2516            "Head of the list is being allocated");
2517   }
2518
2519   return result;
2520 }
2521
2522 void SpaceManager::verify() {
2523   // If there are blocks in the dictionary, then
2524   // verification of chunks does not work since
2525   // being in the dictionary alters a chunk.
2526   if (block_freelists()->total_size() == 0) {
2527     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2528       Metachunk* curr = chunks_in_use(i);
2529       while (curr != NULL) {
2530         curr->verify();
2531         verify_chunk_size(curr);
2532         curr = curr->next();
2533       }
2534     }
2535   }
2536 }
2537
2538 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2539   assert(is_humongous(chunk->word_size()) ||
2540          chunk->word_size() == medium_chunk_size() ||
2541          chunk->word_size() == small_chunk_size() ||
2542          chunk->word_size() == specialized_chunk_size(),
2543          "Chunk size is wrong");
2544   return;
2545 }
2546
2547 #ifdef ASSERT
2548 void SpaceManager::verify_allocated_blocks_words() {
2549   // Verification is only guaranteed at a safepoint.
2550   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2551          "Verification can fail if the application is running");
2552   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2553          err_msg("allocation total is not consistent " SIZE_FORMAT
2554                  " vs " SIZE_FORMAT,
2555                  allocated_blocks_words(), sum_used_in_chunks_in_use()));
2556 }
2557
2558 #endif
2559
2560 void SpaceManager::dump(outputStream* const out) const {
2561   size_t curr_total = 0;
2562   size_t waste = 0;
2563   uint i = 0;
2564   size_t used = 0;
2565   size_t capacity = 0;
2566
2567   // Add up statistics for all chunks in this SpaceManager.
2568   for (ChunkIndex index = ZeroIndex;
2569        index < NumberOfInUseLists;
2570        index = next_chunk_index(index)) {
2571     for (Metachunk* curr = chunks_in_use(index);
2572          curr != NULL;
2573          curr = curr->next()) {
2574       out->print("%d) ", i++);
2575       curr->print_on(out);
2576       curr_total += curr->word_size();
2577       used += curr->used_word_size();
2578       capacity += curr->word_size();
2579       waste += curr->free_word_size() + curr->overhead();
2580     }
2581   }
2582
2583   if (TraceMetadataChunkAllocation && Verbose) {
2584     block_freelists()->print_on(out);
2585   }
2586
2587   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2588   // Free space isn't wasted.
2589 waste -= free; 2590 2591 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT 2592 " free " SIZE_FORMAT " capacity " SIZE_FORMAT 2593 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste); 2594 } 2595 2596 #ifndef PRODUCT 2597 void SpaceManager::mangle_freed_chunks() { 2598 for (ChunkIndex index = ZeroIndex; 2599 index < NumberOfInUseLists; 2600 index = next_chunk_index(index)) { 2601 for (Metachunk* curr = chunks_in_use(index); 2602 curr != NULL; 2603 curr = curr->next()) { 2604 curr->mangle(); 2605 } 2606 } 2607 } 2608 #endif // PRODUCT 2609 2610 // MetaspaceAux 2611 2612 2613 size_t MetaspaceAux::_capacity_words[] = {0, 0}; 2614 size_t MetaspaceAux::_used_words[] = {0, 0}; 2615 2616 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) { 2617 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2618 return list == NULL ? 0 : list->free_bytes(); 2619 } 2620 2621 size_t MetaspaceAux::free_bytes() { 2622 return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType); 2623 } 2624 2625 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) { 2626 assert_lock_strong(SpaceManager::expand_lock()); 2627 assert(words <= capacity_words(mdtype), 2628 err_msg("About to decrement below 0: words " SIZE_FORMAT 2629 " is greater than _capacity_words[%u] " SIZE_FORMAT, 2630 words, mdtype, capacity_words(mdtype))); 2631 _capacity_words[mdtype] -= words; 2632 } 2633 2634 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) { 2635 assert_lock_strong(SpaceManager::expand_lock()); 2636 // Needs to be atomic 2637 _capacity_words[mdtype] += words; 2638 } 2639 2640 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) { 2641 assert(words <= used_words(mdtype), 2642 err_msg("About to decrement below 0: words " SIZE_FORMAT 2643 " is greater than _used_words[%u] " SIZE_FORMAT, 2644 words, mdtype, used_words(mdtype))); 2645 // For CMS deallocation of the Metaspaces occurs during the 2646 // sweep which is a concurrent phase. Protection by the expand_lock() 2647 // is not enough since allocation is on a per Metaspace basis 2648 // and protected by the Metaspace lock. 2649 jlong minus_words = (jlong) - (jlong) words; 2650 Atomic::add_ptr(minus_words, &_used_words[mdtype]); 2651 } 2652 2653 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) { 2654 // _used_words tracks allocations for 2655 // each piece of metadata. Those allocations are 2656 // generally done concurrently by different application 2657 // threads so must be done atomically. 
2658 Atomic::add_ptr(words, &_used_words[mdtype]); 2659 } 2660 2661 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) { 2662 size_t used = 0; 2663 ClassLoaderDataGraphMetaspaceIterator iter; 2664 while (iter.repeat()) { 2665 Metaspace* msp = iter.get_next(); 2666 // Sum allocated_blocks_words for each metaspace 2667 if (msp != NULL) { 2668 used += msp->used_words_slow(mdtype); 2669 } 2670 } 2671 return used * BytesPerWord; 2672 } 2673 2674 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) { 2675 size_t free = 0; 2676 ClassLoaderDataGraphMetaspaceIterator iter; 2677 while (iter.repeat()) { 2678 Metaspace* msp = iter.get_next(); 2679 if (msp != NULL) { 2680 free += msp->free_words_slow(mdtype); 2681 } 2682 } 2683 return free * BytesPerWord; 2684 } 2685 2686 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) { 2687 if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) { 2688 return 0; 2689 } 2690 // Don't count the space in the freelists. That space will be 2691 // added to the capacity calculation as needed. 2692 size_t capacity = 0; 2693 ClassLoaderDataGraphMetaspaceIterator iter; 2694 while (iter.repeat()) { 2695 Metaspace* msp = iter.get_next(); 2696 if (msp != NULL) { 2697 capacity += msp->capacity_words_slow(mdtype); 2698 } 2699 } 2700 return capacity * BytesPerWord; 2701 } 2702 2703 size_t MetaspaceAux::capacity_bytes_slow() { 2704 #ifdef PRODUCT 2705 // Use capacity_bytes() in PRODUCT instead of this function. 2706 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT"); 2707 #endif 2708 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType); 2709 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType); 2710 assert(capacity_bytes() == class_capacity + non_class_capacity, 2711 err_msg("bad accounting: capacity_bytes() " SIZE_FORMAT 2712 " class_capacity + non_class_capacity " SIZE_FORMAT 2713 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT, 2714 capacity_bytes(), class_capacity + non_class_capacity, 2715 class_capacity, non_class_capacity)); 2716 2717 return class_capacity + non_class_capacity; 2718 } 2719 2720 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) { 2721 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2722 return list == NULL ? 0 : list->reserved_bytes(); 2723 } 2724 2725 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) { 2726 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2727 return list == NULL ? 
0 : list->committed_bytes(); 2728 } 2729 2730 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); } 2731 2732 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) { 2733 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype); 2734 if (chunk_manager == NULL) { 2735 return 0; 2736 } 2737 chunk_manager->slow_verify(); 2738 return chunk_manager->free_chunks_total_words(); 2739 } 2740 2741 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) { 2742 return free_chunks_total_words(mdtype) * BytesPerWord; 2743 } 2744 2745 size_t MetaspaceAux::free_chunks_total_words() { 2746 return free_chunks_total_words(Metaspace::ClassType) + 2747 free_chunks_total_words(Metaspace::NonClassType); 2748 } 2749 2750 size_t MetaspaceAux::free_chunks_total_bytes() { 2751 return free_chunks_total_words() * BytesPerWord; 2752 } 2753 2754 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) { 2755 return Metaspace::get_chunk_manager(mdtype) != NULL; 2756 } 2757 2758 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) { 2759 if (!has_chunk_free_list(mdtype)) { 2760 return MetaspaceChunkFreeListSummary(); 2761 } 2762 2763 const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype); 2764 return cm->chunk_free_list_summary(); 2765 } 2766 2767 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) { 2768 gclog_or_tty->print(", [Metaspace:"); 2769 if (PrintGCDetails && Verbose) { 2770 gclog_or_tty->print(" " SIZE_FORMAT 2771 "->" SIZE_FORMAT 2772 "(" SIZE_FORMAT ")", 2773 prev_metadata_used, 2774 used_bytes(), 2775 reserved_bytes()); 2776 } else { 2777 gclog_or_tty->print(" " SIZE_FORMAT "K" 2778 "->" SIZE_FORMAT "K" 2779 "(" SIZE_FORMAT "K)", 2780 prev_metadata_used/K, 2781 used_bytes()/K, 2782 reserved_bytes()/K); 2783 } 2784 2785 gclog_or_tty->print("]"); 2786 } 2787 2788 // This is printed when PrintGCDetails 2789 void MetaspaceAux::print_on(outputStream* out) { 2790 Metaspace::MetadataType nct = Metaspace::NonClassType; 2791 2792 out->print_cr(" Metaspace " 2793 "used " SIZE_FORMAT "K, " 2794 "capacity " SIZE_FORMAT "K, " 2795 "committed " SIZE_FORMAT "K, " 2796 "reserved " SIZE_FORMAT "K", 2797 used_bytes()/K, 2798 capacity_bytes()/K, 2799 committed_bytes()/K, 2800 reserved_bytes()/K); 2801 2802 if (Metaspace::using_class_space()) { 2803 Metaspace::MetadataType ct = Metaspace::ClassType; 2804 out->print_cr(" class space " 2805 "used " SIZE_FORMAT "K, " 2806 "capacity " SIZE_FORMAT "K, " 2807 "committed " SIZE_FORMAT "K, " 2808 "reserved " SIZE_FORMAT "K", 2809 used_bytes(ct)/K, 2810 capacity_bytes(ct)/K, 2811 committed_bytes(ct)/K, 2812 reserved_bytes(ct)/K); 2813 } 2814 } 2815 2816 // Print information for class space and data space separately. 2817 // This is almost the same as above. 
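// The chunk accounting identity printed and asserted below is:
//   used-in-chunks + unused-in-chunks + capacity-in-free-chunks
//     == capacity-in-allocated-chunks
// which only holds when the values are gathered at a safepoint.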
2818 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) { 2819 size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype); 2820 size_t capacity_bytes = capacity_bytes_slow(mdtype); 2821 size_t used_bytes = used_bytes_slow(mdtype); 2822 size_t free_bytes = free_bytes_slow(mdtype); 2823 size_t used_and_free = used_bytes + free_bytes + 2824 free_chunks_capacity_bytes; 2825 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT 2826 "K + unused in chunks " SIZE_FORMAT "K + " 2827 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT 2828 "K capacity in allocated chunks " SIZE_FORMAT "K", 2829 used_bytes / K, 2830 free_bytes / K, 2831 free_chunks_capacity_bytes / K, 2832 used_and_free / K, 2833 capacity_bytes / K); 2834 // Accounting can only be correct if we got the values during a safepoint 2835 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong"); 2836 } 2837 2838 // Print total fragmentation for class metaspaces 2839 void MetaspaceAux::print_class_waste(outputStream* out) { 2840 assert(Metaspace::using_class_space(), "class metaspace not used"); 2841 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0; 2842 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0; 2843 ClassLoaderDataGraphMetaspaceIterator iter; 2844 while (iter.repeat()) { 2845 Metaspace* msp = iter.get_next(); 2846 if (msp != NULL) { 2847 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2848 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2849 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2850 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex); 2851 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2852 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex); 2853 cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2854 } 2855 } 2856 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2857 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2858 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2859 "large count " SIZE_FORMAT, 2860 cls_specialized_count, cls_specialized_waste, 2861 cls_small_count, cls_small_waste, 2862 cls_medium_count, cls_medium_waste, cls_humongous_count); 2863 } 2864 2865 // Print total fragmentation for data and class metaspaces separately 2866 void MetaspaceAux::print_waste(outputStream* out) { 2867 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0; 2868 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0; 2869 2870 ClassLoaderDataGraphMetaspaceIterator iter; 2871 while (iter.repeat()) { 2872 Metaspace* msp = iter.get_next(); 2873 if (msp != NULL) { 2874 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2875 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2876 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2877 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex); 2878 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2879 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex); 2880 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2881 } 2882 } 2883 out->print_cr("Total fragmentation waste (words) doesn't count 
free space"); 2884 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2885 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2886 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2887 "large count " SIZE_FORMAT, 2888 specialized_count, specialized_waste, small_count, 2889 small_waste, medium_count, medium_waste, humongous_count); 2890 if (Metaspace::using_class_space()) { 2891 print_class_waste(out); 2892 } 2893 } 2894 2895 // Dump global metaspace things from the end of ClassLoaderDataGraph 2896 void MetaspaceAux::dump(outputStream* out) { 2897 out->print_cr("All Metaspace:"); 2898 out->print("data space: "); print_on(out, Metaspace::NonClassType); 2899 out->print("class space: "); print_on(out, Metaspace::ClassType); 2900 print_waste(out); 2901 } 2902 2903 void MetaspaceAux::verify_free_chunks() { 2904 Metaspace::chunk_manager_metadata()->verify(); 2905 if (Metaspace::using_class_space()) { 2906 Metaspace::chunk_manager_class()->verify(); 2907 } 2908 } 2909 2910 void MetaspaceAux::verify_capacity() { 2911 #ifdef ASSERT 2912 size_t running_sum_capacity_bytes = capacity_bytes(); 2913 // For purposes of the running sum of capacity, verify against capacity 2914 size_t capacity_in_use_bytes = capacity_bytes_slow(); 2915 assert(running_sum_capacity_bytes == capacity_in_use_bytes, 2916 err_msg("capacity_words() * BytesPerWord " SIZE_FORMAT 2917 " capacity_bytes_slow()" SIZE_FORMAT, 2918 running_sum_capacity_bytes, capacity_in_use_bytes)); 2919 for (Metaspace::MetadataType i = Metaspace::ClassType; 2920 i < Metaspace:: MetadataTypeCount; 2921 i = (Metaspace::MetadataType)(i + 1)) { 2922 size_t capacity_in_use_bytes = capacity_bytes_slow(i); 2923 assert(capacity_bytes(i) == capacity_in_use_bytes, 2924 err_msg("capacity_bytes(%u) " SIZE_FORMAT 2925 " capacity_bytes_slow(%u)" SIZE_FORMAT, 2926 i, capacity_bytes(i), i, capacity_in_use_bytes)); 2927 } 2928 #endif 2929 } 2930 2931 void MetaspaceAux::verify_used() { 2932 #ifdef ASSERT 2933 size_t running_sum_used_bytes = used_bytes(); 2934 // For purposes of the running sum of used, verify against used 2935 size_t used_in_use_bytes = used_bytes_slow(); 2936 assert(used_bytes() == used_in_use_bytes, 2937 err_msg("used_bytes() " SIZE_FORMAT 2938 " used_bytes_slow()" SIZE_FORMAT, 2939 used_bytes(), used_in_use_bytes)); 2940 for (Metaspace::MetadataType i = Metaspace::ClassType; 2941 i < Metaspace:: MetadataTypeCount; 2942 i = (Metaspace::MetadataType)(i + 1)) { 2943 size_t used_in_use_bytes = used_bytes_slow(i); 2944 assert(used_bytes(i) == used_in_use_bytes, 2945 err_msg("used_bytes(%u) " SIZE_FORMAT 2946 " used_bytes_slow(%u)" SIZE_FORMAT, 2947 i, used_bytes(i), i, used_in_use_bytes)); 2948 } 2949 #endif 2950 } 2951 2952 void MetaspaceAux::verify_metrics() { 2953 verify_capacity(); 2954 verify_used(); 2955 } 2956 2957 2958 // Metaspace methods 2959 2960 size_t Metaspace::_first_chunk_word_size = 0; 2961 size_t Metaspace::_first_class_chunk_word_size = 0; 2962 2963 size_t Metaspace::_commit_alignment = 0; 2964 size_t Metaspace::_reserve_alignment = 0; 2965 2966 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) { 2967 initialize(lock, type); 2968 } 2969 2970 Metaspace::~Metaspace() { 2971 delete _vsm; 2972 if (using_class_space()) { 2973 delete _class_vsm; 2974 } 2975 } 2976 2977 VirtualSpaceList* Metaspace::_space_list = NULL; 2978 VirtualSpaceList* Metaspace::_class_space_list = NULL; 2979 2980 ChunkManager* Metaspace::_chunk_manager_metadata = NULL; 2981 ChunkManager* Metaspace::_chunk_manager_class = NULL; 2982 2983 #define 
VIRTUALSPACEMULTIPLIER 2 2984 2985 #ifdef _LP64 2986 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1); 2987 2988 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) { 2989 // Figure out the narrow_klass_base and the narrow_klass_shift. The 2990 // narrow_klass_base is the lower of the metaspace base and the cds base 2991 // (if cds is enabled). The narrow_klass_shift depends on the distance 2992 // between the lower base and higher address. 2993 address lower_base; 2994 address higher_address; 2995 #if INCLUDE_CDS 2996 if (UseSharedSpaces) { 2997 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 2998 (address)(metaspace_base + compressed_class_space_size())); 2999 lower_base = MIN2(metaspace_base, cds_base); 3000 } else 3001 #endif 3002 { 3003 higher_address = metaspace_base + compressed_class_space_size(); 3004 lower_base = metaspace_base; 3005 3006 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes; 3007 // If compressed class space fits in lower 32G, we don't need a base. 3008 if (higher_address <= (address)klass_encoding_max) { 3009 lower_base = 0; // effectively lower base is zero. 3010 } 3011 } 3012 3013 Universe::set_narrow_klass_base(lower_base); 3014 3015 if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) { 3016 Universe::set_narrow_klass_shift(0); 3017 } else { 3018 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces"); 3019 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes); 3020 } 3021 } 3022 3023 #if INCLUDE_CDS 3024 // Return TRUE if the specified metaspace_base and cds_base are close enough 3025 // to work with compressed klass pointers. 3026 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) { 3027 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS"); 3028 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 3029 address lower_base = MIN2((address)metaspace_base, cds_base); 3030 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 3031 (address)(metaspace_base + compressed_class_space_size())); 3032 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax); 3033 } 3034 #endif 3035 3036 // Try to allocate the metaspace at the requested addr. 3037 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) { 3038 assert(using_class_space(), "called improperly"); 3039 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 3040 assert(compressed_class_space_size() < KlassEncodingMetaspaceMax, 3041 "Metaspace size is too big"); 3042 assert_is_ptr_aligned(requested_addr, _reserve_alignment); 3043 assert_is_ptr_aligned(cds_base, _reserve_alignment); 3044 assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment); 3045 3046 // Don't use large pages for the class space. 3047 bool large_pages = false; 3048 3049 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(), 3050 _reserve_alignment, 3051 large_pages, 3052 requested_addr, 0); 3053 if (!metaspace_rs.is_reserved()) { 3054 #if INCLUDE_CDS 3055 if (UseSharedSpaces) { 3056 size_t increment = align_size_up(1*G, _reserve_alignment); 3057 3058 // Keep trying to allocate the metaspace, increasing the requested_addr 3059 // by 1GB each time, until we reach an address that will no longer allow 3060 // use of CDS with compressed klass pointers. 
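      // Illustrative (hypothetical addresses): a failed reservation at
      // 0x0000000800000000 would be retried at 0x0000000840000000,
      // 0x0000000880000000, and so on, in 1G steps, while the candidate
      // address still allows CDS with compressed klass pointers.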
3061 char *addr = requested_addr; 3062 while (!metaspace_rs.is_reserved() && (addr + increment > addr) && 3063 can_use_cds_with_metaspace_addr(addr + increment, cds_base)) { 3064 addr = addr + increment; 3065 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3066 _reserve_alignment, large_pages, addr, 0); 3067 } 3068 } 3069 #endif 3070 // If no successful allocation then try to allocate the space anywhere. If 3071 // that fails then OOM doom. At this point we cannot try allocating the 3072 // metaspace as if UseCompressedClassPointers is off because too much 3073 // initialization has happened that depends on UseCompressedClassPointers. 3074 // So, UseCompressedClassPointers cannot be turned off at this point. 3075 if (!metaspace_rs.is_reserved()) { 3076 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3077 _reserve_alignment, large_pages); 3078 if (!metaspace_rs.is_reserved()) { 3079 vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes", 3080 compressed_class_space_size())); 3081 } 3082 } 3083 } 3084 3085 // If we got here then the metaspace got allocated. 3086 MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass); 3087 3088 #if INCLUDE_CDS 3089 // Verify that we can use shared spaces. Otherwise, turn off CDS. 3090 if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) { 3091 FileMapInfo::stop_sharing_and_unmap( 3092 "Could not allocate metaspace at a compatible address"); 3093 } 3094 #endif 3095 set_narrow_klass_base_and_shift((address)metaspace_rs.base(), 3096 UseSharedSpaces ? (address)cds_base : 0); 3097 3098 initialize_class_space(metaspace_rs); 3099 3100 if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) { 3101 print_compressed_class_space(gclog_or_tty, requested_addr); 3102 } 3103 } 3104 3105 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) { 3106 st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d", 3107 p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift()); 3108 if (_class_space_list != NULL) { 3109 address base = (address)_class_space_list->current_virtual_space()->bottom(); 3110 st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT, 3111 compressed_class_space_size(), p2i(base)); 3112 if (requested_addr != 0) { 3113 st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr)); 3114 } 3115 st->cr(); 3116 } 3117 } 3118 3119 // For UseCompressedClassPointers the class space is reserved above the top of 3120 // the Java heap. The argument passed in is at the base of the compressed space. 3121 void Metaspace::initialize_class_space(ReservedSpace rs) { 3122 // The reserved space size may be bigger because of alignment, esp with UseLargePages 3123 assert(rs.size() >= CompressedClassSpaceSize, 3124 err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize)); 3125 assert(using_class_space(), "Must be using class space"); 3126 _class_space_list = new VirtualSpaceList(rs); 3127 _chunk_manager_class = new ChunkManager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk); 3128 3129 if (!_class_space_list->initialization_succeeded()) { 3130 vm_exit_during_initialization("Failed to setup compressed class space virtual space list."); 3131 } 3132 } 3133 3134 #endif 3135 3136 void Metaspace::ergo_initialize() { 3137 if (DumpSharedSpaces) { 3138 // Using large pages when dumping the shared archive is currently not implemented. 
3139     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3140   }
3141
3142   size_t page_size = os::vm_page_size();
3143   if (UseLargePages && UseLargePagesInMetaspace) {
3144     page_size = os::large_page_size();
3145   }
3146
3147   _commit_alignment  = page_size;
3148   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
3149
3150   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since doing so would
3151   // discard the record of whether MaxMetaspaceSize was set on the command line.
3152   // This information is needed later to conform to the specification of the
3153   // java.lang.management.MemoryUsage API.
3154   //
3155   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3156   // globals.hpp to the aligned value, but this is not possible, since the
3157   // alignment depends on other flags being parsed.
3158   MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3159
3160   if (MetaspaceSize > MaxMetaspaceSize) {
3161     MetaspaceSize = MaxMetaspaceSize;
3162   }
3163
3164   MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
3165
3166   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3167
3168   if (MetaspaceSize < 256*K) {
3169     vm_exit_during_initialization("Too small initial Metaspace size");
3170   }
3171
3172   MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3173   MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3174
3175   CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3176   set_compressed_class_space_size(CompressedClassSpaceSize);
3177
3178   // Initial virtual space size will be calculated at global_initialize()
3179   uintx min_metaspace_sz =
3180       VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
3181   if (UseCompressedClassPointers) {
3182     if ((min_metaspace_sz + CompressedClassSpaceSize) > MaxMetaspaceSize) {
3183       if (min_metaspace_sz >= MaxMetaspaceSize) {
3184         vm_exit_during_initialization("MaxMetaspaceSize is too small.");
3185       } else {
3186         FLAG_SET_ERGO(uintx, CompressedClassSpaceSize,
3187                       MaxMetaspaceSize - min_metaspace_sz);
3188       }
3189     }
3190   } else if (min_metaspace_sz >= MaxMetaspaceSize) {
3191     FLAG_SET_ERGO(uintx, InitialBootClassLoaderMetaspaceSize,
3192                   min_metaspace_sz);
3193   }
3194
3195 }
3196
3197 void Metaspace::global_initialize() {
3198   MetaspaceGC::initialize();
3199
3200   // Initialize the alignment for shared spaces.
  int max_alignment = os::vm_allocation_granularity();
  size_t cds_total = 0;

  MetaspaceShared::set_max_alignment(max_alignment);

  if (DumpSharedSpaces) {
#if INCLUDE_CDS
    MetaspaceShared::estimate_regions_size();

    SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
    SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
    SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);

    // The min_misc_code_size estimate is based on MetaspaceShared::generate_vtable_methods().
    uintx min_misc_code_size = align_size_up(
      (MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size) *
        (sizeof(void*) + MetaspaceShared::vtbl_method_size) + MetaspaceShared::vtbl_common_code_size,
      max_alignment);

    if (SharedMiscCodeSize < min_misc_code_size) {
      report_out_of_shared_space(SharedMiscCode);
    }

    // Initialize with the sum of the shared space sizes.  The read-only
    // and read-write metaspace chunks will be allocated out of this, and the
    // remainder is the misc code and data chunks.
    cds_total = FileMapInfo::shared_spaces_size();
    cds_total = align_size_up(cds_total, _reserve_alignment);
    _space_list = new VirtualSpaceList(cds_total/wordSize);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

    if (!_space_list->initialization_succeeded()) {
      vm_exit_during_initialization("Unable to dump shared archive.", NULL);
    }

#ifdef _LP64
    if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
      vm_exit_during_initialization("Unable to dump shared archive.",
          err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
                  SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
                  "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(),
                  cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
    }

    // Set the compressed klass pointer base so that decoding of these pointers works
    // properly when creating the shared archive.
    assert(UseCompressedOops && UseCompressedClassPointers,
           "UseCompressedOops and UseCompressedClassPointers must be set");
    Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
    if (TraceMetavirtualspaceAllocation && Verbose) {
      gclog_or_tty->print_cr("Setting narrow_klass_base to Address: " PTR_FORMAT,
                             _space_list->current_virtual_space()->bottom());
    }

    Universe::set_narrow_klass_shift(0);
#endif // _LP64
#endif // INCLUDE_CDS
  } else {
#if INCLUDE_CDS
    // If using shared space, open the file that contains the shared space
    // and map in the memory before initializing the rest of metaspace (so
    // the addresses don't conflict).
    address cds_address = NULL;
    if (UseSharedSpaces) {
      FileMapInfo* mapinfo = new FileMapInfo();

      // Open the shared archive file, read and validate the header.  If
      // initialization fails, shared spaces [UseSharedSpaces] are
      // disabled and the file is closed.
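      // (Hypothetical failure example: an archive created by a different JDK
      // build fails header validation in initialize(), UseSharedSpaces is
      // cleared, and startup simply continues without CDS.)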
      // Also map in the spaces now.
      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
        cds_total = FileMapInfo::shared_spaces_size();
        cds_address = (address)mapinfo->region_base(0);
      } else {
        assert(!mapinfo->is_open() && !UseSharedSpaces,
               "archive file not closed or shared spaces not disabled.");
      }
    }
#endif // INCLUDE_CDS
#ifdef _LP64
    // If UseCompressedClassPointers is set then allocate the metaspace area
    // above the heap and above the CDS area (if it exists).
    if (using_class_space()) {
      if (UseSharedSpaces) {
#if INCLUDE_CDS
        char* cds_end = (char*)(cds_address + cds_total);
        cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
        allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
#endif
      } else {
        char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
        allocate_metaspace_compressed_klass_ptrs(base, 0);
      }
    }
#endif // _LP64

    // Initialize these before initializing the VirtualSpaceList.
    _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
    _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
    // Make the first class chunk bigger than a medium chunk so it's not put
    // on the medium chunk list.  The next chunk will be small and progress
    // from there.  This size was determined by running with -version.
    _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
                                        (CompressedClassSpaceSize/BytesPerWord)*2);
    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
    // Arbitrarily set the initial virtual space to a multiple
    // of the boot class loader size.
    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
    word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());

    // Initialize the list of virtual spaces.
    _space_list = new VirtualSpaceList(word_size);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

    if (!_space_list->initialization_succeeded()) {
      vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
    }
  }

  _tracer = new MetaspaceTracer();
}

void Metaspace::post_initialize() {
  MetaspaceGC::post_initialize();
}

void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) {
  Metachunk* chunk = get_initialization_chunk(type, mdtype);
  if (chunk != NULL) {
    // Add to this manager's list of chunks in use and make it the current_chunk().
    get_space_manager(mdtype)->add_chunk(chunk, true);
  }
}

Metachunk* Metaspace::get_initialization_chunk(MetaspaceType type, MetadataType mdtype) {
  size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);

  // Get a chunk from the chunk freelist.
  Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);

  if (chunk == NULL) {
    chunk = get_space_list(mdtype)->get_new_chunk(chunk_word_size,
                                                  get_space_manager(mdtype)->medium_chunk_bunch());
  }

  // For dumping shared archive, report error if allocation has failed.
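  // (Descriptive note: the value passed below is the currently committed
  // metaspace plus the size of the chunk that could not be allocated,
  // expressed in bytes.)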
  if (DumpSharedSpaces && chunk == NULL) {
    report_insufficient_metaspace(MetaspaceAux::committed_bytes() + chunk_word_size * BytesPerWord);
  }

  return chunk;
}

void Metaspace::verify_global_initialization() {
  assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
  assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");

  if (using_class_space()) {
    assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
    assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
  }
}

void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
  verify_global_initialization();

  // Allocate SpaceManager for metadata objects.
  _vsm = new SpaceManager(NonClassType, lock);

  if (using_class_space()) {
    // Allocate SpaceManager for classes.
    _class_vsm = new SpaceManager(ClassType, lock);
  }

  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);

  // Allocate chunk for metadata objects.
  initialize_first_chunk(type, NonClassType);

  // Allocate chunk for class metadata objects.
  if (using_class_space()) {
    initialize_first_chunk(type, ClassType);
  }

  _alloc_record_head = NULL;
  _alloc_record_tail = NULL;
}

size_t Metaspace::align_word_size_up(size_t word_size) {
  size_t byte_size = word_size * wordSize;
  return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
}

MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
  // DumpSharedSpaces doesn't use the class metadata area (yet).
  // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
  if (is_class_space_allocation(mdtype)) {
    return class_vsm()->allocate(word_size);
  } else {
    return vsm()->allocate(word_size);
  }
}

MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
  assert(delta_bytes > 0, "Must be");

  size_t before = 0;
  size_t after  = 0;
  MetaWord* res;
  bool incremented;

  // Each thread increments the HWM at most once.  Even if the thread fails to increment
  // the HWM, an allocation is still attempted.  This is because another thread must then
  // have incremented the HWM and therefore the allocation might still succeed.
  do {
    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
    res = allocate(word_size, mdtype);
  } while (!incremented && res == NULL);

  if (incremented) {
    tracer()->report_gc_threshold(before, after,
                                  MetaspaceGCThresholdUpdater::ExpandAndAllocate);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
                             " to " SIZE_FORMAT, before, after);
    }
  }

  return res;
}

// Space allocated in the Metaspace.  This may
// span several metadata virtual spaces.
char* Metaspace::bottom() const {
  assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
  return (char*)vsm()->current_chunk()->bottom();
}

size_t Metaspace::used_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
  }
}

size_t Metaspace::free_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_free_in_chunks_in_use();
  }
}

// Space capacity in the Metaspace.  It includes
// space in the list of chunks from which allocations
// have been made.  It does not include space in the global freelist or
// the space available in the dictionary, since that space
// is already counted in some chunk.
size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_capacity_in_chunks_in_use();
  }
}

size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
  return used_words_slow(mdtype) * BytesPerWord;
}

size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
  return capacity_words_slow(mdtype) * BytesPerWord;
}

size_t Metaspace::allocated_blocks_bytes() const {
  return vsm()->allocated_blocks_bytes() +
         (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
}

size_t Metaspace::allocated_chunks_bytes() const {
  return vsm()->allocated_chunks_bytes() +
         (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
}

void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  if (SafepointSynchronize::is_at_safepoint()) {
    if (DumpSharedSpaces && PrintSharedSpaces) {
      record_deallocation(ptr, vsm()->get_raw_word_size(word_size));
    }

    assert(Thread::current()->is_VM_thread(), "should be the VM thread");
    // Don't take the Heap_lock.
    MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
    if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
      // Dark matter.  Too small for dictionary.
#ifdef ASSERT
      Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
#endif
      return;
    }
    if (is_class && using_class_space()) {
      class_vsm()->deallocate(ptr, word_size);
    } else {
      vsm()->deallocate(ptr, word_size);
    }
  } else {
    MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);

    if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
      // Dark matter.  Too small for dictionary.
#ifdef ASSERT
      Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
#endif
      return;
    }
    if (is_class && using_class_space()) {
      class_vsm()->deallocate(ptr, word_size);
    } else {
      vsm()->deallocate(ptr, word_size);
    }
  }
}


MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              bool read_only, MetaspaceObj::Type type, TRAPS) {
  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return NULL;  // caller does a CHECK_NULL too
  }

  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
         "ClassLoaderData::the_null_class_loader_data() should have been used.");

  // Allocate in metaspaces without taking out a lock, because doing so can
  // deadlock with the SymbolTable_lock.
  // Dumping is single threaded for now.  We'll have
  // to revisit this for application class data sharing.
  if (DumpSharedSpaces) {
    assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
    Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
    MetaWord* result = space->allocate(word_size, NonClassType);
    if (result == NULL) {
      report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
    }
    if (PrintSharedSpaces) {
      space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
    }

    // Zero initialize.
    Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);

    return result;
  }

  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;

  // Try to allocate metadata.
  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result == NULL) {
    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);

    // Allocation failed.
    if (is_init_completed()) {
      // Only start a GC if the bootstrapping has completed.

      // Try to clean out some memory and retry.
      result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
          loader_data, word_size, mdtype);
    }
  }

  if (result == NULL) {
    report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
  }

  // Zero initialize.
  Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);

  return result;
}

size_t Metaspace::class_chunk_size(size_t word_size) {
  assert(using_class_space(), "Has to use class space");
  return class_vsm()->calc_chunk_size(word_size);
}

void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);

  // If result is still null, we are out of memory.
  if (Verbose && TraceMetadataChunkAllocation) {
    gclog_or_tty->print_cr("Metaspace allocation failed for size "
                           SIZE_FORMAT, word_size);
    if (loader_data->metaspace_or_null() != NULL) {
      loader_data->dump(gclog_or_tty);
    }
    MetaspaceAux::dump(gclog_or_tty);
  }

  bool out_of_compressed_class_space = false;
  if (is_class_space_allocation(mdtype)) {
    Metaspace* metaspace = loader_data->metaspace_non_null();
    out_of_compressed_class_space =
      MetaspaceAux::committed_bytes(Metaspace::ClassType) +
      (metaspace->class_chunk_size(word_size) * BytesPerWord) >
      CompressedClassSpaceSize;
  }

  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  const char* space_string = out_of_compressed_class_space ?
    "Compressed class space" : "Metaspace";

  report_java_out_of_memory(space_string);

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
        space_string);
  }

  if (!is_init_completed()) {
    vm_exit_during_initialization("OutOfMemoryError", space_string);
  }

  if (out_of_compressed_class_space) {
    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  } else {
    THROW_OOP(Universe::out_of_memory_error_metaspace());
  }
}

const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
  switch (mdtype) {
    case Metaspace::ClassType: return "Class";
    case Metaspace::NonClassType: return "Metadata";
    default:
      assert(false, err_msg("Got bad mdtype: %d", (int) mdtype));
      return NULL;
  }
}

void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
  assert(DumpSharedSpaces, "sanity");

  int byte_size = (int)word_size * HeapWordSize;
  AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);

  if (_alloc_record_head == NULL) {
    _alloc_record_head = _alloc_record_tail = rec;
  } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
    _alloc_record_tail->_next = rec;
    _alloc_record_tail = rec;
  } else {
    // slow linear search, but this doesn't happen that often, and only when dumping
    for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
      if (old->_ptr == ptr) {
        assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
        int remain_bytes = old->_byte_size - byte_size;
        assert(remain_bytes >= 0, "sanity");
        old->_type = type;

        if (remain_bytes == 0) {
          delete(rec);
        } else {
          address remain_ptr = address(ptr) + byte_size;
          rec->_ptr = remain_ptr;
          rec->_byte_size = remain_bytes;
          rec->_type = MetaspaceObj::DeallocatedType;
          rec->_next = old->_next;
          old->_byte_size = byte_size;
          old->_next = rec;
        }
        return;
      }
    }
    assert(0, "reallocating a freed pointer that was not recorded");
  }
}

void Metaspace::record_deallocation(void* ptr, size_t word_size) {
  assert(DumpSharedSpaces, "sanity");

  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
    if (rec->_ptr == ptr) {
      assert(rec->_byte_size == (int)word_size * HeapWordSize, "sanity");
      rec->_type = MetaspaceObj::DeallocatedType;
      return;
    }
  }

  assert(0, "deallocating a pointer that was not recorded");
}

void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
  assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");

  address last_addr = (address)bottom();

  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
    address ptr = rec->_ptr;
    if (last_addr < ptr) {
      closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
    }
    closure->doit(ptr, rec->_type, rec->_byte_size);
    last_addr = ptr + rec->_byte_size;
  }

  address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
  if (last_addr < top) {
    closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
  }
}

void Metaspace::purge(MetadataType mdtype) {
  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
}

void Metaspace::purge() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  purge(NonClassType);
  if (using_class_space()) {
    purge(ClassType);
  }
}

void Metaspace::print_on(outputStream* out) const {
  // Print both class virtual space counts and metaspace.
  if (Verbose) {
    vsm()->print_on(out);
    if (using_class_space()) {
      class_vsm()->print_on(out);
    }
  }
}

bool Metaspace::contains(const void* ptr) {
  if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
    return true;
  }

  if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
    return true;
  }

  return get_space_list(NonClassType)->contains(ptr);
}

void Metaspace::verify() {
  vsm()->verify();
  if (using_class_space()) {
    class_vsm()->verify();
  }
}

void Metaspace::dump(outputStream* const out) const {
  out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
  vsm()->dump(out);
  if (using_class_space()) {
    out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
    class_vsm()->dump(out);
  }
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

class TestMetaspaceAuxTest : AllStatic {
 public:
  static void test_reserved() {
    size_t reserved = MetaspaceAux::reserved_bytes();

    assert(reserved > 0, "assert");

    size_t committed = MetaspaceAux::committed_bytes();
    assert(committed <= reserved, "assert");

    size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
    assert(reserved_metadata > 0, "assert");
    assert(reserved_metadata <= reserved, "assert");

    if (UseCompressedClassPointers) {
      size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
      assert(reserved_class > 0, "assert");
      assert(reserved_class < reserved, "assert");
    }
  }

  static void test_committed() {
    size_t committed = MetaspaceAux::committed_bytes();

    assert(committed > 0, "assert");

    size_t reserved = MetaspaceAux::reserved_bytes();
    assert(committed <= reserved, "assert");

    size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
    assert(committed_metadata > 0, "assert");
    assert(committed_metadata <= committed, "assert");

    if (UseCompressedClassPointers) {
      size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType);
      assert(committed_class > 0, "assert");
      assert(committed_class < committed, "assert");
    }
  }

  static void test_virtual_space_list_large_chunk() {
    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    // Use a size larger than VirtualSpaceSize (256k) and add one page so that
    // it is _not_ vm_allocation_granularity aligned on Windows.
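    // (Illustrative arithmetic, assuming 4K pages, 8-byte words, and the
    // typical 64K Windows allocation granularity: the two page-sized
    // increments below each add 512 words to the 2*256*K = 524288 word base,
    // giving 525312 words = 4MB + 8KB, which is not 64K aligned.)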
    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
    large_size += (os::vm_page_size()/BytesPerWord);
    vs_list->get_new_chunk(large_size, 0);
  }

  static void test() {
    test_reserved();
    test_committed();
    test_virtual_space_list_large_chunk();
  }
};

void TestMetaspaceAux_test() {
  TestMetaspaceAuxTest::test();
}

class TestVirtualSpaceNodeTest {
  static void chunk_up(size_t words_left, size_t& num_medium_chunks,
                       size_t& num_small_chunks,
                       size_t& num_specialized_chunks) {
    num_medium_chunks = words_left / MediumChunk;
    words_left = words_left % MediumChunk;

    num_small_chunks = words_left / SmallChunk;
    words_left = words_left % SmallChunk;
    // how many specialized chunks can we get?
    num_specialized_chunks = words_left / SpecializedChunk;
    assert(words_left % SpecializedChunk == 0, "should be nothing left");
  }

 public:
  static void test() {
    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    const size_t vsn_test_size_words = MediumChunk * 4;
    const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;

    // The chunk sizes must be multiples of each other, or this will fail.
    STATIC_ASSERT(MediumChunk % SmallChunk == 0);
    STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);

    { // No committed memory in VSN
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.retire(&cm);
      assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
    }

    { // All of VSN is committed, half is used by chunks
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
      vsn.get_chunk_vs(MediumChunk);
      vsn.get_chunk_vs(MediumChunk);
      vsn.retire(&cm);
      assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
      assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
    }

    { // 4 pages of VSN is committed, some is used by chunks
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
      assert(page_chunks < MediumChunk, "Test expects medium chunks to be at least 4*page_size");
      vsn.initialize();
      vsn.expand_by(page_chunks, page_chunks);
      vsn.get_chunk_vs(SmallChunk);
      vsn.get_chunk_vs(SpecializedChunk);
      vsn.retire(&cm);

      // committed - used = words left to retire
      const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;

      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);

      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
    }

    { // Half of VSN is committed, a humongous chunk is used
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
      vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
      vsn.retire(&cm);

      const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);

      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
    }

  }

#define assert_is_available_positive(word_size) \
  assert(vsn.is_available(word_size), \
         err_msg(#word_size ": " PTR_FORMAT " bytes were not available in " \
                 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
                 (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));

#define assert_is_available_negative(word_size) \
  assert(!vsn.is_available(word_size), \
         err_msg(#word_size ": " PTR_FORMAT " bytes should not be available in " \
                 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
                 (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));

  static void test_is_available_positive() {
    // Reserve some memory.
    VirtualSpaceNode vsn(os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Check that is_available accepts the committed size.
    assert_is_available_positive(commit_word_size);

    // Check that is_available accepts half the committed size.
    size_t expand_word_size = commit_word_size / 2;
    assert_is_available_positive(expand_word_size);
  }

  static void test_is_available_negative() {
    // Reserve some memory.
    VirtualSpaceNode vsn(os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Check that is_available doesn't accept a too large size.
    size_t two_times_commit_word_size = commit_word_size * 2;
    assert_is_available_negative(two_times_commit_word_size);
  }

  static void test_is_available_overflow() {
    // Reserve some memory.
    VirtualSpaceNode vsn(os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Calculate a size that will overflow the virtual space size.
    void* virtual_space_max = (void*)(uintptr_t)-1;
    size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
    size_t overflow_size = bottom_to_max + BytesPerWord;
    size_t overflow_word_size = overflow_size / BytesPerWord;

    // Check that is_available can handle the overflow.
    assert_is_available_negative(overflow_word_size);
  }

  static void test_is_available() {
    TestVirtualSpaceNodeTest::test_is_available_positive();
    TestVirtualSpaceNodeTest::test_is_available_negative();
    TestVirtualSpaceNodeTest::test_is_available_overflow();
  }
};

void TestVirtualSpaceNode_test() {
  TestVirtualSpaceNodeTest::test();
  TestVirtualSpaceNodeTest::test_is_available();
}

// The following test is placed here instead of a gtest / unittest file
// because the ChunkManager class is only available in this file.
class SpaceManagerTest : AllStatic {
  friend void SpaceManager_test_adjust_initial_chunk_size();

  static void test_adjust_initial_chunk_size(bool is_class) {
    const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
    const size_t normal   = SpaceManager::small_chunk_size(is_class);
    const size_t medium   = SpaceManager::medium_chunk_size(is_class);

#define test_adjust_initial_chunk_size(value, expected, is_class_value) \
    do {                                                                \
      size_t v = value;                                                 \
      size_t e = expected;                                              \
      assert(SpaceManager::adjust_initial_chunk_size(v, (is_class_value)) == e, \
             err_msg("Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, v)); \
    } while (0)

    // Smallest (specialized)
    test_adjust_initial_chunk_size(1,            smallest, is_class);
    test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
    test_adjust_initial_chunk_size(smallest,     smallest, is_class);

    // Small
    test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
    test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
    test_adjust_initial_chunk_size(normal,       normal, is_class);

    // Medium
    test_adjust_initial_chunk_size(normal + 1, medium, is_class);
    test_adjust_initial_chunk_size(medium - 1, medium, is_class);
    test_adjust_initial_chunk_size(medium,     medium, is_class);

    // Humongous
    test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);

#undef test_adjust_initial_chunk_size
  }

  static void test_adjust_initial_chunk_size() {
    test_adjust_initial_chunk_size(false);
    test_adjust_initial_chunk_size(true);
  }
};

void SpaceManager_test_adjust_initial_chunk_size() {
  SpaceManagerTest::test_adjust_initial_chunk_size();
}

// The following test is placed here instead of a gtest / unittest file
// because the ChunkManager class is only available in this file.
void ChunkManager_test_list_index() {
  ChunkManager manager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);

  // Test previous bug where a query for a humongous class metachunk
  // incorrectly matched the non-class medium metachunk size.
  {
    assert(MediumChunk > ClassMediumChunk, "Precondition for test");

    ChunkIndex index = manager.list_index(MediumChunk);

    assert(index == HumongousIndex,
           err_msg("Requested size is larger than ClassMediumChunk,"
                   " so should return HumongousIndex. Got index: %d", (int)index));
  }

  // Check the specified sizes as well.
  {
    ChunkIndex index = manager.list_index(ClassSpecializedChunk);
    assert(index == SpecializedIndex, err_msg("Wrong index returned. Got index: %d", (int)index));
  }
  {
    ChunkIndex index = manager.list_index(ClassSmallChunk);
    assert(index == SmallIndex, err_msg("Wrong index returned. Got index: %d", (int)index));
  }
  {
    ChunkIndex index = manager.list_index(ClassMediumChunk);
    assert(index == MediumIndex, err_msg("Wrong index returned. Got index: %d", (int)index));
  }
  {
    ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
    assert(index == HumongousIndex, err_msg("Wrong index returned. Got index: %d", (int)index));
  }
}

#endif
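// (Note, stated as an assumption rather than verified here: in non-product
// builds these tests are typically driven through the
// -XX:+ExecuteInternalVMTests develop flag, whose dispatcher invokes entry
// points such as TestMetaspaceAux_test() and TestVirtualSpaceNode_test();
// the test classes themselves live in this file because ChunkManager and
// SpaceManager are local to this translation unit.)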