1 /* 2 * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 #include "precompiled.hpp" 25 #include "gc_interface/collectedHeap.hpp" 26 #include "memory/allocation.hpp" 27 #include "memory/binaryTreeDictionary.hpp" 28 #include "memory/freeList.hpp" 29 #include "memory/collectorPolicy.hpp" 30 #include "memory/filemap.hpp" 31 #include "memory/freeList.hpp" 32 #include "memory/gcLocker.hpp" 33 #include "memory/metachunk.hpp" 34 #include "memory/metaspace.hpp" 35 #include "memory/metaspaceGCThresholdUpdater.hpp" 36 #include "memory/metaspaceShared.hpp" 37 #include "memory/metaspaceTracer.hpp" 38 #include "memory/resourceArea.hpp" 39 #include "memory/universe.hpp" 40 #include "runtime/atomic.inline.hpp" 41 #include "runtime/globals.hpp" 42 #include "runtime/init.hpp" 43 #include "runtime/java.hpp" 44 #include "runtime/mutex.hpp" 45 #include "runtime/orderAccess.inline.hpp" 46 #include "services/memTracker.hpp" 47 #include "services/memoryService.hpp" 48 #include "utilities/copy.hpp" 49 #include "utilities/debug.hpp" 50 #include "utilities/macros.hpp" 51 52 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary; 53 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary; 54 55 // Set this constant to enable slow integrity checking of the free chunk lists 56 const bool metaspace_slow_verify = false; 57 58 size_t const allocation_from_dictionary_limit = 4 * K; 59 60 MetaWord* last_allocated = 0; 61 62 size_t Metaspace::_compressed_class_space_size; 63 const MetaspaceTracer* Metaspace::_tracer = NULL; 64 65 // Used in declarations in SpaceManager and ChunkManager 66 enum ChunkIndex { 67 ZeroIndex = 0, 68 SpecializedIndex = ZeroIndex, 69 SmallIndex = SpecializedIndex + 1, 70 MediumIndex = SmallIndex + 1, 71 HumongousIndex = MediumIndex + 1, 72 NumberOfFreeLists = 3, 73 NumberOfInUseLists = 4 74 }; 75 76 enum ChunkSizes { // in words. 77 ClassSpecializedChunk = 128, 78 SpecializedChunk = 128, 79 ClassSmallChunk = 256, 80 SmallChunk = 512, 81 ClassMediumChunk = 4 * K, 82 MediumChunk = 8 * K 83 }; 84 85 static ChunkIndex next_chunk_index(ChunkIndex i) { 86 assert(i < NumberOfInUseLists, "Out of bound"); 87 return (ChunkIndex) (i+1); 88 } 89 90 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0; 91 uint MetaspaceGC::_shrink_factor = 0; 92 bool MetaspaceGC::_should_concurrent_collect = false; 93 94 typedef class FreeList<Metachunk> ChunkList; 95 96 // Manages the global free lists of chunks. 97 class ChunkManager : public CHeapObj<mtInternal> { 98 friend class TestVirtualSpaceNodeTest; 99 100 // Free list of chunks of different sizes. 101 // SpecializedChunk 102 // SmallChunk 103 // MediumChunk 104 // HumongousChunk 105 ChunkList _free_chunks[NumberOfFreeLists]; 106 107 // HumongousChunk 108 ChunkTreeDictionary _humongous_dictionary; 109 110 // ChunkManager in all lists of this type 111 size_t _free_chunks_total; 112 size_t _free_chunks_count; 113 114 void dec_free_chunks_total(size_t v) { 115 assert(_free_chunks_count > 0 && 116 _free_chunks_total > 0, 117 "About to go negative"); 118 Atomic::add_ptr(-1, &_free_chunks_count); 119 jlong minus_v = (jlong) - (jlong) v; 120 Atomic::add_ptr(minus_v, &_free_chunks_total); 121 } 122 123 // Debug support 124 125 size_t sum_free_chunks(); 126 size_t sum_free_chunks_count(); 127 128 void locked_verify_free_chunks_total(); 129 void slow_locked_verify_free_chunks_total() { 130 if (metaspace_slow_verify) { 131 locked_verify_free_chunks_total(); 132 } 133 } 134 void locked_verify_free_chunks_count(); 135 void slow_locked_verify_free_chunks_count() { 136 if (metaspace_slow_verify) { 137 locked_verify_free_chunks_count(); 138 } 139 } 140 void verify_free_chunks_count(); 141 142 public: 143 144 ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size) 145 : _free_chunks_total(0), _free_chunks_count(0) { 146 _free_chunks[SpecializedIndex].set_size(specialized_size); 147 _free_chunks[SmallIndex].set_size(small_size); 148 _free_chunks[MediumIndex].set_size(medium_size); 149 } 150 151 // add or delete (return) a chunk to the global freelist. 152 Metachunk* chunk_freelist_allocate(size_t word_size); 153 154 // Map a size to a list index assuming that there are lists 155 // for special, small, medium, and humongous chunks. 156 static ChunkIndex list_index(size_t size); 157 158 // Remove the chunk from its freelist. It is 159 // expected to be on one of the _free_chunks[] lists. 160 void remove_chunk(Metachunk* chunk); 161 162 // Add the simple linked list of chunks to the freelist of chunks 163 // of type index. 164 void return_chunks(ChunkIndex index, Metachunk* chunks); 165 166 // Total of the space in the free chunks list 167 size_t free_chunks_total_words(); 168 size_t free_chunks_total_bytes(); 169 170 // Number of chunks in the free chunks list 171 size_t free_chunks_count(); 172 173 void inc_free_chunks_total(size_t v, size_t count = 1) { 174 Atomic::add_ptr(count, &_free_chunks_count); 175 Atomic::add_ptr(v, &_free_chunks_total); 176 } 177 ChunkTreeDictionary* humongous_dictionary() { 178 return &_humongous_dictionary; 179 } 180 181 ChunkList* free_chunks(ChunkIndex index); 182 183 // Returns the list for the given chunk word size. 184 ChunkList* find_free_chunks_list(size_t word_size); 185 186 // Remove from a list by size. Selects list based on size of chunk. 187 Metachunk* free_chunks_get(size_t chunk_word_size); 188 189 #define index_bounds_check(index) \ 190 assert(index == SpecializedIndex || \ 191 index == SmallIndex || \ 192 index == MediumIndex || \ 193 index == HumongousIndex, err_msg("Bad index: %d", (int) index)) 194 195 size_t num_free_chunks(ChunkIndex index) const { 196 index_bounds_check(index); 197 198 if (index == HumongousIndex) { 199 return _humongous_dictionary.total_free_blocks(); 200 } 201 202 ssize_t count = _free_chunks[index].count(); 203 return count == -1 ? 0 : (size_t) count; 204 } 205 206 size_t size_free_chunks_in_bytes(ChunkIndex index) const { 207 index_bounds_check(index); 208 209 size_t word_size = 0; 210 if (index == HumongousIndex) { 211 word_size = _humongous_dictionary.total_size(); 212 } else { 213 const size_t size_per_chunk_in_words = _free_chunks[index].size(); 214 word_size = size_per_chunk_in_words * num_free_chunks(index); 215 } 216 217 return word_size * BytesPerWord; 218 } 219 220 MetaspaceChunkFreeListSummary chunk_free_list_summary() const { 221 return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex), 222 num_free_chunks(SmallIndex), 223 num_free_chunks(MediumIndex), 224 num_free_chunks(HumongousIndex), 225 size_free_chunks_in_bytes(SpecializedIndex), 226 size_free_chunks_in_bytes(SmallIndex), 227 size_free_chunks_in_bytes(MediumIndex), 228 size_free_chunks_in_bytes(HumongousIndex)); 229 } 230 231 // Debug support 232 void verify(); 233 void slow_verify() { 234 if (metaspace_slow_verify) { 235 verify(); 236 } 237 } 238 void locked_verify(); 239 void slow_locked_verify() { 240 if (metaspace_slow_verify) { 241 locked_verify(); 242 } 243 } 244 void verify_free_chunks_total(); 245 246 void locked_print_free_chunks(outputStream* st); 247 void locked_print_sum_free_chunks(outputStream* st); 248 249 void print_on(outputStream* st) const; 250 }; 251 252 // Used to manage the free list of Metablocks (a block corresponds 253 // to the allocation of a quantum of metadata). 254 class BlockFreelist VALUE_OBJ_CLASS_SPEC { 255 BlockTreeDictionary* const _dictionary; 256 257 // Only allocate and split from freelist if the size of the allocation 258 // is at least 1/4th the size of the available block. 259 const static int WasteMultiplier = 4; 260 261 // Accessors 262 BlockTreeDictionary* dictionary() const { return _dictionary; } 263 264 public: 265 BlockFreelist(); 266 ~BlockFreelist(); 267 268 // Get and return a block to the free list 269 MetaWord* get_block(size_t word_size); 270 void return_block(MetaWord* p, size_t word_size); 271 272 size_t total_size() { return dictionary()->total_size(); } 273 274 void print_on(outputStream* st) const; 275 }; 276 277 // A VirtualSpaceList node. 278 class VirtualSpaceNode : public CHeapObj<mtClass> { 279 friend class VirtualSpaceList; 280 281 // Link to next VirtualSpaceNode 282 VirtualSpaceNode* _next; 283 284 // total in the VirtualSpace 285 MemRegion _reserved; 286 ReservedSpace _rs; 287 VirtualSpace _virtual_space; 288 MetaWord* _top; 289 // count of chunks contained in this VirtualSpace 290 uintx _container_count; 291 292 // Convenience functions to access the _virtual_space 293 char* low() const { return virtual_space()->low(); } 294 char* high() const { return virtual_space()->high(); } 295 296 // The first Metachunk will be allocated at the bottom of the 297 // VirtualSpace 298 Metachunk* first_chunk() { return (Metachunk*) bottom(); } 299 300 // Committed but unused space in the virtual space 301 size_t free_words_in_vs() const; 302 public: 303 304 VirtualSpaceNode(size_t byte_size); 305 VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {} 306 ~VirtualSpaceNode(); 307 308 // Convenience functions for logical bottom and end 309 MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); } 310 MetaWord* end() const { return (MetaWord*) _virtual_space.high(); } 311 312 bool contains(const void* ptr) { return ptr >= low() && ptr < high(); } 313 314 size_t reserved_words() const { return _virtual_space.reserved_size() / BytesPerWord; } 315 size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; } 316 317 bool is_pre_committed() const { return _virtual_space.special(); } 318 319 // address of next available space in _virtual_space; 320 // Accessors 321 VirtualSpaceNode* next() { return _next; } 322 void set_next(VirtualSpaceNode* v) { _next = v; } 323 324 void set_reserved(MemRegion const v) { _reserved = v; } 325 void set_top(MetaWord* v) { _top = v; } 326 327 // Accessors 328 MemRegion* reserved() { return &_reserved; } 329 VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; } 330 331 // Returns true if "word_size" is available in the VirtualSpace 332 bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); } 333 334 MetaWord* top() const { return _top; } 335 void inc_top(size_t word_size) { _top += word_size; } 336 337 uintx container_count() { return _container_count; } 338 void inc_container_count(); 339 void dec_container_count(); 340 #ifdef ASSERT 341 uintx container_count_slow(); 342 void verify_container_count(); 343 #endif 344 345 // used and capacity in this single entry in the list 346 size_t used_words_in_vs() const; 347 size_t capacity_words_in_vs() const; 348 349 bool initialize(); 350 351 // get space from the virtual space 352 Metachunk* take_from_committed(size_t chunk_word_size); 353 354 // Allocate a chunk from the virtual space and return it. 355 Metachunk* get_chunk_vs(size_t chunk_word_size); 356 357 // Expands/shrinks the committed space in a virtual space. Delegates 358 // to Virtualspace 359 bool expand_by(size_t min_words, size_t preferred_words); 360 361 // In preparation for deleting this node, remove all the chunks 362 // in the node from any freelist. 363 void purge(ChunkManager* chunk_manager); 364 365 // If an allocation doesn't fit in the current node a new node is created. 366 // Allocate chunks out of the remaining committed space in this node 367 // to avoid wasting that memory. 368 // This always adds up because all the chunk sizes are multiples of 369 // the smallest chunk size. 370 void retire(ChunkManager* chunk_manager); 371 372 #ifdef ASSERT 373 // Debug support 374 void mangle(); 375 #endif 376 377 void print_on(outputStream* st) const; 378 }; 379 380 #define assert_is_ptr_aligned(ptr, alignment) \ 381 assert(is_ptr_aligned(ptr, alignment), \ 382 err_msg(PTR_FORMAT " is not aligned to " \ 383 SIZE_FORMAT, p2i(ptr), alignment)) 384 385 #define assert_is_size_aligned(size, alignment) \ 386 assert(is_size_aligned(size, alignment), \ 387 err_msg(SIZE_FORMAT " is not aligned to " \ 388 SIZE_FORMAT, size, alignment)) 389 390 391 // Decide if large pages should be committed when the memory is reserved. 392 static bool should_commit_large_pages_when_reserving(size_t bytes) { 393 if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) { 394 size_t words = bytes / BytesPerWord; 395 bool is_class = false; // We never reserve large pages for the class space. 396 if (MetaspaceGC::can_expand(words, is_class) && 397 MetaspaceGC::allowed_expansion() >= words) { 398 return true; 399 } 400 } 401 402 return false; 403 } 404 405 // byte_size is the size of the associated virtualspace. 406 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) { 407 assert_is_size_aligned(bytes, Metaspace::reserve_alignment()); 408 409 #if INCLUDE_CDS 410 // This allocates memory with mmap. For DumpSharedspaces, try to reserve 411 // configurable address, generally at the top of the Java heap so other 412 // memory addresses don't conflict. 413 if (DumpSharedSpaces) { 414 bool large_pages = false; // No large pages when dumping the CDS archive. 415 char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment()); 416 417 _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base); 418 if (_rs.is_reserved()) { 419 assert(shared_base == 0 || _rs.base() == shared_base, "should match"); 420 } else { 421 // Get a mmap region anywhere if the SharedBaseAddress fails. 422 _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages); 423 } 424 MetaspaceShared::set_shared_rs(&_rs); 425 } else 426 #endif 427 { 428 bool large_pages = should_commit_large_pages_when_reserving(bytes); 429 430 _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages); 431 } 432 433 if (_rs.is_reserved()) { 434 assert(_rs.base() != NULL, "Catch if we get a NULL address"); 435 assert(_rs.size() != 0, "Catch if we get a 0 size"); 436 assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment()); 437 assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment()); 438 439 MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass); 440 } 441 } 442 443 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) { 444 Metachunk* chunk = first_chunk(); 445 Metachunk* invalid_chunk = (Metachunk*) top(); 446 while (chunk < invalid_chunk ) { 447 assert(chunk->is_tagged_free(), "Should be tagged free"); 448 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); 449 chunk_manager->remove_chunk(chunk); 450 assert(chunk->next() == NULL && 451 chunk->prev() == NULL, 452 "Was not removed from its list"); 453 chunk = (Metachunk*) next; 454 } 455 } 456 457 #ifdef ASSERT 458 uintx VirtualSpaceNode::container_count_slow() { 459 uintx count = 0; 460 Metachunk* chunk = first_chunk(); 461 Metachunk* invalid_chunk = (Metachunk*) top(); 462 while (chunk < invalid_chunk ) { 463 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); 464 // Don't count the chunks on the free lists. Those are 465 // still part of the VirtualSpaceNode but not currently 466 // counted. 467 if (!chunk->is_tagged_free()) { 468 count++; 469 } 470 chunk = (Metachunk*) next; 471 } 472 return count; 473 } 474 #endif 475 476 // List of VirtualSpaces for metadata allocation. 477 class VirtualSpaceList : public CHeapObj<mtClass> { 478 friend class VirtualSpaceNode; 479 480 enum VirtualSpaceSizes { 481 VirtualSpaceSize = 256 * K 482 }; 483 484 // Head of the list 485 VirtualSpaceNode* _virtual_space_list; 486 // virtual space currently being used for allocations 487 VirtualSpaceNode* _current_virtual_space; 488 489 // Is this VirtualSpaceList used for the compressed class space 490 bool _is_class; 491 492 // Sum of reserved and committed memory in the virtual spaces 493 size_t _reserved_words; 494 size_t _committed_words; 495 496 // Number of virtual spaces 497 size_t _virtual_space_count; 498 499 ~VirtualSpaceList(); 500 501 VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; } 502 503 void set_virtual_space_list(VirtualSpaceNode* v) { 504 _virtual_space_list = v; 505 } 506 void set_current_virtual_space(VirtualSpaceNode* v) { 507 _current_virtual_space = v; 508 } 509 510 void link_vs(VirtualSpaceNode* new_entry); 511 512 // Get another virtual space and add it to the list. This 513 // is typically prompted by a failed attempt to allocate a chunk 514 // and is typically followed by the allocation of a chunk. 515 bool create_new_virtual_space(size_t vs_word_size); 516 517 // Chunk up the unused committed space in the current 518 // virtual space and add the chunks to the free list. 519 void retire_current_virtual_space(); 520 521 public: 522 VirtualSpaceList(size_t word_size); 523 VirtualSpaceList(ReservedSpace rs); 524 525 size_t free_bytes(); 526 527 Metachunk* get_new_chunk(size_t word_size, 528 size_t grow_chunks_by_words, 529 size_t medium_chunk_bunch); 530 531 bool expand_node_by(VirtualSpaceNode* node, 532 size_t min_words, 533 size_t preferred_words); 534 535 bool expand_by(size_t min_words, 536 size_t preferred_words); 537 538 VirtualSpaceNode* current_virtual_space() { 539 return _current_virtual_space; 540 } 541 542 bool is_class() const { return _is_class; } 543 544 bool initialization_succeeded() { return _virtual_space_list != NULL; } 545 546 size_t reserved_words() { return _reserved_words; } 547 size_t reserved_bytes() { return reserved_words() * BytesPerWord; } 548 size_t committed_words() { return _committed_words; } 549 size_t committed_bytes() { return committed_words() * BytesPerWord; } 550 551 void inc_reserved_words(size_t v); 552 void dec_reserved_words(size_t v); 553 void inc_committed_words(size_t v); 554 void dec_committed_words(size_t v); 555 void inc_virtual_space_count(); 556 void dec_virtual_space_count(); 557 558 bool contains(const void* ptr); 559 560 // Unlink empty VirtualSpaceNodes and free it. 561 void purge(ChunkManager* chunk_manager); 562 563 void print_on(outputStream* st) const; 564 565 class VirtualSpaceListIterator : public StackObj { 566 VirtualSpaceNode* _virtual_spaces; 567 public: 568 VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) : 569 _virtual_spaces(virtual_spaces) {} 570 571 bool repeat() { 572 return _virtual_spaces != NULL; 573 } 574 575 VirtualSpaceNode* get_next() { 576 VirtualSpaceNode* result = _virtual_spaces; 577 if (_virtual_spaces != NULL) { 578 _virtual_spaces = _virtual_spaces->next(); 579 } 580 return result; 581 } 582 }; 583 }; 584 585 class Metadebug : AllStatic { 586 // Debugging support for Metaspaces 587 static int _allocation_fail_alot_count; 588 589 public: 590 591 static void init_allocation_fail_alot_count(); 592 #ifdef ASSERT 593 static bool test_metadata_failure(); 594 #endif 595 }; 596 597 int Metadebug::_allocation_fail_alot_count = 0; 598 599 // SpaceManager - used by Metaspace to handle allocations 600 class SpaceManager : public CHeapObj<mtClass> { 601 friend class Metaspace; 602 friend class Metadebug; 603 604 private: 605 606 // protects allocations 607 Mutex* const _lock; 608 609 // Type of metadata allocated. 610 Metaspace::MetadataType _mdtype; 611 612 // List of chunks in use by this SpaceManager. Allocations 613 // are done from the current chunk. The list is used for deallocating 614 // chunks when the SpaceManager is freed. 615 Metachunk* _chunks_in_use[NumberOfInUseLists]; 616 Metachunk* _current_chunk; 617 618 // Number of small chunks to allocate to a manager 619 // If class space manager, small chunks are unlimited 620 static uint const _small_chunk_limit; 621 622 // Sum of all space in allocated chunks 623 size_t _allocated_blocks_words; 624 625 // Sum of all allocated chunks 626 size_t _allocated_chunks_words; 627 size_t _allocated_chunks_count; 628 629 // Free lists of blocks are per SpaceManager since they 630 // are assumed to be in chunks in use by the SpaceManager 631 // and all chunks in use by a SpaceManager are freed when 632 // the class loader using the SpaceManager is collected. 633 BlockFreelist _block_freelists; 634 635 // protects virtualspace and chunk expansions 636 static const char* _expand_lock_name; 637 static const int _expand_lock_rank; 638 static Mutex* const _expand_lock; 639 640 private: 641 // Accessors 642 Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; } 643 void set_chunks_in_use(ChunkIndex index, Metachunk* v) { 644 _chunks_in_use[index] = v; 645 } 646 647 BlockFreelist* block_freelists() const { 648 return (BlockFreelist*) &_block_freelists; 649 } 650 651 Metaspace::MetadataType mdtype() { return _mdtype; } 652 653 VirtualSpaceList* vs_list() const { return Metaspace::get_space_list(_mdtype); } 654 ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); } 655 656 Metachunk* current_chunk() const { return _current_chunk; } 657 void set_current_chunk(Metachunk* v) { 658 _current_chunk = v; 659 } 660 661 Metachunk* find_current_chunk(size_t word_size); 662 663 // Add chunk to the list of chunks in use 664 void add_chunk(Metachunk* v, bool make_current); 665 void retire_current_chunk(); 666 667 Mutex* lock() const { return _lock; } 668 669 const char* chunk_size_name(ChunkIndex index) const; 670 671 protected: 672 void initialize(); 673 674 public: 675 SpaceManager(Metaspace::MetadataType mdtype, 676 Mutex* lock); 677 ~SpaceManager(); 678 679 enum ChunkMultiples { 680 MediumChunkMultiple = 4 681 }; 682 683 bool is_class() { return _mdtype == Metaspace::ClassType; } 684 685 // Accessors 686 size_t specialized_chunk_size() { return (size_t) is_class() ? ClassSpecializedChunk : SpecializedChunk; } 687 size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; } 688 size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; } 689 size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; } 690 691 size_t smallest_chunk_size() { return specialized_chunk_size(); } 692 693 size_t allocated_blocks_words() const { return _allocated_blocks_words; } 694 size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; } 695 size_t allocated_chunks_words() const { return _allocated_chunks_words; } 696 size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; } 697 size_t allocated_chunks_count() const { return _allocated_chunks_count; } 698 699 bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); } 700 701 static Mutex* expand_lock() { return _expand_lock; } 702 703 // Increment the per Metaspace and global running sums for Metachunks 704 // by the given size. This is used when a Metachunk to added to 705 // the in-use list. 706 void inc_size_metrics(size_t words); 707 // Increment the per Metaspace and global running sums Metablocks by the given 708 // size. This is used when a Metablock is allocated. 709 void inc_used_metrics(size_t words); 710 // Delete the portion of the running sums for this SpaceManager. That is, 711 // the globals running sums for the Metachunks and Metablocks are 712 // decremented for all the Metachunks in-use by this SpaceManager. 713 void dec_total_from_size_metrics(); 714 715 // Set the sizes for the initial chunks. 716 void get_initial_chunk_sizes(Metaspace::MetaspaceType type, 717 size_t* chunk_word_size, 718 size_t* class_chunk_word_size); 719 720 size_t sum_capacity_in_chunks_in_use() const; 721 size_t sum_used_in_chunks_in_use() const; 722 size_t sum_free_in_chunks_in_use() const; 723 size_t sum_waste_in_chunks_in_use() const; 724 size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const; 725 726 size_t sum_count_in_chunks_in_use(); 727 size_t sum_count_in_chunks_in_use(ChunkIndex i); 728 729 Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words); 730 731 // Block allocation and deallocation. 732 // Allocates a block from the current chunk 733 MetaWord* allocate(size_t word_size); 734 735 // Helper for allocations 736 MetaWord* allocate_work(size_t word_size); 737 738 // Returns a block to the per manager freelist 739 void deallocate(MetaWord* p, size_t word_size); 740 741 // Based on the allocation size and a minimum chunk size, 742 // returned chunk size (for expanding space for chunk allocation). 743 size_t calc_chunk_size(size_t allocation_word_size); 744 745 // Called when an allocation from the current chunk fails. 746 // Gets a new chunk (may require getting a new virtual space), 747 // and allocates from that chunk. 748 MetaWord* grow_and_allocate(size_t word_size); 749 750 // Notify memory usage to MemoryService. 751 void track_metaspace_memory_usage(); 752 753 // debugging support. 754 755 void dump(outputStream* const out) const; 756 void print_on(outputStream* st) const; 757 void locked_print_chunks_in_use_on(outputStream* st) const; 758 759 void verify(); 760 void verify_chunk_size(Metachunk* chunk); 761 NOT_PRODUCT(void mangle_freed_chunks();) 762 #ifdef ASSERT 763 void verify_allocated_blocks_words(); 764 #endif 765 766 size_t get_raw_word_size(size_t word_size) { 767 size_t byte_size = word_size * BytesPerWord; 768 769 size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock)); 770 raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment()); 771 772 size_t raw_word_size = raw_bytes_size / BytesPerWord; 773 assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem"); 774 775 return raw_word_size; 776 } 777 }; 778 779 uint const SpaceManager::_small_chunk_limit = 4; 780 781 const char* SpaceManager::_expand_lock_name = 782 "SpaceManager chunk allocation lock"; 783 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1; 784 Mutex* const SpaceManager::_expand_lock = 785 new Mutex(SpaceManager::_expand_lock_rank, 786 SpaceManager::_expand_lock_name, 787 Mutex::_allow_vm_block_flag, 788 Monitor::_safepoint_check_never); 789 790 void VirtualSpaceNode::inc_container_count() { 791 assert_lock_strong(SpaceManager::expand_lock()); 792 _container_count++; 793 DEBUG_ONLY(verify_container_count();) 794 } 795 796 void VirtualSpaceNode::dec_container_count() { 797 assert_lock_strong(SpaceManager::expand_lock()); 798 _container_count--; 799 } 800 801 #ifdef ASSERT 802 void VirtualSpaceNode::verify_container_count() { 803 assert(_container_count == container_count_slow(), 804 err_msg("Inconsistency in container_count _container_count " UINTX_FORMAT 805 " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow())); 806 } 807 #endif 808 809 // BlockFreelist methods 810 811 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()) {} 812 813 BlockFreelist::~BlockFreelist() { 814 if (Verbose && TraceMetadataChunkAllocation) { 815 dictionary()->print_free_lists(gclog_or_tty); 816 } 817 delete _dictionary; 818 } 819 820 void BlockFreelist::return_block(MetaWord* p, size_t word_size) { 821 Metablock* free_chunk = ::new (p) Metablock(word_size); 822 dictionary()->return_chunk(free_chunk); 823 } 824 825 MetaWord* BlockFreelist::get_block(size_t word_size) { 826 if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) { 827 // Dark matter. Too small for dictionary. 828 return NULL; 829 } 830 831 Metablock* free_block = 832 dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast); 833 if (free_block == NULL) { 834 return NULL; 835 } 836 837 const size_t block_size = free_block->size(); 838 if (block_size > WasteMultiplier * word_size) { 839 return_block((MetaWord*)free_block, block_size); 840 return NULL; 841 } 842 843 MetaWord* new_block = (MetaWord*)free_block; 844 assert(block_size >= word_size, "Incorrect size of block from freelist"); 845 const size_t unused = block_size - word_size; 846 if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) { 847 return_block(new_block + word_size, unused); 848 } 849 850 return new_block; 851 } 852 853 void BlockFreelist::print_on(outputStream* st) const { 854 dictionary()->print_free_lists(st); 855 } 856 857 // VirtualSpaceNode methods 858 859 VirtualSpaceNode::~VirtualSpaceNode() { 860 _rs.release(); 861 #ifdef ASSERT 862 size_t word_size = sizeof(*this) / BytesPerWord; 863 Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1); 864 #endif 865 } 866 867 size_t VirtualSpaceNode::used_words_in_vs() const { 868 return pointer_delta(top(), bottom(), sizeof(MetaWord)); 869 } 870 871 // Space committed in the VirtualSpace 872 size_t VirtualSpaceNode::capacity_words_in_vs() const { 873 return pointer_delta(end(), bottom(), sizeof(MetaWord)); 874 } 875 876 size_t VirtualSpaceNode::free_words_in_vs() const { 877 return pointer_delta(end(), top(), sizeof(MetaWord)); 878 } 879 880 // Allocates the chunk from the virtual space only. 881 // This interface is also used internally for debugging. Not all 882 // chunks removed here are necessarily used for allocation. 883 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) { 884 // Bottom of the new chunk 885 MetaWord* chunk_limit = top(); 886 assert(chunk_limit != NULL, "Not safe to call this method"); 887 888 // The virtual spaces are always expanded by the 889 // commit granularity to enforce the following condition. 890 // Without this the is_available check will not work correctly. 891 assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(), 892 "The committed memory doesn't match the expanded memory."); 893 894 if (!is_available(chunk_word_size)) { 895 if (TraceMetadataChunkAllocation) { 896 gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size); 897 // Dump some information about the virtual space that is nearly full 898 print_on(gclog_or_tty); 899 } 900 return NULL; 901 } 902 903 // Take the space (bump top on the current virtual space). 904 inc_top(chunk_word_size); 905 906 // Initialize the chunk 907 Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this); 908 return result; 909 } 910 911 912 // Expand the virtual space (commit more of the reserved space) 913 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) { 914 size_t min_bytes = min_words * BytesPerWord; 915 size_t preferred_bytes = preferred_words * BytesPerWord; 916 917 size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size(); 918 919 if (uncommitted < min_bytes) { 920 return false; 921 } 922 923 size_t commit = MIN2(preferred_bytes, uncommitted); 924 bool result = virtual_space()->expand_by(commit, false); 925 926 assert(result, "Failed to commit memory"); 927 928 return result; 929 } 930 931 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) { 932 assert_lock_strong(SpaceManager::expand_lock()); 933 Metachunk* result = take_from_committed(chunk_word_size); 934 if (result != NULL) { 935 inc_container_count(); 936 } 937 return result; 938 } 939 940 bool VirtualSpaceNode::initialize() { 941 942 if (!_rs.is_reserved()) { 943 return false; 944 } 945 946 // These are necessary restriction to make sure that the virtual space always 947 // grows in steps of Metaspace::commit_alignment(). If both base and size are 948 // aligned only the middle alignment of the VirtualSpace is used. 949 assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment()); 950 assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment()); 951 952 // ReservedSpaces marked as special will have the entire memory 953 // pre-committed. Setting a committed size will make sure that 954 // committed_size and actual_committed_size agrees. 955 size_t pre_committed_size = _rs.special() ? _rs.size() : 0; 956 957 bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size, 958 Metaspace::commit_alignment()); 959 if (result) { 960 assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(), 961 "Checking that the pre-committed memory was registered by the VirtualSpace"); 962 963 set_top((MetaWord*)virtual_space()->low()); 964 set_reserved(MemRegion((HeapWord*)_rs.base(), 965 (HeapWord*)(_rs.base() + _rs.size()))); 966 967 assert(reserved()->start() == (HeapWord*) _rs.base(), 968 err_msg("Reserved start was not set properly " PTR_FORMAT 969 " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()))); 970 assert(reserved()->word_size() == _rs.size() / BytesPerWord, 971 err_msg("Reserved size was not set properly " SIZE_FORMAT 972 " != " SIZE_FORMAT, reserved()->word_size(), 973 _rs.size() / BytesPerWord)); 974 } 975 976 return result; 977 } 978 979 void VirtualSpaceNode::print_on(outputStream* st) const { 980 size_t used = used_words_in_vs(); 981 size_t capacity = capacity_words_in_vs(); 982 VirtualSpace* vs = virtual_space(); 983 st->print_cr(" space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used " 984 "[" PTR_FORMAT ", " PTR_FORMAT ", " 985 PTR_FORMAT ", " PTR_FORMAT ")", 986 p2i(vs), capacity / K, 987 capacity == 0 ? 0 : used * 100 / capacity, 988 p2i(bottom()), p2i(top()), p2i(end()), 989 p2i(vs->high_boundary())); 990 } 991 992 #ifdef ASSERT 993 void VirtualSpaceNode::mangle() { 994 size_t word_size = capacity_words_in_vs(); 995 Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1); 996 } 997 #endif // ASSERT 998 999 // VirtualSpaceList methods 1000 // Space allocated from the VirtualSpace 1001 1002 VirtualSpaceList::~VirtualSpaceList() { 1003 VirtualSpaceListIterator iter(virtual_space_list()); 1004 while (iter.repeat()) { 1005 VirtualSpaceNode* vsl = iter.get_next(); 1006 delete vsl; 1007 } 1008 } 1009 1010 void VirtualSpaceList::inc_reserved_words(size_t v) { 1011 assert_lock_strong(SpaceManager::expand_lock()); 1012 _reserved_words = _reserved_words + v; 1013 } 1014 void VirtualSpaceList::dec_reserved_words(size_t v) { 1015 assert_lock_strong(SpaceManager::expand_lock()); 1016 _reserved_words = _reserved_words - v; 1017 } 1018 1019 #define assert_committed_below_limit() \ 1020 assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \ 1021 err_msg("Too much committed memory. Committed: " SIZE_FORMAT \ 1022 " limit (MaxMetaspaceSize): " SIZE_FORMAT, \ 1023 MetaspaceAux::committed_bytes(), MaxMetaspaceSize)); 1024 1025 void VirtualSpaceList::inc_committed_words(size_t v) { 1026 assert_lock_strong(SpaceManager::expand_lock()); 1027 _committed_words = _committed_words + v; 1028 1029 assert_committed_below_limit(); 1030 } 1031 void VirtualSpaceList::dec_committed_words(size_t v) { 1032 assert_lock_strong(SpaceManager::expand_lock()); 1033 _committed_words = _committed_words - v; 1034 1035 assert_committed_below_limit(); 1036 } 1037 1038 void VirtualSpaceList::inc_virtual_space_count() { 1039 assert_lock_strong(SpaceManager::expand_lock()); 1040 _virtual_space_count++; 1041 } 1042 void VirtualSpaceList::dec_virtual_space_count() { 1043 assert_lock_strong(SpaceManager::expand_lock()); 1044 _virtual_space_count--; 1045 } 1046 1047 void ChunkManager::remove_chunk(Metachunk* chunk) { 1048 size_t word_size = chunk->word_size(); 1049 ChunkIndex index = list_index(word_size); 1050 if (index != HumongousIndex) { 1051 free_chunks(index)->remove_chunk(chunk); 1052 } else { 1053 humongous_dictionary()->remove_chunk(chunk); 1054 } 1055 1056 // Chunk is being removed from the chunks free list. 1057 dec_free_chunks_total(chunk->word_size()); 1058 } 1059 1060 // Walk the list of VirtualSpaceNodes and delete 1061 // nodes with a 0 container_count. Remove Metachunks in 1062 // the node from their respective freelists. 1063 void VirtualSpaceList::purge(ChunkManager* chunk_manager) { 1064 assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work"); 1065 assert_lock_strong(SpaceManager::expand_lock()); 1066 // Don't use a VirtualSpaceListIterator because this 1067 // list is being changed and a straightforward use of an iterator is not safe. 1068 VirtualSpaceNode* purged_vsl = NULL; 1069 VirtualSpaceNode* prev_vsl = virtual_space_list(); 1070 VirtualSpaceNode* next_vsl = prev_vsl; 1071 while (next_vsl != NULL) { 1072 VirtualSpaceNode* vsl = next_vsl; 1073 next_vsl = vsl->next(); 1074 // Don't free the current virtual space since it will likely 1075 // be needed soon. 1076 if (vsl->container_count() == 0 && vsl != current_virtual_space()) { 1077 // Unlink it from the list 1078 if (prev_vsl == vsl) { 1079 // This is the case of the current node being the first node. 1080 assert(vsl == virtual_space_list(), "Expected to be the first node"); 1081 set_virtual_space_list(vsl->next()); 1082 } else { 1083 prev_vsl->set_next(vsl->next()); 1084 } 1085 1086 vsl->purge(chunk_manager); 1087 dec_reserved_words(vsl->reserved_words()); 1088 dec_committed_words(vsl->committed_words()); 1089 dec_virtual_space_count(); 1090 purged_vsl = vsl; 1091 delete vsl; 1092 } else { 1093 prev_vsl = vsl; 1094 } 1095 } 1096 #ifdef ASSERT 1097 if (purged_vsl != NULL) { 1098 // List should be stable enough to use an iterator here. 1099 VirtualSpaceListIterator iter(virtual_space_list()); 1100 while (iter.repeat()) { 1101 VirtualSpaceNode* vsl = iter.get_next(); 1102 assert(vsl != purged_vsl, "Purge of vsl failed"); 1103 } 1104 } 1105 #endif 1106 } 1107 1108 1109 // This function looks at the mmap regions in the metaspace without locking. 1110 // The chunks are added with store ordering and not deleted except for at 1111 // unloading time during a safepoint. 1112 bool VirtualSpaceList::contains(const void* ptr) { 1113 // List should be stable enough to use an iterator here because removing virtual 1114 // space nodes is only allowed at a safepoint. 1115 VirtualSpaceListIterator iter(virtual_space_list()); 1116 while (iter.repeat()) { 1117 VirtualSpaceNode* vsn = iter.get_next(); 1118 if (vsn->contains(ptr)) { 1119 return true; 1120 } 1121 } 1122 return false; 1123 } 1124 1125 void VirtualSpaceList::retire_current_virtual_space() { 1126 assert_lock_strong(SpaceManager::expand_lock()); 1127 1128 VirtualSpaceNode* vsn = current_virtual_space(); 1129 1130 ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() : 1131 Metaspace::chunk_manager_metadata(); 1132 1133 vsn->retire(cm); 1134 } 1135 1136 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) { 1137 for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) { 1138 ChunkIndex index = (ChunkIndex)i; 1139 size_t chunk_size = chunk_manager->free_chunks(index)->size(); 1140 1141 while (free_words_in_vs() >= chunk_size) { 1142 DEBUG_ONLY(verify_container_count();) 1143 Metachunk* chunk = get_chunk_vs(chunk_size); 1144 assert(chunk != NULL, "allocation should have been successful"); 1145 1146 chunk_manager->return_chunks(index, chunk); 1147 chunk_manager->inc_free_chunks_total(chunk_size); 1148 DEBUG_ONLY(verify_container_count();) 1149 } 1150 } 1151 assert(free_words_in_vs() == 0, "should be empty now"); 1152 } 1153 1154 VirtualSpaceList::VirtualSpaceList(size_t word_size) : 1155 _is_class(false), 1156 _virtual_space_list(NULL), 1157 _current_virtual_space(NULL), 1158 _reserved_words(0), 1159 _committed_words(0), 1160 _virtual_space_count(0) { 1161 MutexLockerEx cl(SpaceManager::expand_lock(), 1162 Mutex::_no_safepoint_check_flag); 1163 create_new_virtual_space(word_size); 1164 } 1165 1166 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) : 1167 _is_class(true), 1168 _virtual_space_list(NULL), 1169 _current_virtual_space(NULL), 1170 _reserved_words(0), 1171 _committed_words(0), 1172 _virtual_space_count(0) { 1173 MutexLockerEx cl(SpaceManager::expand_lock(), 1174 Mutex::_no_safepoint_check_flag); 1175 VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs); 1176 bool succeeded = class_entry->initialize(); 1177 if (succeeded) { 1178 link_vs(class_entry); 1179 } 1180 } 1181 1182 size_t VirtualSpaceList::free_bytes() { 1183 return virtual_space_list()->free_words_in_vs() * BytesPerWord; 1184 } 1185 1186 // Allocate another meta virtual space and add it to the list. 1187 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) { 1188 assert_lock_strong(SpaceManager::expand_lock()); 1189 1190 if (is_class()) { 1191 assert(false, "We currently don't support more than one VirtualSpace for" 1192 " the compressed class space. The initialization of the" 1193 " CCS uses another code path and should not hit this path."); 1194 return false; 1195 } 1196 1197 if (vs_word_size == 0) { 1198 assert(false, "vs_word_size should always be at least _reserve_alignment large."); 1199 return false; 1200 } 1201 1202 // Reserve the space 1203 size_t vs_byte_size = vs_word_size * BytesPerWord; 1204 assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment()); 1205 1206 // Allocate the meta virtual space and initialize it. 1207 VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size); 1208 if (!new_entry->initialize()) { 1209 delete new_entry; 1210 return false; 1211 } else { 1212 assert(new_entry->reserved_words() == vs_word_size, 1213 "Reserved memory size differs from requested memory size"); 1214 // ensure lock-free iteration sees fully initialized node 1215 OrderAccess::storestore(); 1216 link_vs(new_entry); 1217 return true; 1218 } 1219 } 1220 1221 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) { 1222 if (virtual_space_list() == NULL) { 1223 set_virtual_space_list(new_entry); 1224 } else { 1225 current_virtual_space()->set_next(new_entry); 1226 } 1227 set_current_virtual_space(new_entry); 1228 inc_reserved_words(new_entry->reserved_words()); 1229 inc_committed_words(new_entry->committed_words()); 1230 inc_virtual_space_count(); 1231 #ifdef ASSERT 1232 new_entry->mangle(); 1233 #endif 1234 if (TraceMetavirtualspaceAllocation && Verbose) { 1235 VirtualSpaceNode* vsl = current_virtual_space(); 1236 vsl->print_on(gclog_or_tty); 1237 } 1238 } 1239 1240 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node, 1241 size_t min_words, 1242 size_t preferred_words) { 1243 size_t before = node->committed_words(); 1244 1245 bool result = node->expand_by(min_words, preferred_words); 1246 1247 size_t after = node->committed_words(); 1248 1249 // after and before can be the same if the memory was pre-committed. 1250 assert(after >= before, "Inconsistency"); 1251 inc_committed_words(after - before); 1252 1253 return result; 1254 } 1255 1256 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) { 1257 assert_is_size_aligned(min_words, Metaspace::commit_alignment_words()); 1258 assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words()); 1259 assert(min_words <= preferred_words, "Invalid arguments"); 1260 1261 if (!MetaspaceGC::can_expand(min_words, this->is_class())) { 1262 return false; 1263 } 1264 1265 size_t allowed_expansion_words = MetaspaceGC::allowed_expansion(); 1266 if (allowed_expansion_words < min_words) { 1267 return false; 1268 } 1269 1270 size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words); 1271 1272 // Commit more memory from the the current virtual space. 1273 bool vs_expanded = expand_node_by(current_virtual_space(), 1274 min_words, 1275 max_expansion_words); 1276 if (vs_expanded) { 1277 return true; 1278 } 1279 retire_current_virtual_space(); 1280 1281 // Get another virtual space. 1282 size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words); 1283 grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words()); 1284 1285 if (create_new_virtual_space(grow_vs_words)) { 1286 if (current_virtual_space()->is_pre_committed()) { 1287 // The memory was pre-committed, so we are done here. 1288 assert(min_words <= current_virtual_space()->committed_words(), 1289 "The new VirtualSpace was pre-committed, so it" 1290 "should be large enough to fit the alloc request."); 1291 return true; 1292 } 1293 1294 return expand_node_by(current_virtual_space(), 1295 min_words, 1296 max_expansion_words); 1297 } 1298 1299 return false; 1300 } 1301 1302 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size, 1303 size_t grow_chunks_by_words, 1304 size_t medium_chunk_bunch) { 1305 1306 // Allocate a chunk out of the current virtual space. 1307 Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); 1308 1309 if (next != NULL) { 1310 return next; 1311 } 1312 1313 // The expand amount is currently only determined by the requested sizes 1314 // and not how much committed memory is left in the current virtual space. 1315 1316 size_t min_word_size = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words()); 1317 size_t preferred_word_size = align_size_up(medium_chunk_bunch, Metaspace::commit_alignment_words()); 1318 if (min_word_size >= preferred_word_size) { 1319 // Can happen when humongous chunks are allocated. 1320 preferred_word_size = min_word_size; 1321 } 1322 1323 bool expanded = expand_by(min_word_size, preferred_word_size); 1324 if (expanded) { 1325 next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); 1326 assert(next != NULL, "The allocation was expected to succeed after the expansion"); 1327 } 1328 1329 return next; 1330 } 1331 1332 void VirtualSpaceList::print_on(outputStream* st) const { 1333 if (TraceMetadataChunkAllocation && Verbose) { 1334 VirtualSpaceListIterator iter(virtual_space_list()); 1335 while (iter.repeat()) { 1336 VirtualSpaceNode* node = iter.get_next(); 1337 node->print_on(st); 1338 } 1339 } 1340 } 1341 1342 // MetaspaceGC methods 1343 1344 // VM_CollectForMetadataAllocation is the vm operation used to GC. 1345 // Within the VM operation after the GC the attempt to allocate the metadata 1346 // should succeed. If the GC did not free enough space for the metaspace 1347 // allocation, the HWM is increased so that another virtualspace will be 1348 // allocated for the metadata. With perm gen the increase in the perm 1349 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion. The 1350 // metaspace policy uses those as the small and large steps for the HWM. 1351 // 1352 // After the GC the compute_new_size() for MetaspaceGC is called to 1353 // resize the capacity of the metaspaces. The current implementation 1354 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used 1355 // to resize the Java heap by some GC's. New flags can be implemented 1356 // if really needed. MinMetaspaceFreeRatio is used to calculate how much 1357 // free space is desirable in the metaspace capacity to decide how much 1358 // to increase the HWM. MaxMetaspaceFreeRatio is used to decide how much 1359 // free space is desirable in the metaspace capacity before decreasing 1360 // the HWM. 1361 1362 // Calculate the amount to increase the high water mark (HWM). 1363 // Increase by a minimum amount (MinMetaspaceExpansion) so that 1364 // another expansion is not requested too soon. If that is not 1365 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion. 1366 // If that is still not enough, expand by the size of the allocation 1367 // plus some. 1368 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) { 1369 size_t min_delta = MinMetaspaceExpansion; 1370 size_t max_delta = MaxMetaspaceExpansion; 1371 size_t delta = align_size_up(bytes, Metaspace::commit_alignment()); 1372 1373 if (delta <= min_delta) { 1374 delta = min_delta; 1375 } else if (delta <= max_delta) { 1376 // Don't want to hit the high water mark on the next 1377 // allocation so make the delta greater than just enough 1378 // for this allocation. 1379 delta = max_delta; 1380 } else { 1381 // This allocation is large but the next ones are probably not 1382 // so increase by the minimum. 1383 delta = delta + min_delta; 1384 } 1385 1386 assert_is_size_aligned(delta, Metaspace::commit_alignment()); 1387 1388 return delta; 1389 } 1390 1391 size_t MetaspaceGC::capacity_until_GC() { 1392 size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC); 1393 assert(value >= MetaspaceSize, "Not initialized properly?"); 1394 return value; 1395 } 1396 1397 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) { 1398 assert_is_size_aligned(v, Metaspace::commit_alignment()); 1399 1400 size_t capacity_until_GC = (size_t) _capacity_until_GC; 1401 size_t new_value = capacity_until_GC + v; 1402 1403 if (new_value < capacity_until_GC) { 1404 // The addition wrapped around, set new_value to aligned max value. 1405 new_value = align_size_down(max_uintx, Metaspace::commit_alignment()); 1406 } 1407 1408 intptr_t expected = (intptr_t) capacity_until_GC; 1409 intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected); 1410 1411 if (expected != actual) { 1412 return false; 1413 } 1414 1415 if (new_cap_until_GC != NULL) { 1416 *new_cap_until_GC = new_value; 1417 } 1418 if (old_cap_until_GC != NULL) { 1419 *old_cap_until_GC = capacity_until_GC; 1420 } 1421 return true; 1422 } 1423 1424 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) { 1425 assert_is_size_aligned(v, Metaspace::commit_alignment()); 1426 1427 return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC); 1428 } 1429 1430 void MetaspaceGC::initialize() { 1431 // Set the high-water mark to MaxMetapaceSize during VM initializaton since 1432 // we can't do a GC during initialization. 1433 _capacity_until_GC = MaxMetaspaceSize; 1434 } 1435 1436 void MetaspaceGC::post_initialize() { 1437 // Reset the high-water mark once the VM initialization is done. 1438 _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize); 1439 } 1440 1441 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) { 1442 // Check if the compressed class space is full. 1443 if (is_class && Metaspace::using_class_space()) { 1444 size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType); 1445 if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) { 1446 return false; 1447 } 1448 } 1449 1450 // Check if the user has imposed a limit on the metaspace memory. 1451 size_t committed_bytes = MetaspaceAux::committed_bytes(); 1452 if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) { 1453 return false; 1454 } 1455 1456 return true; 1457 } 1458 1459 size_t MetaspaceGC::allowed_expansion() { 1460 size_t committed_bytes = MetaspaceAux::committed_bytes(); 1461 size_t capacity_until_gc = capacity_until_GC(); 1462 1463 assert(capacity_until_gc >= committed_bytes, 1464 err_msg("capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT, 1465 capacity_until_gc, committed_bytes)); 1466 1467 size_t left_until_max = MaxMetaspaceSize - committed_bytes; 1468 size_t left_until_GC = capacity_until_gc - committed_bytes; 1469 size_t left_to_commit = MIN2(left_until_GC, left_until_max); 1470 1471 return left_to_commit / BytesPerWord; 1472 } 1473 1474 void MetaspaceGC::compute_new_size() { 1475 assert(_shrink_factor <= 100, "invalid shrink factor"); 1476 uint current_shrink_factor = _shrink_factor; 1477 _shrink_factor = 0; 1478 1479 // Using committed_bytes() for used_after_gc is an overestimation, since the 1480 // chunk free lists are included in committed_bytes() and the memory in an 1481 // un-fragmented chunk free list is available for future allocations. 1482 // However, if the chunk free lists becomes fragmented, then the memory may 1483 // not be available for future allocations and the memory is therefore "in use". 1484 // Including the chunk free lists in the definition of "in use" is therefore 1485 // necessary. Not including the chunk free lists can cause capacity_until_GC to 1486 // shrink below committed_bytes() and this has caused serious bugs in the past. 1487 const size_t used_after_gc = MetaspaceAux::committed_bytes(); 1488 const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC(); 1489 1490 const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0; 1491 const double maximum_used_percentage = 1.0 - minimum_free_percentage; 1492 1493 const double min_tmp = used_after_gc / maximum_used_percentage; 1494 size_t minimum_desired_capacity = 1495 (size_t)MIN2(min_tmp, double(max_uintx)); 1496 // Don't shrink less than the initial generation size 1497 minimum_desired_capacity = MAX2(minimum_desired_capacity, 1498 MetaspaceSize); 1499 1500 if (PrintGCDetails && Verbose) { 1501 gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: "); 1502 gclog_or_tty->print_cr(" " 1503 " minimum_free_percentage: %6.2f" 1504 " maximum_used_percentage: %6.2f", 1505 minimum_free_percentage, 1506 maximum_used_percentage); 1507 gclog_or_tty->print_cr(" " 1508 " used_after_gc : %6.1fKB", 1509 used_after_gc / (double) K); 1510 } 1511 1512 1513 size_t shrink_bytes = 0; 1514 if (capacity_until_GC < minimum_desired_capacity) { 1515 // If we have less capacity below the metaspace HWM, then 1516 // increment the HWM. 1517 size_t expand_bytes = minimum_desired_capacity - capacity_until_GC; 1518 expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment()); 1519 // Don't expand unless it's significant 1520 if (expand_bytes >= MinMetaspaceExpansion) { 1521 size_t new_capacity_until_GC = 0; 1522 bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC); 1523 assert(succeeded, "Should always succesfully increment HWM when at safepoint"); 1524 1525 Metaspace::tracer()->report_gc_threshold(capacity_until_GC, 1526 new_capacity_until_GC, 1527 MetaspaceGCThresholdUpdater::ComputeNewSize); 1528 if (PrintGCDetails && Verbose) { 1529 gclog_or_tty->print_cr(" expanding:" 1530 " minimum_desired_capacity: %6.1fKB" 1531 " expand_bytes: %6.1fKB" 1532 " MinMetaspaceExpansion: %6.1fKB" 1533 " new metaspace HWM: %6.1fKB", 1534 minimum_desired_capacity / (double) K, 1535 expand_bytes / (double) K, 1536 MinMetaspaceExpansion / (double) K, 1537 new_capacity_until_GC / (double) K); 1538 } 1539 } 1540 return; 1541 } 1542 1543 // No expansion, now see if we want to shrink 1544 // We would never want to shrink more than this 1545 assert(capacity_until_GC >= minimum_desired_capacity, 1546 err_msg(SIZE_FORMAT " >= " SIZE_FORMAT, 1547 capacity_until_GC, minimum_desired_capacity)); 1548 size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity; 1549 1550 // Should shrinking be considered? 1551 if (MaxMetaspaceFreeRatio < 100) { 1552 const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0; 1553 const double minimum_used_percentage = 1.0 - maximum_free_percentage; 1554 const double max_tmp = used_after_gc / minimum_used_percentage; 1555 size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx)); 1556 maximum_desired_capacity = MAX2(maximum_desired_capacity, 1557 MetaspaceSize); 1558 if (PrintGCDetails && Verbose) { 1559 gclog_or_tty->print_cr(" " 1560 " maximum_free_percentage: %6.2f" 1561 " minimum_used_percentage: %6.2f", 1562 maximum_free_percentage, 1563 minimum_used_percentage); 1564 gclog_or_tty->print_cr(" " 1565 " minimum_desired_capacity: %6.1fKB" 1566 " maximum_desired_capacity: %6.1fKB", 1567 minimum_desired_capacity / (double) K, 1568 maximum_desired_capacity / (double) K); 1569 } 1570 1571 assert(minimum_desired_capacity <= maximum_desired_capacity, 1572 "sanity check"); 1573 1574 if (capacity_until_GC > maximum_desired_capacity) { 1575 // Capacity too large, compute shrinking size 1576 shrink_bytes = capacity_until_GC - maximum_desired_capacity; 1577 // We don't want shrink all the way back to initSize if people call 1578 // System.gc(), because some programs do that between "phases" and then 1579 // we'd just have to grow the heap up again for the next phase. So we 1580 // damp the shrinking: 0% on the first call, 10% on the second call, 40% 1581 // on the third call, and 100% by the fourth call. But if we recompute 1582 // size without shrinking, it goes back to 0%. 1583 shrink_bytes = shrink_bytes / 100 * current_shrink_factor; 1584 1585 shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment()); 1586 1587 assert(shrink_bytes <= max_shrink_bytes, 1588 err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, 1589 shrink_bytes, max_shrink_bytes)); 1590 if (current_shrink_factor == 0) { 1591 _shrink_factor = 10; 1592 } else { 1593 _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100); 1594 } 1595 if (PrintGCDetails && Verbose) { 1596 gclog_or_tty->print_cr(" " 1597 " shrinking:" 1598 " initSize: %.1fK" 1599 " maximum_desired_capacity: %.1fK", 1600 MetaspaceSize / (double) K, 1601 maximum_desired_capacity / (double) K); 1602 gclog_or_tty->print_cr(" " 1603 " shrink_bytes: %.1fK" 1604 " current_shrink_factor: %d" 1605 " new shrink factor: %d" 1606 " MinMetaspaceExpansion: %.1fK", 1607 shrink_bytes / (double) K, 1608 current_shrink_factor, 1609 _shrink_factor, 1610 MinMetaspaceExpansion / (double) K); 1611 } 1612 } 1613 } 1614 1615 // Don't shrink unless it's significant 1616 if (shrink_bytes >= MinMetaspaceExpansion && 1617 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { 1618 size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes); 1619 Metaspace::tracer()->report_gc_threshold(capacity_until_GC, 1620 new_capacity_until_GC, 1621 MetaspaceGCThresholdUpdater::ComputeNewSize); 1622 } 1623 } 1624 1625 // Metadebug methods 1626 1627 void Metadebug::init_allocation_fail_alot_count() { 1628 if (MetadataAllocationFailALot) { 1629 _allocation_fail_alot_count = 1630 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0)); 1631 } 1632 } 1633 1634 #ifdef ASSERT 1635 bool Metadebug::test_metadata_failure() { 1636 if (MetadataAllocationFailALot && 1637 Threads::is_vm_complete()) { 1638 if (_allocation_fail_alot_count > 0) { 1639 _allocation_fail_alot_count--; 1640 } else { 1641 if (TraceMetadataChunkAllocation && Verbose) { 1642 gclog_or_tty->print_cr("Metadata allocation failing for " 1643 "MetadataAllocationFailALot"); 1644 } 1645 init_allocation_fail_alot_count(); 1646 return true; 1647 } 1648 } 1649 return false; 1650 } 1651 #endif 1652 1653 // ChunkManager methods 1654 1655 size_t ChunkManager::free_chunks_total_words() { 1656 return _free_chunks_total; 1657 } 1658 1659 size_t ChunkManager::free_chunks_total_bytes() { 1660 return free_chunks_total_words() * BytesPerWord; 1661 } 1662 1663 size_t ChunkManager::free_chunks_count() { 1664 #ifdef ASSERT 1665 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) { 1666 MutexLockerEx cl(SpaceManager::expand_lock(), 1667 Mutex::_no_safepoint_check_flag); 1668 // This lock is only needed in debug because the verification 1669 // of the _free_chunks_totals walks the list of free chunks 1670 slow_locked_verify_free_chunks_count(); 1671 } 1672 #endif 1673 return _free_chunks_count; 1674 } 1675 1676 void ChunkManager::locked_verify_free_chunks_total() { 1677 assert_lock_strong(SpaceManager::expand_lock()); 1678 assert(sum_free_chunks() == _free_chunks_total, 1679 err_msg("_free_chunks_total " SIZE_FORMAT " is not the" 1680 " same as sum " SIZE_FORMAT, _free_chunks_total, 1681 sum_free_chunks())); 1682 } 1683 1684 void ChunkManager::verify_free_chunks_total() { 1685 MutexLockerEx cl(SpaceManager::expand_lock(), 1686 Mutex::_no_safepoint_check_flag); 1687 locked_verify_free_chunks_total(); 1688 } 1689 1690 void ChunkManager::locked_verify_free_chunks_count() { 1691 assert_lock_strong(SpaceManager::expand_lock()); 1692 assert(sum_free_chunks_count() == _free_chunks_count, 1693 err_msg("_free_chunks_count " SIZE_FORMAT " is not the" 1694 " same as sum " SIZE_FORMAT, _free_chunks_count, 1695 sum_free_chunks_count())); 1696 } 1697 1698 void ChunkManager::verify_free_chunks_count() { 1699 #ifdef ASSERT 1700 MutexLockerEx cl(SpaceManager::expand_lock(), 1701 Mutex::_no_safepoint_check_flag); 1702 locked_verify_free_chunks_count(); 1703 #endif 1704 } 1705 1706 void ChunkManager::verify() { 1707 MutexLockerEx cl(SpaceManager::expand_lock(), 1708 Mutex::_no_safepoint_check_flag); 1709 locked_verify(); 1710 } 1711 1712 void ChunkManager::locked_verify() { 1713 locked_verify_free_chunks_count(); 1714 locked_verify_free_chunks_total(); 1715 } 1716 1717 void ChunkManager::locked_print_free_chunks(outputStream* st) { 1718 assert_lock_strong(SpaceManager::expand_lock()); 1719 st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, 1720 _free_chunks_total, _free_chunks_count); 1721 } 1722 1723 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) { 1724 assert_lock_strong(SpaceManager::expand_lock()); 1725 st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, 1726 sum_free_chunks(), sum_free_chunks_count()); 1727 } 1728 ChunkList* ChunkManager::free_chunks(ChunkIndex index) { 1729 return &_free_chunks[index]; 1730 } 1731 1732 // These methods that sum the free chunk lists are used in printing 1733 // methods that are used in product builds. 1734 size_t ChunkManager::sum_free_chunks() { 1735 assert_lock_strong(SpaceManager::expand_lock()); 1736 size_t result = 0; 1737 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { 1738 ChunkList* list = free_chunks(i); 1739 1740 if (list == NULL) { 1741 continue; 1742 } 1743 1744 result = result + list->count() * list->size(); 1745 } 1746 result = result + humongous_dictionary()->total_size(); 1747 return result; 1748 } 1749 1750 size_t ChunkManager::sum_free_chunks_count() { 1751 assert_lock_strong(SpaceManager::expand_lock()); 1752 size_t count = 0; 1753 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { 1754 ChunkList* list = free_chunks(i); 1755 if (list == NULL) { 1756 continue; 1757 } 1758 count = count + list->count(); 1759 } 1760 count = count + humongous_dictionary()->total_free_blocks(); 1761 return count; 1762 } 1763 1764 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) { 1765 ChunkIndex index = list_index(word_size); 1766 assert(index < HumongousIndex, "No humongous list"); 1767 return free_chunks(index); 1768 } 1769 1770 Metachunk* ChunkManager::free_chunks_get(size_t word_size) { 1771 assert_lock_strong(SpaceManager::expand_lock()); 1772 1773 slow_locked_verify(); 1774 1775 Metachunk* chunk = NULL; 1776 if (list_index(word_size) != HumongousIndex) { 1777 ChunkList* free_list = find_free_chunks_list(word_size); 1778 assert(free_list != NULL, "Sanity check"); 1779 1780 chunk = free_list->head(); 1781 1782 if (chunk == NULL) { 1783 return NULL; 1784 } 1785 1786 // Remove the chunk as the head of the list. 1787 free_list->remove_chunk(chunk); 1788 1789 if (TraceMetadataChunkAllocation && Verbose) { 1790 gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list " 1791 PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT, 1792 p2i(free_list), p2i(chunk), chunk->word_size()); 1793 } 1794 } else { 1795 chunk = humongous_dictionary()->get_chunk( 1796 word_size, 1797 FreeBlockDictionary<Metachunk>::atLeast); 1798 1799 if (chunk == NULL) { 1800 return NULL; 1801 } 1802 1803 if (TraceMetadataHumongousAllocation) { 1804 size_t waste = chunk->word_size() - word_size; 1805 gclog_or_tty->print_cr("Free list allocate humongous chunk size " 1806 SIZE_FORMAT " for requested size " SIZE_FORMAT 1807 " waste " SIZE_FORMAT, 1808 chunk->word_size(), word_size, waste); 1809 } 1810 } 1811 1812 // Chunk is being removed from the chunks free list. 1813 dec_free_chunks_total(chunk->word_size()); 1814 1815 // Remove it from the links to this freelist 1816 chunk->set_next(NULL); 1817 chunk->set_prev(NULL); 1818 #ifdef ASSERT 1819 // Chunk is no longer on any freelist. Setting to false make container_count_slow() 1820 // work. 1821 chunk->set_is_tagged_free(false); 1822 #endif 1823 chunk->container()->inc_container_count(); 1824 1825 slow_locked_verify(); 1826 return chunk; 1827 } 1828 1829 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) { 1830 assert_lock_strong(SpaceManager::expand_lock()); 1831 slow_locked_verify(); 1832 1833 // Take from the beginning of the list 1834 Metachunk* chunk = free_chunks_get(word_size); 1835 if (chunk == NULL) { 1836 return NULL; 1837 } 1838 1839 assert((word_size <= chunk->word_size()) || 1840 list_index(chunk->word_size() == HumongousIndex), 1841 "Non-humongous variable sized chunk"); 1842 if (TraceMetadataChunkAllocation) { 1843 size_t list_count; 1844 if (list_index(word_size) < HumongousIndex) { 1845 ChunkList* list = find_free_chunks_list(word_size); 1846 list_count = list->count(); 1847 } else { 1848 list_count = humongous_dictionary()->total_count(); 1849 } 1850 gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " 1851 PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ", 1852 p2i(this), p2i(chunk), chunk->word_size(), list_count); 1853 locked_print_free_chunks(gclog_or_tty); 1854 } 1855 1856 return chunk; 1857 } 1858 1859 void ChunkManager::print_on(outputStream* out) const { 1860 if (PrintFLSStatistics != 0) { 1861 const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics(); 1862 } 1863 } 1864 1865 // SpaceManager methods 1866 1867 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type, 1868 size_t* chunk_word_size, 1869 size_t* class_chunk_word_size) { 1870 switch (type) { 1871 case Metaspace::BootMetaspaceType: 1872 *chunk_word_size = Metaspace::first_chunk_word_size(); 1873 *class_chunk_word_size = Metaspace::first_class_chunk_word_size(); 1874 break; 1875 case Metaspace::ROMetaspaceType: 1876 *chunk_word_size = SharedReadOnlySize / wordSize; 1877 *class_chunk_word_size = ClassSpecializedChunk; 1878 break; 1879 case Metaspace::ReadWriteMetaspaceType: 1880 *chunk_word_size = SharedReadWriteSize / wordSize; 1881 *class_chunk_word_size = ClassSpecializedChunk; 1882 break; 1883 case Metaspace::AnonymousMetaspaceType: 1884 case Metaspace::ReflectionMetaspaceType: 1885 *chunk_word_size = SpecializedChunk; 1886 *class_chunk_word_size = ClassSpecializedChunk; 1887 break; 1888 default: 1889 *chunk_word_size = SmallChunk; 1890 *class_chunk_word_size = ClassSmallChunk; 1891 break; 1892 } 1893 assert(*chunk_word_size != 0 && *class_chunk_word_size != 0, 1894 err_msg("Initial chunks sizes bad: data " SIZE_FORMAT 1895 " class " SIZE_FORMAT, 1896 *chunk_word_size, *class_chunk_word_size)); 1897 } 1898 1899 size_t SpaceManager::sum_free_in_chunks_in_use() const { 1900 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 1901 size_t free = 0; 1902 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1903 Metachunk* chunk = chunks_in_use(i); 1904 while (chunk != NULL) { 1905 free += chunk->free_word_size(); 1906 chunk = chunk->next(); 1907 } 1908 } 1909 return free; 1910 } 1911 1912 size_t SpaceManager::sum_waste_in_chunks_in_use() const { 1913 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 1914 size_t result = 0; 1915 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1916 result += sum_waste_in_chunks_in_use(i); 1917 } 1918 1919 return result; 1920 } 1921 1922 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const { 1923 size_t result = 0; 1924 Metachunk* chunk = chunks_in_use(index); 1925 // Count the free space in all the chunk but not the 1926 // current chunk from which allocations are still being done. 1927 while (chunk != NULL) { 1928 if (chunk != current_chunk()) { 1929 result += chunk->free_word_size(); 1930 } 1931 chunk = chunk->next(); 1932 } 1933 return result; 1934 } 1935 1936 size_t SpaceManager::sum_capacity_in_chunks_in_use() const { 1937 // For CMS use "allocated_chunks_words()" which does not need the 1938 // Metaspace lock. For the other collectors sum over the 1939 // lists. Use both methods as a check that "allocated_chunks_words()" 1940 // is correct. That is, sum_capacity_in_chunks() is too expensive 1941 // to use in the product and allocated_chunks_words() should be used 1942 // but allow for checking that allocated_chunks_words() returns the same 1943 // value as sum_capacity_in_chunks_in_use() which is the definitive 1944 // answer. 1945 if (UseConcMarkSweepGC) { 1946 return allocated_chunks_words(); 1947 } else { 1948 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 1949 size_t sum = 0; 1950 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1951 Metachunk* chunk = chunks_in_use(i); 1952 while (chunk != NULL) { 1953 sum += chunk->word_size(); 1954 chunk = chunk->next(); 1955 } 1956 } 1957 return sum; 1958 } 1959 } 1960 1961 size_t SpaceManager::sum_count_in_chunks_in_use() { 1962 size_t count = 0; 1963 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1964 count = count + sum_count_in_chunks_in_use(i); 1965 } 1966 1967 return count; 1968 } 1969 1970 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) { 1971 size_t count = 0; 1972 Metachunk* chunk = chunks_in_use(i); 1973 while (chunk != NULL) { 1974 count++; 1975 chunk = chunk->next(); 1976 } 1977 return count; 1978 } 1979 1980 1981 size_t SpaceManager::sum_used_in_chunks_in_use() const { 1982 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 1983 size_t used = 0; 1984 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1985 Metachunk* chunk = chunks_in_use(i); 1986 while (chunk != NULL) { 1987 used += chunk->used_word_size(); 1988 chunk = chunk->next(); 1989 } 1990 } 1991 return used; 1992 } 1993 1994 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const { 1995 1996 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1997 Metachunk* chunk = chunks_in_use(i); 1998 st->print("SpaceManager: %s " PTR_FORMAT, 1999 chunk_size_name(i), p2i(chunk)); 2000 if (chunk != NULL) { 2001 st->print_cr(" free " SIZE_FORMAT, 2002 chunk->free_word_size()); 2003 } else { 2004 st->cr(); 2005 } 2006 } 2007 2008 chunk_manager()->locked_print_free_chunks(st); 2009 chunk_manager()->locked_print_sum_free_chunks(st); 2010 } 2011 2012 size_t SpaceManager::calc_chunk_size(size_t word_size) { 2013 2014 // Decide between a small chunk and a medium chunk. Up to 2015 // _small_chunk_limit small chunks can be allocated but 2016 // once a medium chunk has been allocated, no more small 2017 // chunks will be allocated. 2018 size_t chunk_word_size; 2019 if (chunks_in_use(MediumIndex) == NULL && 2020 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) { 2021 chunk_word_size = (size_t) small_chunk_size(); 2022 if (word_size + Metachunk::overhead() > small_chunk_size()) { 2023 chunk_word_size = medium_chunk_size(); 2024 } 2025 } else { 2026 chunk_word_size = medium_chunk_size(); 2027 } 2028 2029 // Might still need a humongous chunk. Enforce 2030 // humongous allocations sizes to be aligned up to 2031 // the smallest chunk size. 2032 size_t if_humongous_sized_chunk = 2033 align_size_up(word_size + Metachunk::overhead(), 2034 smallest_chunk_size()); 2035 chunk_word_size = 2036 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk); 2037 2038 assert(!SpaceManager::is_humongous(word_size) || 2039 chunk_word_size == if_humongous_sized_chunk, 2040 err_msg("Size calculation is wrong, word_size " SIZE_FORMAT 2041 " chunk_word_size " SIZE_FORMAT, 2042 word_size, chunk_word_size)); 2043 if (TraceMetadataHumongousAllocation && 2044 SpaceManager::is_humongous(word_size)) { 2045 gclog_or_tty->print_cr("Metadata humongous allocation:"); 2046 gclog_or_tty->print_cr(" word_size " PTR_FORMAT, word_size); 2047 gclog_or_tty->print_cr(" chunk_word_size " PTR_FORMAT, 2048 chunk_word_size); 2049 gclog_or_tty->print_cr(" chunk overhead " PTR_FORMAT, 2050 Metachunk::overhead()); 2051 } 2052 return chunk_word_size; 2053 } 2054 2055 void SpaceManager::track_metaspace_memory_usage() { 2056 if (is_init_completed()) { 2057 if (is_class()) { 2058 MemoryService::track_compressed_class_memory_usage(); 2059 } 2060 MemoryService::track_metaspace_memory_usage(); 2061 } 2062 } 2063 2064 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) { 2065 assert(vs_list()->current_virtual_space() != NULL, 2066 "Should have been set"); 2067 assert(current_chunk() == NULL || 2068 current_chunk()->allocate(word_size) == NULL, 2069 "Don't need to expand"); 2070 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 2071 2072 if (TraceMetadataChunkAllocation && Verbose) { 2073 size_t words_left = 0; 2074 size_t words_used = 0; 2075 if (current_chunk() != NULL) { 2076 words_left = current_chunk()->free_word_size(); 2077 words_used = current_chunk()->used_word_size(); 2078 } 2079 gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT 2080 " words " SIZE_FORMAT " words used " SIZE_FORMAT 2081 " words left", 2082 word_size, words_used, words_left); 2083 } 2084 2085 // Get another chunk out of the virtual space 2086 size_t grow_chunks_by_words = calc_chunk_size(word_size); 2087 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words); 2088 2089 MetaWord* mem = NULL; 2090 2091 // If a chunk was available, add it to the in-use chunk list 2092 // and do an allocation from it. 2093 if (next != NULL) { 2094 // Add to this manager's list of chunks in use. 2095 add_chunk(next, false); 2096 mem = next->allocate(word_size); 2097 } 2098 2099 // Track metaspace memory usage statistic. 2100 track_metaspace_memory_usage(); 2101 2102 return mem; 2103 } 2104 2105 void SpaceManager::print_on(outputStream* st) const { 2106 2107 for (ChunkIndex i = ZeroIndex; 2108 i < NumberOfInUseLists ; 2109 i = next_chunk_index(i) ) { 2110 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT, 2111 p2i(chunks_in_use(i)), 2112 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size()); 2113 } 2114 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT 2115 " Humongous " SIZE_FORMAT, 2116 sum_waste_in_chunks_in_use(SmallIndex), 2117 sum_waste_in_chunks_in_use(MediumIndex), 2118 sum_waste_in_chunks_in_use(HumongousIndex)); 2119 // block free lists 2120 if (block_freelists() != NULL) { 2121 st->print_cr("total in block free lists " SIZE_FORMAT, 2122 block_freelists()->total_size()); 2123 } 2124 } 2125 2126 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype, 2127 Mutex* lock) : 2128 _mdtype(mdtype), 2129 _allocated_blocks_words(0), 2130 _allocated_chunks_words(0), 2131 _allocated_chunks_count(0), 2132 _lock(lock) 2133 { 2134 initialize(); 2135 } 2136 2137 void SpaceManager::inc_size_metrics(size_t words) { 2138 assert_lock_strong(SpaceManager::expand_lock()); 2139 // Total of allocated Metachunks and allocated Metachunks count 2140 // for each SpaceManager 2141 _allocated_chunks_words = _allocated_chunks_words + words; 2142 _allocated_chunks_count++; 2143 // Global total of capacity in allocated Metachunks 2144 MetaspaceAux::inc_capacity(mdtype(), words); 2145 // Global total of allocated Metablocks. 2146 // used_words_slow() includes the overhead in each 2147 // Metachunk so include it in the used when the 2148 // Metachunk is first added (so only added once per 2149 // Metachunk). 2150 MetaspaceAux::inc_used(mdtype(), Metachunk::overhead()); 2151 } 2152 2153 void SpaceManager::inc_used_metrics(size_t words) { 2154 // Add to the per SpaceManager total 2155 Atomic::add_ptr(words, &_allocated_blocks_words); 2156 // Add to the global total 2157 MetaspaceAux::inc_used(mdtype(), words); 2158 } 2159 2160 void SpaceManager::dec_total_from_size_metrics() { 2161 MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words()); 2162 MetaspaceAux::dec_used(mdtype(), allocated_blocks_words()); 2163 // Also deduct the overhead per Metachunk 2164 MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead()); 2165 } 2166 2167 void SpaceManager::initialize() { 2168 Metadebug::init_allocation_fail_alot_count(); 2169 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2170 _chunks_in_use[i] = NULL; 2171 } 2172 _current_chunk = NULL; 2173 if (TraceMetadataChunkAllocation && Verbose) { 2174 gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, p2i(this)); 2175 } 2176 } 2177 2178 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) { 2179 if (chunks == NULL) { 2180 return; 2181 } 2182 ChunkList* list = free_chunks(index); 2183 assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes"); 2184 assert_lock_strong(SpaceManager::expand_lock()); 2185 Metachunk* cur = chunks; 2186 2187 // This returns chunks one at a time. If a new 2188 // class List can be created that is a base class 2189 // of FreeList then something like FreeList::prepend() 2190 // can be used in place of this loop 2191 while (cur != NULL) { 2192 assert(cur->container() != NULL, "Container should have been set"); 2193 cur->container()->dec_container_count(); 2194 // Capture the next link before it is changed 2195 // by the call to return_chunk_at_head(); 2196 Metachunk* next = cur->next(); 2197 DEBUG_ONLY(cur->set_is_tagged_free(true);) 2198 list->return_chunk_at_head(cur); 2199 cur = next; 2200 } 2201 } 2202 2203 SpaceManager::~SpaceManager() { 2204 // This call this->_lock which can't be done while holding expand_lock() 2205 assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(), 2206 err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT 2207 " allocated_chunks_words() " SIZE_FORMAT, 2208 sum_capacity_in_chunks_in_use(), allocated_chunks_words())); 2209 2210 MutexLockerEx fcl(SpaceManager::expand_lock(), 2211 Mutex::_no_safepoint_check_flag); 2212 2213 chunk_manager()->slow_locked_verify(); 2214 2215 dec_total_from_size_metrics(); 2216 2217 if (TraceMetadataChunkAllocation && Verbose) { 2218 gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, p2i(this)); 2219 locked_print_chunks_in_use_on(gclog_or_tty); 2220 } 2221 2222 // Do not mangle freed Metachunks. The chunk size inside Metachunks 2223 // is during the freeing of a VirtualSpaceNodes. 2224 2225 // Have to update before the chunks_in_use lists are emptied 2226 // below. 2227 chunk_manager()->inc_free_chunks_total(allocated_chunks_words(), 2228 sum_count_in_chunks_in_use()); 2229 2230 // Add all the chunks in use by this space manager 2231 // to the global list of free chunks. 2232 2233 // Follow each list of chunks-in-use and add them to the 2234 // free lists. Each list is NULL terminated. 2235 2236 for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) { 2237 if (TraceMetadataChunkAllocation && Verbose) { 2238 gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s chunks to freelist", 2239 sum_count_in_chunks_in_use(i), 2240 chunk_size_name(i)); 2241 } 2242 Metachunk* chunks = chunks_in_use(i); 2243 chunk_manager()->return_chunks(i, chunks); 2244 set_chunks_in_use(i, NULL); 2245 if (TraceMetadataChunkAllocation && Verbose) { 2246 gclog_or_tty->print_cr("updated freelist count " SSIZE_FORMAT " %s", 2247 chunk_manager()->free_chunks(i)->count(), 2248 chunk_size_name(i)); 2249 } 2250 assert(i != HumongousIndex, "Humongous chunks are handled explicitly later"); 2251 } 2252 2253 // The medium chunk case may be optimized by passing the head and 2254 // tail of the medium chunk list to add_at_head(). The tail is often 2255 // the current chunk but there are probably exceptions. 2256 2257 // Humongous chunks 2258 if (TraceMetadataChunkAllocation && Verbose) { 2259 gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s humongous chunks to dictionary", 2260 sum_count_in_chunks_in_use(HumongousIndex), 2261 chunk_size_name(HumongousIndex)); 2262 gclog_or_tty->print("Humongous chunk dictionary: "); 2263 } 2264 // Humongous chunks are never the current chunk. 2265 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex); 2266 2267 while (humongous_chunks != NULL) { 2268 #ifdef ASSERT 2269 humongous_chunks->set_is_tagged_free(true); 2270 #endif 2271 if (TraceMetadataChunkAllocation && Verbose) { 2272 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ", 2273 p2i(humongous_chunks), 2274 humongous_chunks->word_size()); 2275 } 2276 assert(humongous_chunks->word_size() == (size_t) 2277 align_size_up(humongous_chunks->word_size(), 2278 smallest_chunk_size()), 2279 err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT 2280 " granularity " SIZE_FORMAT, 2281 humongous_chunks->word_size(), smallest_chunk_size())); 2282 Metachunk* next_humongous_chunks = humongous_chunks->next(); 2283 humongous_chunks->container()->dec_container_count(); 2284 chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks); 2285 humongous_chunks = next_humongous_chunks; 2286 } 2287 if (TraceMetadataChunkAllocation && Verbose) { 2288 gclog_or_tty->cr(); 2289 gclog_or_tty->print_cr("updated dictionary count " SIZE_FORMAT " %s", 2290 chunk_manager()->humongous_dictionary()->total_count(), 2291 chunk_size_name(HumongousIndex)); 2292 } 2293 chunk_manager()->slow_locked_verify(); 2294 } 2295 2296 const char* SpaceManager::chunk_size_name(ChunkIndex index) const { 2297 switch (index) { 2298 case SpecializedIndex: 2299 return "Specialized"; 2300 case SmallIndex: 2301 return "Small"; 2302 case MediumIndex: 2303 return "Medium"; 2304 case HumongousIndex: 2305 return "Humongous"; 2306 default: 2307 return NULL; 2308 } 2309 } 2310 2311 ChunkIndex ChunkManager::list_index(size_t size) { 2312 switch (size) { 2313 case SpecializedChunk: 2314 assert(SpecializedChunk == ClassSpecializedChunk, 2315 "Need branch for ClassSpecializedChunk"); 2316 return SpecializedIndex; 2317 case SmallChunk: 2318 case ClassSmallChunk: 2319 return SmallIndex; 2320 case MediumChunk: 2321 case ClassMediumChunk: 2322 return MediumIndex; 2323 default: 2324 assert(size > MediumChunk || size > ClassMediumChunk, 2325 "Not a humongous chunk"); 2326 return HumongousIndex; 2327 } 2328 } 2329 2330 void SpaceManager::deallocate(MetaWord* p, size_t word_size) { 2331 assert_lock_strong(_lock); 2332 size_t raw_word_size = get_raw_word_size(word_size); 2333 size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size(); 2334 assert(raw_word_size >= min_size, 2335 err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size)); 2336 block_freelists()->return_block(p, raw_word_size); 2337 } 2338 2339 // Adds a chunk to the list of chunks in use. 2340 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) { 2341 2342 assert(new_chunk != NULL, "Should not be NULL"); 2343 assert(new_chunk->next() == NULL, "Should not be on a list"); 2344 2345 new_chunk->reset_empty(); 2346 2347 // Find the correct list and and set the current 2348 // chunk for that list. 2349 ChunkIndex index = ChunkManager::list_index(new_chunk->word_size()); 2350 2351 if (index != HumongousIndex) { 2352 retire_current_chunk(); 2353 set_current_chunk(new_chunk); 2354 new_chunk->set_next(chunks_in_use(index)); 2355 set_chunks_in_use(index, new_chunk); 2356 } else { 2357 // For null class loader data and DumpSharedSpaces, the first chunk isn't 2358 // small, so small will be null. Link this first chunk as the current 2359 // chunk. 2360 if (make_current) { 2361 // Set as the current chunk but otherwise treat as a humongous chunk. 2362 set_current_chunk(new_chunk); 2363 } 2364 // Link at head. The _current_chunk only points to a humongous chunk for 2365 // the null class loader metaspace (class and data virtual space managers) 2366 // any humongous chunks so will not point to the tail 2367 // of the humongous chunks list. 2368 new_chunk->set_next(chunks_in_use(HumongousIndex)); 2369 set_chunks_in_use(HumongousIndex, new_chunk); 2370 2371 assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency"); 2372 } 2373 2374 // Add to the running sum of capacity 2375 inc_size_metrics(new_chunk->word_size()); 2376 2377 assert(new_chunk->is_empty(), "Not ready for reuse"); 2378 if (TraceMetadataChunkAllocation && Verbose) { 2379 gclog_or_tty->print("SpaceManager::add_chunk: " SIZE_FORMAT ") ", 2380 sum_count_in_chunks_in_use()); 2381 new_chunk->print_on(gclog_or_tty); 2382 chunk_manager()->locked_print_free_chunks(gclog_or_tty); 2383 } 2384 } 2385 2386 void SpaceManager::retire_current_chunk() { 2387 if (current_chunk() != NULL) { 2388 size_t remaining_words = current_chunk()->free_word_size(); 2389 if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) { 2390 block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words); 2391 inc_used_metrics(remaining_words); 2392 } 2393 } 2394 } 2395 2396 Metachunk* SpaceManager::get_new_chunk(size_t word_size, 2397 size_t grow_chunks_by_words) { 2398 // Get a chunk from the chunk freelist 2399 Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words); 2400 2401 if (next == NULL) { 2402 next = vs_list()->get_new_chunk(word_size, 2403 grow_chunks_by_words, 2404 medium_chunk_bunch()); 2405 } 2406 2407 if (TraceMetadataHumongousAllocation && next != NULL && 2408 SpaceManager::is_humongous(next->word_size())) { 2409 gclog_or_tty->print_cr(" new humongous chunk word size " 2410 PTR_FORMAT, next->word_size()); 2411 } 2412 2413 return next; 2414 } 2415 2416 MetaWord* SpaceManager::allocate(size_t word_size) { 2417 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 2418 2419 size_t raw_word_size = get_raw_word_size(word_size); 2420 BlockFreelist* fl = block_freelists(); 2421 MetaWord* p = NULL; 2422 // Allocation from the dictionary is expensive in the sense that 2423 // the dictionary has to be searched for a size. Don't allocate 2424 // from the dictionary until it starts to get fat. Is this 2425 // a reasonable policy? Maybe an skinny dictionary is fast enough 2426 // for allocations. Do some profiling. JJJ 2427 if (fl->total_size() > allocation_from_dictionary_limit) { 2428 p = fl->get_block(raw_word_size); 2429 } 2430 if (p == NULL) { 2431 p = allocate_work(raw_word_size); 2432 } 2433 2434 return p; 2435 } 2436 2437 // Returns the address of spaced allocated for "word_size". 2438 // This methods does not know about blocks (Metablocks) 2439 MetaWord* SpaceManager::allocate_work(size_t word_size) { 2440 assert_lock_strong(_lock); 2441 #ifdef ASSERT 2442 if (Metadebug::test_metadata_failure()) { 2443 return NULL; 2444 } 2445 #endif 2446 // Is there space in the current chunk? 2447 MetaWord* result = NULL; 2448 2449 // For DumpSharedSpaces, only allocate out of the current chunk which is 2450 // never null because we gave it the size we wanted. Caller reports out 2451 // of memory if this returns null. 2452 if (DumpSharedSpaces) { 2453 assert(current_chunk() != NULL, "should never happen"); 2454 inc_used_metrics(word_size); 2455 return current_chunk()->allocate(word_size); // caller handles null result 2456 } 2457 2458 if (current_chunk() != NULL) { 2459 result = current_chunk()->allocate(word_size); 2460 } 2461 2462 if (result == NULL) { 2463 result = grow_and_allocate(word_size); 2464 } 2465 2466 if (result != NULL) { 2467 inc_used_metrics(word_size); 2468 assert(result != (MetaWord*) chunks_in_use(MediumIndex), 2469 "Head of the list is being allocated"); 2470 } 2471 2472 return result; 2473 } 2474 2475 void SpaceManager::verify() { 2476 // If there are blocks in the dictionary, then 2477 // verification of chunks does not work since 2478 // being in the dictionary alters a chunk. 2479 if (block_freelists()->total_size() == 0) { 2480 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2481 Metachunk* curr = chunks_in_use(i); 2482 while (curr != NULL) { 2483 curr->verify(); 2484 verify_chunk_size(curr); 2485 curr = curr->next(); 2486 } 2487 } 2488 } 2489 } 2490 2491 void SpaceManager::verify_chunk_size(Metachunk* chunk) { 2492 assert(is_humongous(chunk->word_size()) || 2493 chunk->word_size() == medium_chunk_size() || 2494 chunk->word_size() == small_chunk_size() || 2495 chunk->word_size() == specialized_chunk_size(), 2496 "Chunk size is wrong"); 2497 return; 2498 } 2499 2500 #ifdef ASSERT 2501 void SpaceManager::verify_allocated_blocks_words() { 2502 // Verification is only guaranteed at a safepoint. 2503 assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(), 2504 "Verification can fail if the applications is running"); 2505 assert(allocated_blocks_words() == sum_used_in_chunks_in_use(), 2506 err_msg("allocation total is not consistent " SIZE_FORMAT 2507 " vs " SIZE_FORMAT, 2508 allocated_blocks_words(), sum_used_in_chunks_in_use())); 2509 } 2510 2511 #endif 2512 2513 void SpaceManager::dump(outputStream* const out) const { 2514 size_t curr_total = 0; 2515 size_t waste = 0; 2516 uint i = 0; 2517 size_t used = 0; 2518 size_t capacity = 0; 2519 2520 // Add up statistics for all chunks in this SpaceManager. 2521 for (ChunkIndex index = ZeroIndex; 2522 index < NumberOfInUseLists; 2523 index = next_chunk_index(index)) { 2524 for (Metachunk* curr = chunks_in_use(index); 2525 curr != NULL; 2526 curr = curr->next()) { 2527 out->print("%d) ", i++); 2528 curr->print_on(out); 2529 curr_total += curr->word_size(); 2530 used += curr->used_word_size(); 2531 capacity += curr->word_size(); 2532 waste += curr->free_word_size() + curr->overhead();; 2533 } 2534 } 2535 2536 if (TraceMetadataChunkAllocation && Verbose) { 2537 block_freelists()->print_on(out); 2538 } 2539 2540 size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size(); 2541 // Free space isn't wasted. 2542 waste -= free; 2543 2544 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT 2545 " free " SIZE_FORMAT " capacity " SIZE_FORMAT 2546 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste); 2547 } 2548 2549 #ifndef PRODUCT 2550 void SpaceManager::mangle_freed_chunks() { 2551 for (ChunkIndex index = ZeroIndex; 2552 index < NumberOfInUseLists; 2553 index = next_chunk_index(index)) { 2554 for (Metachunk* curr = chunks_in_use(index); 2555 curr != NULL; 2556 curr = curr->next()) { 2557 curr->mangle(); 2558 } 2559 } 2560 } 2561 #endif // PRODUCT 2562 2563 // MetaspaceAux 2564 2565 2566 size_t MetaspaceAux::_capacity_words[] = {0, 0}; 2567 size_t MetaspaceAux::_used_words[] = {0, 0}; 2568 2569 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) { 2570 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2571 return list == NULL ? 0 : list->free_bytes(); 2572 } 2573 2574 size_t MetaspaceAux::free_bytes() { 2575 return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType); 2576 } 2577 2578 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) { 2579 assert_lock_strong(SpaceManager::expand_lock()); 2580 assert(words <= capacity_words(mdtype), 2581 err_msg("About to decrement below 0: words " SIZE_FORMAT 2582 " is greater than _capacity_words[%u] " SIZE_FORMAT, 2583 words, mdtype, capacity_words(mdtype))); 2584 _capacity_words[mdtype] -= words; 2585 } 2586 2587 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) { 2588 assert_lock_strong(SpaceManager::expand_lock()); 2589 // Needs to be atomic 2590 _capacity_words[mdtype] += words; 2591 } 2592 2593 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) { 2594 assert(words <= used_words(mdtype), 2595 err_msg("About to decrement below 0: words " SIZE_FORMAT 2596 " is greater than _used_words[%u] " SIZE_FORMAT, 2597 words, mdtype, used_words(mdtype))); 2598 // For CMS deallocation of the Metaspaces occurs during the 2599 // sweep which is a concurrent phase. Protection by the expand_lock() 2600 // is not enough since allocation is on a per Metaspace basis 2601 // and protected by the Metaspace lock. 2602 jlong minus_words = (jlong) - (jlong) words; 2603 Atomic::add_ptr(minus_words, &_used_words[mdtype]); 2604 } 2605 2606 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) { 2607 // _used_words tracks allocations for 2608 // each piece of metadata. Those allocations are 2609 // generally done concurrently by different application 2610 // threads so must be done atomically. 2611 Atomic::add_ptr(words, &_used_words[mdtype]); 2612 } 2613 2614 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) { 2615 size_t used = 0; 2616 ClassLoaderDataGraphMetaspaceIterator iter; 2617 while (iter.repeat()) { 2618 Metaspace* msp = iter.get_next(); 2619 // Sum allocated_blocks_words for each metaspace 2620 if (msp != NULL) { 2621 used += msp->used_words_slow(mdtype); 2622 } 2623 } 2624 return used * BytesPerWord; 2625 } 2626 2627 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) { 2628 size_t free = 0; 2629 ClassLoaderDataGraphMetaspaceIterator iter; 2630 while (iter.repeat()) { 2631 Metaspace* msp = iter.get_next(); 2632 if (msp != NULL) { 2633 free += msp->free_words_slow(mdtype); 2634 } 2635 } 2636 return free * BytesPerWord; 2637 } 2638 2639 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) { 2640 if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) { 2641 return 0; 2642 } 2643 // Don't count the space in the freelists. That space will be 2644 // added to the capacity calculation as needed. 2645 size_t capacity = 0; 2646 ClassLoaderDataGraphMetaspaceIterator iter; 2647 while (iter.repeat()) { 2648 Metaspace* msp = iter.get_next(); 2649 if (msp != NULL) { 2650 capacity += msp->capacity_words_slow(mdtype); 2651 } 2652 } 2653 return capacity * BytesPerWord; 2654 } 2655 2656 size_t MetaspaceAux::capacity_bytes_slow() { 2657 #ifdef PRODUCT 2658 // Use capacity_bytes() in PRODUCT instead of this function. 2659 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT"); 2660 #endif 2661 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType); 2662 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType); 2663 assert(capacity_bytes() == class_capacity + non_class_capacity, 2664 err_msg("bad accounting: capacity_bytes() " SIZE_FORMAT 2665 " class_capacity + non_class_capacity " SIZE_FORMAT 2666 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT, 2667 capacity_bytes(), class_capacity + non_class_capacity, 2668 class_capacity, non_class_capacity)); 2669 2670 return class_capacity + non_class_capacity; 2671 } 2672 2673 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) { 2674 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2675 return list == NULL ? 0 : list->reserved_bytes(); 2676 } 2677 2678 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) { 2679 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2680 return list == NULL ? 0 : list->committed_bytes(); 2681 } 2682 2683 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); } 2684 2685 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) { 2686 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype); 2687 if (chunk_manager == NULL) { 2688 return 0; 2689 } 2690 chunk_manager->slow_verify(); 2691 return chunk_manager->free_chunks_total_words(); 2692 } 2693 2694 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) { 2695 return free_chunks_total_words(mdtype) * BytesPerWord; 2696 } 2697 2698 size_t MetaspaceAux::free_chunks_total_words() { 2699 return free_chunks_total_words(Metaspace::ClassType) + 2700 free_chunks_total_words(Metaspace::NonClassType); 2701 } 2702 2703 size_t MetaspaceAux::free_chunks_total_bytes() { 2704 return free_chunks_total_words() * BytesPerWord; 2705 } 2706 2707 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) { 2708 return Metaspace::get_chunk_manager(mdtype) != NULL; 2709 } 2710 2711 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) { 2712 if (!has_chunk_free_list(mdtype)) { 2713 return MetaspaceChunkFreeListSummary(); 2714 } 2715 2716 const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype); 2717 return cm->chunk_free_list_summary(); 2718 } 2719 2720 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) { 2721 gclog_or_tty->print(", [Metaspace:"); 2722 if (PrintGCDetails && Verbose) { 2723 gclog_or_tty->print(" " SIZE_FORMAT 2724 "->" SIZE_FORMAT 2725 "(" SIZE_FORMAT ")", 2726 prev_metadata_used, 2727 used_bytes(), 2728 reserved_bytes()); 2729 } else { 2730 gclog_or_tty->print(" " SIZE_FORMAT "K" 2731 "->" SIZE_FORMAT "K" 2732 "(" SIZE_FORMAT "K)", 2733 prev_metadata_used/K, 2734 used_bytes()/K, 2735 reserved_bytes()/K); 2736 } 2737 2738 gclog_or_tty->print("]"); 2739 } 2740 2741 // This is printed when PrintGCDetails 2742 void MetaspaceAux::print_on(outputStream* out) { 2743 Metaspace::MetadataType nct = Metaspace::NonClassType; 2744 2745 out->print_cr(" Metaspace " 2746 "used " SIZE_FORMAT "K, " 2747 "capacity " SIZE_FORMAT "K, " 2748 "committed " SIZE_FORMAT "K, " 2749 "reserved " SIZE_FORMAT "K", 2750 used_bytes()/K, 2751 capacity_bytes()/K, 2752 committed_bytes()/K, 2753 reserved_bytes()/K); 2754 2755 if (Metaspace::using_class_space()) { 2756 Metaspace::MetadataType ct = Metaspace::ClassType; 2757 out->print_cr(" class space " 2758 "used " SIZE_FORMAT "K, " 2759 "capacity " SIZE_FORMAT "K, " 2760 "committed " SIZE_FORMAT "K, " 2761 "reserved " SIZE_FORMAT "K", 2762 used_bytes(ct)/K, 2763 capacity_bytes(ct)/K, 2764 committed_bytes(ct)/K, 2765 reserved_bytes(ct)/K); 2766 } 2767 } 2768 2769 // Print information for class space and data space separately. 2770 // This is almost the same as above. 2771 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) { 2772 size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype); 2773 size_t capacity_bytes = capacity_bytes_slow(mdtype); 2774 size_t used_bytes = used_bytes_slow(mdtype); 2775 size_t free_bytes = free_bytes_slow(mdtype); 2776 size_t used_and_free = used_bytes + free_bytes + 2777 free_chunks_capacity_bytes; 2778 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT 2779 "K + unused in chunks " SIZE_FORMAT "K + " 2780 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT 2781 "K capacity in allocated chunks " SIZE_FORMAT "K", 2782 used_bytes / K, 2783 free_bytes / K, 2784 free_chunks_capacity_bytes / K, 2785 used_and_free / K, 2786 capacity_bytes / K); 2787 // Accounting can only be correct if we got the values during a safepoint 2788 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong"); 2789 } 2790 2791 // Print total fragmentation for class metaspaces 2792 void MetaspaceAux::print_class_waste(outputStream* out) { 2793 assert(Metaspace::using_class_space(), "class metaspace not used"); 2794 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0; 2795 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0; 2796 ClassLoaderDataGraphMetaspaceIterator iter; 2797 while (iter.repeat()) { 2798 Metaspace* msp = iter.get_next(); 2799 if (msp != NULL) { 2800 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2801 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2802 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2803 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex); 2804 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2805 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex); 2806 cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2807 } 2808 } 2809 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2810 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2811 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2812 "large count " SIZE_FORMAT, 2813 cls_specialized_count, cls_specialized_waste, 2814 cls_small_count, cls_small_waste, 2815 cls_medium_count, cls_medium_waste, cls_humongous_count); 2816 } 2817 2818 // Print total fragmentation for data and class metaspaces separately 2819 void MetaspaceAux::print_waste(outputStream* out) { 2820 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0; 2821 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0; 2822 2823 ClassLoaderDataGraphMetaspaceIterator iter; 2824 while (iter.repeat()) { 2825 Metaspace* msp = iter.get_next(); 2826 if (msp != NULL) { 2827 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2828 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2829 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2830 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex); 2831 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2832 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex); 2833 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2834 } 2835 } 2836 out->print_cr("Total fragmentation waste (words) doesn't count free space"); 2837 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2838 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2839 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2840 "large count " SIZE_FORMAT, 2841 specialized_count, specialized_waste, small_count, 2842 small_waste, medium_count, medium_waste, humongous_count); 2843 if (Metaspace::using_class_space()) { 2844 print_class_waste(out); 2845 } 2846 } 2847 2848 // Dump global metaspace things from the end of ClassLoaderDataGraph 2849 void MetaspaceAux::dump(outputStream* out) { 2850 out->print_cr("All Metaspace:"); 2851 out->print("data space: "); print_on(out, Metaspace::NonClassType); 2852 out->print("class space: "); print_on(out, Metaspace::ClassType); 2853 print_waste(out); 2854 } 2855 2856 void MetaspaceAux::verify_free_chunks() { 2857 Metaspace::chunk_manager_metadata()->verify(); 2858 if (Metaspace::using_class_space()) { 2859 Metaspace::chunk_manager_class()->verify(); 2860 } 2861 } 2862 2863 void MetaspaceAux::verify_capacity() { 2864 #ifdef ASSERT 2865 size_t running_sum_capacity_bytes = capacity_bytes(); 2866 // For purposes of the running sum of capacity, verify against capacity 2867 size_t capacity_in_use_bytes = capacity_bytes_slow(); 2868 assert(running_sum_capacity_bytes == capacity_in_use_bytes, 2869 err_msg("capacity_words() * BytesPerWord " SIZE_FORMAT 2870 " capacity_bytes_slow()" SIZE_FORMAT, 2871 running_sum_capacity_bytes, capacity_in_use_bytes)); 2872 for (Metaspace::MetadataType i = Metaspace::ClassType; 2873 i < Metaspace:: MetadataTypeCount; 2874 i = (Metaspace::MetadataType)(i + 1)) { 2875 size_t capacity_in_use_bytes = capacity_bytes_slow(i); 2876 assert(capacity_bytes(i) == capacity_in_use_bytes, 2877 err_msg("capacity_bytes(%u) " SIZE_FORMAT 2878 " capacity_bytes_slow(%u)" SIZE_FORMAT, 2879 i, capacity_bytes(i), i, capacity_in_use_bytes)); 2880 } 2881 #endif 2882 } 2883 2884 void MetaspaceAux::verify_used() { 2885 #ifdef ASSERT 2886 size_t running_sum_used_bytes = used_bytes(); 2887 // For purposes of the running sum of used, verify against used 2888 size_t used_in_use_bytes = used_bytes_slow(); 2889 assert(used_bytes() == used_in_use_bytes, 2890 err_msg("used_bytes() " SIZE_FORMAT 2891 " used_bytes_slow()" SIZE_FORMAT, 2892 used_bytes(), used_in_use_bytes)); 2893 for (Metaspace::MetadataType i = Metaspace::ClassType; 2894 i < Metaspace:: MetadataTypeCount; 2895 i = (Metaspace::MetadataType)(i + 1)) { 2896 size_t used_in_use_bytes = used_bytes_slow(i); 2897 assert(used_bytes(i) == used_in_use_bytes, 2898 err_msg("used_bytes(%u) " SIZE_FORMAT 2899 " used_bytes_slow(%u)" SIZE_FORMAT, 2900 i, used_bytes(i), i, used_in_use_bytes)); 2901 } 2902 #endif 2903 } 2904 2905 void MetaspaceAux::verify_metrics() { 2906 verify_capacity(); 2907 verify_used(); 2908 } 2909 2910 2911 // Metaspace methods 2912 2913 size_t Metaspace::_first_chunk_word_size = 0; 2914 size_t Metaspace::_first_class_chunk_word_size = 0; 2915 2916 size_t Metaspace::_commit_alignment = 0; 2917 size_t Metaspace::_reserve_alignment = 0; 2918 2919 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) { 2920 initialize(lock, type); 2921 } 2922 2923 Metaspace::~Metaspace() { 2924 delete _vsm; 2925 if (using_class_space()) { 2926 delete _class_vsm; 2927 } 2928 } 2929 2930 VirtualSpaceList* Metaspace::_space_list = NULL; 2931 VirtualSpaceList* Metaspace::_class_space_list = NULL; 2932 2933 ChunkManager* Metaspace::_chunk_manager_metadata = NULL; 2934 ChunkManager* Metaspace::_chunk_manager_class = NULL; 2935 2936 #define VIRTUALSPACEMULTIPLIER 2 2937 2938 #ifdef _LP64 2939 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1); 2940 2941 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) { 2942 // Figure out the narrow_klass_base and the narrow_klass_shift. The 2943 // narrow_klass_base is the lower of the metaspace base and the cds base 2944 // (if cds is enabled). The narrow_klass_shift depends on the distance 2945 // between the lower base and higher address. 2946 address lower_base; 2947 address higher_address; 2948 #if INCLUDE_CDS 2949 if (UseSharedSpaces) { 2950 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 2951 (address)(metaspace_base + compressed_class_space_size())); 2952 lower_base = MIN2(metaspace_base, cds_base); 2953 } else 2954 #endif 2955 { 2956 higher_address = metaspace_base + compressed_class_space_size(); 2957 lower_base = metaspace_base; 2958 2959 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes; 2960 // If compressed class space fits in lower 32G, we don't need a base. 2961 if (higher_address <= (address)klass_encoding_max) { 2962 lower_base = 0; // Effectively lower base is zero. 2963 } 2964 } 2965 2966 Universe::set_narrow_klass_base(lower_base); 2967 2968 if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) { 2969 Universe::set_narrow_klass_shift(0); 2970 } else { 2971 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces"); 2972 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes); 2973 } 2974 } 2975 2976 #if INCLUDE_CDS 2977 // Return TRUE if the specified metaspace_base and cds_base are close enough 2978 // to work with compressed klass pointers. 2979 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) { 2980 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS"); 2981 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 2982 address lower_base = MIN2((address)metaspace_base, cds_base); 2983 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 2984 (address)(metaspace_base + compressed_class_space_size())); 2985 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax); 2986 } 2987 #endif 2988 2989 // Try to allocate the metaspace at the requested addr. 2990 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) { 2991 assert(using_class_space(), "called improperly"); 2992 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 2993 assert(compressed_class_space_size() < KlassEncodingMetaspaceMax, 2994 "Metaspace size is too big"); 2995 assert_is_ptr_aligned(requested_addr, _reserve_alignment); 2996 assert_is_ptr_aligned(cds_base, _reserve_alignment); 2997 assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment); 2998 2999 // Don't use large pages for the class space. 3000 bool large_pages = false; 3001 3002 #ifndef AARCH64 3003 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(), 3004 _reserve_alignment, 3005 large_pages, 3006 requested_addr); 3007 #else // AARCH64 3008 ReservedSpace metaspace_rs; 3009 3010 // Our compressed klass pointers may fit nicely into the lower 32 3011 // bits. 3012 if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) { 3013 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3014 _reserve_alignment, 3015 large_pages, 3016 requested_addr); 3017 } 3018 3019 if (! metaspace_rs.is_reserved()) { 3020 // Try to align metaspace so that we can decode a compressed klass 3021 // with a single MOVK instruction. We can do this iff the 3022 // compressed class base is a multiple of 4G. 3023 for (char *a = (char*)align_ptr_up(requested_addr, 4*G); 3024 a < (char*)(1024*G); 3025 a += 4*G) { 3026 3027 #if INCLUDE_CDS 3028 if (UseSharedSpaces 3029 && ! can_use_cds_with_metaspace_addr(a, cds_base)) { 3030 // We failed to find an aligned base that will reach. Fall 3031 // back to using our requested addr. 3032 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3033 _reserve_alignment, 3034 large_pages, 3035 requested_addr); 3036 break; 3037 } 3038 #endif 3039 3040 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3041 _reserve_alignment, 3042 large_pages, 3043 a); 3044 if (metaspace_rs.is_reserved()) 3045 break; 3046 } 3047 } 3048 3049 #endif // AARCH64 3050 3051 if (!metaspace_rs.is_reserved()) { 3052 #if INCLUDE_CDS 3053 if (UseSharedSpaces) { 3054 size_t increment = align_size_up(1*G, _reserve_alignment); 3055 3056 // Keep trying to allocate the metaspace, increasing the requested_addr 3057 // by 1GB each time, until we reach an address that will no longer allow 3058 // use of CDS with compressed klass pointers. 3059 char *addr = requested_addr; 3060 while (!metaspace_rs.is_reserved() && (addr + increment > addr) && 3061 can_use_cds_with_metaspace_addr(addr + increment, cds_base)) { 3062 addr = addr + increment; 3063 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3064 _reserve_alignment, large_pages, addr); 3065 } 3066 } 3067 #endif 3068 // If no successful allocation then try to allocate the space anywhere. If 3069 // that fails then OOM doom. At this point we cannot try allocating the 3070 // metaspace as if UseCompressedClassPointers is off because too much 3071 // initialization has happened that depends on UseCompressedClassPointers. 3072 // So, UseCompressedClassPointers cannot be turned off at this point. 3073 if (!metaspace_rs.is_reserved()) { 3074 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3075 _reserve_alignment, large_pages); 3076 if (!metaspace_rs.is_reserved()) { 3077 vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes", 3078 compressed_class_space_size())); 3079 } 3080 } 3081 } 3082 3083 // If we got here then the metaspace got allocated. 3084 MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass); 3085 3086 #if INCLUDE_CDS 3087 // Verify that we can use shared spaces. Otherwise, turn off CDS. 3088 if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) { 3089 FileMapInfo::stop_sharing_and_unmap( 3090 "Could not allocate metaspace at a compatible address"); 3091 } 3092 #endif 3093 set_narrow_klass_base_and_shift((address)metaspace_rs.base(), 3094 UseSharedSpaces ? (address)cds_base : 0); 3095 3096 initialize_class_space(metaspace_rs); 3097 3098 if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) { 3099 gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d", 3100 p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift()); 3101 gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT, 3102 compressed_class_space_size(), p2i(metaspace_rs.base()), p2i(requested_addr)); 3103 } 3104 } 3105 3106 // For UseCompressedClassPointers the class space is reserved above the top of 3107 // the Java heap. The argument passed in is at the base of the compressed space. 3108 void Metaspace::initialize_class_space(ReservedSpace rs) { 3109 // The reserved space size may be bigger because of alignment, esp with UseLargePages 3110 assert(rs.size() >= CompressedClassSpaceSize, 3111 err_msg(SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize)); 3112 assert(using_class_space(), "Must be using class space"); 3113 _class_space_list = new VirtualSpaceList(rs); 3114 _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk); 3115 3116 if (!_class_space_list->initialization_succeeded()) { 3117 vm_exit_during_initialization("Failed to setup compressed class space virtual space list."); 3118 } 3119 } 3120 3121 #endif 3122 3123 void Metaspace::ergo_initialize() { 3124 if (DumpSharedSpaces) { 3125 // Using large pages when dumping the shared archive is currently not implemented. 3126 FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false); 3127 } 3128 3129 size_t page_size = os::vm_page_size(); 3130 if (UseLargePages && UseLargePagesInMetaspace) { 3131 page_size = os::large_page_size(); 3132 } 3133 3134 _commit_alignment = page_size; 3135 _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity()); 3136 3137 // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will 3138 // override if MaxMetaspaceSize was set on the command line or not. 3139 // This information is needed later to conform to the specification of the 3140 // java.lang.management.MemoryUsage API. 3141 // 3142 // Ideally, we would be able to set the default value of MaxMetaspaceSize in 3143 // globals.hpp to the aligned value, but this is not possible, since the 3144 // alignment depends on other flags being parsed. 3145 MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment); 3146 3147 if (MetaspaceSize > MaxMetaspaceSize) { 3148 MetaspaceSize = MaxMetaspaceSize; 3149 } 3150 3151 MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment); 3152 3153 assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize"); 3154 3155 if (MetaspaceSize < 256*K) { 3156 vm_exit_during_initialization("Too small initial Metaspace size"); 3157 } 3158 3159 MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment); 3160 MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment); 3161 3162 CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment); 3163 set_compressed_class_space_size(CompressedClassSpaceSize); 3164 } 3165 3166 void Metaspace::global_initialize() { 3167 MetaspaceGC::initialize(); 3168 3169 // Initialize the alignment for shared spaces. 3170 int max_alignment = os::vm_allocation_granularity(); 3171 size_t cds_total = 0; 3172 3173 MetaspaceShared::set_max_alignment(max_alignment); 3174 3175 if (DumpSharedSpaces) { 3176 #if INCLUDE_CDS 3177 MetaspaceShared::estimate_regions_size(); 3178 3179 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment); 3180 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment); 3181 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment); 3182 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment); 3183 3184 // make sure SharedReadOnlySize and SharedReadWriteSize are not less than 3185 // the minimum values. 3186 if (SharedReadOnlySize < MetaspaceShared::min_ro_size){ 3187 report_out_of_shared_space(SharedReadOnly); 3188 } 3189 3190 if (SharedReadWriteSize < MetaspaceShared::min_rw_size){ 3191 report_out_of_shared_space(SharedReadWrite); 3192 } 3193 3194 // the min_misc_data_size and min_misc_code_size estimates are based on 3195 // MetaspaceShared::generate_vtable_methods(). 3196 // The minimum size only accounts for the vtable methods. Any size less than the 3197 // minimum required size would cause vm crash when allocating the vtable methods. 3198 uint min_misc_data_size = align_size_up( 3199 MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size * sizeof(void*), max_alignment); 3200 3201 if (SharedMiscDataSize < min_misc_data_size) { 3202 report_out_of_shared_space(SharedMiscData); 3203 } 3204 3205 uintx min_misc_code_size = align_size_up( 3206 (MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size) * 3207 (sizeof(void*) + MetaspaceShared::vtbl_method_size) + MetaspaceShared::vtbl_common_code_size, 3208 max_alignment); 3209 3210 if (SharedMiscCodeSize < min_misc_code_size) { 3211 report_out_of_shared_space(SharedMiscCode); 3212 } 3213 3214 // Initialize with the sum of the shared space sizes. The read-only 3215 // and read write metaspace chunks will be allocated out of this and the 3216 // remainder is the misc code and data chunks. 3217 cds_total = FileMapInfo::shared_spaces_size(); 3218 cds_total = align_size_up(cds_total, _reserve_alignment); 3219 _space_list = new VirtualSpaceList(cds_total/wordSize); 3220 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); 3221 3222 if (!_space_list->initialization_succeeded()) { 3223 vm_exit_during_initialization("Unable to dump shared archive.", NULL); 3224 } 3225 3226 #ifdef _LP64 3227 if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) { 3228 vm_exit_during_initialization("Unable to dump shared archive.", 3229 err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space (" 3230 SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed " 3231 "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(), 3232 cds_total + compressed_class_space_size(), UnscaledClassSpaceMax)); 3233 } 3234 3235 // Set the compressed klass pointer base so that decoding of these pointers works 3236 // properly when creating the shared archive. 3237 assert(UseCompressedOops && UseCompressedClassPointers, 3238 "UseCompressedOops and UseCompressedClassPointers must be set"); 3239 Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom()); 3240 if (TraceMetavirtualspaceAllocation && Verbose) { 3241 gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT, 3242 p2i(_space_list->current_virtual_space()->bottom())); 3243 } 3244 3245 Universe::set_narrow_klass_shift(0); 3246 #endif // _LP64 3247 #endif // INCLUDE_CDS 3248 } else { 3249 #if INCLUDE_CDS 3250 // If using shared space, open the file that contains the shared space 3251 // and map in the memory before initializing the rest of metaspace (so 3252 // the addresses don't conflict) 3253 address cds_address = NULL; 3254 if (UseSharedSpaces) { 3255 FileMapInfo* mapinfo = new FileMapInfo(); 3256 3257 // Open the shared archive file, read and validate the header. If 3258 // initialization fails, shared spaces [UseSharedSpaces] are 3259 // disabled and the file is closed. 3260 // Map in spaces now also 3261 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) { 3262 cds_total = FileMapInfo::shared_spaces_size(); 3263 cds_address = (address)mapinfo->region_base(0); 3264 } else { 3265 assert(!mapinfo->is_open() && !UseSharedSpaces, 3266 "archive file not closed or shared spaces not disabled."); 3267 } 3268 } 3269 #endif // INCLUDE_CDS 3270 #ifdef _LP64 3271 // If UseCompressedClassPointers is set then allocate the metaspace area 3272 // above the heap and above the CDS area (if it exists). 3273 if (using_class_space()) { 3274 if (UseSharedSpaces) { 3275 #if INCLUDE_CDS 3276 char* cds_end = (char*)(cds_address + cds_total); 3277 cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment); 3278 allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address); 3279 #endif 3280 } else { 3281 char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment); 3282 allocate_metaspace_compressed_klass_ptrs(base, 0); 3283 } 3284 } 3285 #endif // _LP64 3286 3287 // Initialize these before initializing the VirtualSpaceList 3288 _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord; 3289 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size); 3290 // Make the first class chunk bigger than a medium chunk so it's not put 3291 // on the medium chunk list. The next chunk will be small and progress 3292 // from there. This size calculated by -version. 3293 _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6, 3294 (CompressedClassSpaceSize/BytesPerWord)*2); 3295 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size); 3296 // Arbitrarily set the initial virtual space to a multiple 3297 // of the boot class loader size. 3298 size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size; 3299 word_size = align_size_up(word_size, Metaspace::reserve_alignment_words()); 3300 3301 // Initialize the list of virtual spaces. 3302 _space_list = new VirtualSpaceList(word_size); 3303 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); 3304 3305 if (!_space_list->initialization_succeeded()) { 3306 vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL); 3307 } 3308 } 3309 3310 _tracer = new MetaspaceTracer(); 3311 } 3312 3313 void Metaspace::post_initialize() { 3314 MetaspaceGC::post_initialize(); 3315 } 3316 3317 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype, 3318 size_t chunk_word_size, 3319 size_t chunk_bunch) { 3320 // Get a chunk from the chunk freelist 3321 Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size); 3322 if (chunk != NULL) { 3323 return chunk; 3324 } 3325 3326 return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch); 3327 } 3328 3329 void Metaspace::initialize(Mutex* lock, MetaspaceType type) { 3330 3331 assert(space_list() != NULL, 3332 "Metadata VirtualSpaceList has not been initialized"); 3333 assert(chunk_manager_metadata() != NULL, 3334 "Metadata ChunkManager has not been initialized"); 3335 3336 _vsm = new SpaceManager(NonClassType, lock); 3337 if (_vsm == NULL) { 3338 return; 3339 } 3340 size_t word_size; 3341 size_t class_word_size; 3342 vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size); 3343 3344 if (using_class_space()) { 3345 assert(class_space_list() != NULL, 3346 "Class VirtualSpaceList has not been initialized"); 3347 assert(chunk_manager_class() != NULL, 3348 "Class ChunkManager has not been initialized"); 3349 3350 // Allocate SpaceManager for classes. 3351 _class_vsm = new SpaceManager(ClassType, lock); 3352 if (_class_vsm == NULL) { 3353 return; 3354 } 3355 } 3356 3357 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 3358 3359 // Allocate chunk for metadata objects 3360 Metachunk* new_chunk = get_initialization_chunk(NonClassType, 3361 word_size, 3362 vsm()->medium_chunk_bunch()); 3363 // For dumping shared archive, report error if allocation has failed. 3364 if (DumpSharedSpaces && new_chunk == NULL) { 3365 report_insufficient_metaspace(MetaspaceAux::committed_bytes() + word_size * BytesPerWord); 3366 } 3367 assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks"); 3368 if (new_chunk != NULL) { 3369 // Add to this manager's list of chunks in use and current_chunk(). 3370 vsm()->add_chunk(new_chunk, true); 3371 } 3372 3373 // Allocate chunk for class metadata objects 3374 if (using_class_space()) { 3375 Metachunk* class_chunk = get_initialization_chunk(ClassType, 3376 class_word_size, 3377 class_vsm()->medium_chunk_bunch()); 3378 if (class_chunk != NULL) { 3379 class_vsm()->add_chunk(class_chunk, true); 3380 } else { 3381 // For dumping shared archive, report error if allocation has failed. 3382 if (DumpSharedSpaces) { 3383 report_insufficient_metaspace(MetaspaceAux::committed_bytes() + class_word_size * BytesPerWord); 3384 } 3385 } 3386 } 3387 3388 _alloc_record_head = NULL; 3389 _alloc_record_tail = NULL; 3390 } 3391 3392 size_t Metaspace::align_word_size_up(size_t word_size) { 3393 size_t byte_size = word_size * wordSize; 3394 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize; 3395 } 3396 3397 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) { 3398 // DumpSharedSpaces doesn't use class metadata area (yet) 3399 // Also, don't use class_vsm() unless UseCompressedClassPointers is true. 3400 if (is_class_space_allocation(mdtype)) { 3401 return class_vsm()->allocate(word_size); 3402 } else { 3403 return vsm()->allocate(word_size); 3404 } 3405 } 3406 3407 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) { 3408 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord); 3409 assert(delta_bytes > 0, "Must be"); 3410 3411 size_t before = 0; 3412 size_t after = 0; 3413 MetaWord* res; 3414 bool incremented; 3415 3416 // Each thread increments the HWM at most once. Even if the thread fails to increment 3417 // the HWM, an allocation is still attempted. This is because another thread must then 3418 // have incremented the HWM and therefore the allocation might still succeed. 3419 do { 3420 incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before); 3421 res = allocate(word_size, mdtype); 3422 } while (!incremented && res == NULL); 3423 3424 if (incremented) { 3425 tracer()->report_gc_threshold(before, after, 3426 MetaspaceGCThresholdUpdater::ExpandAndAllocate); 3427 if (PrintGCDetails && Verbose) { 3428 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT 3429 " to " SIZE_FORMAT, before, after); 3430 } 3431 } 3432 3433 return res; 3434 } 3435 3436 // Space allocated in the Metaspace. This may 3437 // be across several metadata virtual spaces. 3438 char* Metaspace::bottom() const { 3439 assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces"); 3440 return (char*)vsm()->current_chunk()->bottom(); 3441 } 3442 3443 size_t Metaspace::used_words_slow(MetadataType mdtype) const { 3444 if (mdtype == ClassType) { 3445 return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0; 3446 } else { 3447 return vsm()->sum_used_in_chunks_in_use(); // includes overhead! 3448 } 3449 } 3450 3451 size_t Metaspace::free_words_slow(MetadataType mdtype) const { 3452 if (mdtype == ClassType) { 3453 return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0; 3454 } else { 3455 return vsm()->sum_free_in_chunks_in_use(); 3456 } 3457 } 3458 3459 // Space capacity in the Metaspace. It includes 3460 // space in the list of chunks from which allocations 3461 // have been made. Don't include space in the global freelist and 3462 // in the space available in the dictionary which 3463 // is already counted in some chunk. 3464 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const { 3465 if (mdtype == ClassType) { 3466 return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0; 3467 } else { 3468 return vsm()->sum_capacity_in_chunks_in_use(); 3469 } 3470 } 3471 3472 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const { 3473 return used_words_slow(mdtype) * BytesPerWord; 3474 } 3475 3476 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const { 3477 return capacity_words_slow(mdtype) * BytesPerWord; 3478 } 3479 3480 size_t Metaspace::allocated_blocks_bytes() const { 3481 return vsm()->allocated_blocks_bytes() + 3482 (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0); 3483 } 3484 3485 size_t Metaspace::allocated_chunks_bytes() const { 3486 return vsm()->allocated_chunks_bytes() + 3487 (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0); 3488 } 3489 3490 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) { 3491 assert(!SafepointSynchronize::is_at_safepoint() 3492 || Thread::current()->is_VM_thread(), "should be the VM thread"); 3493 3494 if (DumpSharedSpaces && PrintSharedSpaces) { 3495 record_deallocation(ptr, vsm()->get_raw_word_size(word_size)); 3496 } 3497 3498 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag); 3499 3500 if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) { 3501 // Dark matter. Too small for dictionary. 3502 #ifdef ASSERT 3503 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5); 3504 #endif 3505 return; 3506 } 3507 if (is_class && using_class_space()) { 3508 class_vsm()->deallocate(ptr, word_size); 3509 } else { 3510 vsm()->deallocate(ptr, word_size); 3511 } 3512 } 3513 3514 3515 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, 3516 bool read_only, MetaspaceObj::Type type, TRAPS) { 3517 if (HAS_PENDING_EXCEPTION) { 3518 assert(false, "Should not allocate with exception pending"); 3519 return NULL; // caller does a CHECK_NULL too 3520 } 3521 3522 assert(loader_data != NULL, "Should never pass around a NULL loader_data. " 3523 "ClassLoaderData::the_null_class_loader_data() should have been used."); 3524 3525 // Allocate in metaspaces without taking out a lock, because it deadlocks 3526 // with the SymbolTable_lock. Dumping is single threaded for now. We'll have 3527 // to revisit this for application class data sharing. 3528 if (DumpSharedSpaces) { 3529 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity"); 3530 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace(); 3531 MetaWord* result = space->allocate(word_size, NonClassType); 3532 if (result == NULL) { 3533 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite); 3534 } 3535 if (PrintSharedSpaces) { 3536 space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size)); 3537 } 3538 3539 // Zero initialize. 3540 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0); 3541 3542 return result; 3543 } 3544 3545 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType; 3546 3547 // Try to allocate metadata. 3548 MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); 3549 3550 if (result == NULL) { 3551 tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype); 3552 3553 // Allocation failed. 3554 if (is_init_completed()) { 3555 // Only start a GC if the bootstrapping has completed. 3556 3557 // Try to clean out some memory and retry. 3558 result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation( 3559 loader_data, word_size, mdtype); 3560 } 3561 } 3562 3563 if (result == NULL) { 3564 report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL); 3565 } 3566 3567 // Zero initialize. 3568 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0); 3569 3570 return result; 3571 } 3572 3573 size_t Metaspace::class_chunk_size(size_t word_size) { 3574 assert(using_class_space(), "Has to use class space"); 3575 return class_vsm()->calc_chunk_size(word_size); 3576 } 3577 3578 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) { 3579 tracer()->report_metadata_oom(loader_data, word_size, type, mdtype); 3580 3581 // If result is still null, we are out of memory. 3582 if (Verbose && TraceMetadataChunkAllocation) { 3583 gclog_or_tty->print_cr("Metaspace allocation failed for size " 3584 SIZE_FORMAT, word_size); 3585 if (loader_data->metaspace_or_null() != NULL) { 3586 loader_data->dump(gclog_or_tty); 3587 } 3588 MetaspaceAux::dump(gclog_or_tty); 3589 } 3590 3591 bool out_of_compressed_class_space = false; 3592 if (is_class_space_allocation(mdtype)) { 3593 Metaspace* metaspace = loader_data->metaspace_non_null(); 3594 out_of_compressed_class_space = 3595 MetaspaceAux::committed_bytes(Metaspace::ClassType) + 3596 (metaspace->class_chunk_size(word_size) * BytesPerWord) > 3597 CompressedClassSpaceSize; 3598 } 3599 3600 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support 3601 const char* space_string = out_of_compressed_class_space ? 3602 "Compressed class space" : "Metaspace"; 3603 3604 report_java_out_of_memory(space_string); 3605 3606 if (JvmtiExport::should_post_resource_exhausted()) { 3607 JvmtiExport::post_resource_exhausted( 3608 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, 3609 space_string); 3610 } 3611 3612 if (!is_init_completed()) { 3613 vm_exit_during_initialization("OutOfMemoryError", space_string); 3614 } 3615 3616 if (out_of_compressed_class_space) { 3617 THROW_OOP(Universe::out_of_memory_error_class_metaspace()); 3618 } else { 3619 THROW_OOP(Universe::out_of_memory_error_metaspace()); 3620 } 3621 } 3622 3623 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) { 3624 switch (mdtype) { 3625 case Metaspace::ClassType: return "Class"; 3626 case Metaspace::NonClassType: return "Metadata"; 3627 default: 3628 assert(false, err_msg("Got bad mdtype: %d", (int) mdtype)); 3629 return NULL; 3630 } 3631 } 3632 3633 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) { 3634 assert(DumpSharedSpaces, "sanity"); 3635 3636 int byte_size = (int)word_size * HeapWordSize; 3637 AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size); 3638 3639 if (_alloc_record_head == NULL) { 3640 _alloc_record_head = _alloc_record_tail = rec; 3641 } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) { 3642 _alloc_record_tail->_next = rec; 3643 _alloc_record_tail = rec; 3644 } else { 3645 // slow linear search, but this doesn't happen that often, and only when dumping 3646 for (AllocRecord *old = _alloc_record_head; old; old = old->_next) { 3647 if (old->_ptr == ptr) { 3648 assert(old->_type == MetaspaceObj::DeallocatedType, "sanity"); 3649 int remain_bytes = old->_byte_size - byte_size; 3650 assert(remain_bytes >= 0, "sanity"); 3651 old->_type = type; 3652 3653 if (remain_bytes == 0) { 3654 delete(rec); 3655 } else { 3656 address remain_ptr = address(ptr) + byte_size; 3657 rec->_ptr = remain_ptr; 3658 rec->_byte_size = remain_bytes; 3659 rec->_type = MetaspaceObj::DeallocatedType; 3660 rec->_next = old->_next; 3661 old->_byte_size = byte_size; 3662 old->_next = rec; 3663 } 3664 return; 3665 } 3666 } 3667 assert(0, "reallocating a freed pointer that was not recorded"); 3668 } 3669 } 3670 3671 void Metaspace::record_deallocation(void* ptr, size_t word_size) { 3672 assert(DumpSharedSpaces, "sanity"); 3673 3674 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) { 3675 if (rec->_ptr == ptr) { 3676 assert(rec->_byte_size == (int)word_size * HeapWordSize, "sanity"); 3677 rec->_type = MetaspaceObj::DeallocatedType; 3678 return; 3679 } 3680 } 3681 3682 assert(0, "deallocating a pointer that was not recorded"); 3683 } 3684 3685 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) { 3686 assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces"); 3687 3688 address last_addr = (address)bottom(); 3689 3690 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) { 3691 address ptr = rec->_ptr; 3692 if (last_addr < ptr) { 3693 closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr); 3694 } 3695 closure->doit(ptr, rec->_type, rec->_byte_size); 3696 last_addr = ptr + rec->_byte_size; 3697 } 3698 3699 address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType); 3700 if (last_addr < top) { 3701 closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr); 3702 } 3703 } 3704 3705 void Metaspace::purge(MetadataType mdtype) { 3706 get_space_list(mdtype)->purge(get_chunk_manager(mdtype)); 3707 } 3708 3709 void Metaspace::purge() { 3710 MutexLockerEx cl(SpaceManager::expand_lock(), 3711 Mutex::_no_safepoint_check_flag); 3712 purge(NonClassType); 3713 if (using_class_space()) { 3714 purge(ClassType); 3715 } 3716 } 3717 3718 void Metaspace::print_on(outputStream* out) const { 3719 // Print both class virtual space counts and metaspace. 3720 if (Verbose) { 3721 vsm()->print_on(out); 3722 if (using_class_space()) { 3723 class_vsm()->print_on(out); 3724 } 3725 } 3726 } 3727 3728 bool Metaspace::contains(const void* ptr) { 3729 if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) { 3730 return true; 3731 } 3732 3733 if (using_class_space() && get_space_list(ClassType)->contains(ptr)) { 3734 return true; 3735 } 3736 3737 return get_space_list(NonClassType)->contains(ptr); 3738 } 3739 3740 void Metaspace::verify() { 3741 vsm()->verify(); 3742 if (using_class_space()) { 3743 class_vsm()->verify(); 3744 } 3745 } 3746 3747 void Metaspace::dump(outputStream* const out) const { 3748 out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm())); 3749 vsm()->dump(out); 3750 if (using_class_space()) { 3751 out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm())); 3752 class_vsm()->dump(out); 3753 } 3754 } 3755 3756 /////////////// Unit tests /////////////// 3757 3758 #ifndef PRODUCT 3759 3760 class TestMetaspaceAuxTest : AllStatic { 3761 public: 3762 static void test_reserved() { 3763 size_t reserved = MetaspaceAux::reserved_bytes(); 3764 3765 assert(reserved > 0, "assert"); 3766 3767 size_t committed = MetaspaceAux::committed_bytes(); 3768 assert(committed <= reserved, "assert"); 3769 3770 size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType); 3771 assert(reserved_metadata > 0, "assert"); 3772 assert(reserved_metadata <= reserved, "assert"); 3773 3774 if (UseCompressedClassPointers) { 3775 size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType); 3776 assert(reserved_class > 0, "assert"); 3777 assert(reserved_class < reserved, "assert"); 3778 } 3779 } 3780 3781 static void test_committed() { 3782 size_t committed = MetaspaceAux::committed_bytes(); 3783 3784 assert(committed > 0, "assert"); 3785 3786 size_t reserved = MetaspaceAux::reserved_bytes(); 3787 assert(committed <= reserved, "assert"); 3788 3789 size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType); 3790 assert(committed_metadata > 0, "assert"); 3791 assert(committed_metadata <= committed, "assert"); 3792 3793 if (UseCompressedClassPointers) { 3794 size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType); 3795 assert(committed_class > 0, "assert"); 3796 assert(committed_class < committed, "assert"); 3797 } 3798 } 3799 3800 static void test_virtual_space_list_large_chunk() { 3801 VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity()); 3802 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 3803 // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be 3804 // vm_allocation_granularity aligned on Windows. 3805 size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord)); 3806 large_size += (os::vm_page_size()/BytesPerWord); 3807 vs_list->get_new_chunk(large_size, large_size, 0); 3808 } 3809 3810 static void test() { 3811 test_reserved(); 3812 test_committed(); 3813 test_virtual_space_list_large_chunk(); 3814 } 3815 }; 3816 3817 void TestMetaspaceAux_test() { 3818 TestMetaspaceAuxTest::test(); 3819 } 3820 3821 class TestVirtualSpaceNodeTest { 3822 static void chunk_up(size_t words_left, size_t& num_medium_chunks, 3823 size_t& num_small_chunks, 3824 size_t& num_specialized_chunks) { 3825 num_medium_chunks = words_left / MediumChunk; 3826 words_left = words_left % MediumChunk; 3827 3828 num_small_chunks = words_left / SmallChunk; 3829 words_left = words_left % SmallChunk; 3830 // how many specialized chunks can we get? 3831 num_specialized_chunks = words_left / SpecializedChunk; 3832 assert(words_left % SpecializedChunk == 0, "should be nothing left"); 3833 } 3834 3835 public: 3836 static void test() { 3837 MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 3838 const size_t vsn_test_size_words = MediumChunk * 4; 3839 const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord; 3840 3841 // The chunk sizes must be multiples of eachother, or this will fail 3842 STATIC_ASSERT(MediumChunk % SmallChunk == 0); 3843 STATIC_ASSERT(SmallChunk % SpecializedChunk == 0); 3844 3845 { // No committed memory in VSN 3846 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); 3847 VirtualSpaceNode vsn(vsn_test_size_bytes); 3848 vsn.initialize(); 3849 vsn.retire(&cm); 3850 assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN"); 3851 } 3852 3853 { // All of VSN is committed, half is used by chunks 3854 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); 3855 VirtualSpaceNode vsn(vsn_test_size_bytes); 3856 vsn.initialize(); 3857 vsn.expand_by(vsn_test_size_words, vsn_test_size_words); 3858 vsn.get_chunk_vs(MediumChunk); 3859 vsn.get_chunk_vs(MediumChunk); 3860 vsn.retire(&cm); 3861 assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks"); 3862 assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up"); 3863 } 3864 3865 const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord; 3866 // This doesn't work for systems with vm_page_size >= 16K. 3867 if (page_chunks < MediumChunk) { 3868 // 4 pages of VSN is committed, some is used by chunks 3869 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); 3870 VirtualSpaceNode vsn(vsn_test_size_bytes); 3871 3872 vsn.initialize(); 3873 vsn.expand_by(page_chunks, page_chunks); 3874 vsn.get_chunk_vs(SmallChunk); 3875 vsn.get_chunk_vs(SpecializedChunk); 3876 vsn.retire(&cm); 3877 3878 // committed - used = words left to retire 3879 const size_t words_left = page_chunks - SmallChunk - SpecializedChunk; 3880 3881 size_t num_medium_chunks, num_small_chunks, num_spec_chunks; 3882 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks); 3883 3884 assert(num_medium_chunks == 0, "should not get any medium chunks"); 3885 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks"); 3886 assert(cm.sum_free_chunks() == words_left, "sizes should add up"); 3887 } 3888 3889 { // Half of VSN is committed, a humongous chunk is used 3890 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); 3891 VirtualSpaceNode vsn(vsn_test_size_bytes); 3892 vsn.initialize(); 3893 vsn.expand_by(MediumChunk * 2, MediumChunk * 2); 3894 vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk 3895 vsn.retire(&cm); 3896 3897 const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk); 3898 size_t num_medium_chunks, num_small_chunks, num_spec_chunks; 3899 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks); 3900 3901 assert(num_medium_chunks == 0, "should not get any medium chunks"); 3902 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks"); 3903 assert(cm.sum_free_chunks() == words_left, "sizes should add up"); 3904 } 3905 3906 } 3907 3908 #define assert_is_available_positive(word_size) \ 3909 assert(vsn.is_available(word_size), \ 3910 err_msg(#word_size ": " PTR_FORMAT " bytes were not available in " \ 3911 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \ 3912 (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()))); 3913 3914 #define assert_is_available_negative(word_size) \ 3915 assert(!vsn.is_available(word_size), \ 3916 err_msg(#word_size ": " PTR_FORMAT " bytes should not be available in " \ 3917 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \ 3918 (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()))); 3919 3920 static void test_is_available_positive() { 3921 // Reserve some memory. 3922 VirtualSpaceNode vsn(os::vm_allocation_granularity()); 3923 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); 3924 3925 // Commit some memory. 3926 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord; 3927 bool expanded = vsn.expand_by(commit_word_size, commit_word_size); 3928 assert(expanded, "Failed to commit"); 3929 3930 // Check that is_available accepts the committed size. 3931 assert_is_available_positive(commit_word_size); 3932 3933 // Check that is_available accepts half the committed size. 3934 size_t expand_word_size = commit_word_size / 2; 3935 assert_is_available_positive(expand_word_size); 3936 } 3937 3938 static void test_is_available_negative() { 3939 // Reserve some memory. 3940 VirtualSpaceNode vsn(os::vm_allocation_granularity()); 3941 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); 3942 3943 // Commit some memory. 3944 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord; 3945 bool expanded = vsn.expand_by(commit_word_size, commit_word_size); 3946 assert(expanded, "Failed to commit"); 3947 3948 // Check that is_available doesn't accept a too large size. 3949 size_t two_times_commit_word_size = commit_word_size * 2; 3950 assert_is_available_negative(two_times_commit_word_size); 3951 } 3952 3953 static void test_is_available_overflow() { 3954 // Reserve some memory. 3955 VirtualSpaceNode vsn(os::vm_allocation_granularity()); 3956 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); 3957 3958 // Commit some memory. 3959 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord; 3960 bool expanded = vsn.expand_by(commit_word_size, commit_word_size); 3961 assert(expanded, "Failed to commit"); 3962 3963 // Calculate a size that will overflow the virtual space size. 3964 void* virtual_space_max = (void*)(uintptr_t)-1; 3965 size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1); 3966 size_t overflow_size = bottom_to_max + BytesPerWord; 3967 size_t overflow_word_size = overflow_size / BytesPerWord; 3968 3969 // Check that is_available can handle the overflow. 3970 assert_is_available_negative(overflow_word_size); 3971 } 3972 3973 static void test_is_available() { 3974 TestVirtualSpaceNodeTest::test_is_available_positive(); 3975 TestVirtualSpaceNodeTest::test_is_available_negative(); 3976 TestVirtualSpaceNodeTest::test_is_available_overflow(); 3977 } 3978 }; 3979 3980 void TestVirtualSpaceNode_test() { 3981 TestVirtualSpaceNodeTest::test(); 3982 TestVirtualSpaceNodeTest::test_is_available(); 3983 } 3984 #endif