/*
 * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to true to enable slow integrity checking of the free chunk lists.
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bounds");
  return (ChunkIndex) (i+1);
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;
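
// The sizes in the ChunkSizes enum above are word counts, so on a 64-bit
// VM (BytesPerWord == 8) a SpecializedChunk of 128 words is 1K and a
// MediumChunk of 8 * K words is 64K. Requests larger than a medium chunk
// are treated as humongous and are tracked in a dictionary rather than in
// one of the fixed-size free lists.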

// Manages the global free lists of chunks.
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free lists of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  // Humongous chunks are kept in a dictionary keyed by size.
  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Totals for the chunks in all lists of this ChunkManager.
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
           _free_chunks_total > 0,
           "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // Allocate (i.e. remove) a chunk from the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  static ChunkIndex list_index(size_t size);

  // Remove the chunk from its freelist.  It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks lists.
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks lists.
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Remove from a list by size.  Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);

#define index_bounds_check(index)                 \
  assert(index == SpecializedIndex ||             \
         index == SmallIndex ||                   \
         index == MediumIndex ||                  \
         index == HumongousIndex, "Bad index: %d", (int) index)

  size_t num_free_chunks(ChunkIndex index) const {
    index_bounds_check(index);

    if (index == HumongousIndex) {
      return _humongous_dictionary.total_free_blocks();
    }

    ssize_t count = _free_chunks[index].count();
    return count == -1 ? 0 : (size_t) count;
  }

  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
    index_bounds_check(index);

    size_t word_size = 0;
    if (index == HumongousIndex) {
      word_size = _humongous_dictionary.total_size();
    } else {
      const size_t size_per_chunk_in_words = _free_chunks[index].size();
      word_size = size_per_chunk_in_words * num_free_chunks(index);
    }

    return word_size * BytesPerWord;
  }

  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
                                         num_free_chunks(SmallIndex),
                                         num_free_chunks(MediumIndex),
                                         num_free_chunks(HumongousIndex),
                                         size_free_chunks_in_bytes(SpecializedIndex),
                                         size_free_chunks_in_bytes(SmallIndex),
                                         size_free_chunks_in_bytes(MediumIndex),
                                         size_free_chunks_in_bytes(HumongousIndex));
  }

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;
};

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  BlockTreeDictionary* const _dictionary;

  // Only allocate and split from the freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get a block from, and return a block to, the free list.
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() { return dictionary()->total_size(); }

  void print_on(outputStream* st) const;
};
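
// The WasteMultiplier policy means get_block() never splits a block that is
// more than four times larger than the request: for example, a 20-word
// request is not satisfied from a 100-word block (100 > 4 * 20); the block
// goes back into the dictionary and the caller falls back to allocating
// fresh space from a chunk.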

// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  // Address of the next available space in _virtual_space.
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;

 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uintx container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to VirtualSpace.
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always adds up because all the chunk sizes are multiples of
  // the smallest chunk size.
  void retire(ChunkManager* chunk_manager);
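
  // As a worked example for the non-class space: if 9 * K words of
  // committed space remain, retire() carves one 8 * K-word MediumChunk
  // and then two 512-word SmallChunks, leaving zero free words; the final
  // assert in retire() relies on this "always adds up" property.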

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

#define assert_is_ptr_aligned(ptr, alignment) \
  assert(is_ptr_aligned(ptr, alignment),      \
         PTR_FORMAT " is not aligned to "     \
         SIZE_FORMAT, p2i(ptr), alignment)

#define assert_is_size_aligned(size, alignment) \
  assert(is_size_aligned(size, alignment),      \
         SIZE_FORMAT " is not aligned to "      \
         SIZE_FORMAT, size, alignment)


// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// byte_size is the size of the associated virtualspace.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

#if INCLUDE_CDS
  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // at a configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get a mmap region anywhere if the SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    MetaspaceShared::initialize_shared_rs(&_rs);
  } else
#endif
  {
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uintx VirtualSpaceNode::container_count_slow() {
  uintx count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

  // Chunk up the unused committed space in the current
  // virtual space and add the chunks to the free list.
  void retire_current_virtual_space();

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  bool contains(const void* ptr);

  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;

// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Maximum number of small chunks to allocate to a SpaceManager
  static uint const _small_chunk_limit;

  // Sum of all space in allocated blocks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  bool is_class() { return _mdtype == Metaspace::ClassType; }

  // Accessors
  size_t specialized_chunk_size() { return (size_t) is_class() ? ClassSpecializedChunk : SpecializedChunk; }
  size_t small_chunk_size()       { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
  size_t medium_chunk_size()      { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
  size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }

  size_t smallest_chunk_size()    { return specialized_chunk_size(); }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks
  // by the given size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager.  That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in-use by this SpaceManager.
  void dec_total_from_size_metrics();
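
  // The initial chunk sizes chosen below depend on the kind of metaspace
  // (see the switch in get_initial_chunk_sizes() later in this file):
  // boot-class-loader metaspaces start with large chunks, reflection and
  // anonymous-class metaspaces start with the smallest (specialized)
  // chunks, and everything else starts with small chunks.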

  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);
  // Allocates a block from a small chunk
  MetaWord* get_small_chunk_and_allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // returns the chunk size to use when expanding space for an allocation.
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify MemoryService of the current memory usage.
  void track_metaspace_memory_usage();

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  size_t get_raw_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag,
            Monitor::_safepoint_check_never);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         "Inconsistency in container_count _container_count " UINTX_FORMAT
         " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()) {}

BlockFreelist::~BlockFreelist() {
  delete _dictionary;
}
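
// return_block() below stores the freelist metadata in the freed memory
// itself: a Metablock header is constructed with placement new at the
// returned address before the block is handed to the dictionary, so no
// side allocation is needed for bookkeeping.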

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  Metablock* free_chunk = ::new (p) Metablock(word_size);
  dictionary()->return_chunk(free_chunk);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    // Dark matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    return_block(new_block + word_size, unused);
  }

  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  dictionary()->print_free_lists(st);
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
         "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    Log(gc, metaspace, freelist) log;
    log.debug("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
    // Dump some information about the virtual space that is nearly full
    ResourceMark rm;
    print_on(log.debug_stream());
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}
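
// take_from_committed() is a bump-pointer allocation: the new chunk
// occupies [old top, old top + chunk_word_size) and the Metachunk header
// is constructed in place at the chunk's base, so the only node state
// that changes is the advanced _top.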


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned only the middle alignment of the VirtualSpace is used.
  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                                             Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
           "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                           (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
           "Reserved start was not set properly " PTR_FORMAT
           " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
           "Reserved size was not set properly " SIZE_FORMAT
           " != " SIZE_FORMAT, reserved()->word_size(),
           _rs.size() / BytesPerWord);
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
               "[" PTR_FORMAT ", " PTR_FORMAT ", "
               PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(vs), capacity / K,
               capacity == 0 ? 0 : used * 100 / capacity,
               p2i(bottom()), p2i(top()), p2i(end()),
               p2i(vs->high_boundary()));
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                        \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
         "Too much committed memory. Committed: " SIZE_FORMAT \
         " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
         MetaspaceAux::committed_bytes(), MaxMetaspaceSize);

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->word_size());
}
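
// Note that remove_chunk() only unlinks the chunk and updates the totals;
// it says nothing about what happens to the memory afterwards, which is
// why VirtualSpaceNode::purge() can use it while tearing down a node.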

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    DEBUG_ONLY(vsl->verify_container_count();)
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except for at
// unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(SpaceManager::expand_lock());

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  DEBUG_ONLY(verify_container_count();)
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->free_chunks(index)->size();

    while (free_words_in_vs() >= chunk_size) {
      Metachunk* chunk = get_chunk_vs(chunk_size);
      assert(chunk != NULL, "allocation should have been successful");

      chunk_manager->return_chunks(index, chunk);
      chunk_manager->inc_free_chunks_total(chunk_size);
    }
    DEBUG_ONLY(verify_container_count();)
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}
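
// The two constructors above reflect the two kinds of lists: the
// expandable non-class list is seeded with a word size, while the
// compressed class space list wraps a single pre-reserved ReservedSpace
// and is never grown (create_new_virtual_space() below asserts this).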

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
           "Reserved memory size differs from requested memory size");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (log_is_enabled(Trace, gc, metaspace)) {
    Log(gc, metaspace) log;
    VirtualSpaceNode* vsl = current_virtual_space();
    ResourceMark rm;
    vsl->print_on(log.trace_stream());
  }
}

bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_size_aligned(min_words, Metaspace::commit_alignment_words());
  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
             "The new VirtualSpace was pre-committed, so it "
             "should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_size_up(medium_chunk_bunch, Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    node->print_on(st);
  }
}

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
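// As a worked example, assuming the common defaults of
// MinMetaspaceExpansion = 256K and MaxMetaspaceExpansion = 4M: a 100K
// request is raised to 256K, a 1M request to 4M, and a 10M request to
// roughly 10M plus 256K, everything rounded to commit_alignment().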
If that is not 1362 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion. 1363 // If that is still not enough, expand by the size of the allocation 1364 // plus some. 1365 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) { 1366 size_t min_delta = MinMetaspaceExpansion; 1367 size_t max_delta = MaxMetaspaceExpansion; 1368 size_t delta = align_size_up(bytes, Metaspace::commit_alignment()); 1369 1370 if (delta <= min_delta) { 1371 delta = min_delta; 1372 } else if (delta <= max_delta) { 1373 // Don't want to hit the high water mark on the next 1374 // allocation so make the delta greater than just enough 1375 // for this allocation. 1376 delta = max_delta; 1377 } else { 1378 // This allocation is large but the next ones are probably not 1379 // so increase by the minimum. 1380 delta = delta + min_delta; 1381 } 1382 1383 assert_is_size_aligned(delta, Metaspace::commit_alignment()); 1384 1385 return delta; 1386 } 1387 1388 size_t MetaspaceGC::capacity_until_GC() { 1389 size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC); 1390 assert(value >= MetaspaceSize, "Not initialized properly?"); 1391 return value; 1392 } 1393 1394 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) { 1395 assert_is_size_aligned(v, Metaspace::commit_alignment()); 1396 1397 size_t capacity_until_GC = (size_t) _capacity_until_GC; 1398 size_t new_value = capacity_until_GC + v; 1399 1400 if (new_value < capacity_until_GC) { 1401 // The addition wrapped around, set new_value to aligned max value. 1402 new_value = align_size_down(max_uintx, Metaspace::commit_alignment()); 1403 } 1404 1405 intptr_t expected = (intptr_t) capacity_until_GC; 1406 intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected); 1407 1408 if (expected != actual) { 1409 return false; 1410 } 1411 1412 if (new_cap_until_GC != NULL) { 1413 *new_cap_until_GC = new_value; 1414 } 1415 if (old_cap_until_GC != NULL) { 1416 *old_cap_until_GC = capacity_until_GC; 1417 } 1418 return true; 1419 } 1420 1421 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) { 1422 assert_is_size_aligned(v, Metaspace::commit_alignment()); 1423 1424 return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC); 1425 } 1426 1427 void MetaspaceGC::initialize() { 1428 // Set the high-water mark to MaxMetapaceSize during VM initializaton since 1429 // we can't do a GC during initialization. 1430 _capacity_until_GC = MaxMetaspaceSize; 1431 } 1432 1433 void MetaspaceGC::post_initialize() { 1434 // Reset the high-water mark once the VM initialization is done. 1435 _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize); 1436 } 1437 1438 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) { 1439 // Check if the compressed class space is full. 1440 if (is_class && Metaspace::using_class_space()) { 1441 size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType); 1442 if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) { 1443 return false; 1444 } 1445 } 1446 1447 // Check if the user has imposed a limit on the metaspace memory. 

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
         capacity_until_gc, committed_bytes);

  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);

  return left_to_commit / BytesPerWord;
}

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceAux::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
  log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
                           minimum_free_percentage, maximum_used_percentage);
  log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);


  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM: %6.1fKB",
                               minimum_desired_capacity / (double) K,
                               expand_bytes / (double) K,
                               MinMetaspaceExpansion / (double) K,
                               new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  assert(capacity_until_GC >= minimum_desired_capacity,
         SIZE_FORMAT " >= " SIZE_FORMAT,
         capacity_until_GC, minimum_desired_capacity);
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
                             maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
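      // Dividing by 100 before multiplying by the shrink factor avoids
      // overflow for very large capacities, at the cost of up to 99 bytes
      // of rounding, which is harmless since shrink_bytes is aligned down
      // to commit_alignment() just below.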
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

      shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
             shrink_bytes, max_shrink_bytes);
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
                               MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
      log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
                               shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                             new_capacity_until_GC,
                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  }
}

// Metadebug methods

void Metadebug::init_allocation_fail_alot_count() {
  if (MetadataAllocationFailALot) {
    _allocation_fail_alot_count =
      1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
  }
}

#ifdef ASSERT
bool Metadebug::test_metadata_failure() {
  if (MetadataAllocationFailALot &&
      Threads::is_vm_complete()) {
    if (_allocation_fail_alot_count > 0) {
      _allocation_fail_alot_count--;
    } else {
      log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
      init_allocation_fail_alot_count();
      return true;
    }
  }
  return false;
}
#endif

// ChunkManager methods

size_t ChunkManager::free_chunks_total_words() {
  return _free_chunks_total;
}

size_t ChunkManager::free_chunks_total_bytes() {
  return free_chunks_total_words() * BytesPerWord;
}

size_t ChunkManager::free_chunks_count() {
#ifdef ASSERT
  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
    MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
    // This lock is only needed in debug because the verification
    // of the _free_chunks_totals walks the list of free chunks
    slow_locked_verify_free_chunks_count();
  }
#endif
  return _free_chunks_count;
}

void ChunkManager::locked_verify_free_chunks_total() {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(sum_free_chunks() == _free_chunks_total,
         "_free_chunks_total " SIZE_FORMAT " is not the"
         " same as sum " SIZE_FORMAT, _free_chunks_total,
         sum_free_chunks());
}

void ChunkManager::verify_free_chunks_total() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  locked_verify_free_chunks_total();
}

void ChunkManager::locked_verify_free_chunks_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(sum_free_chunks_count() == _free_chunks_count,
         "_free_chunks_count " SIZE_FORMAT " is not the"
         " same as sum " SIZE_FORMAT, _free_chunks_count,
         sum_free_chunks_count());
}
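
// The verification entry points come in three strengths: the locked_*
// versions assume the expand lock is already held, the unprefixed versions
// take the lock themselves, and the slow_* wrappers are no-ops unless the
// metaspace_slow_verify constant at the top of this file is set to true.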
1656 } 1657 1658 void ChunkManager::verify_free_chunks_count() { 1659 #ifdef ASSERT 1660 MutexLockerEx cl(SpaceManager::expand_lock(), 1661 Mutex::_no_safepoint_check_flag); 1662 locked_verify_free_chunks_count(); 1663 #endif 1664 } 1665 1666 void ChunkManager::verify() { 1667 MutexLockerEx cl(SpaceManager::expand_lock(), 1668 Mutex::_no_safepoint_check_flag); 1669 locked_verify(); 1670 } 1671 1672 void ChunkManager::locked_verify() { 1673 locked_verify_free_chunks_count(); 1674 locked_verify_free_chunks_total(); 1675 } 1676 1677 void ChunkManager::locked_print_free_chunks(outputStream* st) { 1678 assert_lock_strong(SpaceManager::expand_lock()); 1679 st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, 1680 _free_chunks_total, _free_chunks_count); 1681 } 1682 1683 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) { 1684 assert_lock_strong(SpaceManager::expand_lock()); 1685 st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, 1686 sum_free_chunks(), sum_free_chunks_count()); 1687 } 1688 ChunkList* ChunkManager::free_chunks(ChunkIndex index) { 1689 return &_free_chunks[index]; 1690 } 1691 1692 // These methods that sum the free chunk lists are used in printing 1693 // methods that are used in product builds. 1694 size_t ChunkManager::sum_free_chunks() { 1695 assert_lock_strong(SpaceManager::expand_lock()); 1696 size_t result = 0; 1697 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { 1698 ChunkList* list = free_chunks(i); 1699 1700 if (list == NULL) { 1701 continue; 1702 } 1703 1704 result = result + list->count() * list->size(); 1705 } 1706 result = result + humongous_dictionary()->total_size(); 1707 return result; 1708 } 1709 1710 size_t ChunkManager::sum_free_chunks_count() { 1711 assert_lock_strong(SpaceManager::expand_lock()); 1712 size_t count = 0; 1713 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { 1714 ChunkList* list = free_chunks(i); 1715 if (list == NULL) { 1716 continue; 1717 } 1718 count = count + list->count(); 1719 } 1720 count = count + humongous_dictionary()->total_free_blocks(); 1721 return count; 1722 } 1723 1724 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) { 1725 ChunkIndex index = list_index(word_size); 1726 assert(index < HumongousIndex, "No humongous list"); 1727 return free_chunks(index); 1728 } 1729 1730 Metachunk* ChunkManager::free_chunks_get(size_t word_size) { 1731 assert_lock_strong(SpaceManager::expand_lock()); 1732 1733 slow_locked_verify(); 1734 1735 Metachunk* chunk = NULL; 1736 if (list_index(word_size) != HumongousIndex) { 1737 ChunkList* free_list = find_free_chunks_list(word_size); 1738 assert(free_list != NULL, "Sanity check"); 1739 1740 chunk = free_list->head(); 1741 1742 if (chunk == NULL) { 1743 return NULL; 1744 } 1745 1746 // Remove the chunk as the head of the list. 
1747     free_list->remove_chunk(chunk);
1748
1749     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1750                                        p2i(free_list), p2i(chunk), chunk->word_size());
1751   } else {
1752     chunk = humongous_dictionary()->get_chunk(
1753       word_size,
1754       FreeBlockDictionary<Metachunk>::atLeast);
1755
1756     if (chunk == NULL) {
1757       return NULL;
1758     }
1759
1760     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
1761                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
1762   }
1763
1764   // Chunk is being removed from the chunks free list.
1765   dec_free_chunks_total(chunk->word_size());
1766
1767   // Remove it from the links to this freelist
1768   chunk->set_next(NULL);
1769   chunk->set_prev(NULL);
1770 #ifdef ASSERT
1771   // Chunk is no longer on any freelist. Setting is_tagged_free to false
1772   // makes container_count_slow() work.
1773   chunk->set_is_tagged_free(false);
1774 #endif
1775   chunk->container()->inc_container_count();
1776
1777   slow_locked_verify();
1778   return chunk;
1779 }
1780
1781 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1782   assert_lock_strong(SpaceManager::expand_lock());
1783   slow_locked_verify();
1784
1785   // Take from the beginning of the list
1786   Metachunk* chunk = free_chunks_get(word_size);
1787   if (chunk == NULL) {
1788     return NULL;
1789   }
1790
1791   assert((word_size <= chunk->word_size()) ||
1792          (list_index(chunk->word_size()) == HumongousIndex),
1793          "Non-humongous variable sized chunk");
1794   Log(gc, metaspace, freelist) log;
1795   if (log.is_debug()) {
1796     size_t list_count;
1797     if (list_index(word_size) < HumongousIndex) {
1798       ChunkList* list = find_free_chunks_list(word_size);
1799       list_count = list->count();
1800     } else {
1801       list_count = humongous_dictionary()->total_count();
1802     }
1803     log.debug("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1804               p2i(this), p2i(chunk), chunk->word_size(), list_count);
1805     ResourceMark rm;
1806     locked_print_free_chunks(log.debug_stream());
1807   }
1808
1809   return chunk;
1810 }
1811
1812 void ChunkManager::print_on(outputStream* out) const {
1813   const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics(out);
1814 }
1815
1816 // SpaceManager methods
1817
1818 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1819                                            size_t* chunk_word_size,
1820                                            size_t* class_chunk_word_size) {
1821   switch (type) {
1822   case Metaspace::BootMetaspaceType:
1823     *chunk_word_size = Metaspace::first_chunk_word_size();
1824     *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1825     break;
1826   case Metaspace::ROMetaspaceType:
1827     *chunk_word_size = SharedReadOnlySize / wordSize;
1828     *class_chunk_word_size = ClassSpecializedChunk;
1829     break;
1830   case Metaspace::ReadWriteMetaspaceType:
1831     *chunk_word_size = SharedReadWriteSize / wordSize;
1832     *class_chunk_word_size = ClassSpecializedChunk;
1833     break;
1834   case Metaspace::AnonymousMetaspaceType:
1835   case Metaspace::ReflectionMetaspaceType:
1836     *chunk_word_size = SpecializedChunk;
1837     *class_chunk_word_size = ClassSpecializedChunk;
1838     break;
1839   default:
1840     *chunk_word_size = SmallChunk;
1841     *class_chunk_word_size = ClassSmallChunk;
1842     break;
1843   }
1844   assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1845          "Initial chunk sizes bad: data " SIZE_FORMAT
1846          " class " SIZE_FORMAT,
1847          *chunk_word_size, *class_chunk_word_size);
1848 }
1849
1850 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1851   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1852   size_t free = 0;
1853   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1854     Metachunk* chunk = chunks_in_use(i);
1855     while (chunk != NULL) {
1856       free += chunk->free_word_size();
1857       chunk = chunk->next();
1858     }
1859   }
1860   return free;
1861 }
1862
1863 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1864   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1865   size_t result = 0;
1866   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1867     result += sum_waste_in_chunks_in_use(i);
1868   }
1869
1870   return result;
1871 }
1872
1873 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1874   size_t result = 0;
1875   Metachunk* chunk = chunks_in_use(index);
1876   // Count the free space in all the chunks but not in the
1877   // current chunk, from which allocations are still being done.
1878   while (chunk != NULL) {
1879     if (chunk != current_chunk()) {
1880       result += chunk->free_word_size();
1881     }
1882     chunk = chunk->next();
1883   }
1884   return result;
1885 }
1886
1887 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1888   // For CMS use "allocated_chunks_words()", which does not need the
1889   // Metaspace lock.  For the other collectors sum over the in-use
1890   // lists.  Walking the lists, as sum_capacity_in_chunks_in_use()
1891   // does here, gives the definitive answer but is too expensive to
1892   // use in product builds, so allocated_chunks_words() should be
1893   // used there.  Computing both values allows checking that
1894   // allocated_chunks_words() returns the same value as the
1895   // definitive list walk.
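  // Illustrative example (made-up numbers, not derived from a real run):
  // a SpaceManager holding one 512-word small chunk and one 8 K-word
  // medium chunk would report
  //
  //   sum_capacity_in_chunks_in_use() == 512 + 8192 == 8704 words
  //
  // and allocated_chunks_words() is expected to return the same 8704
  // words, since inc_size_metrics(new_chunk->word_size()) runs once for
  // every chunk that add_chunk() takes into use.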
1896 if (UseConcMarkSweepGC) { 1897 return allocated_chunks_words(); 1898 } else { 1899 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 1900 size_t sum = 0; 1901 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1902 Metachunk* chunk = chunks_in_use(i); 1903 while (chunk != NULL) { 1904 sum += chunk->word_size(); 1905 chunk = chunk->next(); 1906 } 1907 } 1908 return sum; 1909 } 1910 } 1911 1912 size_t SpaceManager::sum_count_in_chunks_in_use() { 1913 size_t count = 0; 1914 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1915 count = count + sum_count_in_chunks_in_use(i); 1916 } 1917 1918 return count; 1919 } 1920 1921 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) { 1922 size_t count = 0; 1923 Metachunk* chunk = chunks_in_use(i); 1924 while (chunk != NULL) { 1925 count++; 1926 chunk = chunk->next(); 1927 } 1928 return count; 1929 } 1930 1931 1932 size_t SpaceManager::sum_used_in_chunks_in_use() const { 1933 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 1934 size_t used = 0; 1935 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1936 Metachunk* chunk = chunks_in_use(i); 1937 while (chunk != NULL) { 1938 used += chunk->used_word_size(); 1939 chunk = chunk->next(); 1940 } 1941 } 1942 return used; 1943 } 1944 1945 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const { 1946 1947 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1948 Metachunk* chunk = chunks_in_use(i); 1949 st->print("SpaceManager: %s " PTR_FORMAT, 1950 chunk_size_name(i), p2i(chunk)); 1951 if (chunk != NULL) { 1952 st->print_cr(" free " SIZE_FORMAT, 1953 chunk->free_word_size()); 1954 } else { 1955 st->cr(); 1956 } 1957 } 1958 1959 chunk_manager()->locked_print_free_chunks(st); 1960 chunk_manager()->locked_print_sum_free_chunks(st); 1961 } 1962 1963 size_t SpaceManager::calc_chunk_size(size_t word_size) { 1964 1965 // Decide between a small chunk and a medium chunk. Up to 1966 // _small_chunk_limit small chunks can be allocated. 1967 // After that a medium chunk is preferred. 1968 size_t chunk_word_size; 1969 if (chunks_in_use(MediumIndex) == NULL && 1970 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) { 1971 chunk_word_size = (size_t) small_chunk_size(); 1972 if (word_size + Metachunk::overhead() > small_chunk_size()) { 1973 chunk_word_size = medium_chunk_size(); 1974 } 1975 } else { 1976 chunk_word_size = medium_chunk_size(); 1977 } 1978 1979 // Might still need a humongous chunk. Enforce 1980 // humongous allocations sizes to be aligned up to 1981 // the smallest chunk size. 
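  // Illustrative example (assuming the smallest chunk is 128 words and
  // Metachunk::overhead() is 8 words; the real values are platform
  // dependent): a request for 10000 words gives
  //
  //   align_size_up(10000 + 8, 128) == 10112 words,
  //
  // which is larger than a medium chunk (8 K words), so a humongous
  // chunk of exactly 10112 words is used.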
1982 size_t if_humongous_sized_chunk = 1983 align_size_up(word_size + Metachunk::overhead(), 1984 smallest_chunk_size()); 1985 chunk_word_size = 1986 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk); 1987 1988 assert(!SpaceManager::is_humongous(word_size) || 1989 chunk_word_size == if_humongous_sized_chunk, 1990 "Size calculation is wrong, word_size " SIZE_FORMAT 1991 " chunk_word_size " SIZE_FORMAT, 1992 word_size, chunk_word_size); 1993 Log(gc, metaspace, alloc) log; 1994 if (log.is_debug() && SpaceManager::is_humongous(word_size)) { 1995 log.debug("Metadata humongous allocation:"); 1996 log.debug(" word_size " PTR_FORMAT, word_size); 1997 log.debug(" chunk_word_size " PTR_FORMAT, chunk_word_size); 1998 log.debug(" chunk overhead " PTR_FORMAT, Metachunk::overhead()); 1999 } 2000 return chunk_word_size; 2001 } 2002 2003 void SpaceManager::track_metaspace_memory_usage() { 2004 if (is_init_completed()) { 2005 if (is_class()) { 2006 MemoryService::track_compressed_class_memory_usage(); 2007 } 2008 MemoryService::track_metaspace_memory_usage(); 2009 } 2010 } 2011 2012 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) { 2013 assert(vs_list()->current_virtual_space() != NULL, 2014 "Should have been set"); 2015 assert(current_chunk() == NULL || 2016 current_chunk()->allocate(word_size) == NULL, 2017 "Don't need to expand"); 2018 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 2019 2020 if (log_is_enabled(Trace, gc, metaspace, freelist)) { 2021 size_t words_left = 0; 2022 size_t words_used = 0; 2023 if (current_chunk() != NULL) { 2024 words_left = current_chunk()->free_word_size(); 2025 words_used = current_chunk()->used_word_size(); 2026 } 2027 log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left", 2028 word_size, words_used, words_left); 2029 } 2030 2031 // Get another chunk 2032 size_t grow_chunks_by_words = calc_chunk_size(word_size); 2033 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words); 2034 2035 MetaWord* mem = NULL; 2036 2037 // If a chunk was available, add it to the in-use chunk list 2038 // and do an allocation from it. 2039 if (next != NULL) { 2040 // Add to this manager's list of chunks in use. 2041 add_chunk(next, false); 2042 mem = next->allocate(word_size); 2043 } 2044 2045 // Track metaspace memory usage statistic. 2046 track_metaspace_memory_usage(); 2047 2048 return mem; 2049 } 2050 2051 void SpaceManager::print_on(outputStream* st) const { 2052 2053 for (ChunkIndex i = ZeroIndex; 2054 i < NumberOfInUseLists ; 2055 i = next_chunk_index(i) ) { 2056 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT, 2057 p2i(chunks_in_use(i)), 2058 chunks_in_use(i) == NULL ? 
0 : chunks_in_use(i)->word_size()); 2059 } 2060 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT 2061 " Humongous " SIZE_FORMAT, 2062 sum_waste_in_chunks_in_use(SmallIndex), 2063 sum_waste_in_chunks_in_use(MediumIndex), 2064 sum_waste_in_chunks_in_use(HumongousIndex)); 2065 // block free lists 2066 if (block_freelists() != NULL) { 2067 st->print_cr("total in block free lists " SIZE_FORMAT, 2068 block_freelists()->total_size()); 2069 } 2070 } 2071 2072 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype, 2073 Mutex* lock) : 2074 _mdtype(mdtype), 2075 _allocated_blocks_words(0), 2076 _allocated_chunks_words(0), 2077 _allocated_chunks_count(0), 2078 _lock(lock) 2079 { 2080 initialize(); 2081 } 2082 2083 void SpaceManager::inc_size_metrics(size_t words) { 2084 assert_lock_strong(SpaceManager::expand_lock()); 2085 // Total of allocated Metachunks and allocated Metachunks count 2086 // for each SpaceManager 2087 _allocated_chunks_words = _allocated_chunks_words + words; 2088 _allocated_chunks_count++; 2089 // Global total of capacity in allocated Metachunks 2090 MetaspaceAux::inc_capacity(mdtype(), words); 2091 // Global total of allocated Metablocks. 2092 // used_words_slow() includes the overhead in each 2093 // Metachunk so include it in the used when the 2094 // Metachunk is first added (so only added once per 2095 // Metachunk). 2096 MetaspaceAux::inc_used(mdtype(), Metachunk::overhead()); 2097 } 2098 2099 void SpaceManager::inc_used_metrics(size_t words) { 2100 // Add to the per SpaceManager total 2101 Atomic::add_ptr(words, &_allocated_blocks_words); 2102 // Add to the global total 2103 MetaspaceAux::inc_used(mdtype(), words); 2104 } 2105 2106 void SpaceManager::dec_total_from_size_metrics() { 2107 MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words()); 2108 MetaspaceAux::dec_used(mdtype(), allocated_blocks_words()); 2109 // Also deduct the overhead per Metachunk 2110 MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead()); 2111 } 2112 2113 void SpaceManager::initialize() { 2114 Metadebug::init_allocation_fail_alot_count(); 2115 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2116 _chunks_in_use[i] = NULL; 2117 } 2118 _current_chunk = NULL; 2119 log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this)); 2120 } 2121 2122 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) { 2123 if (chunks == NULL) { 2124 return; 2125 } 2126 ChunkList* list = free_chunks(index); 2127 assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes"); 2128 assert_lock_strong(SpaceManager::expand_lock()); 2129 Metachunk* cur = chunks; 2130 2131 // This returns chunks one at a time. 
If a new
2132 // class List is created that is a base class
2133 // of FreeList, then something like FreeList::prepend()
2134 // could be used in place of this loop.
2135   while (cur != NULL) {
2136     assert(cur->container() != NULL, "Container should have been set");
2137     cur->container()->dec_container_count();
2138     // Capture the next link before it is changed
2139     // by the call to return_chunk_at_head();
2140     Metachunk* next = cur->next();
2141     DEBUG_ONLY(cur->set_is_tagged_free(true);)
2142     NOT_PRODUCT(cur->mangle(badMetaWordVal);)
2143     list->return_chunk_at_head(cur);
2144     cur = next;
2145   }
2146 }
2147
2148 SpaceManager::~SpaceManager() {
2149   // This takes this->_lock, which can't be done while holding expand_lock()
2150   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2151          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2152          " allocated_chunks_words() " SIZE_FORMAT,
2153          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2154
2155   MutexLockerEx fcl(SpaceManager::expand_lock(),
2156                     Mutex::_no_safepoint_check_flag);
2157
2158   chunk_manager()->slow_locked_verify();
2159
2160   dec_total_from_size_metrics();
2161
2162   Log(gc, metaspace, freelist) log;
2163   if (log.is_trace()) {
2164     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
2165     ResourceMark rm;
2166     locked_print_chunks_in_use_on(log.trace_stream());
2167     block_freelists()->print_on(log.trace_stream());
2168   }
2169
2170   // Have to update before the chunks_in_use lists are emptied
2171   // below.
2172   chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2173                                          sum_count_in_chunks_in_use());
2174
2175   // Add all the chunks in use by this space manager
2176   // to the global list of free chunks.
2177
2178   // Follow each list of chunks-in-use and add them to the
2179   // free lists.  Each list is NULL terminated.
2180
2181   for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2182     log.trace("returned " SIZE_FORMAT " %s chunks to freelist", sum_count_in_chunks_in_use(i), chunk_size_name(i));
2183     Metachunk* chunks = chunks_in_use(i);
2184     chunk_manager()->return_chunks(i, chunks);
2185     set_chunks_in_use(i, NULL);
2186     log.trace("updated freelist count " SSIZE_FORMAT " %s", chunk_manager()->free_chunks(i)->count(), chunk_size_name(i));
2187     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2188   }
2189
2190   // The medium chunk case may be optimized by passing the head and
2191   // tail of the medium chunk list to add_at_head().  The tail is often
2192   // the current chunk but there are probably exceptions.
2193
2194   // Humongous chunks
2195   log.trace("returned " SIZE_FORMAT " %s humongous chunks to dictionary",
2196             sum_count_in_chunks_in_use(HumongousIndex), chunk_size_name(HumongousIndex));
2197   log.trace("Humongous chunk dictionary: ");
2198   // Humongous chunks are never the current chunk.
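  // Illustrative example (assuming a smallest chunk size of 128 words):
  // a humongous chunk of 10112 words satisfies the assert below, since
  // 10112 == align_size_up(10112, 128).  Each such chunk is handed back
  // to the humongous dictionary, which keeps free chunks searchable by
  // size.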
2199   Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2200
2201   while (humongous_chunks != NULL) {
2202     DEBUG_ONLY(humongous_chunks->set_is_tagged_free(true);)
2203     NOT_PRODUCT(humongous_chunks->mangle(badMetaWordVal);)
2204     log.trace(PTR_FORMAT " (" SIZE_FORMAT ") ", p2i(humongous_chunks), humongous_chunks->word_size());
2205     assert(humongous_chunks->word_size() == (size_t)
2206            align_size_up(humongous_chunks->word_size(),
2207                          smallest_chunk_size()),
2208            "Humongous chunk size is wrong: word size " SIZE_FORMAT
2209            " granularity " SIZE_FORMAT,
2210            humongous_chunks->word_size(), smallest_chunk_size());
2211     Metachunk* next_humongous_chunks = humongous_chunks->next();
2212     humongous_chunks->container()->dec_container_count();
2213     chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2214     humongous_chunks = next_humongous_chunks;
2215   }
2216   log.trace("updated dictionary count " SIZE_FORMAT " %s", chunk_manager()->humongous_dictionary()->total_count(), chunk_size_name(HumongousIndex));
2217   chunk_manager()->slow_locked_verify();
2218 }
2219
2220 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2221   switch (index) {
2222   case SpecializedIndex:
2223     return "Specialized";
2224   case SmallIndex:
2225     return "Small";
2226   case MediumIndex:
2227     return "Medium";
2228   case HumongousIndex:
2229     return "Humongous";
2230   default:
2231     return NULL;
2232   }
2233 }
2234
2235 ChunkIndex ChunkManager::list_index(size_t size) {
2236   switch (size) {
2237   case SpecializedChunk:
2238     assert(SpecializedChunk == ClassSpecializedChunk,
2239            "Need branch for ClassSpecializedChunk");
2240     return SpecializedIndex;
2241   case SmallChunk:
2242   case ClassSmallChunk:
2243     return SmallIndex;
2244   case MediumChunk:
2245   case ClassMediumChunk:
2246     return MediumIndex;
2247   default:
2248     assert(size > MediumChunk || size > ClassMediumChunk,
2249            "Not a humongous chunk");
2250     return HumongousIndex;
2251   }
2252 }
2253
2254 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2255   assert_lock_strong(_lock);
2256   size_t raw_word_size = get_raw_word_size(word_size);
2257   size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size();
2258   assert(raw_word_size >= min_size,
2259          "Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size);
2260   block_freelists()->return_block(p, raw_word_size);
2261 }
2262
2263 // Adds a chunk to the list of chunks in use.
2264 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2265
2266   assert(new_chunk != NULL, "Should not be NULL");
2267   assert(new_chunk->next() == NULL, "Should not be on a list");
2268
2269   new_chunk->reset_empty();
2270
2271   // Find the correct list and set the current
2272   // chunk for that list.
2273   ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2274
2275   if (index != HumongousIndex) {
2276     retire_current_chunk();
2277     set_current_chunk(new_chunk);
2278     new_chunk->set_next(chunks_in_use(index));
2279     set_chunks_in_use(index, new_chunk);
2280   } else {
2281     // For null class loader data and DumpSharedSpaces, the first chunk isn't
2282     // small, so the small chunk list will be null.  Link this first chunk
2283     // as the current chunk.
2284     if (make_current) {
2285       // Set as the current chunk but otherwise treat as a humongous chunk.
2286       set_current_chunk(new_chunk);
2287     }
2288     // Link at head.  The _current_chunk only points to a humongous chunk for
2289     // the null class loader metaspace (class and data virtual space managers),
2290     // so it will not point to the tail
2291     // of the humongous chunks list.
2292     new_chunk->set_next(chunks_in_use(HumongousIndex));
2293     set_chunks_in_use(HumongousIndex, new_chunk);
2294
2295     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2296   }
2297
2298   // Add to the running sum of capacity
2299   inc_size_metrics(new_chunk->word_size());
2300
2301   assert(new_chunk->is_empty(), "Not ready for reuse");
2302   Log(gc, metaspace, freelist) log;
2303   if (log.is_trace()) {
2304     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT, sum_count_in_chunks_in_use());
2305     ResourceMark rm;
2306     outputStream* out = log.trace_stream();
2307     new_chunk->print_on(out);
2308     chunk_manager()->locked_print_free_chunks(out);
2309   }
2310 }
2311
2312 void SpaceManager::retire_current_chunk() {
2313   if (current_chunk() != NULL) {
2314     size_t remaining_words = current_chunk()->free_word_size();
2315     if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
2316       block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2317       inc_used_metrics(remaining_words);
2318     }
2319   }
2320 }
2321
2322 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2323                                        size_t grow_chunks_by_words) {
2324   // Get a chunk from the chunk freelist
2325   Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2326
2327   if (next == NULL) {
2328     next = vs_list()->get_new_chunk(word_size,
2329                                     grow_chunks_by_words,
2330                                     medium_chunk_bunch());
2331   }
2332
2333   Log(gc, metaspace, alloc) log;
2334   if (log.is_debug() && next != NULL &&
2335       SpaceManager::is_humongous(next->word_size())) {
2336     log.debug(" new humongous chunk word size " PTR_FORMAT, next->word_size());
2337   }
2338
2339   return next;
2340 }
2341
2342 /*
2343  * The policy is to allocate up to _small_chunk_limit small chunks
2344  * after which only medium chunks are allocated.  This is done to
2345  * reduce fragmentation.  In some cases, this can result in a lot
2346  * of small chunks being allocated to the point where it's not
2347  * possible to expand.  If this happens, there may be no medium chunks
2348  * available and OOME would be thrown.  Instead of doing that,
2349  * if the allocation request size fits in a small chunk, an attempt
2350  * will be made to allocate a small chunk.
2351  */
2352 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
2353   size_t raw_word_size = get_raw_word_size(word_size);
2354
2355   if (raw_word_size + Metachunk::overhead() > small_chunk_size()) {
2356     return NULL;
2357   }
2358
2359   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2360   MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag);
2361
2362   Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size());
2363
2364   MetaWord* mem = NULL;
2365
2366   if (chunk != NULL) {
2367     // Add the chunk to this manager's list of chunks in use
2368     // and do an allocation from it.
2369     add_chunk(chunk, false);
2370     mem = chunk->allocate(raw_word_size);
2371
2372     inc_used_metrics(raw_word_size);
2373
2374     // Track metaspace memory usage statistic.
2375     track_metaspace_memory_usage();
2376   }
2377
2378   return mem;
2379 }
2380
2381 MetaWord* SpaceManager::allocate(size_t word_size) {
2382   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2383
2384   size_t raw_word_size = get_raw_word_size(word_size);
2385   BlockFreelist* fl = block_freelists();
2386   MetaWord* p = NULL;
2387   // Allocation from the dictionary is expensive in the sense that
2388   // the dictionary has to be searched for a size.  Don't allocate
2389   // from the dictionary until it starts to get fat.  Is this
2390   // a reasonable policy?  Maybe a skinny dictionary is fast enough
2391   // for allocations.  Do some profiling.  JJJ
2392   if (fl->total_size() > allocation_from_dictionary_limit) {
2393     p = fl->get_block(raw_word_size);
2394   }
2395   if (p == NULL) {
2396     p = allocate_work(raw_word_size);
2397   }
2398
2399   return p;
2400 }
2401
2402 // Returns the address of space allocated for "word_size".
2403 // This method does not know about blocks (Metablocks).
2404 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2405   assert_lock_strong(_lock);
2406 #ifdef ASSERT
2407   if (Metadebug::test_metadata_failure()) {
2408     return NULL;
2409   }
2410 #endif
2411   // Is there space in the current chunk?
2412   MetaWord* result = NULL;
2413
2414   // For DumpSharedSpaces, only allocate out of the current chunk which is
2415   // never null because we gave it the size we wanted.  Caller reports out
2416   // of memory if this returns null.
2417   if (DumpSharedSpaces) {
2418     assert(current_chunk() != NULL, "should never happen");
2419     inc_used_metrics(word_size);
2420     return current_chunk()->allocate(word_size); // caller handles null result
2421   }
2422
2423   if (current_chunk() != NULL) {
2424     result = current_chunk()->allocate(word_size);
2425   }
2426
2427   if (result == NULL) {
2428     result = grow_and_allocate(word_size);
2429   }
2430
2431   if (result != NULL) {
2432     inc_used_metrics(word_size);
2433     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2434            "Head of the list is being allocated");
2435   }
2436
2437   return result;
2438 }
2439
2440 void SpaceManager::verify() {
2441   // If there are blocks in the dictionary, then
2442   // verification of chunks does not work since
2443   // being in the dictionary alters a chunk.
2444   if (block_freelists()->total_size() == 0) {
2445     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2446       Metachunk* curr = chunks_in_use(i);
2447       while (curr != NULL) {
2448         curr->verify();
2449         verify_chunk_size(curr);
2450         curr = curr->next();
2451       }
2452     }
2453   }
2454 }
2455
2456 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2457   assert(is_humongous(chunk->word_size()) ||
2458          chunk->word_size() == medium_chunk_size() ||
2459          chunk->word_size() == small_chunk_size() ||
2460          chunk->word_size() == specialized_chunk_size(),
2461          "Chunk size is wrong");
2462   return;
2463 }
2464
2465 #ifdef ASSERT
2466 void SpaceManager::verify_allocated_blocks_words() {
2467   // Verification is only guaranteed at a safepoint.
2468   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2469          "Verification can fail if the application is running");
2470   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2471          "allocation total is not consistent " SIZE_FORMAT
2472          " vs " SIZE_FORMAT,
2473          allocated_blocks_words(), sum_used_in_chunks_in_use());
2474 }
2475
2476 #endif
2477
2478 void SpaceManager::dump(outputStream* const out) const {
2479   size_t curr_total = 0;
2480   size_t waste = 0;
2481   uint i = 0;
2482   size_t used = 0;
2483   size_t capacity = 0;
2484
2485   // Add up statistics for all chunks in this SpaceManager.
2486   for (ChunkIndex index = ZeroIndex;
2487        index < NumberOfInUseLists;
2488        index = next_chunk_index(index)) {
2489     for (Metachunk* curr = chunks_in_use(index);
2490          curr != NULL;
2491          curr = curr->next()) {
2492       out->print("%d) ", i++);
2493       curr->print_on(out);
2494       curr_total += curr->word_size();
2495       used += curr->used_word_size();
2496       capacity += curr->word_size();
2497       waste += curr->free_word_size() + curr->overhead();
2498     }
2499   }
2500
2501   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2502     block_freelists()->print_on(out);
2503   }
2504
2505   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2506   // Free space isn't wasted.
2507   waste -= free;
2508
2509   out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT
2510                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2511                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2512 }
2513
2514 // MetaspaceAux
2515
2516
2517 size_t MetaspaceAux::_capacity_words[] = {0, 0};
2518 size_t MetaspaceAux::_used_words[] = {0, 0};
2519
2520 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2521   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2522   return list == NULL ? 0 : list->free_bytes();
2523 }
2524
2525 size_t MetaspaceAux::free_bytes() {
2526   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2527 }
2528
2529 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2530   assert_lock_strong(SpaceManager::expand_lock());
2531   assert(words <= capacity_words(mdtype),
2532          "About to decrement below 0: words " SIZE_FORMAT
2533          " is greater than _capacity_words[%u] " SIZE_FORMAT,
2534          words, mdtype, capacity_words(mdtype));
2535   _capacity_words[mdtype] -= words;
2536 }
2537
2538 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2539   assert_lock_strong(SpaceManager::expand_lock());
2540   // Needs to be atomic
2541   _capacity_words[mdtype] += words;
2542 }
2543
2544 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2545   assert(words <= used_words(mdtype),
2546          "About to decrement below 0: words " SIZE_FORMAT
2547          " is greater than _used_words[%u] " SIZE_FORMAT,
2548          words, mdtype, used_words(mdtype));
2549   // For CMS, deallocation of the Metaspaces occurs during the
2550   // sweep, which is a concurrent phase.  Protection by the expand_lock()
2551   // is not enough since allocation is on a per Metaspace basis
2552   // and protected by the Metaspace lock.
2553   jlong minus_words = (jlong) - (jlong) words;
2554   Atomic::add_ptr(minus_words, &_used_words[mdtype]);
2555 }
2556
2557 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2558   // _used_words tracks allocations for
2559   // each piece of metadata.
Those allocations are 2560 // generally done concurrently by different application 2561 // threads so must be done atomically. 2562 Atomic::add_ptr(words, &_used_words[mdtype]); 2563 } 2564 2565 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) { 2566 size_t used = 0; 2567 ClassLoaderDataGraphMetaspaceIterator iter; 2568 while (iter.repeat()) { 2569 Metaspace* msp = iter.get_next(); 2570 // Sum allocated_blocks_words for each metaspace 2571 if (msp != NULL) { 2572 used += msp->used_words_slow(mdtype); 2573 } 2574 } 2575 return used * BytesPerWord; 2576 } 2577 2578 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) { 2579 size_t free = 0; 2580 ClassLoaderDataGraphMetaspaceIterator iter; 2581 while (iter.repeat()) { 2582 Metaspace* msp = iter.get_next(); 2583 if (msp != NULL) { 2584 free += msp->free_words_slow(mdtype); 2585 } 2586 } 2587 return free * BytesPerWord; 2588 } 2589 2590 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) { 2591 if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) { 2592 return 0; 2593 } 2594 // Don't count the space in the freelists. That space will be 2595 // added to the capacity calculation as needed. 2596 size_t capacity = 0; 2597 ClassLoaderDataGraphMetaspaceIterator iter; 2598 while (iter.repeat()) { 2599 Metaspace* msp = iter.get_next(); 2600 if (msp != NULL) { 2601 capacity += msp->capacity_words_slow(mdtype); 2602 } 2603 } 2604 return capacity * BytesPerWord; 2605 } 2606 2607 size_t MetaspaceAux::capacity_bytes_slow() { 2608 #ifdef PRODUCT 2609 // Use capacity_bytes() in PRODUCT instead of this function. 2610 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT"); 2611 #endif 2612 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType); 2613 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType); 2614 assert(capacity_bytes() == class_capacity + non_class_capacity, 2615 "bad accounting: capacity_bytes() " SIZE_FORMAT 2616 " class_capacity + non_class_capacity " SIZE_FORMAT 2617 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT, 2618 capacity_bytes(), class_capacity + non_class_capacity, 2619 class_capacity, non_class_capacity); 2620 2621 return class_capacity + non_class_capacity; 2622 } 2623 2624 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) { 2625 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2626 return list == NULL ? 0 : list->reserved_bytes(); 2627 } 2628 2629 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) { 2630 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2631 return list == NULL ? 
0 : list->committed_bytes(); 2632 } 2633 2634 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); } 2635 2636 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) { 2637 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype); 2638 if (chunk_manager == NULL) { 2639 return 0; 2640 } 2641 chunk_manager->slow_verify(); 2642 return chunk_manager->free_chunks_total_words(); 2643 } 2644 2645 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) { 2646 return free_chunks_total_words(mdtype) * BytesPerWord; 2647 } 2648 2649 size_t MetaspaceAux::free_chunks_total_words() { 2650 return free_chunks_total_words(Metaspace::ClassType) + 2651 free_chunks_total_words(Metaspace::NonClassType); 2652 } 2653 2654 size_t MetaspaceAux::free_chunks_total_bytes() { 2655 return free_chunks_total_words() * BytesPerWord; 2656 } 2657 2658 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) { 2659 return Metaspace::get_chunk_manager(mdtype) != NULL; 2660 } 2661 2662 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) { 2663 if (!has_chunk_free_list(mdtype)) { 2664 return MetaspaceChunkFreeListSummary(); 2665 } 2666 2667 const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype); 2668 return cm->chunk_free_list_summary(); 2669 } 2670 2671 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) { 2672 log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)", 2673 prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K); 2674 } 2675 2676 void MetaspaceAux::print_on(outputStream* out) { 2677 Metaspace::MetadataType nct = Metaspace::NonClassType; 2678 2679 out->print_cr(" Metaspace " 2680 "used " SIZE_FORMAT "K, " 2681 "capacity " SIZE_FORMAT "K, " 2682 "committed " SIZE_FORMAT "K, " 2683 "reserved " SIZE_FORMAT "K", 2684 used_bytes()/K, 2685 capacity_bytes()/K, 2686 committed_bytes()/K, 2687 reserved_bytes()/K); 2688 2689 if (Metaspace::using_class_space()) { 2690 Metaspace::MetadataType ct = Metaspace::ClassType; 2691 out->print_cr(" class space " 2692 "used " SIZE_FORMAT "K, " 2693 "capacity " SIZE_FORMAT "K, " 2694 "committed " SIZE_FORMAT "K, " 2695 "reserved " SIZE_FORMAT "K", 2696 used_bytes(ct)/K, 2697 capacity_bytes(ct)/K, 2698 committed_bytes(ct)/K, 2699 reserved_bytes(ct)/K); 2700 } 2701 } 2702 2703 // Print information for class space and data space separately. 2704 // This is almost the same as above. 
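// Illustrative example (made-up numbers): with 300 K used in chunks,
// 50 K unused in chunks, and 150 K of capacity in the free chunk lists,
// the line printed below reads
//
//   used in chunks 300K + unused in chunks 50K + capacity in free
//   chunks 150K = 500K capacity in allocated chunks 500K
//
// and the assert at the end checks that, at a safepoint, the sum on the
// left equals the capacity on the right.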
2705 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) { 2706 size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype); 2707 size_t capacity_bytes = capacity_bytes_slow(mdtype); 2708 size_t used_bytes = used_bytes_slow(mdtype); 2709 size_t free_bytes = free_bytes_slow(mdtype); 2710 size_t used_and_free = used_bytes + free_bytes + 2711 free_chunks_capacity_bytes; 2712 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT 2713 "K + unused in chunks " SIZE_FORMAT "K + " 2714 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT 2715 "K capacity in allocated chunks " SIZE_FORMAT "K", 2716 used_bytes / K, 2717 free_bytes / K, 2718 free_chunks_capacity_bytes / K, 2719 used_and_free / K, 2720 capacity_bytes / K); 2721 // Accounting can only be correct if we got the values during a safepoint 2722 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong"); 2723 } 2724 2725 // Print total fragmentation for class metaspaces 2726 void MetaspaceAux::print_class_waste(outputStream* out) { 2727 assert(Metaspace::using_class_space(), "class metaspace not used"); 2728 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0; 2729 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0; 2730 ClassLoaderDataGraphMetaspaceIterator iter; 2731 while (iter.repeat()) { 2732 Metaspace* msp = iter.get_next(); 2733 if (msp != NULL) { 2734 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2735 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2736 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2737 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex); 2738 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2739 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex); 2740 cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2741 } 2742 } 2743 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2744 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2745 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2746 "large count " SIZE_FORMAT, 2747 cls_specialized_count, cls_specialized_waste, 2748 cls_small_count, cls_small_waste, 2749 cls_medium_count, cls_medium_waste, cls_humongous_count); 2750 } 2751 2752 // Print total fragmentation for data and class metaspaces separately 2753 void MetaspaceAux::print_waste(outputStream* out) { 2754 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0; 2755 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0; 2756 2757 ClassLoaderDataGraphMetaspaceIterator iter; 2758 while (iter.repeat()) { 2759 Metaspace* msp = iter.get_next(); 2760 if (msp != NULL) { 2761 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2762 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2763 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2764 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex); 2765 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2766 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex); 2767 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2768 } 2769 } 2770 out->print_cr("Total fragmentation waste (words) doesn't count 
free space"); 2771 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2772 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2773 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2774 "large count " SIZE_FORMAT, 2775 specialized_count, specialized_waste, small_count, 2776 small_waste, medium_count, medium_waste, humongous_count); 2777 if (Metaspace::using_class_space()) { 2778 print_class_waste(out); 2779 } 2780 } 2781 2782 // Dump global metaspace things from the end of ClassLoaderDataGraph 2783 void MetaspaceAux::dump(outputStream* out) { 2784 out->print_cr("All Metaspace:"); 2785 out->print("data space: "); print_on(out, Metaspace::NonClassType); 2786 out->print("class space: "); print_on(out, Metaspace::ClassType); 2787 print_waste(out); 2788 } 2789 2790 void MetaspaceAux::verify_free_chunks() { 2791 Metaspace::chunk_manager_metadata()->verify(); 2792 if (Metaspace::using_class_space()) { 2793 Metaspace::chunk_manager_class()->verify(); 2794 } 2795 } 2796 2797 void MetaspaceAux::verify_capacity() { 2798 #ifdef ASSERT 2799 size_t running_sum_capacity_bytes = capacity_bytes(); 2800 // For purposes of the running sum of capacity, verify against capacity 2801 size_t capacity_in_use_bytes = capacity_bytes_slow(); 2802 assert(running_sum_capacity_bytes == capacity_in_use_bytes, 2803 "capacity_words() * BytesPerWord " SIZE_FORMAT 2804 " capacity_bytes_slow()" SIZE_FORMAT, 2805 running_sum_capacity_bytes, capacity_in_use_bytes); 2806 for (Metaspace::MetadataType i = Metaspace::ClassType; 2807 i < Metaspace:: MetadataTypeCount; 2808 i = (Metaspace::MetadataType)(i + 1)) { 2809 size_t capacity_in_use_bytes = capacity_bytes_slow(i); 2810 assert(capacity_bytes(i) == capacity_in_use_bytes, 2811 "capacity_bytes(%u) " SIZE_FORMAT 2812 " capacity_bytes_slow(%u)" SIZE_FORMAT, 2813 i, capacity_bytes(i), i, capacity_in_use_bytes); 2814 } 2815 #endif 2816 } 2817 2818 void MetaspaceAux::verify_used() { 2819 #ifdef ASSERT 2820 size_t running_sum_used_bytes = used_bytes(); 2821 // For purposes of the running sum of used, verify against used 2822 size_t used_in_use_bytes = used_bytes_slow(); 2823 assert(used_bytes() == used_in_use_bytes, 2824 "used_bytes() " SIZE_FORMAT 2825 " used_bytes_slow()" SIZE_FORMAT, 2826 used_bytes(), used_in_use_bytes); 2827 for (Metaspace::MetadataType i = Metaspace::ClassType; 2828 i < Metaspace:: MetadataTypeCount; 2829 i = (Metaspace::MetadataType)(i + 1)) { 2830 size_t used_in_use_bytes = used_bytes_slow(i); 2831 assert(used_bytes(i) == used_in_use_bytes, 2832 "used_bytes(%u) " SIZE_FORMAT 2833 " used_bytes_slow(%u)" SIZE_FORMAT, 2834 i, used_bytes(i), i, used_in_use_bytes); 2835 } 2836 #endif 2837 } 2838 2839 void MetaspaceAux::verify_metrics() { 2840 verify_capacity(); 2841 verify_used(); 2842 } 2843 2844 2845 // Metaspace methods 2846 2847 size_t Metaspace::_first_chunk_word_size = 0; 2848 size_t Metaspace::_first_class_chunk_word_size = 0; 2849 2850 size_t Metaspace::_commit_alignment = 0; 2851 size_t Metaspace::_reserve_alignment = 0; 2852 2853 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) { 2854 initialize(lock, type); 2855 } 2856 2857 Metaspace::~Metaspace() { 2858 delete _vsm; 2859 if (using_class_space()) { 2860 delete _class_vsm; 2861 } 2862 } 2863 2864 VirtualSpaceList* Metaspace::_space_list = NULL; 2865 VirtualSpaceList* Metaspace::_class_space_list = NULL; 2866 2867 ChunkManager* Metaspace::_chunk_manager_metadata = NULL; 2868 ChunkManager* Metaspace::_chunk_manager_class = NULL; 2869 2870 #define VIRTUALSPACEMULTIPLIER 2 2871 2872 #ifdef _LP64 2873 
static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1); 2874 2875 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) { 2876 // Figure out the narrow_klass_base and the narrow_klass_shift. The 2877 // narrow_klass_base is the lower of the metaspace base and the cds base 2878 // (if cds is enabled). The narrow_klass_shift depends on the distance 2879 // between the lower base and higher address. 2880 address lower_base; 2881 address higher_address; 2882 #if INCLUDE_CDS 2883 if (UseSharedSpaces) { 2884 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 2885 (address)(metaspace_base + compressed_class_space_size())); 2886 lower_base = MIN2(metaspace_base, cds_base); 2887 } else 2888 #endif 2889 { 2890 higher_address = metaspace_base + compressed_class_space_size(); 2891 lower_base = metaspace_base; 2892 2893 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes; 2894 // If compressed class space fits in lower 32G, we don't need a base. 2895 if (higher_address <= (address)klass_encoding_max) { 2896 lower_base = 0; // Effectively lower base is zero. 2897 } 2898 } 2899 2900 Universe::set_narrow_klass_base(lower_base); 2901 2902 if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) { 2903 Universe::set_narrow_klass_shift(0); 2904 } else { 2905 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces"); 2906 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes); 2907 } 2908 } 2909 2910 #if INCLUDE_CDS 2911 // Return TRUE if the specified metaspace_base and cds_base are close enough 2912 // to work with compressed klass pointers. 2913 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) { 2914 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS"); 2915 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 2916 address lower_base = MIN2((address)metaspace_base, cds_base); 2917 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 2918 (address)(metaspace_base + compressed_class_space_size())); 2919 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax); 2920 } 2921 #endif 2922 2923 // Try to allocate the metaspace at the requested addr. 2924 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) { 2925 assert(using_class_space(), "called improperly"); 2926 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 2927 assert(compressed_class_space_size() < KlassEncodingMetaspaceMax, 2928 "Metaspace size is too big"); 2929 assert_is_ptr_aligned(requested_addr, _reserve_alignment); 2930 assert_is_ptr_aligned(cds_base, _reserve_alignment); 2931 assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment); 2932 2933 // Don't use large pages for the class space. 2934 bool large_pages = false; 2935 2936 #if !(defined(AARCH64) || defined(AIX)) 2937 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(), 2938 _reserve_alignment, 2939 large_pages, 2940 requested_addr); 2941 #else // AARCH64 2942 ReservedSpace metaspace_rs; 2943 2944 // Our compressed klass pointers may fit nicely into the lower 32 2945 // bits. 2946 if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) { 2947 metaspace_rs = ReservedSpace(compressed_class_space_size(), 2948 _reserve_alignment, 2949 large_pages, 2950 requested_addr); 2951 } 2952 2953 if (! 
metaspace_rs.is_reserved()) { 2954 // Aarch64: Try to align metaspace so that we can decode a compressed 2955 // klass with a single MOVK instruction. We can do this iff the 2956 // compressed class base is a multiple of 4G. 2957 // Aix: Search for a place where we can find memory. If we need to load 2958 // the base, 4G alignment is helpful, too. 2959 size_t increment = AARCH64_ONLY(4*)G; 2960 for (char *a = (char*)align_ptr_up(requested_addr, increment); 2961 a < (char*)(1024*G); 2962 a += increment) { 2963 if (a == (char *)(32*G)) { 2964 // Go faster from here on. Zero-based is no longer possible. 2965 increment = 4*G; 2966 } 2967 2968 #if INCLUDE_CDS 2969 if (UseSharedSpaces 2970 && ! can_use_cds_with_metaspace_addr(a, cds_base)) { 2971 // We failed to find an aligned base that will reach. Fall 2972 // back to using our requested addr. 2973 metaspace_rs = ReservedSpace(compressed_class_space_size(), 2974 _reserve_alignment, 2975 large_pages, 2976 requested_addr); 2977 break; 2978 } 2979 #endif 2980 2981 metaspace_rs = ReservedSpace(compressed_class_space_size(), 2982 _reserve_alignment, 2983 large_pages, 2984 a); 2985 if (metaspace_rs.is_reserved()) 2986 break; 2987 } 2988 } 2989 2990 #endif // AARCH64 2991 2992 if (!metaspace_rs.is_reserved()) { 2993 #if INCLUDE_CDS 2994 if (UseSharedSpaces) { 2995 size_t increment = align_size_up(1*G, _reserve_alignment); 2996 2997 // Keep trying to allocate the metaspace, increasing the requested_addr 2998 // by 1GB each time, until we reach an address that will no longer allow 2999 // use of CDS with compressed klass pointers. 3000 char *addr = requested_addr; 3001 while (!metaspace_rs.is_reserved() && (addr + increment > addr) && 3002 can_use_cds_with_metaspace_addr(addr + increment, cds_base)) { 3003 addr = addr + increment; 3004 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3005 _reserve_alignment, large_pages, addr); 3006 } 3007 } 3008 #endif 3009 // If no successful allocation then try to allocate the space anywhere. If 3010 // that fails then OOM doom. At this point we cannot try allocating the 3011 // metaspace as if UseCompressedClassPointers is off because too much 3012 // initialization has happened that depends on UseCompressedClassPointers. 3013 // So, UseCompressedClassPointers cannot be turned off at this point. 3014 if (!metaspace_rs.is_reserved()) { 3015 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3016 _reserve_alignment, large_pages); 3017 if (!metaspace_rs.is_reserved()) { 3018 vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes", 3019 compressed_class_space_size())); 3020 } 3021 } 3022 } 3023 3024 // If we got here then the metaspace got allocated. 3025 MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass); 3026 3027 #if INCLUDE_CDS 3028 // Verify that we can use shared spaces. Otherwise, turn off CDS. 3029 if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) { 3030 FileMapInfo::stop_sharing_and_unmap( 3031 "Could not allocate metaspace at a compatible address"); 3032 } 3033 #endif 3034 set_narrow_klass_base_and_shift((address)metaspace_rs.base(), 3035 UseSharedSpaces ? 
(address)cds_base : 0); 3036 3037 initialize_class_space(metaspace_rs); 3038 3039 if (log_is_enabled(Trace, gc, metaspace)) { 3040 Log(gc, metaspace) log; 3041 ResourceMark rm; 3042 print_compressed_class_space(log.trace_stream(), requested_addr); 3043 } 3044 } 3045 3046 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) { 3047 st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d", 3048 p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift()); 3049 if (_class_space_list != NULL) { 3050 address base = (address)_class_space_list->current_virtual_space()->bottom(); 3051 st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT, 3052 compressed_class_space_size(), p2i(base)); 3053 if (requested_addr != 0) { 3054 st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr)); 3055 } 3056 st->cr(); 3057 } 3058 } 3059 3060 // For UseCompressedClassPointers the class space is reserved above the top of 3061 // the Java heap. The argument passed in is at the base of the compressed space. 3062 void Metaspace::initialize_class_space(ReservedSpace rs) { 3063 // The reserved space size may be bigger because of alignment, esp with UseLargePages 3064 assert(rs.size() >= CompressedClassSpaceSize, 3065 SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize); 3066 assert(using_class_space(), "Must be using class space"); 3067 _class_space_list = new VirtualSpaceList(rs); 3068 _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk); 3069 3070 if (!_class_space_list->initialization_succeeded()) { 3071 vm_exit_during_initialization("Failed to setup compressed class space virtual space list."); 3072 } 3073 } 3074 3075 #endif 3076 3077 void Metaspace::ergo_initialize() { 3078 if (DumpSharedSpaces) { 3079 // Using large pages when dumping the shared archive is currently not implemented. 3080 FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false); 3081 } 3082 3083 size_t page_size = os::vm_page_size(); 3084 if (UseLargePages && UseLargePagesInMetaspace) { 3085 page_size = os::large_page_size(); 3086 } 3087 3088 _commit_alignment = page_size; 3089 _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity()); 3090 3091 // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will 3092 // override if MaxMetaspaceSize was set on the command line or not. 3093 // This information is needed later to conform to the specification of the 3094 // java.lang.management.MemoryUsage API. 3095 // 3096 // Ideally, we would be able to set the default value of MaxMetaspaceSize in 3097 // globals.hpp to the aligned value, but this is not possible, since the 3098 // alignment depends on other flags being parsed. 
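  // Illustrative example (assuming _reserve_alignment == 1*M; the real
  // value is the larger of the page size and the allocation
  // granularity): a command line of -XX:MaxMetaspaceSize=100000001 is
  // aligned down to
  //
  //   align_size_down_bounded(100000001, 1*M) == 99614720 bytes == 95*M,
  //
  // and that aligned value, not the raw flag value, is what the VM uses
  // from here on.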
3099 MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment); 3100 3101 if (MetaspaceSize > MaxMetaspaceSize) { 3102 MetaspaceSize = MaxMetaspaceSize; 3103 } 3104 3105 MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment); 3106 3107 assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize"); 3108 3109 MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment); 3110 MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment); 3111 3112 CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment); 3113 set_compressed_class_space_size(CompressedClassSpaceSize); 3114 } 3115 3116 void Metaspace::global_initialize() { 3117 MetaspaceGC::initialize(); 3118 3119 // Initialize the alignment for shared spaces. 3120 int max_alignment = os::vm_allocation_granularity(); 3121 size_t cds_total = 0; 3122 3123 MetaspaceShared::set_max_alignment(max_alignment); 3124 3125 if (DumpSharedSpaces) { 3126 #if INCLUDE_CDS 3127 MetaspaceShared::estimate_regions_size(); 3128 3129 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment); 3130 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment); 3131 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment); 3132 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment); 3133 3134 // Initialize with the sum of the shared space sizes. The read-only 3135 // and read write metaspace chunks will be allocated out of this and the 3136 // remainder is the misc code and data chunks. 3137 cds_total = FileMapInfo::shared_spaces_size(); 3138 cds_total = align_size_up(cds_total, _reserve_alignment); 3139 _space_list = new VirtualSpaceList(cds_total/wordSize); 3140 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); 3141 3142 if (!_space_list->initialization_succeeded()) { 3143 vm_exit_during_initialization("Unable to dump shared archive.", NULL); 3144 } 3145 3146 #ifdef _LP64 3147 if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) { 3148 vm_exit_during_initialization("Unable to dump shared archive.", 3149 err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space (" 3150 SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed " 3151 "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(), 3152 cds_total + compressed_class_space_size(), UnscaledClassSpaceMax)); 3153 } 3154 3155 // Set the compressed klass pointer base so that decoding of these pointers works 3156 // properly when creating the shared archive. 
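    // A compressed klass pointer is decoded as
    //
    //   Klass* k = (Klass*)(narrow_klass_base + ((uintptr_t)narrow_k << narrow_klass_shift));
    //
    // so with the shift set to 0 below, a narrow klass value is simply the
    // byte offset of the Klass from the shared space bottom (illustrative:
    // base 0x0000000800000000 + narrow value 0x1000 -> 0x0000000800001000).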
3157     assert(UseCompressedOops && UseCompressedClassPointers,
3158            "UseCompressedOops and UseCompressedClassPointers must be set");
3159     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3160     log_develop_trace(gc, metaspace)("Setting narrow_klass_base to Address: " PTR_FORMAT,
3161                                      p2i(_space_list->current_virtual_space()->bottom()));
3162
3163     Universe::set_narrow_klass_shift(0);
3164 #endif // _LP64
3165 #endif // INCLUDE_CDS
3166   } else {
3167 #if INCLUDE_CDS
3168     if (UseSharedSpaces) {
3169       // If using shared space, open the file that contains the shared space
3170       // and map in the memory before initializing the rest of metaspace (so
3171       // the addresses don't conflict).
3172       address cds_address = NULL;
3173       FileMapInfo* mapinfo = new FileMapInfo();
3174
3175       // Open the shared archive file, read and validate the header. If
3176       // initialization fails, shared spaces [UseSharedSpaces] are
3177       // disabled and the file is closed.
3178       // The spaces are also mapped in now.
3179       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3180         cds_total = FileMapInfo::shared_spaces_size();
3181         cds_address = (address)mapinfo->header()->region_addr(0);
3182 #ifdef _LP64
3183         if (using_class_space()) {
3184           char* cds_end = (char*)(cds_address + cds_total);
3185           cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3186           // If UseCompressedClassPointers is set then allocate the metaspace area
3187           // above the heap and above the CDS area (if it exists).
3188           allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3189           // Map the shared string space after compressed pointers,
3190           // because it relies on the compressed class pointer setup to work.
3191           mapinfo->map_string_regions();
3192         }
3193 #endif // _LP64
3194       } else {
3195         assert(!mapinfo->is_open() && !UseSharedSpaces,
3196                "archive file not closed or shared spaces not disabled.");
3197       }
3198     }
3199 #endif // INCLUDE_CDS
3200
3201 #ifdef _LP64
3202     if (!UseSharedSpaces && using_class_space()) {
3203       char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3204       allocate_metaspace_compressed_klass_ptrs(base, 0);
3205     }
3206 #endif // _LP64
3207
3208     // Initialize these before initializing the VirtualSpaceList.
3209     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3210     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3211     // Make the first class chunk bigger than a medium chunk so it's not put
3212     // on the medium chunk list. The next chunk will be small and progress
3213     // from there. This size was calculated by running -version.
3214     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3215                                         (CompressedClassSpaceSize/BytesPerWord)*2);
3216     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3217     // Arbitrarily set the initial virtual space to a multiple
3218     // of the boot class loader size.
3219     size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3220     word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3221
3222     // Initialize the list of virtual spaces.
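    // Worked example with hypothetical defaults (actual values depend on
    // platform and flags): InitialBootClassLoaderMetaspaceSize = 4*M with
    // 8-byte words gives _first_chunk_word_size = 512*K words; MediumChunk * 6
    // (48*K words) is far below (CompressedClassSpaceSize / BytesPerWord) * 2
    // for a 1*G class space, so _first_class_chunk_word_size becomes 48*K
    // words. The reservation request below is VIRTUALSPACEMULTIPLIER times the
    // boot chunk size, rounded up to the reserve alignment.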
3223 _space_list = new VirtualSpaceList(word_size); 3224 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); 3225 3226 if (!_space_list->initialization_succeeded()) { 3227 vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL); 3228 } 3229 } 3230 3231 _tracer = new MetaspaceTracer(); 3232 } 3233 3234 void Metaspace::post_initialize() { 3235 MetaspaceGC::post_initialize(); 3236 } 3237 3238 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype, 3239 size_t chunk_word_size, 3240 size_t chunk_bunch) { 3241 // Get a chunk from the chunk freelist 3242 Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size); 3243 if (chunk != NULL) { 3244 return chunk; 3245 } 3246 3247 return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch); 3248 } 3249 3250 void Metaspace::initialize(Mutex* lock, MetaspaceType type) { 3251 3252 assert(space_list() != NULL, 3253 "Metadata VirtualSpaceList has not been initialized"); 3254 assert(chunk_manager_metadata() != NULL, 3255 "Metadata ChunkManager has not been initialized"); 3256 3257 _vsm = new SpaceManager(NonClassType, lock); 3258 if (_vsm == NULL) { 3259 return; 3260 } 3261 size_t word_size; 3262 size_t class_word_size; 3263 vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size); 3264 3265 if (using_class_space()) { 3266 assert(class_space_list() != NULL, 3267 "Class VirtualSpaceList has not been initialized"); 3268 assert(chunk_manager_class() != NULL, 3269 "Class ChunkManager has not been initialized"); 3270 3271 // Allocate SpaceManager for classes. 3272 _class_vsm = new SpaceManager(ClassType, lock); 3273 if (_class_vsm == NULL) { 3274 return; 3275 } 3276 } 3277 3278 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 3279 3280 // Allocate chunk for metadata objects 3281 Metachunk* new_chunk = get_initialization_chunk(NonClassType, 3282 word_size, 3283 vsm()->medium_chunk_bunch()); 3284 // For dumping shared archive, report error if allocation has failed. 3285 if (DumpSharedSpaces && new_chunk == NULL) { 3286 report_insufficient_metaspace(MetaspaceAux::committed_bytes() + word_size * BytesPerWord); 3287 } 3288 assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks"); 3289 if (new_chunk != NULL) { 3290 // Add to this manager's list of chunks in use and current_chunk(). 3291 vsm()->add_chunk(new_chunk, true); 3292 } 3293 3294 // Allocate chunk for class metadata objects 3295 if (using_class_space()) { 3296 Metachunk* class_chunk = get_initialization_chunk(ClassType, 3297 class_word_size, 3298 class_vsm()->medium_chunk_bunch()); 3299 if (class_chunk != NULL) { 3300 class_vsm()->add_chunk(class_chunk, true); 3301 } else { 3302 // For dumping shared archive, report error if allocation has failed. 3303 if (DumpSharedSpaces) { 3304 report_insufficient_metaspace(MetaspaceAux::committed_bytes() + class_word_size * BytesPerWord); 3305 } 3306 } 3307 } 3308 3309 _alloc_record_head = NULL; 3310 _alloc_record_tail = NULL; 3311 } 3312 3313 size_t Metaspace::align_word_size_up(size_t word_size) { 3314 size_t byte_size = word_size * wordSize; 3315 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize; 3316 } 3317 3318 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) { 3319 // DumpSharedSpaces doesn't use class metadata area (yet) 3320 // Also, don't use class_vsm() unless UseCompressedClassPointers is true. 
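// Example: an InstanceKlass allocation arrives here with mdtype == ClassType
// and is served from class_vsm() when a compressed class space is in use;
// method, constant pool and other non-class metadata arrive as NonClassType
// and are served from vsm().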
3321   if (is_class_space_allocation(mdtype)) {
3322     return class_vsm()->allocate(word_size);
3323   } else {
3324     return vsm()->allocate(word_size);
3325   }
3326 }
3327
3328 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3329   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3330   assert(delta_bytes > 0, "Must be");
3331
3332   size_t before = 0;
3333   size_t after = 0;
3334   MetaWord* res;
3335   bool incremented;
3336
3337   // Each thread increments the HWM at most once. Even if the thread fails to increment
3338   // the HWM, an allocation is still attempted. This is because another thread must then
3339   // have incremented the HWM and therefore the allocation might still succeed.
3340   do {
3341     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
3342     res = allocate(word_size, mdtype);
3343   } while (!incremented && res == NULL);
3344
3345   if (incremented) {
3346     tracer()->report_gc_threshold(before, after,
3347                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3348     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
3349   }
3350
3351   return res;
3352 }
3353
3354 // Space allocated in the Metaspace. This may
3355 // span several metadata virtual spaces.
3356 char* Metaspace::bottom() const {
3357   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3358   return (char*)vsm()->current_chunk()->bottom();
3359 }
3360
3361 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3362   if (mdtype == ClassType) {
3363     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3364   } else {
3365     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3366   }
3367 }
3368
3369 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3370   if (mdtype == ClassType) {
3371     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3372   } else {
3373     return vsm()->sum_free_in_chunks_in_use();
3374   }
3375 }
3376
3377 // Space capacity in the Metaspace. It includes
3378 // space in the list of chunks from which allocations
3379 // have been made. It does not include space in the global
3380 // freelist or space available in the dictionary, since
3381 // that space is already counted in some chunk.
3382 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3383   if (mdtype == ClassType) {
3384     return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3385   } else {
3386     return vsm()->sum_capacity_in_chunks_in_use();
3387   }
3388 }
3389
3390 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3391   return used_words_slow(mdtype) * BytesPerWord;
3392 }
3393
3394 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3395   return capacity_words_slow(mdtype) * BytesPerWord;
3396 }
3397
3398 size_t Metaspace::allocated_blocks_bytes() const {
3399   return vsm()->allocated_blocks_bytes() +
3400          (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
3401 }
3402
3403 size_t Metaspace::allocated_chunks_bytes() const {
3404   return vsm()->allocated_chunks_bytes() +
3405          (using_class_space() ?
class_vsm()->allocated_chunks_bytes() : 0); 3406 } 3407 3408 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) { 3409 assert(!SafepointSynchronize::is_at_safepoint() 3410 || Thread::current()->is_VM_thread(), "should be the VM thread"); 3411 3412 if (DumpSharedSpaces && PrintSharedSpaces) { 3413 record_deallocation(ptr, vsm()->get_raw_word_size(word_size)); 3414 } 3415 3416 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag); 3417 3418 if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) { 3419 // Dark matter. Too small for dictionary. 3420 #ifdef ASSERT 3421 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5); 3422 #endif 3423 return; 3424 } 3425 if (is_class && using_class_space()) { 3426 class_vsm()->deallocate(ptr, word_size); 3427 } else { 3428 vsm()->deallocate(ptr, word_size); 3429 } 3430 } 3431 3432 3433 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, 3434 bool read_only, MetaspaceObj::Type type, TRAPS) { 3435 if (HAS_PENDING_EXCEPTION) { 3436 assert(false, "Should not allocate with exception pending"); 3437 return NULL; // caller does a CHECK_NULL too 3438 } 3439 3440 assert(loader_data != NULL, "Should never pass around a NULL loader_data. " 3441 "ClassLoaderData::the_null_class_loader_data() should have been used."); 3442 3443 // Allocate in metaspaces without taking out a lock, because it deadlocks 3444 // with the SymbolTable_lock. Dumping is single threaded for now. We'll have 3445 // to revisit this for application class data sharing. 3446 if (DumpSharedSpaces) { 3447 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity"); 3448 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace(); 3449 MetaWord* result = space->allocate(word_size, NonClassType); 3450 if (result == NULL) { 3451 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite); 3452 } 3453 if (PrintSharedSpaces) { 3454 space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size)); 3455 } 3456 3457 // Zero initialize. 3458 Copy::fill_to_words((HeapWord*)result, word_size, 0); 3459 3460 return result; 3461 } 3462 3463 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType; 3464 3465 // Try to allocate metadata. 3466 MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); 3467 3468 if (result == NULL) { 3469 tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype); 3470 3471 // Allocation failed. 3472 if (is_init_completed()) { 3473 // Only start a GC if the bootstrapping has completed. 3474 3475 // Try to clean out some memory and retry. 3476 result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation( 3477 loader_data, word_size, mdtype); 3478 } 3479 } 3480 3481 if (result == NULL) { 3482 SpaceManager* sm; 3483 if (is_class_space_allocation(mdtype)) { 3484 sm = loader_data->metaspace_non_null()->class_vsm(); 3485 } else { 3486 sm = loader_data->metaspace_non_null()->vsm(); 3487 } 3488 3489 result = sm->get_small_chunk_and_allocate(word_size); 3490 3491 if (result == NULL) { 3492 report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL); 3493 } 3494 } 3495 3496 // Zero initialize. 
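  // (fill_to_words() stores word_size zeroed words at result, so metadata is
  // handed out in a deterministic state on this path, just as in the
  // DumpSharedSpaces branch above.)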
3497 Copy::fill_to_words((HeapWord*)result, word_size, 0); 3498 3499 return result; 3500 } 3501 3502 size_t Metaspace::class_chunk_size(size_t word_size) { 3503 assert(using_class_space(), "Has to use class space"); 3504 return class_vsm()->calc_chunk_size(word_size); 3505 } 3506 3507 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) { 3508 tracer()->report_metadata_oom(loader_data, word_size, type, mdtype); 3509 3510 // If result is still null, we are out of memory. 3511 Log(gc, metaspace, freelist) log; 3512 if (log.is_trace()) { 3513 log.trace("Metaspace allocation failed for size " SIZE_FORMAT, word_size); 3514 ResourceMark rm; 3515 outputStream* out = log.trace_stream(); 3516 if (loader_data->metaspace_or_null() != NULL) { 3517 loader_data->dump(out); 3518 } 3519 MetaspaceAux::dump(out); 3520 } 3521 3522 bool out_of_compressed_class_space = false; 3523 if (is_class_space_allocation(mdtype)) { 3524 Metaspace* metaspace = loader_data->metaspace_non_null(); 3525 out_of_compressed_class_space = 3526 MetaspaceAux::committed_bytes(Metaspace::ClassType) + 3527 (metaspace->class_chunk_size(word_size) * BytesPerWord) > 3528 CompressedClassSpaceSize; 3529 } 3530 3531 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support 3532 const char* space_string = out_of_compressed_class_space ? 3533 "Compressed class space" : "Metaspace"; 3534 3535 report_java_out_of_memory(space_string); 3536 3537 if (JvmtiExport::should_post_resource_exhausted()) { 3538 JvmtiExport::post_resource_exhausted( 3539 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, 3540 space_string); 3541 } 3542 3543 if (!is_init_completed()) { 3544 vm_exit_during_initialization("OutOfMemoryError", space_string); 3545 } 3546 3547 if (out_of_compressed_class_space) { 3548 THROW_OOP(Universe::out_of_memory_error_class_metaspace()); 3549 } else { 3550 THROW_OOP(Universe::out_of_memory_error_metaspace()); 3551 } 3552 } 3553 3554 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) { 3555 switch (mdtype) { 3556 case Metaspace::ClassType: return "Class"; 3557 case Metaspace::NonClassType: return "Metadata"; 3558 default: 3559 assert(false, "Got bad mdtype: %d", (int) mdtype); 3560 return NULL; 3561 } 3562 } 3563 3564 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) { 3565 assert(DumpSharedSpaces, "sanity"); 3566 3567 int byte_size = (int)word_size * wordSize; 3568 AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size); 3569 3570 if (_alloc_record_head == NULL) { 3571 _alloc_record_head = _alloc_record_tail = rec; 3572 } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) { 3573 _alloc_record_tail->_next = rec; 3574 _alloc_record_tail = rec; 3575 } else { 3576 // slow linear search, but this doesn't happen that often, and only when dumping 3577 for (AllocRecord *old = _alloc_record_head; old; old = old->_next) { 3578 if (old->_ptr == ptr) { 3579 assert(old->_type == MetaspaceObj::DeallocatedType, "sanity"); 3580 int remain_bytes = old->_byte_size - byte_size; 3581 assert(remain_bytes >= 0, "sanity"); 3582 old->_type = type; 3583 3584 if (remain_bytes == 0) { 3585 delete(rec); 3586 } else { 3587 address remain_ptr = address(ptr) + byte_size; 3588 rec->_ptr = remain_ptr; 3589 rec->_byte_size = remain_bytes; 3590 rec->_type = MetaspaceObj::DeallocatedType; 3591 rec->_next = old->_next; 3592 old->_byte_size = byte_size; 3593 old->_next = rec; 3594 } 3595 return; 
;
3596 } 3597 } 3598 assert(0, "reallocating a freed pointer that was not recorded"); 3599 } 3600 } 3601 3602 void Metaspace::record_deallocation(void* ptr, size_t word_size) { 3603 assert(DumpSharedSpaces, "sanity"); 3604 3605 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) { 3606 if (rec->_ptr == ptr) { 3607 assert(rec->_byte_size == (int)word_size * wordSize, "sanity"); 3608 rec->_type = MetaspaceObj::DeallocatedType; 3609 return; 3610 } 3611 } 3612 3613 assert(0, "deallocating a pointer that was not recorded"); 3614 } 3615 3616 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) { 3617 assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces"); 3618 3619 address last_addr = (address)bottom(); 3620 3621 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) { 3622 address ptr = rec->_ptr; 3623 if (last_addr < ptr) { 3624 closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr); 3625 } 3626 closure->doit(ptr, rec->_type, rec->_byte_size); 3627 last_addr = ptr + rec->_byte_size; 3628 } 3629 3630 address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType); 3631 if (last_addr < top) { 3632 closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr); 3633 } 3634 } 3635 3636 void Metaspace::purge(MetadataType mdtype) { 3637 get_space_list(mdtype)->purge(get_chunk_manager(mdtype)); 3638 } 3639 3640 void Metaspace::purge() { 3641 MutexLockerEx cl(SpaceManager::expand_lock(), 3642 Mutex::_no_safepoint_check_flag); 3643 purge(NonClassType); 3644 if (using_class_space()) { 3645 purge(ClassType); 3646 } 3647 } 3648 3649 void Metaspace::print_on(outputStream* out) const { 3650 // Print both class virtual space counts and metaspace. 3651 if (Verbose) { 3652 vsm()->print_on(out); 3653 if (using_class_space()) { 3654 class_vsm()->print_on(out); 3655 } 3656 } 3657 } 3658 3659 bool Metaspace::contains(const void* ptr) { 3660 if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) { 3661 return true; 3662 } 3663 3664 if (using_class_space() && get_space_list(ClassType)->contains(ptr)) { 3665 return true; 3666 } 3667 3668 return get_space_list(NonClassType)->contains(ptr); 3669 } 3670 3671 void Metaspace::verify() { 3672 vsm()->verify(); 3673 if (using_class_space()) { 3674 class_vsm()->verify(); 3675 } 3676 } 3677 3678 void Metaspace::dump(outputStream* const out) const { 3679 out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm())); 3680 vsm()->dump(out); 3681 if (using_class_space()) { 3682 out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm())); 3683 class_vsm()->dump(out); 3684 } 3685 } 3686 3687 /////////////// Unit tests /////////////// 3688 3689 #ifndef PRODUCT 3690 3691 class TestMetaspaceAuxTest : AllStatic { 3692 public: 3693 static void test_reserved() { 3694 size_t reserved = MetaspaceAux::reserved_bytes(); 3695 3696 assert(reserved > 0, "assert"); 3697 3698 size_t committed = MetaspaceAux::committed_bytes(); 3699 assert(committed <= reserved, "assert"); 3700 3701 size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType); 3702 assert(reserved_metadata > 0, "assert"); 3703 assert(reserved_metadata <= reserved, "assert"); 3704 3705 if (UseCompressedClassPointers) { 3706 size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType); 3707 assert(reserved_class > 0, "assert"); 3708 assert(reserved_class < reserved, "assert"); 3709 } 3710 } 3711 3712 static void test_committed() { 3713 size_t committed = MetaspaceAux::committed_bytes(); 3714 
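    // (By the time this test runs, the boot class loader has already committed
    // metaspace for its first chunks, so a positive value is expected.)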
3715     assert(committed > 0, "assert");
3716
3717     size_t reserved = MetaspaceAux::reserved_bytes();
3718     assert(committed <= reserved, "assert");
3719
3720     size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3721     assert(committed_metadata > 0, "assert");
3722     assert(committed_metadata <= committed, "assert");
3723
3724     if (UseCompressedClassPointers) {
3725       size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3726       assert(committed_class > 0, "assert");
3727       assert(committed_class < committed, "assert");
3728     }
3729   }
3730
3731   static void test_virtual_space_list_large_chunk() {
3732     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3733     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3734     // A size larger than VirtualSpaceSize (256k), with one page added so that the
3735     // result is _not_ vm_allocation_granularity aligned on Windows.
3736     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3737     large_size += (os::vm_page_size()/BytesPerWord);
3738     vs_list->get_new_chunk(large_size, large_size, 0);
3739   }
3740
3741   static void test() {
3742     test_reserved();
3743     test_committed();
3744     test_virtual_space_list_large_chunk();
3745   }
3746 };
3747
3748 void TestMetaspaceAux_test() {
3749   TestMetaspaceAuxTest::test();
3750 }
3751
3752 class TestVirtualSpaceNodeTest {
3753   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3754                        size_t& num_small_chunks,
3755                        size_t& num_specialized_chunks) {
3756     num_medium_chunks = words_left / MediumChunk;
3757     words_left = words_left % MediumChunk;
3758
3759     num_small_chunks = words_left / SmallChunk;
3760     words_left = words_left % SmallChunk;
3761     // How many specialized chunks can we get?
3762     num_specialized_chunks = words_left / SpecializedChunk;
3763     assert(words_left % SpecializedChunk == 0, "should be nothing left");
3764   }
3765
3766  public:
3767   static void test() {
3768     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3769     const size_t vsn_test_size_words = MediumChunk * 4;
3770     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3771
3772     // The chunk sizes must be multiples of each other, or this will fail.
3773     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3774     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3775
3776     { // No committed memory in VSN
3777       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3778       VirtualSpaceNode vsn(vsn_test_size_bytes);
3779       vsn.initialize();
3780       vsn.retire(&cm);
3781       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3782     }
3783
3784     { // All of VSN is committed, half is used by chunks
3785       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3786       VirtualSpaceNode vsn(vsn_test_size_bytes);
3787       vsn.initialize();
3788       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3789       vsn.get_chunk_vs(MediumChunk);
3790       vsn.get_chunk_vs(MediumChunk);
3791       vsn.retire(&cm);
3792       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3793       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3794     }
3795
3796     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3797     // This doesn't work for systems with vm_page_size >= 16K.
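    // Worked example for a system with 4K pages and 8-byte words:
    // page_chunks == 2048 words. After a SmallChunk (512) and a
    // SpecializedChunk (128) are carved out, words_left == 1408, and
    // chunk_up() yields 0 medium, 2 small (1024 words) and 3 specialized
    // (384 words) chunks, which is what the freelist asserts below verify.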
3798     if (page_chunks < MediumChunk) {
3799       // 4 pages of VSN is committed, some is used by chunks
3800       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3801       VirtualSpaceNode vsn(vsn_test_size_bytes);
3802
3803       vsn.initialize();
3804       vsn.expand_by(page_chunks, page_chunks);
3805       vsn.get_chunk_vs(SmallChunk);
3806       vsn.get_chunk_vs(SpecializedChunk);
3807       vsn.retire(&cm);
3808
3809       // committed - used = words left to retire
3810       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3811
3812       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3813       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3814
3815       assert(num_medium_chunks == 0, "should not get any medium chunks");
3816       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should match the computed chunk count");
3817       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3818     }
3819
3820     { // Half of VSN is committed, a humongous chunk is used
3821       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3822       VirtualSpaceNode vsn(vsn_test_size_bytes);
3823       vsn.initialize();
3824       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3825       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3826       vsn.retire(&cm);
3827
3828       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3829       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3830       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3831
3832       assert(num_medium_chunks == 0, "should not get any medium chunks");
3833       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should match the computed chunk count");
3834       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3835     }
3836
3837   }
3838
3839 #define assert_is_available_positive(word_size) \
3840   assert(vsn.is_available(word_size), \
3841          #word_size ": " PTR_FORMAT " bytes were not available in " \
3842          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3843          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
3844
3845 #define assert_is_available_negative(word_size) \
3846   assert(!vsn.is_available(word_size), \
3847          #word_size ": " PTR_FORMAT " bytes should not be available in " \
3848          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3849          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
3850
3851   static void test_is_available_positive() {
3852     // Reserve some memory.
3853     VirtualSpaceNode vsn(os::vm_allocation_granularity());
3854     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3855
3856     // Commit some memory.
3857     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3858     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3859     assert(expanded, "Failed to commit");
3860
3861     // Check that is_available accepts the committed size.
3862     assert_is_available_positive(commit_word_size);
3863
3864     // Check that is_available accepts half the committed size.
3865     size_t expand_word_size = commit_word_size / 2;
3866     assert_is_available_positive(expand_word_size);
3867   }
3868
3869   static void test_is_available_negative() {
3870     // Reserve some memory.
3871     VirtualSpaceNode vsn(os::vm_allocation_granularity());
3872     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3873
3874     // Commit some memory.
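    // (expand_by() takes word counts: one allocation granularity worth of
    // bytes is converted with BytesPerWord first, as in the tests above.)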
3875 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord; 3876 bool expanded = vsn.expand_by(commit_word_size, commit_word_size); 3877 assert(expanded, "Failed to commit"); 3878 3879 // Check that is_available doesn't accept a too large size. 3880 size_t two_times_commit_word_size = commit_word_size * 2; 3881 assert_is_available_negative(two_times_commit_word_size); 3882 } 3883 3884 static void test_is_available_overflow() { 3885 // Reserve some memory. 3886 VirtualSpaceNode vsn(os::vm_allocation_granularity()); 3887 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); 3888 3889 // Commit some memory. 3890 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord; 3891 bool expanded = vsn.expand_by(commit_word_size, commit_word_size); 3892 assert(expanded, "Failed to commit"); 3893 3894 // Calculate a size that will overflow the virtual space size. 3895 void* virtual_space_max = (void*)(uintptr_t)-1; 3896 size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1); 3897 size_t overflow_size = bottom_to_max + BytesPerWord; 3898 size_t overflow_word_size = overflow_size / BytesPerWord; 3899 3900 // Check that is_available can handle the overflow. 3901 assert_is_available_negative(overflow_word_size); 3902 } 3903 3904 static void test_is_available() { 3905 TestVirtualSpaceNodeTest::test_is_available_positive(); 3906 TestVirtualSpaceNodeTest::test_is_available_negative(); 3907 TestVirtualSpaceNodeTest::test_is_available_overflow(); 3908 } 3909 }; 3910 3911 void TestVirtualSpaceNode_test() { 3912 TestVirtualSpaceNodeTest::test(); 3913 TestVirtualSpaceNodeTest::test_is_available(); 3914 } 3915 #endif