/*
 * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/gcLocker.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bound");
  return (ChunkIndex) (i+1);
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;
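
// Note that every chunk size above is a multiple of the smallest one:
// 128 words divides 256, 512, 4K and 8K.  VirtualSpaceNode::retire() relies
// on this to carve leftover committed space into smaller chunks without waste.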

// Manages the global free lists of chunks.
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free list of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  //   HumongousChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Total size and count of the free chunks in all lists of this ChunkManager.
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
             _free_chunks_total > 0,
             "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // Add or delete (return) a chunk to the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  static ChunkIndex list_index(size_t size);

  // Remove the chunk from its freelist.  It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks lists
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks lists
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Remove from a list by size.  Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);

#define index_bounds_check(index)                                        \
  assert(index == SpecializedIndex ||                                    \
         index == SmallIndex ||                                          \
         index == MediumIndex ||                                         \
         index == HumongousIndex, err_msg("Bad index: %d", (int) index))

  size_t num_free_chunks(ChunkIndex index) const {
    index_bounds_check(index);

    if (index == HumongousIndex) {
      return _humongous_dictionary.total_free_blocks();
    }

    ssize_t count = _free_chunks[index].count();
    return count == -1 ? 0 : (size_t) count;
  }

  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
    index_bounds_check(index);

    size_t word_size = 0;
    if (index == HumongousIndex) {
      word_size = _humongous_dictionary.total_size();
    } else {
      const size_t size_per_chunk_in_words = _free_chunks[index].size();
      word_size = size_per_chunk_in_words * num_free_chunks(index);
    }

    return word_size * BytesPerWord;
  }

  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
                                         num_free_chunks(SmallIndex),
                                         num_free_chunks(MediumIndex),
                                         num_free_chunks(HumongousIndex),
                                         size_free_chunks_in_bytes(SpecializedIndex),
                                         size_free_chunks_in_bytes(SmallIndex),
                                         size_free_chunks_in_bytes(MediumIndex),
                                         size_free_chunks_in_bytes(HumongousIndex));
  }

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;
};

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  BlockTreeDictionary* _dictionary;

  // Only allocate and split from freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get and return a block to the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() {
    if (dictionary() == NULL) {
      return 0;
    } else {
      return dictionary()->total_size();
    }
  }

  void print_on(outputStream* st) const;
};
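
// For example, a 100-word request may be satisfied by a best-fit block of up
// to WasteMultiplier * 100 = 400 words; a 500-word block would be considered
// too wasteful, so get_block() returns it to the dictionary and fails instead.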

// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;

 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // address of next available space in _virtual_space;
  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uint container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to VirtualSpace.
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always adds up because all the chunk sizes are multiples of
  // the smallest chunk size.
  void retire(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

#define assert_is_ptr_aligned(ptr, alignment)         \
  assert(is_ptr_aligned(ptr, alignment),              \
         err_msg(PTR_FORMAT " is not aligned to "     \
                 SIZE_FORMAT, ptr, alignment))

#define assert_is_size_aligned(size, alignment)       \
  assert(is_size_aligned(size, alignment),            \
         err_msg(SIZE_FORMAT " is not aligned to "    \
                 SIZE_FORMAT, size, alignment))


// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// byte_size is the size of the associated virtualspace.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

#if INCLUDE_CDS
  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // the configured SharedBaseAddress, generally at the top of the Java heap,
  // so other memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get a mmap region anywhere if the SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else
#endif
  {
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uint VirtualSpaceNode::container_count_slow() {
  uint count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

  // Chunk up the unused committed space in the current
  // virtual space and add the chunks to the free list.
  void retire_current_virtual_space();

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  bool contains(const void* ptr);

  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;

// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Number of small chunks to allocate to a manager
  // If class space manager, small chunks are unlimited
  static uint const _small_chunk_limit;

  // Sum of all space in allocated chunks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  bool is_class() { return _mdtype == Metaspace::ClassType; }

  // Accessors
  size_t specialized_chunk_size() { return (size_t) is_class() ? ClassSpecializedChunk : SpecializedChunk; }
  size_t small_chunk_size()       { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
  size_t medium_chunk_size()      { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
  size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }

  size_t smallest_chunk_size() { return specialized_chunk_size(); }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums of Metablocks by
  // the given size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager.  That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in-use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // calculate the chunk size to request (for expanding the
  // space available for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
  NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  size_t get_raw_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
};
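
// Worked example for SpaceManager::get_raw_word_size() above, assuming a
// 64-bit VM where sizeof(Metablock) is three words (two list pointers plus a
// size) and Metachunk::object_alignment() is 8 bytes: a 1-word request becomes
// byte_size = 8, is raised to 24 bytes by the MAX2 with sizeof(Metablock),
// stays at 24 after alignment, and a raw size of 3 words is returned.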

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT,
                 _container_count, container_count_slow()));
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(NULL) {}

BlockFreelist::~BlockFreelist() {
  if (_dictionary != NULL) {
    if (Verbose && TraceMetadataChunkAllocation) {
      _dictionary->print_free_lists(gclog_or_tty);
    }
    delete _dictionary;
  }
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  Metablock* free_chunk = ::new (p) Metablock(word_size);
  if (dictionary() == NULL) {
    _dictionary = new BlockTreeDictionary();
  }
  dictionary()->return_chunk(free_chunk);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  if (dictionary() == NULL) {
    return NULL;
  }

  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    // Dark matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    return_block(new_block + word_size, unused);
  }

  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  if (dictionary() == NULL) {
    return;
  }
  dictionary()->print_free_lists(st);
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
      "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    if (TraceMetadataChunkAllocation) {
      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(gclog_or_tty);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned only the middle alignment of the VirtualSpace is used.
  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                                             Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
        "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                 (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
      err_msg("Reserved start was not set properly " PTR_FORMAT
        " != " PTR_FORMAT, reserved()->start(), _rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
      err_msg("Reserved size was not set properly " SIZE_FORMAT
        " != " SIZE_FORMAT, reserved()->word_size(),
        _rs.size() / BytesPerWord));
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
           "[" PTR_FORMAT ", " PTR_FORMAT ", "
           PTR_FORMAT ", " PTR_FORMAT ")",
           vs, capacity / K,
           capacity == 0 ? 0 : used * 100 / capacity,
           bottom(), top(), end(),
           vs->high_boundary());
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                             \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,      \
      err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
              " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
          MetaspaceAux::committed_bytes(), MaxMetaspaceSize));

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->word_size());
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except for at
// unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(SpaceManager::expand_lock());

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->free_chunks(index)->size();

    while (free_words_in_vs() >= chunk_size) {
      DEBUG_ONLY(verify_container_count();)
      Metachunk* chunk = get_chunk_vs(chunk_size);
      assert(chunk != NULL, "allocation should have been successful");

      chunk_manager->return_chunks(index, chunk);
      chunk_manager->inc_free_chunks_total(chunk_size);
      DEBUG_ONLY(verify_container_count();)
    }
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}
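
// VirtualSpaceNode::retire() above carves leftover committed space greedily,
// largest chunk size first: e.g. 9K free words in a non-class node become one
// 8K-word MediumChunk and two 512-word SmallChunks.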

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
        "Reserved memory size differs from requested memory size");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (TraceMetavirtualspaceAllocation && Verbose) {
    VirtualSpaceNode* vsl = current_virtual_space();
    vsl->print_on(gclog_or_tty);
  }
}

bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_size_aligned(min_words, Metaspace::commit_alignment_words());
  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();
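
  // A new node reserves at least VirtualSpaceSize (256K words, i.e. 2M bytes
  // on a 64-bit VM), even when the preferred expansion is smaller.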

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
          "The new VirtualSpace was pre-committed, so it"
          " should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_size_up(medium_chunk_bunch, Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  if (TraceMetadataChunkAllocation && Verbose) {
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* node = iter.get_next();
      node->print_on(st);
    }
  }
}

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GCs.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.
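
// For example, a request whose aligned size falls between MinMetaspaceExpansion
// and MaxMetaspaceExpansion (256K and 4M by default on a 64-bit VM) causes
// delta_capacity_until_GC() below to return the full MaxMetaspaceExpansion,
// so the very next allocation does not immediately hit the new HWM again.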

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_size_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  size_t capacity_until_GC = (size_t) _capacity_until_GC;
  size_t new_value = capacity_until_GC + v;

  if (new_value < capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
  }

  intptr_t expected = (intptr_t) capacity_until_GC;
  intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);

  if (expected != actual) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = capacity_until_GC;
  }
  return true;
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
        err_msg("capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
                capacity_until_gc, committed_bytes));

  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);

  return left_to_commit / BytesPerWord;
}

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceAux::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                  "  minimum_free_percentage: %6.2f"
                  "  maximum_used_percentage: %6.2f",
                  minimum_free_percentage,
                  maximum_used_percentage);
    gclog_or_tty->print_cr("  "
                  "   used_after_gc       : %6.1fKB",
                  used_after_gc / (double) K);
  }

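  // For example, with MinMetaspaceFreeRatio at its default of 40 and 60M of
  // committed metaspace after a GC, minimum_desired_capacity works out to
  // 60M / (1.0 - 0.40) = 100M (and is then clamped to at least MetaspaceSize).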

  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("    expanding:"
                      "  minimum_desired_capacity: %6.1fKB"
                      "  expand_bytes: %6.1fKB"
                      "  MinMetaspaceExpansion: %6.1fKB"
                      "  new metaspace HWM:  %6.1fKB",
                      minimum_desired_capacity / (double) K,
                      expand_bytes / (double) K,
                      MinMetaspaceExpansion / (double) K,
                      new_capacity_until_GC / (double) K);
      }
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  assert(capacity_until_GC >= minimum_desired_capacity,
         err_msg(SIZE_FORMAT " >= " SIZE_FORMAT,
                 capacity_until_GC, minimum_desired_capacity));
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  maximum_free_percentage: %6.2f"
                             "  minimum_used_percentage: %6.2f",
                             maximum_free_percentage,
                             minimum_used_percentage);
      gclog_or_tty->print_cr("  "
                             "  minimum_desired_capacity: %6.1fKB"
                             "  maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K,
                             maximum_desired_capacity / (double) K);
    }

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
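      // For example, with 100M of excess capacity, four consecutive shrinking
      // calls would shrink by 0M, 10M, 40M, and then the full 100M (assuming
      // the excess stayed the same between calls).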
1604 shrink_bytes = shrink_bytes / 100 * current_shrink_factor; 1605 1606 shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment()); 1607 1608 assert(shrink_bytes <= max_shrink_bytes, 1609 err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, 1610 shrink_bytes, max_shrink_bytes)); 1611 if (current_shrink_factor == 0) { 1612 _shrink_factor = 10; 1613 } else { 1614 _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100); 1615 } 1616 if (PrintGCDetails && Verbose) { 1617 gclog_or_tty->print_cr(" " 1618 " shrinking:" 1619 " initSize: %.1fK" 1620 " maximum_desired_capacity: %.1fK", 1621 MetaspaceSize / (double) K, 1622 maximum_desired_capacity / (double) K); 1623 gclog_or_tty->print_cr(" " 1624 " shrink_bytes: %.1fK" 1625 " current_shrink_factor: %d" 1626 " new shrink factor: %d" 1627 " MinMetaspaceExpansion: %.1fK", 1628 shrink_bytes / (double) K, 1629 current_shrink_factor, 1630 _shrink_factor, 1631 MinMetaspaceExpansion / (double) K); 1632 } 1633 } 1634 } 1635 1636 // Don't shrink unless it's significant 1637 if (shrink_bytes >= MinMetaspaceExpansion && 1638 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { 1639 size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes); 1640 Metaspace::tracer()->report_gc_threshold(capacity_until_GC, 1641 new_capacity_until_GC, 1642 MetaspaceGCThresholdUpdater::ComputeNewSize); 1643 } 1644 } 1645 1646 // Metadebug methods 1647 1648 void Metadebug::init_allocation_fail_alot_count() { 1649 if (MetadataAllocationFailALot) { 1650 _allocation_fail_alot_count = 1651 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0)); 1652 } 1653 } 1654 1655 #ifdef ASSERT 1656 bool Metadebug::test_metadata_failure() { 1657 if (MetadataAllocationFailALot && 1658 Threads::is_vm_complete()) { 1659 if (_allocation_fail_alot_count > 0) { 1660 _allocation_fail_alot_count--; 1661 } else { 1662 if (TraceMetadataChunkAllocation && Verbose) { 1663 gclog_or_tty->print_cr("Metadata allocation failing for " 1664 "MetadataAllocationFailALot"); 1665 } 1666 init_allocation_fail_alot_count(); 1667 return true; 1668 } 1669 } 1670 return false; 1671 } 1672 #endif 1673 1674 // ChunkManager methods 1675 1676 size_t ChunkManager::free_chunks_total_words() { 1677 return _free_chunks_total; 1678 } 1679 1680 size_t ChunkManager::free_chunks_total_bytes() { 1681 return free_chunks_total_words() * BytesPerWord; 1682 } 1683 1684 size_t ChunkManager::free_chunks_count() { 1685 #ifdef ASSERT 1686 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) { 1687 MutexLockerEx cl(SpaceManager::expand_lock(), 1688 Mutex::_no_safepoint_check_flag); 1689 // This lock is only needed in debug because the verification 1690 // of the _free_chunks_totals walks the list of free chunks 1691 slow_locked_verify_free_chunks_count(); 1692 } 1693 #endif 1694 return _free_chunks_count; 1695 } 1696 1697 void ChunkManager::locked_verify_free_chunks_total() { 1698 assert_lock_strong(SpaceManager::expand_lock()); 1699 assert(sum_free_chunks() == _free_chunks_total, 1700 err_msg("_free_chunks_total " SIZE_FORMAT " is not the" 1701 " same as sum " SIZE_FORMAT, _free_chunks_total, 1702 sum_free_chunks())); 1703 } 1704 1705 void ChunkManager::verify_free_chunks_total() { 1706 MutexLockerEx cl(SpaceManager::expand_lock(), 1707 Mutex::_no_safepoint_check_flag); 1708 locked_verify_free_chunks_total(); 1709 } 1710 1711 void ChunkManager::locked_verify_free_chunks_count() { 1712 assert_lock_strong(SpaceManager::expand_lock()); 1713 
assert(sum_free_chunks_count() == _free_chunks_count,
1714          err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
1715                  " same as sum " SIZE_FORMAT, _free_chunks_count,
1716                  sum_free_chunks_count()));
1717 }
1718
1719 void ChunkManager::verify_free_chunks_count() {
1720 #ifdef ASSERT
1721   MutexLockerEx cl(SpaceManager::expand_lock(),
1722                      Mutex::_no_safepoint_check_flag);
1723   locked_verify_free_chunks_count();
1724 #endif
1725 }
1726
1727 void ChunkManager::verify() {
1728   MutexLockerEx cl(SpaceManager::expand_lock(),
1729                      Mutex::_no_safepoint_check_flag);
1730   locked_verify();
1731 }
1732
1733 void ChunkManager::locked_verify() {
1734   locked_verify_free_chunks_count();
1735   locked_verify_free_chunks_total();
1736 }
1737
1738 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1739   assert_lock_strong(SpaceManager::expand_lock());
1740   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1741                 _free_chunks_total, _free_chunks_count);
1742 }
1743
1744 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1745   assert_lock_strong(SpaceManager::expand_lock());
1746   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
1747                 sum_free_chunks(), sum_free_chunks_count());
1748 }
1749 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1750   return &_free_chunks[index];
1751 }
1752
1753 // These methods, which sum the free chunk lists, are used by the printing
1754 // methods that run in product builds.
1755 size_t ChunkManager::sum_free_chunks() {
1756   assert_lock_strong(SpaceManager::expand_lock());
1757   size_t result = 0;
1758   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1759     ChunkList* list = free_chunks(i);
1760
1761     if (list == NULL) {
1762       continue;
1763     }
1764
1765     result = result + list->count() * list->size();
1766   }
1767   result = result + humongous_dictionary()->total_size();
1768   return result;
1769 }
1770
1771 size_t ChunkManager::sum_free_chunks_count() {
1772   assert_lock_strong(SpaceManager::expand_lock());
1773   size_t count = 0;
1774   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1775     ChunkList* list = free_chunks(i);
1776     if (list == NULL) {
1777       continue;
1778     }
1779     count = count + list->count();
1780   }
1781   count = count + humongous_dictionary()->total_free_blocks();
1782   return count;
1783 }
1784
1785 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1786   ChunkIndex index = list_index(word_size);
1787   assert(index < HumongousIndex, "No humongous list");
1788   return free_chunks(index);
1789 }
1790
1791 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1792   assert_lock_strong(SpaceManager::expand_lock());
1793
1794   slow_locked_verify();
1795
1796   Metachunk* chunk = NULL;
1797   if (list_index(word_size) != HumongousIndex) {
1798     ChunkList* free_list = find_free_chunks_list(word_size);
1799     assert(free_list != NULL, "Sanity check");
1800
1801     chunk = free_list->head();
1802
1803     if (chunk == NULL) {
1804       return NULL;
1805     }
1806
1807     // Remove the chunk as the head of the list.
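    // Note: chunk is the head of the free list for this exact size class,
    // so the removal below is a constant-time unlink of a known node
    // rather than a search of the list.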
1808     free_list->remove_chunk(chunk);
1809
1810     if (TraceMetadataChunkAllocation && Verbose) {
1811       gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1812                              PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1813                              free_list, chunk, chunk->word_size());
1814     }
1815   } else {
1816     chunk = humongous_dictionary()->get_chunk(
1817       word_size,
1818       FreeBlockDictionary<Metachunk>::atLeast);
1819
1820     if (chunk == NULL) {
1821       return NULL;
1822     }
1823
1824     if (TraceMetadataHumongousAllocation) {
1825       size_t waste = chunk->word_size() - word_size;
1826       gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1827                              SIZE_FORMAT " for requested size " SIZE_FORMAT
1828                              " waste " SIZE_FORMAT,
1829                              chunk->word_size(), word_size, waste);
1830     }
1831   }
1832
1833   // Chunk is being removed from the chunks free list.
1834   dec_free_chunks_total(chunk->word_size());
1835
1836   // Remove it from the links to this freelist
1837   chunk->set_next(NULL);
1838   chunk->set_prev(NULL);
1839 #ifdef ASSERT
1840   // Chunk is no longer on any freelist. Setting to false makes
1841   // container_count_slow() work.
1842   chunk->set_is_tagged_free(false);
1843 #endif
1844   chunk->container()->inc_container_count();
1845
1846   slow_locked_verify();
1847   return chunk;
1848 }
1849
1850 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1851   assert_lock_strong(SpaceManager::expand_lock());
1852   slow_locked_verify();
1853
1854   // Take from the beginning of the list
1855   Metachunk* chunk = free_chunks_get(word_size);
1856   if (chunk == NULL) {
1857     return NULL;
1858   }
1859
1860   assert((word_size <= chunk->word_size()) ||
1861          (list_index(chunk->word_size()) == HumongousIndex),
1862          "Non-humongous variable sized chunk");
1863   if (TraceMetadataChunkAllocation) {
1864     size_t list_count;
1865     if (list_index(word_size) < HumongousIndex) {
1866       ChunkList* list = find_free_chunks_list(word_size);
1867       list_count = list->count();
1868     } else {
1869       list_count = humongous_dictionary()->total_count();
1870     }
1871     gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1872                         PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1873                         this, chunk, chunk->word_size(), list_count);
1874     locked_print_free_chunks(gclog_or_tty);
1875   }
1876
1877   return chunk;
1878 }
1879
1880 void ChunkManager::print_on(outputStream* out) const {
1881   if (PrintFLSStatistics != 0) {
1882     const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
1883   }
1884 }
1885
1886 // SpaceManager methods
1887
1888 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1889                                            size_t* chunk_word_size,
1890                                            size_t* class_chunk_word_size) {
1891   switch (type) {
1892   case Metaspace::BootMetaspaceType:
1893     *chunk_word_size = Metaspace::first_chunk_word_size();
1894     *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1895     break;
1896   case Metaspace::ROMetaspaceType:
1897     *chunk_word_size = SharedReadOnlySize / wordSize;
1898     *class_chunk_word_size = ClassSpecializedChunk;
1899     break;
1900   case Metaspace::ReadWriteMetaspaceType:
1901     *chunk_word_size = SharedReadWriteSize / wordSize;
1902     *class_chunk_word_size = ClassSpecializedChunk;
1903     break;
1904   case Metaspace::AnonymousMetaspaceType:
1905   case Metaspace::ReflectionMetaspaceType:
1906     *chunk_word_size = SpecializedChunk;
1907     *class_chunk_word_size = ClassSpecializedChunk;
1908     break;
1909   default:
1910     *chunk_word_size = SmallChunk;
1911     *class_chunk_word_size = ClassSmallChunk;
1912     break;
1913   }
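  // For example (using the values in the ChunkSizes enum): a
  // ReflectionMetaspaceType manager starts with a 128-word data chunk and a
  // 128-word class chunk, while the default case starts with 512-word data
  // and 256-word class chunks.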
1914   assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1915          err_msg("Initial chunks sizes bad: data  " SIZE_FORMAT
1916                  " class " SIZE_FORMAT,
1917                  *chunk_word_size, *class_chunk_word_size));
1918 }
1919
1920 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1921   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1922   size_t free = 0;
1923   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1924     Metachunk* chunk = chunks_in_use(i);
1925     while (chunk != NULL) {
1926       free += chunk->free_word_size();
1927       chunk = chunk->next();
1928     }
1929   }
1930   return free;
1931 }
1932
1933 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1934   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1935   size_t result = 0;
1936   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1937     result += sum_waste_in_chunks_in_use(i);
1938   }
1939
1940   return result;
1941 }
1942
1943 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1944   size_t result = 0;
1945   Metachunk* chunk = chunks_in_use(index);
1946   // Count the free space in all the chunks but not the
1947   // current chunk from which allocations are still being done.
1948   while (chunk != NULL) {
1949     if (chunk != current_chunk()) {
1950       result += chunk->free_word_size();
1951     }
1952     chunk = chunk->next();
1953   }
1954   return result;
1955 }
1956
1957 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1958   // For CMS use "allocated_chunks_words()", which does not need the
1959   // Metaspace lock.  For the other collectors sum over the
1960   // chunks-in-use lists.  Use both methods as a check that
1961   // "allocated_chunks_words()" is correct.  That is,
1962   // sum_capacity_in_chunks_in_use() is the definitive answer but is
1963   // too expensive to use in the product, so allocated_chunks_words()
1964   // is used instead and is verified against the sum where that is
1965   // affordable.
1966   if (UseConcMarkSweepGC) {
1967     return allocated_chunks_words();
1968   } else {
1969     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1970     size_t sum = 0;
1971     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1972       Metachunk* chunk = chunks_in_use(i);
1973       while (chunk != NULL) {
1974         sum += chunk->word_size();
1975         chunk = chunk->next();
1976       }
1977     }
1978     return sum;
1979   }
1980 }
1981
1982 size_t SpaceManager::sum_count_in_chunks_in_use() {
1983   size_t count = 0;
1984   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1985     count = count + sum_count_in_chunks_in_use(i);
1986   }
1987
1988   return count;
1989 }
1990
1991 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
1992   size_t count = 0;
1993   Metachunk* chunk = chunks_in_use(i);
1994   while (chunk != NULL) {
1995     count++;
1996     chunk = chunk->next();
1997   }
1998   return count;
1999 }
2000
2001
2002 size_t SpaceManager::sum_used_in_chunks_in_use() const {
2003   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2004   size_t used = 0;
2005   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2006     Metachunk* chunk = chunks_in_use(i);
2007     while (chunk != NULL) {
2008       used += chunk->used_word_size();
2009       chunk = chunk->next();
2010     }
2011   }
2012   return used;
2013 }
2014
2015 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
2016
2017   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2018     Metachunk* chunk = chunks_in_use(i);
2019     st->print("SpaceManager: %s " PTR_FORMAT,
2020               chunk_size_name(i), chunk);
2021     if (chunk != NULL) {
2022       st->print_cr(" free " SIZE_FORMAT,
2023                    chunk->free_word_size());
2024     } else {
2025       st->cr();
2026     }
2027   }
2028
2029   chunk_manager()->locked_print_free_chunks(st);
2030   chunk_manager()->locked_print_sum_free_chunks(st);
2031 }
2032
2033 size_t SpaceManager::calc_chunk_size(size_t word_size) {
2034
2035   // Decide between a small chunk and a medium chunk.  Up to
2036   // _small_chunk_limit small chunks can be allocated but
2037   // once a medium chunk has been allocated, no more small
2038   // chunks will be allocated.
2039   size_t chunk_word_size;
2040   if (chunks_in_use(MediumIndex) == NULL &&
2041       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2042     chunk_word_size = (size_t) small_chunk_size();
2043     if (word_size + Metachunk::overhead() > small_chunk_size()) {
2044       chunk_word_size = medium_chunk_size();
2045     }
2046   } else {
2047     chunk_word_size = medium_chunk_size();
2048   }
2049
2050   // Might still need a humongous chunk.  Enforce
2051   // humongous allocation sizes to be aligned up to
2052   // the smallest chunk size.
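  // Worked example (illustrative): a 10*K-word request against 8*K-word
  // medium chunks is humongous; with a 128-word smallest chunk size,
  // if_humongous_sized_chunk below becomes
  // align_size_up(10*K + Metachunk::overhead(), 128), and the MAX2 picks
  // it over the medium size.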
2053 size_t if_humongous_sized_chunk = 2054 align_size_up(word_size + Metachunk::overhead(), 2055 smallest_chunk_size()); 2056 chunk_word_size = 2057 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk); 2058 2059 assert(!SpaceManager::is_humongous(word_size) || 2060 chunk_word_size == if_humongous_sized_chunk, 2061 err_msg("Size calculation is wrong, word_size " SIZE_FORMAT 2062 " chunk_word_size " SIZE_FORMAT, 2063 word_size, chunk_word_size)); 2064 if (TraceMetadataHumongousAllocation && 2065 SpaceManager::is_humongous(word_size)) { 2066 gclog_or_tty->print_cr("Metadata humongous allocation:"); 2067 gclog_or_tty->print_cr(" word_size " PTR_FORMAT, word_size); 2068 gclog_or_tty->print_cr(" chunk_word_size " PTR_FORMAT, 2069 chunk_word_size); 2070 gclog_or_tty->print_cr(" chunk overhead " PTR_FORMAT, 2071 Metachunk::overhead()); 2072 } 2073 return chunk_word_size; 2074 } 2075 2076 void SpaceManager::track_metaspace_memory_usage() { 2077 if (is_init_completed()) { 2078 if (is_class()) { 2079 MemoryService::track_compressed_class_memory_usage(); 2080 } 2081 MemoryService::track_metaspace_memory_usage(); 2082 } 2083 } 2084 2085 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) { 2086 assert(vs_list()->current_virtual_space() != NULL, 2087 "Should have been set"); 2088 assert(current_chunk() == NULL || 2089 current_chunk()->allocate(word_size) == NULL, 2090 "Don't need to expand"); 2091 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 2092 2093 if (TraceMetadataChunkAllocation && Verbose) { 2094 size_t words_left = 0; 2095 size_t words_used = 0; 2096 if (current_chunk() != NULL) { 2097 words_left = current_chunk()->free_word_size(); 2098 words_used = current_chunk()->used_word_size(); 2099 } 2100 gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT 2101 " words " SIZE_FORMAT " words used " SIZE_FORMAT 2102 " words left", 2103 word_size, words_used, words_left); 2104 } 2105 2106 // Get another chunk out of the virtual space 2107 size_t grow_chunks_by_words = calc_chunk_size(word_size); 2108 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words); 2109 2110 MetaWord* mem = NULL; 2111 2112 // If a chunk was available, add it to the in-use chunk list 2113 // and do an allocation from it. 2114 if (next != NULL) { 2115 // Add to this manager's list of chunks in use. 2116 add_chunk(next, false); 2117 mem = next->allocate(word_size); 2118 } 2119 2120 // Track metaspace memory usage statistic. 2121 track_metaspace_memory_usage(); 2122 2123 return mem; 2124 } 2125 2126 void SpaceManager::print_on(outputStream* st) const { 2127 2128 for (ChunkIndex i = ZeroIndex; 2129 i < NumberOfInUseLists ; 2130 i = next_chunk_index(i) ) { 2131 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT, 2132 chunks_in_use(i), 2133 chunks_in_use(i) == NULL ? 
0 : chunks_in_use(i)->word_size()); 2134 } 2135 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT 2136 " Humongous " SIZE_FORMAT, 2137 sum_waste_in_chunks_in_use(SmallIndex), 2138 sum_waste_in_chunks_in_use(MediumIndex), 2139 sum_waste_in_chunks_in_use(HumongousIndex)); 2140 // block free lists 2141 if (block_freelists() != NULL) { 2142 st->print_cr("total in block free lists " SIZE_FORMAT, 2143 block_freelists()->total_size()); 2144 } 2145 } 2146 2147 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype, 2148 Mutex* lock) : 2149 _mdtype(mdtype), 2150 _allocated_blocks_words(0), 2151 _allocated_chunks_words(0), 2152 _allocated_chunks_count(0), 2153 _lock(lock) 2154 { 2155 initialize(); 2156 } 2157 2158 void SpaceManager::inc_size_metrics(size_t words) { 2159 assert_lock_strong(SpaceManager::expand_lock()); 2160 // Total of allocated Metachunks and allocated Metachunks count 2161 // for each SpaceManager 2162 _allocated_chunks_words = _allocated_chunks_words + words; 2163 _allocated_chunks_count++; 2164 // Global total of capacity in allocated Metachunks 2165 MetaspaceAux::inc_capacity(mdtype(), words); 2166 // Global total of allocated Metablocks. 2167 // used_words_slow() includes the overhead in each 2168 // Metachunk so include it in the used when the 2169 // Metachunk is first added (so only added once per 2170 // Metachunk). 2171 MetaspaceAux::inc_used(mdtype(), Metachunk::overhead()); 2172 } 2173 2174 void SpaceManager::inc_used_metrics(size_t words) { 2175 // Add to the per SpaceManager total 2176 Atomic::add_ptr(words, &_allocated_blocks_words); 2177 // Add to the global total 2178 MetaspaceAux::inc_used(mdtype(), words); 2179 } 2180 2181 void SpaceManager::dec_total_from_size_metrics() { 2182 MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words()); 2183 MetaspaceAux::dec_used(mdtype(), allocated_blocks_words()); 2184 // Also deduct the overhead per Metachunk 2185 MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead()); 2186 } 2187 2188 void SpaceManager::initialize() { 2189 Metadebug::init_allocation_fail_alot_count(); 2190 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2191 _chunks_in_use[i] = NULL; 2192 } 2193 _current_chunk = NULL; 2194 if (TraceMetadataChunkAllocation && Verbose) { 2195 gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this); 2196 } 2197 } 2198 2199 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) { 2200 if (chunks == NULL) { 2201 return; 2202 } 2203 ChunkList* list = free_chunks(index); 2204 assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes"); 2205 assert_lock_strong(SpaceManager::expand_lock()); 2206 Metachunk* cur = chunks; 2207 2208 // This returns chunks one at a time. 
If a list
2209 // class that is a base class of FreeList is ever created, then
2210 // something like FreeList::prepend() could be used
2211 // in place of this loop.
2212   while (cur != NULL) {
2213     assert(cur->container() != NULL, "Container should have been set");
2214     cur->container()->dec_container_count();
2215     // Capture the next link before it is changed
2216     // by the call to return_chunk_at_head();
2217     Metachunk* next = cur->next();
2218     DEBUG_ONLY(cur->set_is_tagged_free(true);)
2219     list->return_chunk_at_head(cur);
2220     cur = next;
2221   }
2222 }
2223
2224 SpaceManager::~SpaceManager() {
2225   // This acquires this->_lock, which can't be done while holding expand_lock()
2226   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2227          err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2228                  " allocated_chunks_words() " SIZE_FORMAT,
2229                  sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
2230
2231   MutexLockerEx fcl(SpaceManager::expand_lock(),
2232                     Mutex::_no_safepoint_check_flag);
2233
2234   chunk_manager()->slow_locked_verify();
2235
2236   dec_total_from_size_metrics();
2237
2238   if (TraceMetadataChunkAllocation && Verbose) {
2239     gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
2240     locked_print_chunks_in_use_on(gclog_or_tty);
2241   }
2242
2243   // Do not mangle freed Metachunks.  The chunk size stored inside a
2244   // Metachunk is still needed during the freeing of VirtualSpaceNodes.
2245
2246   // Have to update before the chunks_in_use lists are emptied
2247   // below.
2248   chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2249                                          sum_count_in_chunks_in_use());
2250
2251   // Add all the chunks in use by this space manager
2252   // to the global list of free chunks.
2253
2254   // Follow each list of chunks-in-use and add them to the
2255   // free lists.  Each list is NULL terminated.
2256
2257   for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2258     if (TraceMetadataChunkAllocation && Verbose) {
2259       gclog_or_tty->print_cr("returned %d %s chunks to freelist",
2260                              sum_count_in_chunks_in_use(i),
2261                              chunk_size_name(i));
2262     }
2263     Metachunk* chunks = chunks_in_use(i);
2264     chunk_manager()->return_chunks(i, chunks);
2265     set_chunks_in_use(i, NULL);
2266     if (TraceMetadataChunkAllocation && Verbose) {
2267       gclog_or_tty->print_cr("updated freelist count %d %s",
2268                              chunk_manager()->free_chunks(i)->count(),
2269                              chunk_size_name(i));
2270     }
2271     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2272   }
2273
2274   // The medium chunk case may be optimized by passing the head and
2275   // tail of the medium chunk list to add_at_head().  The tail is often
2276   // the current chunk but there are probably exceptions.
2277
2278   // Humongous chunks
2279   if (TraceMetadataChunkAllocation && Verbose) {
2280     gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
2281                            sum_count_in_chunks_in_use(HumongousIndex),
2282                            chunk_size_name(HumongousIndex));
2283     gclog_or_tty->print("Humongous chunk dictionary: ");
2284   }
2285   // Humongous chunks are never the current chunk.
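  // Each humongous chunk is returned to the dictionary one at a time
  // below: unlike the three fixed-size freelists, the dictionary keys
  // chunks by their variable word size, so there is no bulk prepend.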
2286   Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2287
2288   while (humongous_chunks != NULL) {
2289 #ifdef ASSERT
2290     humongous_chunks->set_is_tagged_free(true);
2291 #endif
2292     if (TraceMetadataChunkAllocation && Verbose) {
2293       gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2294                           humongous_chunks,
2295                           humongous_chunks->word_size());
2296     }
2297     assert(humongous_chunks->word_size() == (size_t)
2298            align_size_up(humongous_chunks->word_size(),
2299                          smallest_chunk_size()),
2300            err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2301                    " granularity " SIZE_FORMAT,
2302                    humongous_chunks->word_size(), smallest_chunk_size()));
2303     Metachunk* next_humongous_chunks = humongous_chunks->next();
2304     humongous_chunks->container()->dec_container_count();
2305     chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2306     humongous_chunks = next_humongous_chunks;
2307   }
2308   if (TraceMetadataChunkAllocation && Verbose) {
2309     gclog_or_tty->cr();
2310     gclog_or_tty->print_cr("updated dictionary count %d %s",
2311                            chunk_manager()->humongous_dictionary()->total_count(),
2312                            chunk_size_name(HumongousIndex));
2313   }
2314   chunk_manager()->slow_locked_verify();
2315 }
2316
2317 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2318   switch (index) {
2319   case SpecializedIndex:
2320     return "Specialized";
2321   case SmallIndex:
2322     return "Small";
2323   case MediumIndex:
2324     return "Medium";
2325   case HumongousIndex:
2326     return "Humongous";
2327   default:
2328     return NULL;
2329   }
2330 }
2331
2332 ChunkIndex ChunkManager::list_index(size_t size) {
2333   switch (size) {
2334   case SpecializedChunk:
2335     assert(SpecializedChunk == ClassSpecializedChunk,
2336            "Need branch for ClassSpecializedChunk");
2337     return SpecializedIndex;
2338   case SmallChunk:
2339   case ClassSmallChunk:
2340     return SmallIndex;
2341   case MediumChunk:
2342   case ClassMediumChunk:
2343     return MediumIndex;
2344   default:
2345     assert(size > MediumChunk || size > ClassMediumChunk,
2346            "Not a humongous chunk");
2347     return HumongousIndex;
2348   }
2349 }
2350
2351 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2352   assert_lock_strong(_lock);
2353   size_t raw_word_size = get_raw_word_size(word_size);
2354   size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size();
2355   assert(raw_word_size >= min_size,
2356          err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
2357   block_freelists()->return_block(p, raw_word_size);
2358 }
2359
2360 // Adds a chunk to the list of chunks in use.
2361 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2362
2363   assert(new_chunk != NULL, "Should not be NULL");
2364   assert(new_chunk->next() == NULL, "Should not be on a list");
2365
2366   new_chunk->reset_empty();
2367
2368   // Find the correct list and set the current
2369   // chunk for that list.
2370   ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2371
2372   if (index != HumongousIndex) {
2373     retire_current_chunk();
2374     set_current_chunk(new_chunk);
2375     new_chunk->set_next(chunks_in_use(index));
2376     set_chunks_in_use(index, new_chunk);
2377   } else {
2378     // For null class loader data and DumpSharedSpaces, the first chunk isn't
2379     // small, so small will be null.  Link this first chunk as the current
2380     // chunk.
2381     if (make_current) {
2382       // Set as the current chunk but otherwise treat as a humongous chunk.
2383       set_current_chunk(new_chunk);
2384     }
2385     // Link at head.  The _current_chunk only points to a humongous chunk for
2386     // the null class loader metaspace (class and data virtual space
2387     // managers), and in any event it will never point to the tail
2388     // of the humongous chunks list.
2389     new_chunk->set_next(chunks_in_use(HumongousIndex));
2390     set_chunks_in_use(HumongousIndex, new_chunk);
2391
2392     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2393   }
2394
2395   // Add to the running sum of capacity
2396   inc_size_metrics(new_chunk->word_size());
2397
2398   assert(new_chunk->is_empty(), "Not ready for reuse");
2399   if (TraceMetadataChunkAllocation && Verbose) {
2400     gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
2401                         sum_count_in_chunks_in_use());
2402     new_chunk->print_on(gclog_or_tty);
2403     chunk_manager()->locked_print_free_chunks(gclog_or_tty);
2404   }
2405 }
2406
2407 void SpaceManager::retire_current_chunk() {
2408   if (current_chunk() != NULL) {
2409     size_t remaining_words = current_chunk()->free_word_size();
2410     if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
2411       block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2412       inc_used_metrics(remaining_words);
2413     }
2414   }
2415 }
2416
2417 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2418                                        size_t grow_chunks_by_words) {
2419   // Get a chunk from the chunk freelist
2420   Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2421
2422   if (next == NULL) {
2423     next = vs_list()->get_new_chunk(word_size,
2424                                     grow_chunks_by_words,
2425                                     medium_chunk_bunch());
2426   }
2427
2428   if (TraceMetadataHumongousAllocation && next != NULL &&
2429       SpaceManager::is_humongous(next->word_size())) {
2430     gclog_or_tty->print_cr("  new humongous chunk word size "
2431                            PTR_FORMAT, next->word_size());
2432   }
2433
2434   return next;
2435 }
2436
2437 MetaWord* SpaceManager::allocate(size_t word_size) {
2438   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2439
2440   size_t raw_word_size = get_raw_word_size(word_size);
2441   BlockFreelist* fl =  block_freelists();
2442   MetaWord* p = NULL;
2443   // Allocation from the dictionary is expensive in the sense that
2444   // the dictionary has to be searched for a size.  Don't allocate
2445   // from the dictionary until it starts to get fat.  Is this
2446   // a reasonable policy?  Maybe a skinny dictionary is fast enough
2447   // for allocations.  Do some profiling.  JJJ
2448   if (fl->total_size() > allocation_from_dictionary_limit) {
2449     p = fl->get_block(raw_word_size);
2450   }
2451   if (p == NULL) {
2452     p = allocate_work(raw_word_size);
2453   }
2454
2455   return p;
2456 }
2457
2458 // Returns the address of the space allocated for "word_size".
2459 // This method does not know about blocks (Metablocks)
2460 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2461   assert_lock_strong(_lock);
2462 #ifdef ASSERT
2463   if (Metadebug::test_metadata_failure()) {
2464     return NULL;
2465   }
2466 #endif
2467   // Is there space in the current chunk?
2468   MetaWord* result = NULL;
2469
2470   // For DumpSharedSpaces, only allocate out of the current chunk which is
2471   // never null because we gave it the size we wanted.   Caller reports out
2472   // of memory if this returns null.
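  // A sketch of the fallback order implemented below: (1) DumpSharedSpaces
  // allocates only from the pre-sized current chunk; (2) otherwise try the
  // current chunk; (3) on failure call grow_and_allocate(), which may take
  // a chunk from the freelist or commit new virtual space; a NULL result
  // is left for the caller to report as out-of-memory.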
2473   if (DumpSharedSpaces) {
2474     assert(current_chunk() != NULL, "should never happen");
2475     inc_used_metrics(word_size);
2476     return current_chunk()->allocate(word_size); // caller handles null result
2477   }
2478
2479   if (current_chunk() != NULL) {
2480     result = current_chunk()->allocate(word_size);
2481   }
2482
2483   if (result == NULL) {
2484     result = grow_and_allocate(word_size);
2485   }
2486
2487   if (result != NULL) {
2488     inc_used_metrics(word_size);
2489     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2490            "Head of the list is being allocated");
2491   }
2492
2493   return result;
2494 }
2495
2496 void SpaceManager::verify() {
2497   // If there are blocks in the dictionary, then
2498   // verification of chunks does not work since
2499   // being in the dictionary alters a chunk.
2500   if (block_freelists()->total_size() == 0) {
2501     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2502       Metachunk* curr = chunks_in_use(i);
2503       while (curr != NULL) {
2504         curr->verify();
2505         verify_chunk_size(curr);
2506         curr = curr->next();
2507       }
2508     }
2509   }
2510 }
2511
2512 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2513   assert(is_humongous(chunk->word_size()) ||
2514          chunk->word_size() == medium_chunk_size() ||
2515          chunk->word_size() == small_chunk_size() ||
2516          chunk->word_size() == specialized_chunk_size(),
2517          "Chunk size is wrong");
2518   return;
2519 }
2520
2521 #ifdef ASSERT
2522 void SpaceManager::verify_allocated_blocks_words() {
2523   // Verification is only guaranteed at a safepoint.
2524   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2525          "Verification can fail if the application is running");
2526   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2527          err_msg("allocation total is not consistent " SIZE_FORMAT
2528                  " vs " SIZE_FORMAT,
2529                  allocated_blocks_words(), sum_used_in_chunks_in_use()));
2530 }
2531
2532 #endif
2533
2534 void SpaceManager::dump(outputStream* const out) const {
2535   size_t curr_total = 0;
2536   size_t waste = 0;
2537   uint i = 0;
2538   size_t used = 0;
2539   size_t capacity = 0;
2540
2541   // Add up statistics for all chunks in this SpaceManager.
2542   for (ChunkIndex index = ZeroIndex;
2543        index < NumberOfInUseLists;
2544        index = next_chunk_index(index)) {
2545     for (Metachunk* curr = chunks_in_use(index);
2546          curr != NULL;
2547          curr = curr->next()) {
2548       out->print("%d) ", i++);
2549       curr->print_on(out);
2550       curr_total += curr->word_size();
2551       used += curr->used_word_size();
2552       capacity += curr->word_size();
2553       waste += curr->free_word_size() + curr->overhead();
2554     }
2555   }
2556
2557   if (TraceMetadataChunkAllocation && Verbose) {
2558     block_freelists()->print_on(out);
2559   }
2560
2561   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2562   // Free space isn't wasted.
2563   waste -= free;
2564
2565   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2566                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2567                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2568 }
2569
2570 #ifndef PRODUCT
2571 void SpaceManager::mangle_freed_chunks() {
2572   for (ChunkIndex index = ZeroIndex;
2573        index < NumberOfInUseLists;
2574        index = next_chunk_index(index)) {
2575     for (Metachunk* curr = chunks_in_use(index);
2576          curr != NULL;
2577          curr = curr->next()) {
2578       curr->mangle();
2579     }
2580   }
2581 }
2582 #endif // PRODUCT
2583
2584 // MetaspaceAux
2585
2586
2587 size_t MetaspaceAux::_capacity_words[] = {0, 0};
2588 size_t MetaspaceAux::_used_words[] = {0, 0};
2589
2590 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2591   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2592   return list == NULL ? 0 : list->free_bytes();
2593 }
2594
2595 size_t MetaspaceAux::free_bytes() {
2596   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2597 }
2598
2599 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2600   assert_lock_strong(SpaceManager::expand_lock());
2601   assert(words <= capacity_words(mdtype),
2602          err_msg("About to decrement below 0: words " SIZE_FORMAT
2603                  " is greater than _capacity_words[%u] " SIZE_FORMAT,
2604                  words, mdtype, capacity_words(mdtype)));
2605   _capacity_words[mdtype] -= words;
2606 }
2607
2608 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2609   assert_lock_strong(SpaceManager::expand_lock());
2610   // Needs to be atomic
2611   _capacity_words[mdtype] += words;
2612 }
2613
2614 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2615   assert(words <= used_words(mdtype),
2616          err_msg("About to decrement below 0: words " SIZE_FORMAT
2617                  " is greater than _used_words[%u] " SIZE_FORMAT,
2618                  words, mdtype, used_words(mdtype)));
2619   // For CMS, deallocation of the Metaspaces occurs during the sweep,
2620   // which is a concurrent phase.  Protection by the expand_lock()
2621   // is not enough since allocation is on a per Metaspace basis
2622   // and protected by the Metaspace lock.
2623   jlong minus_words = (jlong) - (jlong) words;
2624   Atomic::add_ptr(minus_words, &_used_words[mdtype]);
2625 }
2626
2627 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2628   // _used_words tracks allocations for
2629   // each piece of metadata.  Those allocations are
2630   // generally done concurrently by different application
2631   // threads, so the updates must be done atomically.
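  // Minimal sketch of the race being avoided (hypothetical interleaving):
  // two loaders both read _used_words[mdtype] == N and would both store
  // N + words with a plain add, losing one update; Atomic::add_ptr makes
  // the read-modify-write indivisible.  Capacity updates, by contrast, are
  // serialized by expand_lock().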
2632 Atomic::add_ptr(words, &_used_words[mdtype]); 2633 } 2634 2635 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) { 2636 size_t used = 0; 2637 ClassLoaderDataGraphMetaspaceIterator iter; 2638 while (iter.repeat()) { 2639 Metaspace* msp = iter.get_next(); 2640 // Sum allocated_blocks_words for each metaspace 2641 if (msp != NULL) { 2642 used += msp->used_words_slow(mdtype); 2643 } 2644 } 2645 return used * BytesPerWord; 2646 } 2647 2648 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) { 2649 size_t free = 0; 2650 ClassLoaderDataGraphMetaspaceIterator iter; 2651 while (iter.repeat()) { 2652 Metaspace* msp = iter.get_next(); 2653 if (msp != NULL) { 2654 free += msp->free_words_slow(mdtype); 2655 } 2656 } 2657 return free * BytesPerWord; 2658 } 2659 2660 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) { 2661 if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) { 2662 return 0; 2663 } 2664 // Don't count the space in the freelists. That space will be 2665 // added to the capacity calculation as needed. 2666 size_t capacity = 0; 2667 ClassLoaderDataGraphMetaspaceIterator iter; 2668 while (iter.repeat()) { 2669 Metaspace* msp = iter.get_next(); 2670 if (msp != NULL) { 2671 capacity += msp->capacity_words_slow(mdtype); 2672 } 2673 } 2674 return capacity * BytesPerWord; 2675 } 2676 2677 size_t MetaspaceAux::capacity_bytes_slow() { 2678 #ifdef PRODUCT 2679 // Use capacity_bytes() in PRODUCT instead of this function. 2680 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT"); 2681 #endif 2682 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType); 2683 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType); 2684 assert(capacity_bytes() == class_capacity + non_class_capacity, 2685 err_msg("bad accounting: capacity_bytes() " SIZE_FORMAT 2686 " class_capacity + non_class_capacity " SIZE_FORMAT 2687 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT, 2688 capacity_bytes(), class_capacity + non_class_capacity, 2689 class_capacity, non_class_capacity)); 2690 2691 return class_capacity + non_class_capacity; 2692 } 2693 2694 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) { 2695 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2696 return list == NULL ? 0 : list->reserved_bytes(); 2697 } 2698 2699 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) { 2700 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2701 return list == NULL ? 
0 : list->committed_bytes();
2702 }
2703
2704 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2705
2706 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2707   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2708   if (chunk_manager == NULL) {
2709     return 0;
2710   }
2711   chunk_manager->slow_verify();
2712   return chunk_manager->free_chunks_total_words();
2713 }
2714
2715 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2716   return free_chunks_total_words(mdtype) * BytesPerWord;
2717 }
2718
2719 size_t MetaspaceAux::free_chunks_total_words() {
2720   return free_chunks_total_words(Metaspace::ClassType) +
2721          free_chunks_total_words(Metaspace::NonClassType);
2722 }
2723
2724 size_t MetaspaceAux::free_chunks_total_bytes() {
2725   return free_chunks_total_words() * BytesPerWord;
2726 }
2727
2728 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2729   return Metaspace::get_chunk_manager(mdtype) != NULL;
2730 }
2731
2732 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
2733   if (!has_chunk_free_list(mdtype)) {
2734     return MetaspaceChunkFreeListSummary();
2735   }
2736
2737   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
2738   return cm->chunk_free_list_summary();
2739 }
2740
2741 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2742   gclog_or_tty->print(", [Metaspace:");
2743   if (PrintGCDetails && Verbose) {
2744     gclog_or_tty->print(" "  SIZE_FORMAT
2745                         "->" SIZE_FORMAT
2746                         "("  SIZE_FORMAT ")",
2747                         prev_metadata_used,
2748                         used_bytes(),
2749                         reserved_bytes());
2750   } else {
2751     gclog_or_tty->print(" "  SIZE_FORMAT "K"
2752                         "->" SIZE_FORMAT "K"
2753                         "("  SIZE_FORMAT "K)",
2754                         prev_metadata_used/K,
2755                         used_bytes()/K,
2756                         reserved_bytes()/K);
2757   }
2758
2759   gclog_or_tty->print("]");
2760 }
2761
2762 // This is printed when PrintGCDetails is enabled.
2763 void MetaspaceAux::print_on(outputStream* out) {
2764   Metaspace::MetadataType nct = Metaspace::NonClassType;
2765
2766   out->print_cr(" Metaspace       "
2767                 "used "      SIZE_FORMAT "K, "
2768                 "capacity "  SIZE_FORMAT "K, "
2769                 "committed " SIZE_FORMAT "K, "
2770                 "reserved "  SIZE_FORMAT "K",
2771                 used_bytes()/K,
2772                 capacity_bytes()/K,
2773                 committed_bytes()/K,
2774                 reserved_bytes()/K);
2775
2776   if (Metaspace::using_class_space()) {
2777     Metaspace::MetadataType ct = Metaspace::ClassType;
2778     out->print_cr("  class space    "
2779                   "used "      SIZE_FORMAT "K, "
2780                   "capacity "  SIZE_FORMAT "K, "
2781                   "committed " SIZE_FORMAT "K, "
2782                   "reserved "  SIZE_FORMAT "K",
2783                   used_bytes(ct)/K,
2784                   capacity_bytes(ct)/K,
2785                   committed_bytes(ct)/K,
2786                   reserved_bytes(ct)/K);
2787   }
2788 }
2789
2790 // Print information for class space and data space separately.
2791 // This is almost the same as above.
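// For reference, the summary print_on() above emits a line of this form
// (figures illustrative only):
//   Metaspace       used 3201K, capacity 4486K, committed 4864K, reserved 1056768K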
2792 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) { 2793 size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype); 2794 size_t capacity_bytes = capacity_bytes_slow(mdtype); 2795 size_t used_bytes = used_bytes_slow(mdtype); 2796 size_t free_bytes = free_bytes_slow(mdtype); 2797 size_t used_and_free = used_bytes + free_bytes + 2798 free_chunks_capacity_bytes; 2799 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT 2800 "K + unused in chunks " SIZE_FORMAT "K + " 2801 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT 2802 "K capacity in allocated chunks " SIZE_FORMAT "K", 2803 used_bytes / K, 2804 free_bytes / K, 2805 free_chunks_capacity_bytes / K, 2806 used_and_free / K, 2807 capacity_bytes / K); 2808 // Accounting can only be correct if we got the values during a safepoint 2809 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong"); 2810 } 2811 2812 // Print total fragmentation for class metaspaces 2813 void MetaspaceAux::print_class_waste(outputStream* out) { 2814 assert(Metaspace::using_class_space(), "class metaspace not used"); 2815 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0; 2816 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0; 2817 ClassLoaderDataGraphMetaspaceIterator iter; 2818 while (iter.repeat()) { 2819 Metaspace* msp = iter.get_next(); 2820 if (msp != NULL) { 2821 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2822 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2823 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2824 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex); 2825 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2826 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex); 2827 cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2828 } 2829 } 2830 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2831 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2832 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2833 "large count " SIZE_FORMAT, 2834 cls_specialized_count, cls_specialized_waste, 2835 cls_small_count, cls_small_waste, 2836 cls_medium_count, cls_medium_waste, cls_humongous_count); 2837 } 2838 2839 // Print total fragmentation for data and class metaspaces separately 2840 void MetaspaceAux::print_waste(outputStream* out) { 2841 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0; 2842 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0; 2843 2844 ClassLoaderDataGraphMetaspaceIterator iter; 2845 while (iter.repeat()) { 2846 Metaspace* msp = iter.get_next(); 2847 if (msp != NULL) { 2848 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2849 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2850 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2851 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex); 2852 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2853 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex); 2854 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2855 } 2856 } 2857 out->print_cr("Total fragmentation waste (words) doesn't count 
free space"); 2858 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2859 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2860 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2861 "large count " SIZE_FORMAT, 2862 specialized_count, specialized_waste, small_count, 2863 small_waste, medium_count, medium_waste, humongous_count); 2864 if (Metaspace::using_class_space()) { 2865 print_class_waste(out); 2866 } 2867 } 2868 2869 // Dump global metaspace things from the end of ClassLoaderDataGraph 2870 void MetaspaceAux::dump(outputStream* out) { 2871 out->print_cr("All Metaspace:"); 2872 out->print("data space: "); print_on(out, Metaspace::NonClassType); 2873 out->print("class space: "); print_on(out, Metaspace::ClassType); 2874 print_waste(out); 2875 } 2876 2877 void MetaspaceAux::verify_free_chunks() { 2878 Metaspace::chunk_manager_metadata()->verify(); 2879 if (Metaspace::using_class_space()) { 2880 Metaspace::chunk_manager_class()->verify(); 2881 } 2882 } 2883 2884 void MetaspaceAux::verify_capacity() { 2885 #ifdef ASSERT 2886 size_t running_sum_capacity_bytes = capacity_bytes(); 2887 // For purposes of the running sum of capacity, verify against capacity 2888 size_t capacity_in_use_bytes = capacity_bytes_slow(); 2889 assert(running_sum_capacity_bytes == capacity_in_use_bytes, 2890 err_msg("capacity_words() * BytesPerWord " SIZE_FORMAT 2891 " capacity_bytes_slow()" SIZE_FORMAT, 2892 running_sum_capacity_bytes, capacity_in_use_bytes)); 2893 for (Metaspace::MetadataType i = Metaspace::ClassType; 2894 i < Metaspace:: MetadataTypeCount; 2895 i = (Metaspace::MetadataType)(i + 1)) { 2896 size_t capacity_in_use_bytes = capacity_bytes_slow(i); 2897 assert(capacity_bytes(i) == capacity_in_use_bytes, 2898 err_msg("capacity_bytes(%u) " SIZE_FORMAT 2899 " capacity_bytes_slow(%u)" SIZE_FORMAT, 2900 i, capacity_bytes(i), i, capacity_in_use_bytes)); 2901 } 2902 #endif 2903 } 2904 2905 void MetaspaceAux::verify_used() { 2906 #ifdef ASSERT 2907 size_t running_sum_used_bytes = used_bytes(); 2908 // For purposes of the running sum of used, verify against used 2909 size_t used_in_use_bytes = used_bytes_slow(); 2910 assert(used_bytes() == used_in_use_bytes, 2911 err_msg("used_bytes() " SIZE_FORMAT 2912 " used_bytes_slow()" SIZE_FORMAT, 2913 used_bytes(), used_in_use_bytes)); 2914 for (Metaspace::MetadataType i = Metaspace::ClassType; 2915 i < Metaspace:: MetadataTypeCount; 2916 i = (Metaspace::MetadataType)(i + 1)) { 2917 size_t used_in_use_bytes = used_bytes_slow(i); 2918 assert(used_bytes(i) == used_in_use_bytes, 2919 err_msg("used_bytes(%u) " SIZE_FORMAT 2920 " used_bytes_slow(%u)" SIZE_FORMAT, 2921 i, used_bytes(i), i, used_in_use_bytes)); 2922 } 2923 #endif 2924 } 2925 2926 void MetaspaceAux::verify_metrics() { 2927 verify_capacity(); 2928 verify_used(); 2929 } 2930 2931 2932 // Metaspace methods 2933 2934 size_t Metaspace::_first_chunk_word_size = 0; 2935 size_t Metaspace::_first_class_chunk_word_size = 0; 2936 2937 size_t Metaspace::_commit_alignment = 0; 2938 size_t Metaspace::_reserve_alignment = 0; 2939 2940 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) { 2941 initialize(lock, type); 2942 } 2943 2944 Metaspace::~Metaspace() { 2945 delete _vsm; 2946 if (using_class_space()) { 2947 delete _class_vsm; 2948 } 2949 } 2950 2951 VirtualSpaceList* Metaspace::_space_list = NULL; 2952 VirtualSpaceList* Metaspace::_class_space_list = NULL; 2953 2954 ChunkManager* Metaspace::_chunk_manager_metadata = NULL; 2955 ChunkManager* Metaspace::_chunk_manager_class = NULL; 2956 2957 #define 
VIRTUALSPACEMULTIPLIER 2 2958 2959 #ifdef _LP64 2960 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1); 2961 2962 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) { 2963 // Figure out the narrow_klass_base and the narrow_klass_shift. The 2964 // narrow_klass_base is the lower of the metaspace base and the cds base 2965 // (if cds is enabled). The narrow_klass_shift depends on the distance 2966 // between the lower base and higher address. 2967 address lower_base; 2968 address higher_address; 2969 #if INCLUDE_CDS 2970 if (UseSharedSpaces) { 2971 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 2972 (address)(metaspace_base + compressed_class_space_size())); 2973 lower_base = MIN2(metaspace_base, cds_base); 2974 } else 2975 #endif 2976 { 2977 higher_address = metaspace_base + compressed_class_space_size(); 2978 lower_base = metaspace_base; 2979 2980 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes; 2981 // If compressed class space fits in lower 32G, we don't need a base. 2982 if (higher_address <= (address)klass_encoding_max) { 2983 lower_base = 0; // Effectively lower base is zero. 2984 } 2985 } 2986 2987 Universe::set_narrow_klass_base(lower_base); 2988 2989 if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) { 2990 Universe::set_narrow_klass_shift(0); 2991 } else { 2992 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces"); 2993 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes); 2994 } 2995 } 2996 2997 #if INCLUDE_CDS 2998 // Return TRUE if the specified metaspace_base and cds_base are close enough 2999 // to work with compressed klass pointers. 3000 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) { 3001 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS"); 3002 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 3003 address lower_base = MIN2((address)metaspace_base, cds_base); 3004 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 3005 (address)(metaspace_base + compressed_class_space_size())); 3006 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax); 3007 } 3008 #endif 3009 3010 // Try to allocate the metaspace at the requested addr. 3011 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) { 3012 assert(using_class_space(), "called improperly"); 3013 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 3014 assert(compressed_class_space_size() < KlassEncodingMetaspaceMax, 3015 "Metaspace size is too big"); 3016 assert_is_ptr_aligned(requested_addr, _reserve_alignment); 3017 assert_is_ptr_aligned(cds_base, _reserve_alignment); 3018 assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment); 3019 3020 // Don't use large pages for the class space. 3021 bool large_pages = false; 3022 3023 #ifndef AARCH64 3024 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(), 3025 _reserve_alignment, 3026 large_pages, 3027 requested_addr, 0); 3028 #else // AARCH64 3029 ReservedSpace metaspace_rs; 3030 3031 // Our compressed klass pointers may fit nicely into the lower 32 3032 // bits. 3033 if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) { 3034 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3035 _reserve_alignment, 3036 large_pages, 3037 requested_addr, 0); 3038 } 3039 3040 if (! 
metaspace_rs.is_reserved()) { 3041 // Try to align metaspace so that we can decode a compressed klass 3042 // with a single MOVK instruction. We can do this iff the 3043 // compressed class base is a multiple of 4G. 3044 for (char *a = (char*)align_ptr_up(requested_addr, 4*G); 3045 a < (char*)(1024*G); 3046 a += 4*G) { 3047 3048 #if INCLUDE_CDS 3049 if (UseSharedSpaces 3050 && ! can_use_cds_with_metaspace_addr(a, cds_base)) { 3051 // We failed to find an aligned base that will reach. Fall 3052 // back to using our requested addr. 3053 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3054 _reserve_alignment, 3055 large_pages, 3056 requested_addr, 0); 3057 break; 3058 } 3059 #endif 3060 3061 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3062 _reserve_alignment, 3063 large_pages, 3064 a, 0); 3065 if (metaspace_rs.is_reserved()) 3066 break; 3067 } 3068 } 3069 3070 #endif // AARCH64 3071 3072 if (!metaspace_rs.is_reserved()) { 3073 #if INCLUDE_CDS 3074 if (UseSharedSpaces) { 3075 size_t increment = align_size_up(1*G, _reserve_alignment); 3076 3077 // Keep trying to allocate the metaspace, increasing the requested_addr 3078 // by 1GB each time, until we reach an address that will no longer allow 3079 // use of CDS with compressed klass pointers. 3080 char *addr = requested_addr; 3081 while (!metaspace_rs.is_reserved() && (addr + increment > addr) && 3082 can_use_cds_with_metaspace_addr(addr + increment, cds_base)) { 3083 addr = addr + increment; 3084 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3085 _reserve_alignment, large_pages, addr, 0); 3086 } 3087 } 3088 #endif 3089 // If no successful allocation then try to allocate the space anywhere. If 3090 // that fails then OOM doom. At this point we cannot try allocating the 3091 // metaspace as if UseCompressedClassPointers is off because too much 3092 // initialization has happened that depends on UseCompressedClassPointers. 3093 // So, UseCompressedClassPointers cannot be turned off at this point. 3094 if (!metaspace_rs.is_reserved()) { 3095 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3096 _reserve_alignment, large_pages); 3097 if (!metaspace_rs.is_reserved()) { 3098 vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes", 3099 compressed_class_space_size())); 3100 } 3101 } 3102 } 3103 3104 // If we got here then the metaspace got allocated. 3105 MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass); 3106 3107 #if INCLUDE_CDS 3108 // Verify that we can use shared spaces. Otherwise, turn off CDS. 3109 if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) { 3110 FileMapInfo::stop_sharing_and_unmap( 3111 "Could not allocate metaspace at a compatible address"); 3112 } 3113 #endif 3114 set_narrow_klass_base_and_shift((address)metaspace_rs.base(), 3115 UseSharedSpaces ? (address)cds_base : 0); 3116 3117 initialize_class_space(metaspace_rs); 3118 3119 if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) { 3120 gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT, 3121 Universe::narrow_klass_base(), Universe::narrow_klass_shift()); 3122 gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT, 3123 compressed_class_space_size(), metaspace_rs.base(), requested_addr); 3124 } 3125 } 3126 3127 // For UseCompressedClassPointers the class space is reserved above the top of 3128 // the Java heap. 
The argument passed in is at the base of the compressed space.
3129 void Metaspace::initialize_class_space(ReservedSpace rs) {
3130   // The reserved space size may be bigger because of alignment, esp with UseLargePages
3131   assert(rs.size() >= CompressedClassSpaceSize,
3132          err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
3133   assert(using_class_space(), "Must be using class space");
3134   _class_space_list = new VirtualSpaceList(rs);
3135   _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3136
3137   if (!_class_space_list->initialization_succeeded()) {
3138     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3139   }
3140 }
3141
3142 #endif
3143
3144 void Metaspace::ergo_initialize() {
3145   if (DumpSharedSpaces) {
3146     // Using large pages when dumping the shared archive is currently not implemented.
3147     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3148   }
3149
3150   size_t page_size = os::vm_page_size();
3151   if (UseLargePages && UseLargePagesInMetaspace) {
3152     page_size = os::large_page_size();
3153   }
3154
3155   _commit_alignment  = page_size;
3156   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
3157
3158   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since that would
3159   // override the record of whether MaxMetaspaceSize was set on the command
3160   // line or not.  This information is needed later to conform to the
3161   // specification of the java.lang.management.MemoryUsage API.
3162   //
3163   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3164   // globals.hpp to the aligned value, but this is not possible, since the
3165   // alignment depends on other flags being parsed.
3166   MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3167
3168   if (MetaspaceSize > MaxMetaspaceSize) {
3169     MetaspaceSize = MaxMetaspaceSize;
3170   }
3171
3172   MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
3173
3174   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3175
3176   if (MetaspaceSize < 256*K) {
3177     vm_exit_during_initialization("Too small initial Metaspace size");
3178   }
3179
3180   MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3181   MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3182
3183   CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3184   set_compressed_class_space_size(CompressedClassSpaceSize);
3185 }
3186
3187 void Metaspace::global_initialize() {
3188   MetaspaceGC::initialize();
3189
3190   // Initialize the alignment for shared spaces.
3191   int max_alignment = os::vm_allocation_granularity();
3192   size_t cds_total = 0;
3193
3194   MetaspaceShared::set_max_alignment(max_alignment);
3195
3196   if (DumpSharedSpaces) {
3197 #if INCLUDE_CDS
3198     MetaspaceShared::estimate_regions_size();
3199
3200     SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
3201     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
3202     SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
3203     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
3204
3205     // Initialize with the sum of the shared space sizes.  The read-only
3206     // and read-write metaspace chunks will be allocated out of this and the
3207     // remainder is the misc code and data chunks.
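    // Note (an assumption based on the flags aligned above):
    // shared_spaces_size() reflects the four Shared*Size flags, so
    // cds_total below only needs rounding up to the reserve alignment.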

void Metaspace::global_initialize() {
  MetaspaceGC::initialize();

  // Initialize the alignment for shared spaces.
  int max_alignment = os::vm_allocation_granularity();
  size_t cds_total = 0;

  MetaspaceShared::set_max_alignment(max_alignment);

  if (DumpSharedSpaces) {
#if INCLUDE_CDS
    MetaspaceShared::estimate_regions_size();

    SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
    SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
    SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);

    // Initialize with the sum of the shared space sizes. The read-only
    // and read-write metaspace chunks will be allocated out of this and the
    // remainder is the misc code and data chunks.
    cds_total = FileMapInfo::shared_spaces_size();
    cds_total = align_size_up(cds_total, _reserve_alignment);
    _space_list = new VirtualSpaceList(cds_total/wordSize);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

    if (!_space_list->initialization_succeeded()) {
      vm_exit_during_initialization("Unable to dump shared archive.", NULL);
    }

#ifdef _LP64
    if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
      vm_exit_during_initialization("Unable to dump shared archive.",
          err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
                  SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
                  "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(),
                  cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
    }

    // Set the compressed klass pointer base so that decoding of these pointers works
    // properly when creating the shared archive.
    assert(UseCompressedOops && UseCompressedClassPointers,
           "UseCompressedOops and UseCompressedClassPointers must be set");
    Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
    if (TraceMetavirtualspaceAllocation && Verbose) {
      gclog_or_tty->print_cr("Setting narrow_klass_base to address: " PTR_FORMAT,
                             _space_list->current_virtual_space()->bottom());
    }

    Universe::set_narrow_klass_shift(0);
#endif // _LP64
#endif // INCLUDE_CDS
  } else {
#if INCLUDE_CDS
    // If using shared space, open the file that contains the shared space
    // and map in the memory before initializing the rest of metaspace (so
    // the addresses don't conflict).
    address cds_address = NULL;
    if (UseSharedSpaces) {
      FileMapInfo* mapinfo = new FileMapInfo();

      // Open the shared archive file, read and validate the header. If
      // initialization fails, shared spaces [UseSharedSpaces] are
      // disabled and the file is closed.
      // Map in spaces now also.
      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
        cds_total = FileMapInfo::shared_spaces_size();
        cds_address = (address)mapinfo->region_base(0);
      } else {
        assert(!mapinfo->is_open() && !UseSharedSpaces,
               "archive file not closed or shared spaces not disabled.");
      }
    }
#endif // INCLUDE_CDS
#ifdef _LP64
    // If UseCompressedClassPointers is set then allocate the metaspace area
    // above the heap and above the CDS area (if it exists).
    if (using_class_space()) {
      if (UseSharedSpaces) {
#if INCLUDE_CDS
        char* cds_end = (char*)(cds_address + cds_total);
        cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
        allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
#endif
      } else {
        char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
        allocate_metaspace_compressed_klass_ptrs(base, 0);
      }
    }
#endif // _LP64

    // Initialize these before initializing the VirtualSpaceList.
    _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
    _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
    // Make the first class chunk bigger than a medium chunk so it's not put
    // on the medium chunk list. The next chunk will be small and progress
    // from there. This size was determined empirically by running -version.
    _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
                                        (CompressedClassSpaceSize/BytesPerWord)*2);
    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
    // Arbitrarily set the initial virtual space to a multiple
    // of the boot class loader size.
    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
    word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());

    // Initialize the list of virtual spaces.
    _space_list = new VirtualSpaceList(word_size);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

    if (!_space_list->initialization_succeeded()) {
      vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
    }
  }

  _tracer = new MetaspaceTracer();
}

void Metaspace::post_initialize() {
  MetaspaceGC::post_initialize();
}

Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
                                               size_t chunk_word_size,
                                               size_t chunk_bunch) {
  // Get a chunk from the chunk freelist.
  Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
  if (chunk != NULL) {
    return chunk;
  }

  return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
}
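
// Note: illustrative sketch, not VM code. get_initialization_chunk() above is
// a two-level allocation: recycle from the global chunk freelist first, then
// carve a fresh chunk out of the virtual space list. The same shape in
// miniature (hypothetical helper names):
//
//   Chunk* get_chunk(size_t words) {
//     if (Chunk* c = freelist_take(words)) return c;  // 1) reuse a returned chunk
//     return virtual_space_carve(words);              // 2) commit new memory
//   }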

void Metaspace::initialize(Mutex* lock, MetaspaceType type) {

  assert(space_list() != NULL,
         "Metadata VirtualSpaceList has not been initialized");
  assert(chunk_manager_metadata() != NULL,
         "Metadata ChunkManager has not been initialized");

  _vsm = new SpaceManager(NonClassType, lock);
  if (_vsm == NULL) {
    return;
  }
  size_t word_size;
  size_t class_word_size;
  vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);

  if (using_class_space()) {
    assert(class_space_list() != NULL,
           "Class VirtualSpaceList has not been initialized");
    assert(chunk_manager_class() != NULL,
           "Class ChunkManager has not been initialized");

    // Allocate SpaceManager for classes.
    _class_vsm = new SpaceManager(ClassType, lock);
    if (_class_vsm == NULL) {
      return;
    }
  }

  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);

  // Allocate chunk for metadata objects.
  Metachunk* new_chunk = get_initialization_chunk(NonClassType,
                                                  word_size,
                                                  vsm()->medium_chunk_bunch());
  assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
  if (new_chunk != NULL) {
    // Add to this manager's list of chunks in use and current_chunk().
    vsm()->add_chunk(new_chunk, true);
  }

  // Allocate chunk for class metadata objects.
  if (using_class_space()) {
    Metachunk* class_chunk = get_initialization_chunk(ClassType,
                                                      class_word_size,
                                                      class_vsm()->medium_chunk_bunch());
    if (class_chunk != NULL) {
      class_vsm()->add_chunk(class_chunk, true);
    }
  }

  _alloc_record_head = NULL;
  _alloc_record_tail = NULL;
}

size_t Metaspace::align_word_size_up(size_t word_size) {
  size_t byte_size = word_size * wordSize;
  return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
}

MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
  // DumpSharedSpaces doesn't use class metadata area (yet).
  // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
  if (is_class_space_allocation(mdtype)) {
    return class_vsm()->allocate(word_size);
  } else {
    return vsm()->allocate(word_size);
  }
}

MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
  assert(delta_bytes > 0, "Must be");

  size_t before = 0;
  size_t after = 0;
  MetaWord* res;
  bool incremented;

  // Each thread increments the HWM at most once. Even if the thread fails to increment
  // the HWM, an allocation is still attempted. This is because another thread must then
  // have incremented the HWM and therefore the allocation might still succeed.
  do {
    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
    res = allocate(word_size, mdtype);
  } while (!incremented && res == NULL);

  if (incremented) {
    tracer()->report_gc_threshold(before, after,
                                  MetaspaceGCThresholdUpdater::ExpandAndAllocate);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
                             " to " SIZE_FORMAT, before, after);
    }
  }

  return res;
}

// Space allocated in the Metaspace. This may
// be across several metadata virtual spaces.
char* Metaspace::bottom() const {
  assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
  return (char*)vsm()->current_chunk()->bottom();
}

size_t Metaspace::used_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
  }
}

size_t Metaspace::free_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_free_in_chunks_in_use();
  }
}
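
// Note (added observation): the *_slow accounting queries above and below walk
// the in-use chunk lists, so for each metadata type the values nest as
//   used_words_slow() <= capacity_words_slow() <= committed <= reserved.
// As a worked example, a space manager holding one 8K-word medium chunk with
// 3K words handed out reports roughly 3K words used and 8K words capacity;
// the 5K difference is free space inside the chunk, not freelist space.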

// Space capacity in the Metaspace. It includes
// space in the list of chunks from which allocations
// have been made. It does not include space in the global freelist or
// space available in the dictionary, since that space is already counted
// in some chunk.
size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_capacity_in_chunks_in_use();
  }
}

size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
  return used_words_slow(mdtype) * BytesPerWord;
}

size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
  return capacity_words_slow(mdtype) * BytesPerWord;
}

size_t Metaspace::allocated_blocks_bytes() const {
  return vsm()->allocated_blocks_bytes() +
         (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
}

size_t Metaspace::allocated_chunks_bytes() const {
  return vsm()->allocated_chunks_bytes() +
         (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
}

void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  assert(!SafepointSynchronize::is_at_safepoint()
         || Thread::current()->is_VM_thread(), "should be the VM thread");

  if (DumpSharedSpaces && PrintSharedSpaces) {
    record_deallocation(ptr, vsm()->get_raw_word_size(word_size));
  }

  MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);

  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    // Dark matter. Too small for dictionary.
#ifdef ASSERT
    Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
#endif
    return;
  }
  if (is_class && using_class_space()) {
    class_vsm()->deallocate(ptr, word_size);
  } else {
    vsm()->deallocate(ptr, word_size);
  }
}
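
// Note: illustrative sketch, not VM code. On the "dark matter" path above:
// the free-block dictionary is a tree whose node metadata lives inside the
// freed blocks themselves, so a block smaller than
// TreeChunk<Metablock, FreeList<Metablock> >::min_size() has no room for the
// node header and is intentionally abandoned. The same policy in miniature
// (hypothetical names):
//
//   if (word_size < min_trackable_words) return;  // too small to index; leak it
//   dictionary_insert(ptr, word_size);            // otherwise reusable later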


MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              bool read_only, MetaspaceObj::Type type, TRAPS) {
  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return NULL;  // caller does a CHECK_NULL too
  }

  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
         "ClassLoaderData::the_null_class_loader_data() should have been used.");

  // Allocate in metaspaces without taking out a lock, because doing so
  // deadlocks with the SymbolTable_lock. Dumping is single threaded for now.
  // We'll have to revisit this for application class data sharing.
  if (DumpSharedSpaces) {
    assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
    Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
    MetaWord* result = space->allocate(word_size, NonClassType);
    if (result == NULL) {
      report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
    }
    if (PrintSharedSpaces) {
      space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
    }

    // Zero initialize.
    Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);

    return result;
  }

  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;

  // Try to allocate metadata.
  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result == NULL) {
    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);

    // Allocation failed.
    if (is_init_completed()) {
      // Only start a GC if the bootstrapping has completed.

      // Try to clean out some memory and retry.
      result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
          loader_data, word_size, mdtype);
    }
  }

  if (result == NULL) {
    report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
  }

  // Zero initialize.
  Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);

  return result;
}

size_t Metaspace::class_chunk_size(size_t word_size) {
  assert(using_class_space(), "Has to use class space");
  return class_vsm()->calc_chunk_size(word_size);
}

void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);

  // If result is still null, we are out of memory.
  if (Verbose && TraceMetadataChunkAllocation) {
    gclog_or_tty->print_cr("Metaspace allocation failed for size "
                           SIZE_FORMAT, word_size);
    if (loader_data->metaspace_or_null() != NULL) {
      loader_data->dump(gclog_or_tty);
    }
    MetaspaceAux::dump(gclog_or_tty);
  }

  bool out_of_compressed_class_space = false;
  if (is_class_space_allocation(mdtype)) {
    Metaspace* metaspace = loader_data->metaspace_non_null();
    out_of_compressed_class_space =
        MetaspaceAux::committed_bytes(Metaspace::ClassType) +
        (metaspace->class_chunk_size(word_size) * BytesPerWord) >
        CompressedClassSpaceSize;
  }

  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  const char* space_string = out_of_compressed_class_space ?
      "Compressed class space" : "Metaspace";

  report_java_out_of_memory(space_string);

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
        space_string);
  }

  if (!is_init_completed()) {
    vm_exit_during_initialization("OutOfMemoryError", space_string);
  }

  if (out_of_compressed_class_space) {
    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  } else {
    THROW_OOP(Universe::out_of_memory_error_metaspace());
  }
}

const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
  switch (mdtype) {
    case Metaspace::ClassType:    return "Class";
    case Metaspace::NonClassType: return "Metadata";
    default:
      assert(false, err_msg("Got bad mdtype: %d", (int) mdtype));
      return NULL;
  }
}
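
// Note (added observation): record_allocation() below keeps a singly linked
// list of (address, type, size) records in address order. The common case
// appends at the tail; reallocating into a previously freed record splits it.
// A sketch of the split arithmetic, assuming a hypothetical 96-byte freed
// record refilled by a 64-byte allocation:
//
//   old: [ptr, 96, Deallocated]  -->  old: [ptr,      64, type]
//                                     rec: [ptr + 64, 32, Deallocated]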

void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
  assert(DumpSharedSpaces, "sanity");

  int byte_size = (int)word_size * HeapWordSize;
  AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);

  if (_alloc_record_head == NULL) {
    _alloc_record_head = _alloc_record_tail = rec;
  } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
    _alloc_record_tail->_next = rec;
    _alloc_record_tail = rec;
  } else {
    // Slow linear search, but this doesn't happen that often, and only when dumping.
    for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
      if (old->_ptr == ptr) {
        assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
        int remain_bytes = old->_byte_size - byte_size;
        assert(remain_bytes >= 0, "sanity");
        old->_type = type;

        if (remain_bytes == 0) {
          delete(rec);
        } else {
          address remain_ptr = address(ptr) + byte_size;
          rec->_ptr = remain_ptr;
          rec->_byte_size = remain_bytes;
          rec->_type = MetaspaceObj::DeallocatedType;
          rec->_next = old->_next;
          old->_byte_size = byte_size;
          old->_next = rec;
        }
        return;
      }
    }
    assert(0, "reallocating a freed pointer that was not recorded");
  }
}

void Metaspace::record_deallocation(void* ptr, size_t word_size) {
  assert(DumpSharedSpaces, "sanity");

  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
    if (rec->_ptr == ptr) {
      assert(rec->_byte_size == (int)word_size * HeapWordSize, "sanity");
      rec->_type = MetaspaceObj::DeallocatedType;
      return;
    }
  }

  assert(0, "deallocating a pointer that was not recorded");
}

void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
  assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");

  address last_addr = (address)bottom();

  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
    address ptr = rec->_ptr;
    if (last_addr < ptr) {
      closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
    }
    closure->doit(ptr, rec->_type, rec->_byte_size);
    last_addr = ptr + rec->_byte_size;
  }

  address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
  if (last_addr < top) {
    closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
  }
}

void Metaspace::purge(MetadataType mdtype) {
  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
}

void Metaspace::purge() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  purge(NonClassType);
  if (using_class_space()) {
    purge(ClassType);
  }
}
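
// Note (added observation): iterate() above reports records in address order
// and synthesizes an UnknownType record for any gap, so the closure sees a
// gap-free covering of [bottom(), bottom() + used). For example, records at
// offsets [0,32) and [64,96) produce the calls
//   doit(+0, type1, 32); doit(+32, UnknownType, 32); doit(+64, type2, 32).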

void Metaspace::print_on(outputStream* out) const {
  // Print both class virtual space counts and metaspace.
  if (Verbose) {
    vsm()->print_on(out);
    if (using_class_space()) {
      class_vsm()->print_on(out);
    }
  }
}

bool Metaspace::contains(const void* ptr) {
  if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
    return true;
  }

  if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
    return true;
  }

  return get_space_list(NonClassType)->contains(ptr);
}

void Metaspace::verify() {
  vsm()->verify();
  if (using_class_space()) {
    class_vsm()->verify();
  }
}

void Metaspace::dump(outputStream* const out) const {
  out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
  vsm()->dump(out);
  if (using_class_space()) {
    out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
    class_vsm()->dump(out);
  }
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

class TestMetaspaceAuxTest : AllStatic {
 public:
  static void test_reserved() {
    size_t reserved = MetaspaceAux::reserved_bytes();

    assert(reserved > 0, "assert");

    size_t committed = MetaspaceAux::committed_bytes();
    assert(committed <= reserved, "assert");

    size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
    assert(reserved_metadata > 0, "assert");
    assert(reserved_metadata <= reserved, "assert");

    if (UseCompressedClassPointers) {
      size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
      assert(reserved_class > 0, "assert");
      assert(reserved_class < reserved, "assert");
    }
  }

  static void test_committed() {
    size_t committed = MetaspaceAux::committed_bytes();

    assert(committed > 0, "assert");

    size_t reserved = MetaspaceAux::reserved_bytes();
    assert(committed <= reserved, "assert");

    size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
    assert(committed_metadata > 0, "assert");
    assert(committed_metadata <= committed, "assert");

    if (UseCompressedClassPointers) {
      size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType);
      assert(committed_class > 0, "assert");
      assert(committed_class < committed, "assert");
    }
  }
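
  // Note (added observation): the two tests above only check the nesting
  // invariant
  //   0 < committed(part) <= committed(total) <= reserved(total),
  // which must hold at any point after VM initialization; they deliberately
  // assert no absolute sizes, since those depend on flags such as
  // MetaspaceSize and CompressedClassSpaceSize.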

  static void test_virtual_space_list_large_chunk() {
    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    // A size larger than VirtualSpaceSize (256k), plus one page to make it
    // _not_ be vm_allocation_granularity aligned on Windows.
    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
    large_size += (os::vm_page_size()/BytesPerWord);
    vs_list->get_new_chunk(large_size, large_size, 0);
  }

  static void test() {
    test_reserved();
    test_committed();
    test_virtual_space_list_large_chunk();
  }
};

void TestMetaspaceAux_test() {
  TestMetaspaceAuxTest::test();
}

class TestVirtualSpaceNodeTest {
  static void chunk_up(size_t words_left, size_t& num_medium_chunks,
                       size_t& num_small_chunks,
                       size_t& num_specialized_chunks) {
    num_medium_chunks = words_left / MediumChunk;
    words_left = words_left % MediumChunk;

    num_small_chunks = words_left / SmallChunk;
    words_left = words_left % SmallChunk;
    // How many specialized chunks can we get?
    num_specialized_chunks = words_left / SpecializedChunk;
    assert(words_left % SpecializedChunk == 0, "should be nothing left");
  }

 public:
  static void test() {
    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    const size_t vsn_test_size_words = MediumChunk * 4;
    const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;

    // The chunk sizes must be multiples of each other, or this will fail.
    STATIC_ASSERT(MediumChunk % SmallChunk == 0);
    STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);

    { // No committed memory in VSN
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.retire(&cm);
      assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
    }

    { // All of VSN is committed, half is used by chunks
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
      vsn.get_chunk_vs(MediumChunk);
      vsn.get_chunk_vs(MediumChunk);
      vsn.retire(&cm);
      assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
      assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
    }

    { // 4 pages of VSN is committed, some is used by chunks
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
      assert(page_chunks < MediumChunk, "Test expects medium chunks to be at least 4*page_size");
      vsn.initialize();
      vsn.expand_by(page_chunks, page_chunks);
      vsn.get_chunk_vs(SmallChunk);
      vsn.get_chunk_vs(SpecializedChunk);
      vsn.retire(&cm);

      // committed - used = words left to retire
      const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;

      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);

      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should match the computed number of small and specialized chunks");
      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
    }
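
    // Worked example (added for illustration): for the humongous case below,
    // chunk_up() receives
    //   words_left = 2*MediumChunk - (MediumChunk + SpecializedChunk)
    //              = MediumChunk - SpecializedChunk = 8192 - 128 = 8064 words
    // (non-class sizes: MediumChunk 8K words, SmallChunk 512, SpecializedChunk 128),
    // which splits as 0 medium + 15 small (7680 words) + 3 specialized (384 words).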
    { // Half of VSN is committed, a humongous chunk is used
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
      vsn.get_chunk_vs(MediumChunk + SpecializedChunk);  // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
      vsn.retire(&cm);

      const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);

      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should match the computed number of small and specialized chunks");
      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
    }

  }

#define assert_is_available_positive(word_size) \
  assert(vsn.is_available(word_size), \
         err_msg(#word_size ": " PTR_FORMAT " bytes were not available in " \
                 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
                 (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));

#define assert_is_available_negative(word_size) \
  assert(!vsn.is_available(word_size), \
         err_msg(#word_size ": " PTR_FORMAT " bytes should not be available in " \
                 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
                 (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));

  static void test_is_available_positive() {
    // Reserve some memory.
    VirtualSpaceNode vsn(os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Check that is_available accepts the committed size.
    assert_is_available_positive(commit_word_size);

    // Check that is_available accepts half the committed size.
    size_t expand_word_size = commit_word_size / 2;
    assert_is_available_positive(expand_word_size);
  }

  static void test_is_available_negative() {
    // Reserve some memory.
    VirtualSpaceNode vsn(os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Check that is_available doesn't accept a too large size.
    size_t two_times_commit_word_size = commit_word_size * 2;
    assert_is_available_negative(two_times_commit_word_size);
  }

  static void test_is_available_overflow() {
    // Reserve some memory.
    VirtualSpaceNode vsn(os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Calculate a size that will overflow the virtual space size.
    void* virtual_space_max = (void*)(uintptr_t)-1;
    size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
    size_t overflow_size = bottom_to_max + BytesPerWord;
    size_t overflow_word_size = overflow_size / BytesPerWord;

    // Check that is_available can handle the overflow.
    assert_is_available_negative(overflow_word_size);
  }

  static void test_is_available() {
    TestVirtualSpaceNodeTest::test_is_available_positive();
    TestVirtualSpaceNodeTest::test_is_available_negative();
    TestVirtualSpaceNodeTest::test_is_available_overflow();
  }
};

void TestVirtualSpaceNode_test() {
  TestVirtualSpaceNodeTest::test();
  TestVirtualSpaceNodeTest::test_is_available();
}
#endif
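
// Note (added observation): the unit tests above are compiled only in
// non-product builds. In HotSpot they are typically driven by the internal
// test runner of a debug VM (e.g. java -XX:+ExecuteInternalVMTests -version),
// which invokes entry points such as TestMetaspaceAux_test() and
// TestVirtualSpaceNode_test().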