// Add 'count' chunks with a combined size of 'v' words to the free-chunk
// accounting. Updates use Atomic::add_ptr; presumably the counters may be
// bumped without holding the expand lock -- confirm against callers.
void inc_free_chunks_total(size_t v, size_t count = 1) {
  Atomic::add_ptr(count, &_free_chunks_count);
  Atomic::add_ptr(v, &_free_chunks_total);
}
// Accessor for the dictionary holding free humongous chunks.
ChunkTreeDictionary* humongous_dictionary() {
  return &_humongous_dictionary;
}
179
180 ChunkList* free_chunks(ChunkIndex index);
181
182 // Returns the list for the given chunk word size.
183 ChunkList* find_free_chunks_list(size_t word_size);
184
185 // Remove from a list by size. Selects list based on size of chunk.
186 Metachunk* free_chunks_get(size_t chunk_word_size);
187
// Assert that 'index' is one of the four valid ChunkIndex values.
// Kept as a macro so the assert fires at the caller's own line.
#define index_bounds_check(index) \
  assert(index == SpecializedIndex || \
         index == SmallIndex || \
         index == MediumIndex || \
         index == HumongousIndex, err_msg("Bad index: %d", (int) index))
193
194 size_t num_free_chunks(ChunkIndex index) const {
195 index_bounds_check(index);
196
197 if (index == HumongousIndex) {
198 return _humongous_dictionary.total_free_blocks();
199 }
200
201 ssize_t count = _free_chunks[index].count();
202 return count == -1 ? 0 : (size_t) count;
203 }
204
205 size_t size_free_chunks_in_bytes(ChunkIndex index) const {
206 index_bounds_check(index);
207
208 size_t word_size = 0;
209 if (index == HumongousIndex) {
210 word_size = _humongous_dictionary.total_size();
211 } else {
212 const size_t size_per_chunk_in_words = _free_chunks[index].size();
361 // in the node from any freelist.
362 void purge(ChunkManager* chunk_manager);
363
364 // If an allocation doesn't fit in the current node a new node is created.
365 // Allocate chunks out of the remaining committed space in this node
366 // to avoid wasting that memory.
367 // This always adds up because all the chunk sizes are multiples of
368 // the smallest chunk size.
369 void retire(ChunkManager* chunk_manager);
370
371 #ifdef ASSERT
372 // Debug support
373 void mangle();
374 #endif
375
376 void print_on(outputStream* st) const;
377 };
378
// Assert that pointer 'ptr' is aligned to 'alignment' bytes, reporting both.
#define assert_is_ptr_aligned(ptr, alignment) \
  assert(is_ptr_aligned(ptr, alignment), \
      err_msg(PTR_FORMAT " is not aligned to " \
              SIZE_FORMAT, p2i(ptr), alignment))
383
// Assert that 'size' is a multiple of 'alignment', reporting both values.
#define assert_is_size_aligned(size, alignment) \
  assert(is_size_aligned(size, alignment), \
      err_msg(SIZE_FORMAT " is not aligned to " \
              SIZE_FORMAT, size, alignment))
388
389
390 // Decide if large pages should be committed when the memory is reserved.
391 static bool should_commit_large_pages_when_reserving(size_t bytes) {
392 if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
393 size_t words = bytes / BytesPerWord;
394 bool is_class = false; // We never reserve large pages for the class space.
395 if (MetaspaceGC::can_expand(words, is_class) &&
396 MetaspaceGC::allowed_expansion() >= words) {
397 return true;
398 }
399 }
400
401 return false;
402 }
403
404 // byte_size is the size of the associated virtualspace.
405 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
406 assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
407
// Definition of the global expand lock that guards metaspace growth and the
// free-chunk accounting. Created with _safepoint_check_never, so it can be
// taken in contexts where safepoint checks are not allowed.
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag,
            Monitor::_safepoint_check_never);
789
// Bump this node's container count; caller must hold the expand lock.
// In debug builds the cached count is cross-checked against a slow recount.
void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
  DEBUG_ONLY(verify_container_count();)
}
795
// Drop this node's container count; caller must hold the expand lock.
// NOTE(review): unlike inc_container_count() there is no debug verification
// here -- presumably the count is verified later by callers; confirm.
void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}
800
#ifdef ASSERT
// Debug-only: check the cached _container_count against a full recount
// computed by container_count_slow().
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count _container_count " UINTX_FORMAT
                 " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow()));
}
#endif
808
809 // BlockFreelist methods
810
// A BlockFreelist owns a tree dictionary of freed metadata blocks.
BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()) {}
812
BlockFreelist::~BlockFreelist() {
  // Optionally dump the free lists before tearing the dictionary down.
  if (Verbose && TraceMetadataChunkAllocation) {
    dictionary()->print_free_lists(gclog_or_tty);
  }
  delete _dictionary;
}
819
// Return a freed metadata block to the free list. A Metablock header is
// constructed in place (placement new) inside the freed memory itself, so
// the block must be large enough to hold it -- presumably callers enforce a
// minimum size; confirm against SpaceManager::deallocate().
void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  Metablock* free_chunk = ::new (p) Metablock(word_size);
  dictionary()->return_chunk(free_chunk);
}
824
825 MetaWord* BlockFreelist::get_block(size_t word_size) {
948 // aligned only the middle alignment of the VirtualSpace is used.
949 assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
950 assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
951
952 // ReservedSpaces marked as special will have the entire memory
953 // pre-committed. Setting a committed size will make sure that
954 // committed_size and actual_committed_size agrees.
955 size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
956
957 bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
958 Metaspace::commit_alignment());
959 if (result) {
960 assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
961 "Checking that the pre-committed memory was registered by the VirtualSpace");
962
963 set_top((MetaWord*)virtual_space()->low());
964 set_reserved(MemRegion((HeapWord*)_rs.base(),
965 (HeapWord*)(_rs.base() + _rs.size())));
966
967 assert(reserved()->start() == (HeapWord*) _rs.base(),
968 err_msg("Reserved start was not set properly " PTR_FORMAT
969 " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base())));
970 assert(reserved()->word_size() == _rs.size() / BytesPerWord,
971 err_msg("Reserved size was not set properly " SIZE_FORMAT
972 " != " SIZE_FORMAT, reserved()->word_size(),
973 _rs.size() / BytesPerWord));
974 }
975
976 return result;
977 }
978
979 void VirtualSpaceNode::print_on(outputStream* st) const {
980 size_t used = used_words_in_vs();
981 size_t capacity = capacity_words_in_vs();
982 VirtualSpace* vs = virtual_space();
983 st->print_cr(" space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
984 "[" PTR_FORMAT ", " PTR_FORMAT ", "
985 PTR_FORMAT ", " PTR_FORMAT ")",
986 p2i(vs), capacity / K,
987 capacity == 0 ? 0 : used * 100 / capacity,
988 p2i(bottom()), p2i(top()), p2i(end()),
989 p2i(vs->high_boundary()));
990 }
991
992 #ifdef ASSERT
993 void VirtualSpaceNode::mangle() {
1001
1002 VirtualSpaceList::~VirtualSpaceList() {
1003 VirtualSpaceListIterator iter(virtual_space_list());
1004 while (iter.repeat()) {
1005 VirtualSpaceNode* vsl = iter.get_next();
1006 delete vsl;
1007 }
1008 }
1009
1010 void VirtualSpaceList::inc_reserved_words(size_t v) {
1011 assert_lock_strong(SpaceManager::expand_lock());
1012 _reserved_words = _reserved_words + v;
1013 }
1014 void VirtualSpaceList::dec_reserved_words(size_t v) {
1015 assert_lock_strong(SpaceManager::expand_lock());
1016 _reserved_words = _reserved_words - v;
1017 }
1018
// Assert that total committed metaspace has not exceeded MaxMetaspaceSize.
#define assert_committed_below_limit()                        \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
         err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
                 " limit (MaxMetaspaceSize): " SIZE_FORMAT,   \
                 MetaspaceAux::committed_bytes(), MaxMetaspaceSize));
1024
1025 void VirtualSpaceList::inc_committed_words(size_t v) {
1026 assert_lock_strong(SpaceManager::expand_lock());
1027 _committed_words = _committed_words + v;
1028
1029 assert_committed_below_limit();
1030 }
1031 void VirtualSpaceList::dec_committed_words(size_t v) {
1032 assert_lock_strong(SpaceManager::expand_lock());
1033 _committed_words = _committed_words - v;
1034
1035 assert_committed_below_limit();
1036 }
1037
1038 void VirtualSpaceList::inc_virtual_space_count() {
1039 assert_lock_strong(SpaceManager::expand_lock());
1040 _virtual_space_count++;
1041 }
1042 void VirtualSpaceList::dec_virtual_space_count() {
1043 assert_lock_strong(SpaceManager::expand_lock());
1444 size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
1445 if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
1446 return false;
1447 }
1448 }
1449
1450 // Check if the user has imposed a limit on the metaspace memory.
1451 size_t committed_bytes = MetaspaceAux::committed_bytes();
1452 if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
1453 return false;
1454 }
1455
1456 return true;
1457 }
1458
1459 size_t MetaspaceGC::allowed_expansion() {
1460 size_t committed_bytes = MetaspaceAux::committed_bytes();
1461 size_t capacity_until_gc = capacity_until_GC();
1462
1463 assert(capacity_until_gc >= committed_bytes,
1464 err_msg("capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
1465 capacity_until_gc, committed_bytes));
1466
1467 size_t left_until_max = MaxMetaspaceSize - committed_bytes;
1468 size_t left_until_GC = capacity_until_gc - committed_bytes;
1469 size_t left_to_commit = MIN2(left_until_GC, left_until_max);
1470
1471 return left_to_commit / BytesPerWord;
1472 }
1473
1474 void MetaspaceGC::compute_new_size() {
1475 assert(_shrink_factor <= 100, "invalid shrink factor");
1476 uint current_shrink_factor = _shrink_factor;
1477 _shrink_factor = 0;
1478
1479 // Using committed_bytes() for used_after_gc is an overestimation, since the
1480 // chunk free lists are included in committed_bytes() and the memory in an
1481 // un-fragmented chunk free list is available for future allocations.
1482 // However, if the chunk free lists becomes fragmented, then the memory may
1483 // not be available for future allocations and the memory is therefore "in use".
1484 // Including the chunk free lists in the definition of "in use" is therefore
1485 // necessary. Not including the chunk free lists can cause capacity_until_GC to
1526 new_capacity_until_GC,
1527 MetaspaceGCThresholdUpdater::ComputeNewSize);
1528 if (PrintGCDetails && Verbose) {
1529 gclog_or_tty->print_cr(" expanding:"
1530 " minimum_desired_capacity: %6.1fKB"
1531 " expand_bytes: %6.1fKB"
1532 " MinMetaspaceExpansion: %6.1fKB"
1533 " new metaspace HWM: %6.1fKB",
1534 minimum_desired_capacity / (double) K,
1535 expand_bytes / (double) K,
1536 MinMetaspaceExpansion / (double) K,
1537 new_capacity_until_GC / (double) K);
1538 }
1539 }
1540 return;
1541 }
1542
1543 // No expansion, now see if we want to shrink
1544 // We would never want to shrink more than this
1545 assert(capacity_until_GC >= minimum_desired_capacity,
1546 err_msg(SIZE_FORMAT " >= " SIZE_FORMAT,
1547 capacity_until_GC, minimum_desired_capacity));
1548 size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1549
1550 // Should shrinking be considered?
1551 if (MaxMetaspaceFreeRatio < 100) {
1552 const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1553 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1554 const double max_tmp = used_after_gc / minimum_used_percentage;
1555 size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1556 maximum_desired_capacity = MAX2(maximum_desired_capacity,
1557 MetaspaceSize);
1558 if (PrintGCDetails && Verbose) {
1559 gclog_or_tty->print_cr(" "
1560 " maximum_free_percentage: %6.2f"
1561 " minimum_used_percentage: %6.2f",
1562 maximum_free_percentage,
1563 minimum_used_percentage);
1564 gclog_or_tty->print_cr(" "
1565 " minimum_desired_capacity: %6.1fKB"
1566 " maximum_desired_capacity: %6.1fKB",
1567 minimum_desired_capacity / (double) K,
1568 maximum_desired_capacity / (double) K);
1569 }
1570
1571 assert(minimum_desired_capacity <= maximum_desired_capacity,
1572 "sanity check");
1573
1574 if (capacity_until_GC > maximum_desired_capacity) {
1575 // Capacity too large, compute shrinking size
1576 shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1577 // We don't want shrink all the way back to initSize if people call
1578 // System.gc(), because some programs do that between "phases" and then
1579 // we'd just have to grow the heap up again for the next phase. So we
1580 // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1581 // on the third call, and 100% by the fourth call. But if we recompute
1582 // size without shrinking, it goes back to 0%.
1583 shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1584
1585 shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1586
1587 assert(shrink_bytes <= max_shrink_bytes,
1588 err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1589 shrink_bytes, max_shrink_bytes));
1590 if (current_shrink_factor == 0) {
1591 _shrink_factor = 10;
1592 } else {
1593 _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1594 }
1595 if (PrintGCDetails && Verbose) {
1596 gclog_or_tty->print_cr(" "
1597 " shrinking:"
1598 " initSize: %.1fK"
1599 " maximum_desired_capacity: %.1fK",
1600 MetaspaceSize / (double) K,
1601 maximum_desired_capacity / (double) K);
1602 gclog_or_tty->print_cr(" "
1603 " shrink_bytes: %.1fK"
1604 " current_shrink_factor: %d"
1605 " new shrink factor: %d"
1606 " MinMetaspaceExpansion: %.1fK",
1607 shrink_bytes / (double) K,
1608 current_shrink_factor,
1609 _shrink_factor,
1659 size_t ChunkManager::free_chunks_total_bytes() {
1660 return free_chunks_total_words() * BytesPerWord;
1661 }
1662
// Current number of free chunks across the free lists.
size_t ChunkManager::free_chunks_count() {
#ifdef ASSERT
  // Take the expand lock only when not already held; CMS is skipped here --
  // presumably it can call in while the lock state makes acquisition unsafe;
  // confirm.
  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
    MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
    // This lock is only needed in debug because the verification
    // of the _free_chunks_totals walks the list of free chunks
    slow_locked_verify_free_chunks_count();
  }
#endif
  return _free_chunks_count;
}
1675
// Verify (under the expand lock) that the cached _free_chunks_total agrees
// with a fresh walk of the free lists.
void ChunkManager::locked_verify_free_chunks_total() {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(sum_free_chunks() == _free_chunks_total,
         err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
                 " same as sum " SIZE_FORMAT, _free_chunks_total,
                 sum_free_chunks()));
}
1683
1684 void ChunkManager::verify_free_chunks_total() {
1685 MutexLockerEx cl(SpaceManager::expand_lock(),
1686 Mutex::_no_safepoint_check_flag);
1687 locked_verify_free_chunks_total();
1688 }
1689
// Verify (under the expand lock) that the cached _free_chunks_count agrees
// with a fresh recount of the free lists.
void ChunkManager::locked_verify_free_chunks_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(sum_free_chunks_count() == _free_chunks_count,
         err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
                 " same as sum " SIZE_FORMAT, _free_chunks_count,
                 sum_free_chunks_count()));
}
1697
// Locking wrapper around locked_verify_free_chunks_count(); compiles to a
// no-op outside ASSERT builds.
void ChunkManager::verify_free_chunks_count() {
#ifdef ASSERT
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  locked_verify_free_chunks_count();
#endif
}
1705
// Take the expand lock and run the full (count + total) verification.
void ChunkManager::verify() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  locked_verify();
}
1711
// Full verification of the free-chunk accounting; caller must already hold
// the expand lock.
void ChunkManager::locked_verify() {
  locked_verify_free_chunks_count();
  locked_verify_free_chunks_total();
}
1874 break;
1875 case Metaspace::ROMetaspaceType:
1876 *chunk_word_size = SharedReadOnlySize / wordSize;
1877 *class_chunk_word_size = ClassSpecializedChunk;
1878 break;
1879 case Metaspace::ReadWriteMetaspaceType:
1880 *chunk_word_size = SharedReadWriteSize / wordSize;
1881 *class_chunk_word_size = ClassSpecializedChunk;
1882 break;
1883 case Metaspace::AnonymousMetaspaceType:
1884 case Metaspace::ReflectionMetaspaceType:
1885 *chunk_word_size = SpecializedChunk;
1886 *class_chunk_word_size = ClassSpecializedChunk;
1887 break;
1888 default:
1889 *chunk_word_size = SmallChunk;
1890 *class_chunk_word_size = ClassSmallChunk;
1891 break;
1892 }
1893 assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1894 err_msg("Initial chunks sizes bad: data " SIZE_FORMAT
1895 " class " SIZE_FORMAT,
1896 *chunk_word_size, *class_chunk_word_size));
1897 }
1898
1899 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1900 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1901 size_t free = 0;
1902 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1903 Metachunk* chunk = chunks_in_use(i);
1904 while (chunk != NULL) {
1905 free += chunk->free_word_size();
1906 chunk = chunk->next();
1907 }
1908 }
1909 return free;
1910 }
1911
1912 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1913 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1914 size_t result = 0;
1915 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1916 result += sum_waste_in_chunks_in_use(i);
2019 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2020 chunk_word_size = (size_t) small_chunk_size();
2021 if (word_size + Metachunk::overhead() > small_chunk_size()) {
2022 chunk_word_size = medium_chunk_size();
2023 }
2024 } else {
2025 chunk_word_size = medium_chunk_size();
2026 }
2027
2028 // Might still need a humongous chunk. Enforce
2029 // humongous allocations sizes to be aligned up to
2030 // the smallest chunk size.
2031 size_t if_humongous_sized_chunk =
2032 align_size_up(word_size + Metachunk::overhead(),
2033 smallest_chunk_size());
2034 chunk_word_size =
2035 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2036
2037 assert(!SpaceManager::is_humongous(word_size) ||
2038 chunk_word_size == if_humongous_sized_chunk,
2039 err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
2040 " chunk_word_size " SIZE_FORMAT,
2041 word_size, chunk_word_size));
2042 if (TraceMetadataHumongousAllocation &&
2043 SpaceManager::is_humongous(word_size)) {
2044 gclog_or_tty->print_cr("Metadata humongous allocation:");
2045 gclog_or_tty->print_cr(" word_size " PTR_FORMAT, word_size);
2046 gclog_or_tty->print_cr(" chunk_word_size " PTR_FORMAT,
2047 chunk_word_size);
2048 gclog_or_tty->print_cr(" chunk overhead " PTR_FORMAT,
2049 Metachunk::overhead());
2050 }
2051 return chunk_word_size;
2052 }
2053
2054 void SpaceManager::track_metaspace_memory_usage() {
2055 if (is_init_completed()) {
2056 if (is_class()) {
2057 MemoryService::track_compressed_class_memory_usage();
2058 }
2059 MemoryService::track_metaspace_memory_usage();
2060 }
2061 }
2185
2186 // This returns chunks one at a time. If a new
2187 // class List can be created that is a base class
2188 // of FreeList then something like FreeList::prepend()
2189 // can be used in place of this loop
2190 while (cur != NULL) {
2191 assert(cur->container() != NULL, "Container should have been set");
2192 cur->container()->dec_container_count();
2193 // Capture the next link before it is changed
2194 // by the call to return_chunk_at_head();
2195 Metachunk* next = cur->next();
2196 DEBUG_ONLY(cur->set_is_tagged_free(true);)
2197 list->return_chunk_at_head(cur);
2198 cur = next;
2199 }
2200 }
2201
2202 SpaceManager::~SpaceManager() {
2203 // This call this->_lock which can't be done while holding expand_lock()
2204 assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2205 err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2206 " allocated_chunks_words() " SIZE_FORMAT,
2207 sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
2208
2209 MutexLockerEx fcl(SpaceManager::expand_lock(),
2210 Mutex::_no_safepoint_check_flag);
2211
2212 chunk_manager()->slow_locked_verify();
2213
2214 dec_total_from_size_metrics();
2215
2216 if (TraceMetadataChunkAllocation && Verbose) {
2217 gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, p2i(this));
2218 locked_print_chunks_in_use_on(gclog_or_tty);
2219 }
2220
2221 // Do not mangle freed Metachunks. The chunk size inside Metachunks
2222 // is during the freeing of a VirtualSpaceNodes.
2223
2224 // Have to update before the chunks_in_use lists are emptied
2225 // below.
2226 chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2227 sum_count_in_chunks_in_use());
2258 gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s humongous chunks to dictionary",
2259 sum_count_in_chunks_in_use(HumongousIndex),
2260 chunk_size_name(HumongousIndex));
2261 gclog_or_tty->print("Humongous chunk dictionary: ");
2262 }
2263 // Humongous chunks are never the current chunk.
2264 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2265
2266 while (humongous_chunks != NULL) {
2267 #ifdef ASSERT
2268 humongous_chunks->set_is_tagged_free(true);
2269 #endif
2270 if (TraceMetadataChunkAllocation && Verbose) {
2271 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2272 p2i(humongous_chunks),
2273 humongous_chunks->word_size());
2274 }
2275 assert(humongous_chunks->word_size() == (size_t)
2276 align_size_up(humongous_chunks->word_size(),
2277 smallest_chunk_size()),
2278 err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2279 " granularity " SIZE_FORMAT,
2280 humongous_chunks->word_size(), smallest_chunk_size()));
2281 Metachunk* next_humongous_chunks = humongous_chunks->next();
2282 humongous_chunks->container()->dec_container_count();
2283 chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2284 humongous_chunks = next_humongous_chunks;
2285 }
2286 if (TraceMetadataChunkAllocation && Verbose) {
2287 gclog_or_tty->cr();
2288 gclog_or_tty->print_cr("updated dictionary count " SIZE_FORMAT " %s",
2289 chunk_manager()->humongous_dictionary()->total_count(),
2290 chunk_size_name(HumongousIndex));
2291 }
2292 chunk_manager()->slow_locked_verify();
2293 }
2294
2295 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2296 switch (index) {
2297 case SpecializedIndex:
2298 return "Specialized";
2299 case SmallIndex:
2300 return "Small";
2314 "Need branch for ClassSpecializedChunk");
2315 return SpecializedIndex;
2316 case SmallChunk:
2317 case ClassSmallChunk:
2318 return SmallIndex;
2319 case MediumChunk:
2320 case ClassMediumChunk:
2321 return MediumIndex;
2322 default:
2323 assert(size > MediumChunk || size > ClassMediumChunk,
2324 "Not a humongous chunk");
2325 return HumongousIndex;
2326 }
2327 }
2328
2329 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2330 assert_lock_strong(_lock);
2331 size_t raw_word_size = get_raw_word_size(word_size);
2332 size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size();
2333 assert(raw_word_size >= min_size,
2334 err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
2335 block_freelists()->return_block(p, raw_word_size);
2336 }
2337
2338 // Adds a chunk to the list of chunks in use.
2339 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2340
2341 assert(new_chunk != NULL, "Should not be NULL");
2342 assert(new_chunk->next() == NULL, "Should not be on a list");
2343
2344 new_chunk->reset_empty();
2345
2346 // Find the correct list and and set the current
2347 // chunk for that list.
2348 ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2349
2350 if (index != HumongousIndex) {
2351 retire_current_chunk();
2352 set_current_chunk(new_chunk);
2353 new_chunk->set_next(chunks_in_use(index));
2354 set_chunks_in_use(index, new_chunk);
2524 }
2525 }
2526 }
2527 }
2528
2529 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2530 assert(is_humongous(chunk->word_size()) ||
2531 chunk->word_size() == medium_chunk_size() ||
2532 chunk->word_size() == small_chunk_size() ||
2533 chunk->word_size() == specialized_chunk_size(),
2534 "Chunk size is wrong");
2535 return;
2536 }
2537
#ifdef ASSERT
// Debug-only: check the running allocation total against a recount of the
// words used in the in-use chunk lists.
void SpaceManager::verify_allocated_blocks_words() {
  // Verification is only guaranteed at a safepoint.
  assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
         "Verification can fail if the applications is running");
  assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
         err_msg("allocation total is not consistent " SIZE_FORMAT
                 " vs " SIZE_FORMAT,
                 allocated_blocks_words(), sum_used_in_chunks_in_use()));
}

#endif
2550
2551 void SpaceManager::dump(outputStream* const out) const {
2552 size_t curr_total = 0;
2553 size_t waste = 0;
2554 uint i = 0;
2555 size_t used = 0;
2556 size_t capacity = 0;
2557
2558 // Add up statistics for all chunks in this SpaceManager.
2559 for (ChunkIndex index = ZeroIndex;
2560 index < NumberOfInUseLists;
2561 index = next_chunk_index(index)) {
2562 for (Metachunk* curr = chunks_in_use(index);
2563 curr != NULL;
2564 curr = curr->next()) {
2565 out->print("%d) ", i++);
2566 curr->print_on(out);
2599 #endif // PRODUCT
2600
2601 // MetaspaceAux
2602
2603
// Running totals indexed by Metaspace::MetadataType (one slot per type;
// presumably NonClassType and ClassType -- confirm MetadataTypeCount == 2).
size_t MetaspaceAux::_capacity_words[] = {0, 0};
size_t MetaspaceAux::_used_words[] = {0, 0};
2606
2607 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2608 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2609 return list == NULL ? 0 : list->free_bytes();
2610 }
2611
2612 size_t MetaspaceAux::free_bytes() {
2613 return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2614 }
2615
// Subtract 'words' from the running capacity total for 'mdtype'. Guarded by
// the expand lock, so a plain -= (unlike the atomic updates used for
// _used_words) is sufficient.
void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(words <= capacity_words(mdtype),
         err_msg("About to decrement below 0: words " SIZE_FORMAT
                 " is greater than _capacity_words[%u] " SIZE_FORMAT,
                 words, mdtype, capacity_words(mdtype)));
  _capacity_words[mdtype] -= words;
}
2624
// Add 'words' to the running capacity total for 'mdtype'.
void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
  assert_lock_strong(SpaceManager::expand_lock());
  // Needs to be atomic
  // NOTE(review): the update below is a plain += guarded by the expand lock,
  // not an atomic op -- either the comment above is stale or this should use
  // Atomic::add_ptr like inc_used(); confirm.
  _capacity_words[mdtype] += words;
}
2630
// Subtract 'words' from the running used total for 'mdtype'.
void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
  assert(words <= used_words(mdtype),
         err_msg("About to decrement below 0: words " SIZE_FORMAT
                 " is greater than _used_words[%u] " SIZE_FORMAT,
                 words, mdtype, used_words(mdtype)));
  // For CMS deallocation of the Metaspaces occurs during the
  // sweep which is a concurrent phase.  Protection by the expand_lock()
  // is not enough since allocation is on a per Metaspace basis
  // and protected by the Metaspace lock.
  // Subtraction is therefore done by atomically adding the negated value.
  jlong minus_words = (jlong) - (jlong) words;
  Atomic::add_ptr(minus_words, &_used_words[mdtype]);
}
2643
// Add 'words' to the running used total for 'mdtype'.
void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
  // _used_words tracks allocations for
  // each piece of metadata.  Those allocations are
  // generally done concurrently by different application
  // threads so must be done atomically.
  Atomic::add_ptr(words, &_used_words[mdtype]);
}
2651
2652 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2653 size_t used = 0;
2654 ClassLoaderDataGraphMetaspaceIterator iter;
2655 while (iter.repeat()) {
2682 // added to the capacity calculation as needed.
2683 size_t capacity = 0;
2684 ClassLoaderDataGraphMetaspaceIterator iter;
2685 while (iter.repeat()) {
2686 Metaspace* msp = iter.get_next();
2687 if (msp != NULL) {
2688 capacity += msp->capacity_words_slow(mdtype);
2689 }
2690 }
2691 return capacity * BytesPerWord;
2692 }
2693
2694 size_t MetaspaceAux::capacity_bytes_slow() {
2695 #ifdef PRODUCT
2696 // Use capacity_bytes() in PRODUCT instead of this function.
2697 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2698 #endif
2699 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2700 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2701 assert(capacity_bytes() == class_capacity + non_class_capacity,
2702 err_msg("bad accounting: capacity_bytes() " SIZE_FORMAT
2703 " class_capacity + non_class_capacity " SIZE_FORMAT
2704 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2705 capacity_bytes(), class_capacity + non_class_capacity,
2706 class_capacity, non_class_capacity));
2707
2708 return class_capacity + non_class_capacity;
2709 }
2710
2711 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2712 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2713 return list == NULL ? 0 : list->reserved_bytes();
2714 }
2715
2716 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2717 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2718 return list == NULL ? 0 : list->committed_bytes();
2719 }
2720
2721 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2722
2723 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2724 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2725 if (chunk_manager == NULL) {
2726 return 0;
2887 void MetaspaceAux::dump(outputStream* out) {
2888 out->print_cr("All Metaspace:");
2889 out->print("data space: "); print_on(out, Metaspace::NonClassType);
2890 out->print("class space: "); print_on(out, Metaspace::ClassType);
2891 print_waste(out);
2892 }
2893
2894 void MetaspaceAux::verify_free_chunks() {
2895 Metaspace::chunk_manager_metadata()->verify();
2896 if (Metaspace::using_class_space()) {
2897 Metaspace::chunk_manager_class()->verify();
2898 }
2899 }
2900
2901 void MetaspaceAux::verify_capacity() {
2902 #ifdef ASSERT
2903 size_t running_sum_capacity_bytes = capacity_bytes();
2904 // For purposes of the running sum of capacity, verify against capacity
2905 size_t capacity_in_use_bytes = capacity_bytes_slow();
2906 assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2907 err_msg("capacity_words() * BytesPerWord " SIZE_FORMAT
2908 " capacity_bytes_slow()" SIZE_FORMAT,
2909 running_sum_capacity_bytes, capacity_in_use_bytes));
2910 for (Metaspace::MetadataType i = Metaspace::ClassType;
2911 i < Metaspace:: MetadataTypeCount;
2912 i = (Metaspace::MetadataType)(i + 1)) {
2913 size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2914 assert(capacity_bytes(i) == capacity_in_use_bytes,
2915 err_msg("capacity_bytes(%u) " SIZE_FORMAT
2916 " capacity_bytes_slow(%u)" SIZE_FORMAT,
2917 i, capacity_bytes(i), i, capacity_in_use_bytes));
2918 }
2919 #endif
2920 }
2921
2922 void MetaspaceAux::verify_used() {
2923 #ifdef ASSERT
2924 size_t running_sum_used_bytes = used_bytes();
2925 // For purposes of the running sum of used, verify against used
2926 size_t used_in_use_bytes = used_bytes_slow();
2927 assert(used_bytes() == used_in_use_bytes,
2928 err_msg("used_bytes() " SIZE_FORMAT
2929 " used_bytes_slow()" SIZE_FORMAT,
2930 used_bytes(), used_in_use_bytes));
2931 for (Metaspace::MetadataType i = Metaspace::ClassType;
2932 i < Metaspace:: MetadataTypeCount;
2933 i = (Metaspace::MetadataType)(i + 1)) {
2934 size_t used_in_use_bytes = used_bytes_slow(i);
2935 assert(used_bytes(i) == used_in_use_bytes,
2936 err_msg("used_bytes(%u) " SIZE_FORMAT
2937 " used_bytes_slow(%u)" SIZE_FORMAT,
2938 i, used_bytes(i), i, used_in_use_bytes));
2939 }
2940 #endif
2941 }
2942
// Run both accounting cross-checks. Debug-only work: the callees wrap their
// bodies in ASSERT and are empty in product builds.
void MetaspaceAux::verify_metrics() {
  verify_capacity();
  verify_used();
}
2947
2948
2949 // Metaspace methods
2950
// Word sizes of the first chunk handed to a new (class) metaspace, and the
// commit/reserve alignments. All start at zero; presumably they are filled
// in during global/ergo initialization -- confirm against ergo_initialize().
size_t Metaspace::_first_chunk_word_size = 0;
size_t Metaspace::_first_class_chunk_word_size = 0;

size_t Metaspace::_commit_alignment = 0;
size_t Metaspace::_reserve_alignment = 0;
2956
2957 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2958 initialize(lock, type);
3140
3141 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3142 st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3143 p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3144 if (_class_space_list != NULL) {
3145 address base = (address)_class_space_list->current_virtual_space()->bottom();
3146 st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3147 compressed_class_space_size(), p2i(base));
3148 if (requested_addr != 0) {
3149 st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3150 }
3151 st->cr();
3152 }
3153 }
3154
// For UseCompressedClassPointers the class space is reserved above the top of
// the Java heap.  The argument passed in is at the base of the compressed space.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, esp with UseLargePages
  assert(rs.size() >= CompressedClassSpaceSize,
         err_msg(SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize));
  assert(using_class_space(), "Must be using class space");
  // The class space gets its own virtual space list and chunk manager,
  // sized with the class-specific chunk sizes.
  _class_space_list = new VirtualSpaceList(rs);
  _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);

  // Failure here is fatal: compressed class pointers cannot work without
  // the class space.
  if (!_class_space_list->initialization_succeeded()) {
    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
  }
}
3169
3170 #endif
3171
3172 void Metaspace::ergo_initialize() {
3173 if (DumpSharedSpaces) {
3174 // Using large pages when dumping the shared archive is currently not implemented.
3175 FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3176 }
3177
3178 size_t page_size = os::vm_page_size();
3179 if (UseLargePages && UseLargePagesInMetaspace) {
3180 page_size = os::large_page_size();
3671 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3672 space_string);
3673 }
3674
3675 if (!is_init_completed()) {
3676 vm_exit_during_initialization("OutOfMemoryError", space_string);
3677 }
3678
3679 if (out_of_compressed_class_space) {
3680 THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3681 } else {
3682 THROW_OOP(Universe::out_of_memory_error_metaspace());
3683 }
3684 }
3685
3686 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
3687 switch (mdtype) {
3688 case Metaspace::ClassType: return "Class";
3689 case Metaspace::NonClassType: return "Metadata";
3690 default:
3691 assert(false, err_msg("Got bad mdtype: %d", (int) mdtype));
3692 return NULL;
3693 }
3694 }
3695
3696 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3697 assert(DumpSharedSpaces, "sanity");
3698
3699 int byte_size = (int)word_size * HeapWordSize;
3700 AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
3701
3702 if (_alloc_record_head == NULL) {
3703 _alloc_record_head = _alloc_record_tail = rec;
3704 } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
3705 _alloc_record_tail->_next = rec;
3706 _alloc_record_tail = rec;
3707 } else {
3708 // slow linear search, but this doesn't happen that often, and only when dumping
3709 for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
3710 if (old->_ptr == ptr) {
3711 assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
3953 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3954 VirtualSpaceNode vsn(vsn_test_size_bytes);
3955 vsn.initialize();
3956 vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3957 vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3958 vsn.retire(&cm);
3959
3960 const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3961 size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3962 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3963
3964 assert(num_medium_chunks == 0, "should not get any medium chunks");
3965 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3966 assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3967 }
3968
3969 }
3970
// Assert that 'word_size' words ARE available in the node 'vsn' in scope.
// (Variadic assert form; the err_msg wrapper is the legacy style.)
#define assert_is_available_positive(word_size) \
  assert(vsn.is_available(word_size), \
         #word_size ": " PTR_FORMAT " bytes were not available in " \
         "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
         (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
3976
// Assert that 'word_size' words are NOT available in the node 'vsn' in scope.
// (Variadic assert form; the err_msg wrapper is the legacy style.)
#define assert_is_available_negative(word_size) \
  assert(!vsn.is_available(word_size), \
         #word_size ": " PTR_FORMAT " bytes should not be available in " \
         "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
         (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
3982
3983 static void test_is_available_positive() {
3984 // Reserve some memory.
3985 VirtualSpaceNode vsn(os::vm_allocation_granularity());
3986 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3987
3988 // Commit some memory.
3989 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3990 bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3991 assert(expanded, "Failed to commit");
3992
3993 // Check that is_available accepts the committed size.
3994 assert_is_available_positive(commit_word_size);
3995
3996 // Check that is_available accepts half the committed size.
3997 size_t expand_word_size = commit_word_size / 2;
3998 assert_is_available_positive(expand_word_size);
3999 }
4000
4001 static void test_is_available_negative() {
|
  // Record 'count' chunks totalling 'v' words as free. Both counters are
  // updated with atomic adds.
  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  // Accessor for the dictionary holding the free humongous chunks.
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }
179
180 ChunkList* free_chunks(ChunkIndex index);
181
182 // Returns the list for the given chunk word size.
183 ChunkList* find_free_chunks_list(size_t word_size);
184
185 // Remove from a list by size. Selects list based on size of chunk.
186 Metachunk* free_chunks_get(size_t chunk_word_size);
187
// Assert that 'index' is one of the known chunk free-list indices.
#define index_bounds_check(index) \
  assert(index == SpecializedIndex || \
         index == SmallIndex || \
         index == MediumIndex || \
         index == HumongousIndex, "Bad index: %d", (int) index)
193
194 size_t num_free_chunks(ChunkIndex index) const {
195 index_bounds_check(index);
196
197 if (index == HumongousIndex) {
198 return _humongous_dictionary.total_free_blocks();
199 }
200
201 ssize_t count = _free_chunks[index].count();
202 return count == -1 ? 0 : (size_t) count;
203 }
204
205 size_t size_free_chunks_in_bytes(ChunkIndex index) const {
206 index_bounds_check(index);
207
208 size_t word_size = 0;
209 if (index == HumongousIndex) {
210 word_size = _humongous_dictionary.total_size();
211 } else {
212 const size_t size_per_chunk_in_words = _free_chunks[index].size();
361 // in the node from any freelist.
362 void purge(ChunkManager* chunk_manager);
363
364 // If an allocation doesn't fit in the current node a new node is created.
365 // Allocate chunks out of the remaining committed space in this node
366 // to avoid wasting that memory.
367 // This always adds up because all the chunk sizes are multiples of
368 // the smallest chunk size.
369 void retire(ChunkManager* chunk_manager);
370
371 #ifdef ASSERT
372 // Debug support
373 void mangle();
374 #endif
375
376 void print_on(outputStream* st) const;
377 };
378
// Assert that pointer 'ptr' is aligned to 'alignment' bytes.
#define assert_is_ptr_aligned(ptr, alignment) \
  assert(is_ptr_aligned(ptr, alignment), \
         PTR_FORMAT " is not aligned to " \
         SIZE_FORMAT, p2i(ptr), alignment)
383
// Assert that 'size' is a multiple of 'alignment'.
#define assert_is_size_aligned(size, alignment) \
  assert(is_size_aligned(size, alignment), \
         SIZE_FORMAT " is not aligned to " \
         SIZE_FORMAT, size, alignment)
388
389
390 // Decide if large pages should be committed when the memory is reserved.
391 static bool should_commit_large_pages_when_reserving(size_t bytes) {
392 if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
393 size_t words = bytes / BytesPerWord;
394 bool is_class = false; // We never reserve large pages for the class space.
395 if (MetaspaceGC::can_expand(words, is_class) &&
396 MetaspaceGC::allowed_expansion() >= words) {
397 return true;
398 }
399 }
400
401 return false;
402 }
403
404 // byte_size is the size of the associated virtualspace.
405 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
406 assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
407
// Global lock guarding metaspace expansion (asserted by the inc/dec
// counter updaters throughout this file). May be taken while the VM is
// blocked; never checks for safepoints.
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag,
            Monitor::_safepoint_check_never);
789
// Bump the count of SpaceManagers holding chunks from this node; caller
// must hold the expand lock. Debug builds re-verify the count.
void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
  DEBUG_ONLY(verify_container_count();)
}
795
// Drop the container count; caller must hold the expand lock. Unlike
// inc_container_count() no verification is done here.
void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}
800
#ifdef ASSERT
// Cross-check the running _container_count against a full recount.
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         "Inconsistency in container_count _container_count " UINTX_FORMAT
         " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
}
#endif
808
809 // BlockFreelist methods
810
// A BlockFreelist owns its backing dictionary of returned blocks.
BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()) {}
812
813 BlockFreelist::~BlockFreelist() {
814 if (Verbose && TraceMetadataChunkAllocation) {
815 dictionary()->print_free_lists(gclog_or_tty);
816 }
817 delete _dictionary;
818 }
819
820 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
821 Metablock* free_chunk = ::new (p) Metablock(word_size);
822 dictionary()->return_chunk(free_chunk);
823 }
824
825 MetaWord* BlockFreelist::get_block(size_t word_size) {
948 // aligned only the middle alignment of the VirtualSpace is used.
949 assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
950 assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
951
952 // ReservedSpaces marked as special will have the entire memory
953 // pre-committed. Setting a committed size will make sure that
954 // committed_size and actual_committed_size agrees.
955 size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
956
957 bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
958 Metaspace::commit_alignment());
959 if (result) {
960 assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
961 "Checking that the pre-committed memory was registered by the VirtualSpace");
962
963 set_top((MetaWord*)virtual_space()->low());
964 set_reserved(MemRegion((HeapWord*)_rs.base(),
965 (HeapWord*)(_rs.base() + _rs.size())));
966
967 assert(reserved()->start() == (HeapWord*) _rs.base(),
968 "Reserved start was not set properly " PTR_FORMAT
969 " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
970 assert(reserved()->word_size() == _rs.size() / BytesPerWord,
971 "Reserved size was not set properly " SIZE_FORMAT
972 " != " SIZE_FORMAT, reserved()->word_size(),
973 _rs.size() / BytesPerWord);
974 }
975
976 return result;
977 }
978
// One-line summary of this node: address, capacity, percentage used and
// the [bottom, top, end, high_boundary) boundaries.
void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  // Guard against division by zero for an empty node.
  st->print_cr(" space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
           "[" PTR_FORMAT ", " PTR_FORMAT ", "
           PTR_FORMAT ", " PTR_FORMAT ")",
           p2i(vs), capacity / K,
           capacity == 0 ? 0 : used * 100 / capacity,
           p2i(bottom()), p2i(top()), p2i(end()),
           p2i(vs->high_boundary()));
}
991
992 #ifdef ASSERT
993 void VirtualSpaceNode::mangle() {
1001
1002 VirtualSpaceList::~VirtualSpaceList() {
1003 VirtualSpaceListIterator iter(virtual_space_list());
1004 while (iter.repeat()) {
1005 VirtualSpaceNode* vsl = iter.get_next();
1006 delete vsl;
1007 }
1008 }
1009
1010 void VirtualSpaceList::inc_reserved_words(size_t v) {
1011 assert_lock_strong(SpaceManager::expand_lock());
1012 _reserved_words = _reserved_words + v;
1013 }
1014 void VirtualSpaceList::dec_reserved_words(size_t v) {
1015 assert_lock_strong(SpaceManager::expand_lock());
1016 _reserved_words = _reserved_words - v;
1017 }
1018
// Assert that the committed total has not exceeded MaxMetaspaceSize.
// No trailing semicolon: call sites write assert_committed_below_limit();
// and the old trailing ';' expanded to an extra empty statement.
#define assert_committed_below_limit()                        \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
         "Too much committed memory. Committed: " SIZE_FORMAT \
         " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
         MetaspaceAux::committed_bytes(), MaxMetaspaceSize)
1024
1025 void VirtualSpaceList::inc_committed_words(size_t v) {
1026 assert_lock_strong(SpaceManager::expand_lock());
1027 _committed_words = _committed_words + v;
1028
1029 assert_committed_below_limit();
1030 }
1031 void VirtualSpaceList::dec_committed_words(size_t v) {
1032 assert_lock_strong(SpaceManager::expand_lock());
1033 _committed_words = _committed_words - v;
1034
1035 assert_committed_below_limit();
1036 }
1037
// Bump the node count of this list; expand_lock() must be held.
void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
1042 void VirtualSpaceList::dec_virtual_space_count() {
1043 assert_lock_strong(SpaceManager::expand_lock());
1444 size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
1445 if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
1446 return false;
1447 }
1448 }
1449
1450 // Check if the user has imposed a limit on the metaspace memory.
1451 size_t committed_bytes = MetaspaceAux::committed_bytes();
1452 if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
1453 return false;
1454 }
1455
1456 return true;
1457 }
1458
1459 size_t MetaspaceGC::allowed_expansion() {
1460 size_t committed_bytes = MetaspaceAux::committed_bytes();
1461 size_t capacity_until_gc = capacity_until_GC();
1462
1463 assert(capacity_until_gc >= committed_bytes,
1464 "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
1465 capacity_until_gc, committed_bytes);
1466
1467 size_t left_until_max = MaxMetaspaceSize - committed_bytes;
1468 size_t left_until_GC = capacity_until_gc - committed_bytes;
1469 size_t left_to_commit = MIN2(left_until_GC, left_until_max);
1470
1471 return left_to_commit / BytesPerWord;
1472 }
1473
1474 void MetaspaceGC::compute_new_size() {
1475 assert(_shrink_factor <= 100, "invalid shrink factor");
1476 uint current_shrink_factor = _shrink_factor;
1477 _shrink_factor = 0;
1478
1479 // Using committed_bytes() for used_after_gc is an overestimation, since the
1480 // chunk free lists are included in committed_bytes() and the memory in an
1481 // un-fragmented chunk free list is available for future allocations.
1482 // However, if the chunk free lists becomes fragmented, then the memory may
1483 // not be available for future allocations and the memory is therefore "in use".
1484 // Including the chunk free lists in the definition of "in use" is therefore
1485 // necessary. Not including the chunk free lists can cause capacity_until_GC to
1526 new_capacity_until_GC,
1527 MetaspaceGCThresholdUpdater::ComputeNewSize);
1528 if (PrintGCDetails && Verbose) {
1529 gclog_or_tty->print_cr(" expanding:"
1530 " minimum_desired_capacity: %6.1fKB"
1531 " expand_bytes: %6.1fKB"
1532 " MinMetaspaceExpansion: %6.1fKB"
1533 " new metaspace HWM: %6.1fKB",
1534 minimum_desired_capacity / (double) K,
1535 expand_bytes / (double) K,
1536 MinMetaspaceExpansion / (double) K,
1537 new_capacity_until_GC / (double) K);
1538 }
1539 }
1540 return;
1541 }
1542
1543 // No expansion, now see if we want to shrink
1544 // We would never want to shrink more than this
1545 assert(capacity_until_GC >= minimum_desired_capacity,
1546 SIZE_FORMAT " >= " SIZE_FORMAT,
1547 capacity_until_GC, minimum_desired_capacity);
1548 size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1549
1550 // Should shrinking be considered?
1551 if (MaxMetaspaceFreeRatio < 100) {
1552 const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1553 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1554 const double max_tmp = used_after_gc / minimum_used_percentage;
1555 size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1556 maximum_desired_capacity = MAX2(maximum_desired_capacity,
1557 MetaspaceSize);
1558 if (PrintGCDetails && Verbose) {
1559 gclog_or_tty->print_cr(" "
1560 " maximum_free_percentage: %6.2f"
1561 " minimum_used_percentage: %6.2f",
1562 maximum_free_percentage,
1563 minimum_used_percentage);
1564 gclog_or_tty->print_cr(" "
1565 " minimum_desired_capacity: %6.1fKB"
1566 " maximum_desired_capacity: %6.1fKB",
1567 minimum_desired_capacity / (double) K,
1568 maximum_desired_capacity / (double) K);
1569 }
1570
1571 assert(minimum_desired_capacity <= maximum_desired_capacity,
1572 "sanity check");
1573
1574 if (capacity_until_GC > maximum_desired_capacity) {
1575 // Capacity too large, compute shrinking size
1576 shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1577 // We don't want shrink all the way back to initSize if people call
1578 // System.gc(), because some programs do that between "phases" and then
1579 // we'd just have to grow the heap up again for the next phase. So we
1580 // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1581 // on the third call, and 100% by the fourth call. But if we recompute
1582 // size without shrinking, it goes back to 0%.
1583 shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1584
1585 shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1586
1587 assert(shrink_bytes <= max_shrink_bytes,
1588 "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1589 shrink_bytes, max_shrink_bytes);
1590 if (current_shrink_factor == 0) {
1591 _shrink_factor = 10;
1592 } else {
1593 _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1594 }
1595 if (PrintGCDetails && Verbose) {
1596 gclog_or_tty->print_cr(" "
1597 " shrinking:"
1598 " initSize: %.1fK"
1599 " maximum_desired_capacity: %.1fK",
1600 MetaspaceSize / (double) K,
1601 maximum_desired_capacity / (double) K);
1602 gclog_or_tty->print_cr(" "
1603 " shrink_bytes: %.1fK"
1604 " current_shrink_factor: %d"
1605 " new shrink factor: %d"
1606 " MinMetaspaceExpansion: %.1fK",
1607 shrink_bytes / (double) K,
1608 current_shrink_factor,
1609 _shrink_factor,
1659 size_t ChunkManager::free_chunks_total_bytes() {
1660 return free_chunks_total_words() * BytesPerWord;
1661 }
1662
// Return the running count of free chunks. Debug builds cross-check the
// counter against a walk of the free lists first.
size_t ChunkManager::free_chunks_count() {
#ifdef ASSERT
  // Only take the lock when it is not already held; verification is also
  // skipped entirely under CMS (presumably to avoid lock-order/concurrency
  // issues with the concurrent sweep — confirm).
  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
    MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
    // This lock is only needed in debug because the verification
    // of the _free_chunks_totals walks the list of free chunks
    slow_locked_verify_free_chunks_count();
  }
#endif
  return _free_chunks_count;
}
1675
1676 void ChunkManager::locked_verify_free_chunks_total() {
1677 assert_lock_strong(SpaceManager::expand_lock());
1678 assert(sum_free_chunks() == _free_chunks_total,
1679 "_free_chunks_total " SIZE_FORMAT " is not the"
1680 " same as sum " SIZE_FORMAT, _free_chunks_total,
1681 sum_free_chunks());
1682 }
1683
// Locking wrapper around locked_verify_free_chunks_total().
void ChunkManager::verify_free_chunks_total() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
  locked_verify_free_chunks_total();
}
1689
1690 void ChunkManager::locked_verify_free_chunks_count() {
1691 assert_lock_strong(SpaceManager::expand_lock());
1692 assert(sum_free_chunks_count() == _free_chunks_count,
1693 "_free_chunks_count " SIZE_FORMAT " is not the"
1694 " same as sum " SIZE_FORMAT, _free_chunks_count,
1695 sum_free_chunks_count());
1696 }
1697
// Locking wrapper around locked_verify_free_chunks_count(); the body is
// compiled away entirely in product builds.
void ChunkManager::verify_free_chunks_count() {
#ifdef ASSERT
  MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
  locked_verify_free_chunks_count();
#endif
}
1705
// Take the expand lock and run the full ChunkManager consistency checks.
void ChunkManager::verify() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
  locked_verify();
}
1711
// Full consistency check (count and total); caller holds the expand lock.
void ChunkManager::locked_verify() {
  locked_verify_free_chunks_count();
  locked_verify_free_chunks_total();
}
1874 break;
1875 case Metaspace::ROMetaspaceType:
1876 *chunk_word_size = SharedReadOnlySize / wordSize;
1877 *class_chunk_word_size = ClassSpecializedChunk;
1878 break;
1879 case Metaspace::ReadWriteMetaspaceType:
1880 *chunk_word_size = SharedReadWriteSize / wordSize;
1881 *class_chunk_word_size = ClassSpecializedChunk;
1882 break;
1883 case Metaspace::AnonymousMetaspaceType:
1884 case Metaspace::ReflectionMetaspaceType:
1885 *chunk_word_size = SpecializedChunk;
1886 *class_chunk_word_size = ClassSpecializedChunk;
1887 break;
1888 default:
1889 *chunk_word_size = SmallChunk;
1890 *class_chunk_word_size = ClassSmallChunk;
1891 break;
1892 }
1893 assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1894 "Initial chunks sizes bad: data " SIZE_FORMAT
1895 " class " SIZE_FORMAT,
1896 *chunk_word_size, *class_chunk_word_size);
1897 }
1898
1899 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1900 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1901 size_t free = 0;
1902 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1903 Metachunk* chunk = chunks_in_use(i);
1904 while (chunk != NULL) {
1905 free += chunk->free_word_size();
1906 chunk = chunk->next();
1907 }
1908 }
1909 return free;
1910 }
1911
1912 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1913 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1914 size_t result = 0;
1915 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1916 result += sum_waste_in_chunks_in_use(i);
2019 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2020 chunk_word_size = (size_t) small_chunk_size();
2021 if (word_size + Metachunk::overhead() > small_chunk_size()) {
2022 chunk_word_size = medium_chunk_size();
2023 }
2024 } else {
2025 chunk_word_size = medium_chunk_size();
2026 }
2027
2028 // Might still need a humongous chunk. Enforce
2029 // humongous allocations sizes to be aligned up to
2030 // the smallest chunk size.
2031 size_t if_humongous_sized_chunk =
2032 align_size_up(word_size + Metachunk::overhead(),
2033 smallest_chunk_size());
2034 chunk_word_size =
2035 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2036
2037 assert(!SpaceManager::is_humongous(word_size) ||
2038 chunk_word_size == if_humongous_sized_chunk,
2039 "Size calculation is wrong, word_size " SIZE_FORMAT
2040 " chunk_word_size " SIZE_FORMAT,
2041 word_size, chunk_word_size);
2042 if (TraceMetadataHumongousAllocation &&
2043 SpaceManager::is_humongous(word_size)) {
2044 gclog_or_tty->print_cr("Metadata humongous allocation:");
2045 gclog_or_tty->print_cr(" word_size " PTR_FORMAT, word_size);
2046 gclog_or_tty->print_cr(" chunk_word_size " PTR_FORMAT,
2047 chunk_word_size);
2048 gclog_or_tty->print_cr(" chunk overhead " PTR_FORMAT,
2049 Metachunk::overhead());
2050 }
2051 return chunk_word_size;
2052 }
2053
2054 void SpaceManager::track_metaspace_memory_usage() {
2055 if (is_init_completed()) {
2056 if (is_class()) {
2057 MemoryService::track_compressed_class_memory_usage();
2058 }
2059 MemoryService::track_metaspace_memory_usage();
2060 }
2061 }
2185
2186 // This returns chunks one at a time. If a new
2187 // class List can be created that is a base class
2188 // of FreeList then something like FreeList::prepend()
2189 // can be used in place of this loop
2190 while (cur != NULL) {
2191 assert(cur->container() != NULL, "Container should have been set");
2192 cur->container()->dec_container_count();
2193 // Capture the next link before it is changed
2194 // by the call to return_chunk_at_head();
2195 Metachunk* next = cur->next();
2196 DEBUG_ONLY(cur->set_is_tagged_free(true);)
2197 list->return_chunk_at_head(cur);
2198 cur = next;
2199 }
2200 }
2201
2202 SpaceManager::~SpaceManager() {
2203 // This call this->_lock which can't be done while holding expand_lock()
2204 assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2205 "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2206 " allocated_chunks_words() " SIZE_FORMAT,
2207 sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2208
2209 MutexLockerEx fcl(SpaceManager::expand_lock(),
2210 Mutex::_no_safepoint_check_flag);
2211
2212 chunk_manager()->slow_locked_verify();
2213
2214 dec_total_from_size_metrics();
2215
2216 if (TraceMetadataChunkAllocation && Verbose) {
2217 gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, p2i(this));
2218 locked_print_chunks_in_use_on(gclog_or_tty);
2219 }
2220
2221 // Do not mangle freed Metachunks. The chunk size inside Metachunks
2222 // is during the freeing of a VirtualSpaceNodes.
2223
2224 // Have to update before the chunks_in_use lists are emptied
2225 // below.
2226 chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2227 sum_count_in_chunks_in_use());
2258 gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s humongous chunks to dictionary",
2259 sum_count_in_chunks_in_use(HumongousIndex),
2260 chunk_size_name(HumongousIndex));
2261 gclog_or_tty->print("Humongous chunk dictionary: ");
2262 }
2263 // Humongous chunks are never the current chunk.
2264 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2265
2266 while (humongous_chunks != NULL) {
2267 #ifdef ASSERT
2268 humongous_chunks->set_is_tagged_free(true);
2269 #endif
2270 if (TraceMetadataChunkAllocation && Verbose) {
2271 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2272 p2i(humongous_chunks),
2273 humongous_chunks->word_size());
2274 }
2275 assert(humongous_chunks->word_size() == (size_t)
2276 align_size_up(humongous_chunks->word_size(),
2277 smallest_chunk_size()),
2278 "Humongous chunk size is wrong: word size " SIZE_FORMAT
2279 " granularity " SIZE_FORMAT,
2280 humongous_chunks->word_size(), smallest_chunk_size());
2281 Metachunk* next_humongous_chunks = humongous_chunks->next();
2282 humongous_chunks->container()->dec_container_count();
2283 chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2284 humongous_chunks = next_humongous_chunks;
2285 }
2286 if (TraceMetadataChunkAllocation && Verbose) {
2287 gclog_or_tty->cr();
2288 gclog_or_tty->print_cr("updated dictionary count " SIZE_FORMAT " %s",
2289 chunk_manager()->humongous_dictionary()->total_count(),
2290 chunk_size_name(HumongousIndex));
2291 }
2292 chunk_manager()->slow_locked_verify();
2293 }
2294
2295 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2296 switch (index) {
2297 case SpecializedIndex:
2298 return "Specialized";
2299 case SmallIndex:
2300 return "Small";
2314 "Need branch for ClassSpecializedChunk");
2315 return SpecializedIndex;
2316 case SmallChunk:
2317 case ClassSmallChunk:
2318 return SmallIndex;
2319 case MediumChunk:
2320 case ClassMediumChunk:
2321 return MediumIndex;
2322 default:
2323 assert(size > MediumChunk || size > ClassMediumChunk,
2324 "Not a humongous chunk");
2325 return HumongousIndex;
2326 }
2327 }
2328
// Return a previously allocated block to this SpaceManager's freelist.
void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
  assert_lock_strong(_lock);
  // Round the requested size up to the actual allocation granularity.
  size_t raw_word_size = get_raw_word_size(word_size);
  // Blocks below the minimum tree-chunk size ("dark matter") cannot be
  // stored in the dictionary-based freelist.
  size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size();
  assert(raw_word_size >= min_size,
         "Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size);
  block_freelists()->return_block(p, raw_word_size);
}
2337
2338 // Adds a chunk to the list of chunks in use.
2339 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2340
2341 assert(new_chunk != NULL, "Should not be NULL");
2342 assert(new_chunk->next() == NULL, "Should not be on a list");
2343
2344 new_chunk->reset_empty();
2345
2346 // Find the correct list and and set the current
2347 // chunk for that list.
2348 ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2349
2350 if (index != HumongousIndex) {
2351 retire_current_chunk();
2352 set_current_chunk(new_chunk);
2353 new_chunk->set_next(chunks_in_use(index));
2354 set_chunks_in_use(index, new_chunk);
2524 }
2525 }
2526 }
2527 }
2528
2529 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2530 assert(is_humongous(chunk->word_size()) ||
2531 chunk->word_size() == medium_chunk_size() ||
2532 chunk->word_size() == small_chunk_size() ||
2533 chunk->word_size() == specialized_chunk_size(),
2534 "Chunk size is wrong");
2535 return;
2536 }
2537
#ifdef ASSERT
// Cross-check the running allocation counter against a walk of the chunks.
void SpaceManager::verify_allocated_blocks_words() {
  // Verification is only guaranteed at a safepoint.
  // (Fixed grammar in the diagnostic: "application", not "applications".)
  assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
         "Verification can fail if the application is running");
  assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
         "allocation total is not consistent " SIZE_FORMAT
         " vs " SIZE_FORMAT,
         allocated_blocks_words(), sum_used_in_chunks_in_use());
}

#endif
2550
2551 void SpaceManager::dump(outputStream* const out) const {
2552 size_t curr_total = 0;
2553 size_t waste = 0;
2554 uint i = 0;
2555 size_t used = 0;
2556 size_t capacity = 0;
2557
2558 // Add up statistics for all chunks in this SpaceManager.
2559 for (ChunkIndex index = ZeroIndex;
2560 index < NumberOfInUseLists;
2561 index = next_chunk_index(index)) {
2562 for (Metachunk* curr = chunks_in_use(index);
2563 curr != NULL;
2564 curr = curr->next()) {
2565 out->print("%d) ", i++);
2566 curr->print_on(out);
2599 #endif // PRODUCT
2600
2601 // MetaspaceAux
2602
2603
// Running totals, indexed by Metaspace::MetadataType.
size_t MetaspaceAux::_capacity_words[] = {0, 0};
size_t MetaspaceAux::_used_words[] = {0, 0};
2606
2607 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2608 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2609 return list == NULL ? 0 : list->free_bytes();
2610 }
2611
2612 size_t MetaspaceAux::free_bytes() {
2613 return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2614 }
2615
2616 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2617 assert_lock_strong(SpaceManager::expand_lock());
2618 assert(words <= capacity_words(mdtype),
2619 "About to decrement below 0: words " SIZE_FORMAT
2620 " is greater than _capacity_words[%u] " SIZE_FORMAT,
2621 words, mdtype, capacity_words(mdtype));
2622 _capacity_words[mdtype] -= words;
2623 }
2624
// Increase the running capacity total for 'mdtype'.
void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
  assert_lock_strong(SpaceManager::expand_lock());
  // The update itself is plain (not atomic); it is serialized by
  // expand_lock(), asserted above. NOTE(review): the original comment said
  // "Needs to be atomic" — presumably concerning unsynchronized readers;
  // confirm before relying on lock-free reads of _capacity_words.
  _capacity_words[mdtype] += words;
}
2630
// Decrease the running used total for 'mdtype'; must be atomic because the
// decrement can race with concurrent increments (see below).
void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
  assert(words <= used_words(mdtype),
         "About to decrement below 0: words " SIZE_FORMAT
         " is greater than _used_words[%u] " SIZE_FORMAT,
         words, mdtype, used_words(mdtype));
  // For CMS deallocation of the Metaspaces occurs during the
  // sweep which is a concurrent phase.  Protection by the expand_lock()
  // is not enough since allocation is on a per Metaspace basis
  // and protected by the Metaspace lock.
  // Subtract by atomically adding the negated value.
  jlong minus_words = (jlong) - (jlong) words;
  Atomic::add_ptr(minus_words, &_used_words[mdtype]);
}
2643
// Increase the running used total for 'mdtype'.
void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
  // _used_words tracks allocations for
  // each piece of metadata.  Those allocations are
  // generally done concurrently by different application
  // threads so must be done atomically.
  Atomic::add_ptr(words, &_used_words[mdtype]);
}
2651
2652 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2653 size_t used = 0;
2654 ClassLoaderDataGraphMetaspaceIterator iter;
2655 while (iter.repeat()) {
2682 // added to the capacity calculation as needed.
2683 size_t capacity = 0;
2684 ClassLoaderDataGraphMetaspaceIterator iter;
2685 while (iter.repeat()) {
2686 Metaspace* msp = iter.get_next();
2687 if (msp != NULL) {
2688 capacity += msp->capacity_words_slow(mdtype);
2689 }
2690 }
2691 return capacity * BytesPerWord;
2692 }
2693
2694 size_t MetaspaceAux::capacity_bytes_slow() {
2695 #ifdef PRODUCT
2696 // Use capacity_bytes() in PRODUCT instead of this function.
2697 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2698 #endif
2699 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2700 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2701 assert(capacity_bytes() == class_capacity + non_class_capacity,
2702 "bad accounting: capacity_bytes() " SIZE_FORMAT
2703 " class_capacity + non_class_capacity " SIZE_FORMAT
2704 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2705 capacity_bytes(), class_capacity + non_class_capacity,
2706 class_capacity, non_class_capacity);
2707
2708 return class_capacity + non_class_capacity;
2709 }
2710
2711 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2712 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2713 return list == NULL ? 0 : list->reserved_bytes();
2714 }
2715
2716 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2717 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2718 return list == NULL ? 0 : list->committed_bytes();
2719 }
2720
2721 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2722
2723 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2724 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2725 if (chunk_manager == NULL) {
2726 return 0;
2887 void MetaspaceAux::dump(outputStream* out) {
2888 out->print_cr("All Metaspace:");
2889 out->print("data space: "); print_on(out, Metaspace::NonClassType);
2890 out->print("class space: "); print_on(out, Metaspace::ClassType);
2891 print_waste(out);
2892 }
2893
2894 void MetaspaceAux::verify_free_chunks() {
2895 Metaspace::chunk_manager_metadata()->verify();
2896 if (Metaspace::using_class_space()) {
2897 Metaspace::chunk_manager_class()->verify();
2898 }
2899 }
2900
2901 void MetaspaceAux::verify_capacity() {
2902 #ifdef ASSERT
2903 size_t running_sum_capacity_bytes = capacity_bytes();
2904 // For purposes of the running sum of capacity, verify against capacity
2905 size_t capacity_in_use_bytes = capacity_bytes_slow();
2906 assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2907 "capacity_words() * BytesPerWord " SIZE_FORMAT
2908 " capacity_bytes_slow()" SIZE_FORMAT,
2909 running_sum_capacity_bytes, capacity_in_use_bytes);
2910 for (Metaspace::MetadataType i = Metaspace::ClassType;
2911 i < Metaspace:: MetadataTypeCount;
2912 i = (Metaspace::MetadataType)(i + 1)) {
2913 size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2914 assert(capacity_bytes(i) == capacity_in_use_bytes,
2915 "capacity_bytes(%u) " SIZE_FORMAT
2916 " capacity_bytes_slow(%u)" SIZE_FORMAT,
2917 i, capacity_bytes(i), i, capacity_in_use_bytes);
2918 }
2919 #endif
2920 }
2921
2922 void MetaspaceAux::verify_used() {
2923 #ifdef ASSERT
2924 size_t running_sum_used_bytes = used_bytes();
2925 // For purposes of the running sum of used, verify against used
2926 size_t used_in_use_bytes = used_bytes_slow();
2927 assert(used_bytes() == used_in_use_bytes,
2928 "used_bytes() " SIZE_FORMAT
2929 " used_bytes_slow()" SIZE_FORMAT,
2930 used_bytes(), used_in_use_bytes);
2931 for (Metaspace::MetadataType i = Metaspace::ClassType;
2932 i < Metaspace:: MetadataTypeCount;
2933 i = (Metaspace::MetadataType)(i + 1)) {
2934 size_t used_in_use_bytes = used_bytes_slow(i);
2935 assert(used_bytes(i) == used_in_use_bytes,
2936 "used_bytes(%u) " SIZE_FORMAT
2937 " used_bytes_slow(%u)" SIZE_FORMAT,
2938 i, used_bytes(i), i, used_in_use_bytes);
2939 }
2940 #endif
2941 }
2942
// Cross-check the running capacity and used counters against slow,
// recomputed values. Both callees are entirely inside #ifdef ASSERT,
// so this is a no-op in PRODUCT builds.
void MetaspaceAux::verify_metrics() {
  verify_capacity();
  verify_used();
}
2947
2948
2949 // Metaspace methods
2950
// Word sizes of the first chunk handed out for non-class and class
// metadata. Zero-initialized here; presumably filled in during VM
// initialization (the assignments are not visible in this chunk).
size_t Metaspace::_first_chunk_word_size = 0;
size_t Metaspace::_first_class_chunk_word_size = 0;

// Commit and reserve alignment granularities for metaspace virtual
// memory. Zero until established during initialization.
size_t Metaspace::_commit_alignment = 0;
2956
2957 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2958 initialize(lock, type);
3140
3141 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3142 st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3143 p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3144 if (_class_space_list != NULL) {
3145 address base = (address)_class_space_list->current_virtual_space()->bottom();
3146 st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3147 compressed_class_space_size(), p2i(base));
3148 if (requested_addr != 0) {
3149 st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3150 }
3151 st->cr();
3152 }
3153 }
3154
3155 // For UseCompressedClassPointers the class space is reserved above the top of
3156 // the Java heap. The argument passed in is at the base of the compressed space.
3157 void Metaspace::initialize_class_space(ReservedSpace rs) {
3158 // The reserved space size may be bigger because of alignment, esp with UseLargePages
3159 assert(rs.size() >= CompressedClassSpaceSize,
3160 SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
3161 assert(using_class_space(), "Must be using class space");
3162 _class_space_list = new VirtualSpaceList(rs);
3163 _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3164
3165 if (!_class_space_list->initialization_succeeded()) {
3166 vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3167 }
3168 }
3169
3170 #endif
3171
3172 void Metaspace::ergo_initialize() {
3173 if (DumpSharedSpaces) {
3174 // Using large pages when dumping the shared archive is currently not implemented.
3175 FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3176 }
3177
3178 size_t page_size = os::vm_page_size();
3179 if (UseLargePages && UseLargePagesInMetaspace) {
3180 page_size = os::large_page_size();
3671 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3672 space_string);
3673 }
3674
3675 if (!is_init_completed()) {
3676 vm_exit_during_initialization("OutOfMemoryError", space_string);
3677 }
3678
3679 if (out_of_compressed_class_space) {
3680 THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3681 } else {
3682 THROW_OOP(Universe::out_of_memory_error_metaspace());
3683 }
3684 }
3685
3686 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
3687 switch (mdtype) {
3688 case Metaspace::ClassType: return "Class";
3689 case Metaspace::NonClassType: return "Metadata";
3690 default:
3691 assert(false, "Got bad mdtype: %d", (int) mdtype);
3692 return NULL;
3693 }
3694 }
3695
3696 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3697 assert(DumpSharedSpaces, "sanity");
3698
3699 int byte_size = (int)word_size * HeapWordSize;
3700 AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
3701
3702 if (_alloc_record_head == NULL) {
3703 _alloc_record_head = _alloc_record_tail = rec;
3704 } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
3705 _alloc_record_tail->_next = rec;
3706 _alloc_record_tail = rec;
3707 } else {
3708 // slow linear search, but this doesn't happen that often, and only when dumping
3709 for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
3710 if (old->_ptr == ptr) {
3711 assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
3953 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3954 VirtualSpaceNode vsn(vsn_test_size_bytes);
3955 vsn.initialize();
3956 vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3957 vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3958 vsn.retire(&cm);
3959
3960 const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3961 size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3962 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3963
3964 assert(num_medium_chunks == 0, "should not get any medium chunks");
3965 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3966 assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3967 }
3968
3969 }
3970
// Asserts that the VirtualSpaceNode named 'vsn' (a local in the calling
// test function) reports word_size words as available.
#define assert_is_available_positive(word_size) \
  assert(vsn.is_available(word_size), \
         #word_size ": " PTR_FORMAT " bytes were not available in " \
         "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
         (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));

// Asserts that the VirtualSpaceNode named 'vsn' reports word_size words
// as NOT available.
#define assert_is_available_negative(word_size) \
  assert(!vsn.is_available(word_size), \
         #word_size ": " PTR_FORMAT " bytes should not be available in " \
         "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
         (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
3982
// Internal test: is_available() must accept sizes up to the committed
// amount. The local must be named 'vsn' because the assert_is_available_*
// macros reference that identifier directly.
static void test_is_available_positive() {
  // Reserve some memory.
  VirtualSpaceNode vsn(os::vm_allocation_granularity());
  assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

  // Commit some memory.
  size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
  bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
  assert(expanded, "Failed to commit");

  // Check that is_available accepts the committed size.
  assert_is_available_positive(commit_word_size);

  // Check that is_available accepts half the committed size.
  size_t expand_word_size = commit_word_size / 2;
  assert_is_available_positive(expand_word_size);
}
4000
4001 static void test_is_available_negative() {
|