1611 VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
1612 MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
1613 // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be
1614 // vm_allocation_granularity aligned on Windows.
1615 size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
1616 large_size += (os::vm_page_size()/BytesPerWord);
1617 vs_list->get_new_chunk(large_size, 0);
1618 }
1619
1620 static void test() {
1621 test_reserved();
1622 test_committed();
1623 test_virtual_space_list_large_chunk();
1624 }
1625 };
1626
// External entry point for the MetaspaceUtils white-box tests; simply
// forwards to the static driver TestMetaspaceUtilsTest::test().
void TestMetaspaceUtils_test() {
  TestMetaspaceUtilsTest::test();
}
1630
// White-box tests for VirtualSpaceNode: verify that retiring a node hands
// its unused committed memory back to a ChunkManager as correctly sized
// free chunks, and that is_available() behaves correctly at and beyond
// the committed boundary, including pointer-arithmetic overflow.
class TestVirtualSpaceNodeTest {
  // Greedily splits words_left into as many medium chunks as fit, then
  // small chunks, then specialized chunks, returning the counts through
  // the reference parameters. Relies on the chunk sizes dividing each
  // other evenly (checked by the STATIC_ASSERTs in test()); any remainder
  // after the specialized pass indicates a broken test setup.
  static void chunk_up(size_t words_left, size_t& num_medium_chunks,
                       size_t& num_small_chunks,
                       size_t& num_specialized_chunks) {
    num_medium_chunks = words_left / MediumChunk;
    words_left = words_left % MediumChunk;

    num_small_chunks = words_left / SmallChunk;
    words_left = words_left % SmallChunk;
    // how many specialized chunks can we get?
    num_specialized_chunks = words_left / SpecializedChunk;
    assert(words_left % SpecializedChunk == 0, "should be nothing left");
  }

 public:
  // Retires VirtualSpaceNodes with different commit/usage patterns and
  // checks that the free chunks handed to the ChunkManager add up to the
  // unused committed space. Holds MetaspaceExpand_lock for the duration,
  // taken with _no_safepoint_check_flag as elsewhere in this file.
  static void test() {
    MutexLockerEx ml(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
    const size_t vsn_test_size_words = MediumChunk * 4;
    const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;

    // The chunk sizes must be multiples of each other, or this will fail
    STATIC_ASSERT(MediumChunk % SmallChunk == 0);
    STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);

    { // No committed memory in VSN: retiring should free nothing.
      ChunkManager cm(false);
      VirtualSpaceNode vsn(false, vsn_test_size_bytes);
      vsn.initialize();
      vsn.retire(&cm);
      assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
    }

    { // All of VSN is committed, half is used by chunks: the other half
      // should come back as two free medium chunks.
      ChunkManager cm(false);
      VirtualSpaceNode vsn(false, vsn_test_size_bytes);
      vsn.initialize();
      vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
      vsn.get_chunk_vs(MediumChunk);
      vsn.get_chunk_vs(MediumChunk);
      vsn.retire(&cm);
      assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
      assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
    }

    const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
    // This doesn't work for systems with vm_page_size >= 16K.
    if (page_chunks < MediumChunk) {
      // 4 pages of VSN is committed, some is used by chunks; the leftover
      // must be returned as the greedy small/specialized decomposition.
      ChunkManager cm(false);
      VirtualSpaceNode vsn(false, vsn_test_size_bytes);

      vsn.initialize();
      vsn.expand_by(page_chunks, page_chunks);
      vsn.get_chunk_vs(SmallChunk);
      vsn.get_chunk_vs(SpecializedChunk);
      vsn.retire(&cm);

      // committed - used = words left to retire
      const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;

      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);

      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
    }

    { // Half of VSN is committed, a humongous chunk is used
      ChunkManager cm(false);
      VirtualSpaceNode vsn(false, vsn_test_size_bytes);
      vsn.initialize();
      vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
      vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
      vsn.retire(&cm);

      const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);

      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
    }

  }

// Assert helpers for the is_available() tests below. Both expect a local
// VirtualSpaceNode named 'vsn' to be in scope at the expansion site.
#define assert_is_available_positive(word_size) \
  assert(vsn.is_available(word_size), \
         #word_size ": " PTR_FORMAT " bytes were not available in " \
         "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
         (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));

#define assert_is_available_negative(word_size) \
  assert(!vsn.is_available(word_size), \
         #word_size ": " PTR_FORMAT " bytes should not be available in " \
         "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
         (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));

  // is_available() must accept sizes up to and including the committed size.
  static void test_is_available_positive() {
    // Reserve some memory.
    VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Check that is_available accepts the committed size.
    assert_is_available_positive(commit_word_size);

    // Check that is_available accepts half the committed size.
    size_t expand_word_size = commit_word_size / 2;
    assert_is_available_positive(expand_word_size);
  }

  // is_available() must reject sizes larger than the committed size.
  static void test_is_available_negative() {
    // Reserve some memory.
    VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Check that is_available doesn't accept a too large size.
    size_t two_times_commit_word_size = commit_word_size * 2;
    assert_is_available_negative(two_times_commit_word_size);
  }

  // is_available() must not be fooled when the requested size, converted
  // to bytes and added to bottom(), wraps past the end of the address
  // space.
  static void test_is_available_overflow() {
    // Reserve some memory.
    VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Calculate a size that will overflow the virtual space size.
    void* virtual_space_max = (void*)(uintptr_t)-1;
    size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
    size_t overflow_size = bottom_to_max + BytesPerWord;
    size_t overflow_word_size = overflow_size / BytesPerWord;

    // Check that is_available can handle the overflow.
    assert_is_available_negative(overflow_word_size);
  }

  // Driver: runs all is_available() tests.
  static void test_is_available() {
    TestVirtualSpaceNodeTest::test_is_available_positive();
    TestVirtualSpaceNodeTest::test_is_available_negative();
    TestVirtualSpaceNodeTest::test_is_available_overflow();
  }
};
1789
1790 #endif // !PRODUCT
1791
// Flat snapshot of free-chunk counts per chunk index, filled in by
// test_metaspace_retrieve_chunkmanager_statistics() below. Kept as plain
// ints; presumably consumed by an external test harness — confirm at the
// call site of the extern function.
struct chunkmanager_statistics_t {
  int num_specialized_chunks;
  int num_small_chunks;
  int num_medium_chunks;
  int num_humongous_chunks;
};
1798
1799 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
1800 ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
1801 ChunkManagerStatistics stat;
1802 chunk_manager->collect_statistics(&stat);
1803 out->num_specialized_chunks = (int)stat.chunk_stats(SpecializedIndex).num();
1804 out->num_small_chunks = (int)stat.chunk_stats(SmallIndex).num();
1805 out->num_medium_chunks = (int)stat.chunk_stats(MediumIndex).num();
1806 out->num_humongous_chunks = (int)stat.chunk_stats(HumongousIndex).num();
1807 }
1808
1809 struct chunk_geometry_t {
|
1611 VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
1612 MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
1613 // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be
1614 // vm_allocation_granularity aligned on Windows.
1615 size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
1616 large_size += (os::vm_page_size()/BytesPerWord);
1617 vs_list->get_new_chunk(large_size, 0);
1618 }
1619
1620 static void test() {
1621 test_reserved();
1622 test_committed();
1623 test_virtual_space_list_large_chunk();
1624 }
1625 };
1626
// External entry point for the MetaspaceUtils white-box tests; simply
// forwards to the static driver TestMetaspaceUtilsTest::test().
void TestMetaspaceUtils_test() {
  TestMetaspaceUtilsTest::test();
}
1630
1631 #endif // !PRODUCT
1632
// Flat snapshot of free-chunk counts per chunk index, filled in by
// test_metaspace_retrieve_chunkmanager_statistics() below. Kept as plain
// ints; presumably consumed by an external test harness — confirm at the
// call site of the extern function.
struct chunkmanager_statistics_t {
  int num_specialized_chunks;
  int num_small_chunks;
  int num_medium_chunks;
  int num_humongous_chunks;
};
1639
1640 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
1641 ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
1642 ChunkManagerStatistics stat;
1643 chunk_manager->collect_statistics(&stat);
1644 out->num_specialized_chunks = (int)stat.chunk_stats(SpecializedIndex).num();
1645 out->num_small_chunks = (int)stat.chunk_stats(SmallIndex).num();
1646 out->num_medium_chunks = (int)stat.chunk_stats(MediumIndex).num();
1647 out->num_humongous_chunks = (int)stat.chunk_stats(HumongousIndex).num();
1648 }
1649
1650 struct chunk_geometry_t {
|