src/share/vm/memory/metaspace.cpp

   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "gc/shared/collectedHeap.hpp"
  26 #include "gc/shared/collectorPolicy.hpp"
  27 #include "gc/shared/gcLocker.hpp"

  28 #include "memory/allocation.hpp"
  29 #include "memory/binaryTreeDictionary.hpp"
  30 #include "memory/filemap.hpp"
  31 #include "memory/freeList.hpp"
  32 #include "memory/metachunk.hpp"
  33 #include "memory/metaspace.hpp"
  34 #include "memory/metaspaceGCThresholdUpdater.hpp"
  35 #include "memory/metaspaceShared.hpp"
  36 #include "memory/metaspaceTracer.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "memory/universe.hpp"
  39 #include "runtime/atomic.inline.hpp"
  40 #include "runtime/globals.hpp"
  41 #include "runtime/init.hpp"
  42 #include "runtime/java.hpp"
  43 #include "runtime/mutex.hpp"
  44 #include "runtime/orderAccess.inline.hpp"
  45 #include "services/memTracker.hpp"
  46 #include "services/memoryService.hpp"
  47 #include "utilities/copy.hpp"


 794 }
 795 
 796 void VirtualSpaceNode::dec_container_count() {
 797   assert_lock_strong(SpaceManager::expand_lock());
 798   _container_count--;
 799 }
 800 
 801 #ifdef ASSERT
 802 void VirtualSpaceNode::verify_container_count() {
 803   assert(_container_count == container_count_slow(),
 804          "Inconsistency in container_count _container_count " UINTX_FORMAT
 805          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
 806 }
 807 #endif
 808 
 809 // BlockFreelist methods
 810 
 811 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()) {}
 812 
 813 BlockFreelist::~BlockFreelist() {
 814   if (Verbose && TraceMetadataChunkAllocation) {
 815     dictionary()->print_free_lists(gclog_or_tty);


 816   }
 817   delete _dictionary;
 818 }
 819 
 820 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
 821   Metablock* free_chunk = ::new (p) Metablock(word_size);
 822   dictionary()->return_chunk(free_chunk);
 823 }
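Note the placement new above: the freed metadata words themselves are overwritten with a Metablock header, so the dictionary of free blocks costs no memory beyond the blocks it tracks. This is also why blocks smaller than TreeChunk<Metablock, FreeList<Metablock> >::min_size() cannot be tracked at all and, as get_block() below puts it, are written off as dark matter.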
 824 
 825 MetaWord* BlockFreelist::get_block(size_t word_size) {
 826   if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
 827     // Dark matter.  Too small for dictionary.
 828     return NULL;
 829   }
 830 
 831   Metablock* free_block =
 832     dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
 833   if (free_block == NULL) {
 834     return NULL;
 835   }


 875 
 876 size_t VirtualSpaceNode::free_words_in_vs() const {
 877   return pointer_delta(end(), top(), sizeof(MetaWord));
 878 }
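pointer_delta() scales the byte distance by the element size, so with 8-byte MetaWords, top() == 0x1000 and end() == 0x1040, free_words_in_vs() returns (0x1040 - 0x1000) / 8 = 8 free words.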
 879 
 880 // Allocates the chunk from the virtual space only.
 881 // This interface is also used internally for debugging.  Not all
 882 // chunks removed here are necessarily used for allocation.
 883 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
 884   // Bottom of the new chunk
 885   MetaWord* chunk_limit = top();
 886   assert(chunk_limit != NULL, "Not safe to call this method");
 887 
 888   // The virtual spaces are always expanded by the
 889   // commit granularity to enforce the following condition.
 890   // Without this the is_available check will not work correctly.
 891   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
 892       "The committed memory doesn't match the expanded memory.");
 893 
 894   if (!is_available(chunk_word_size)) {
 895     if (TraceMetadataChunkAllocation) {
 896       gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
 897       // Dump some information about the virtual space that is nearly full
 898       print_on(gclog_or_tty);
 899     }
 900     return NULL;
 901   }
 902 
 903   // Take the space  (bump top on the current virtual space).
 904   inc_top(chunk_word_size);
 905 
 906   // Initialize the chunk
 907   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
 908   return result;
 909 }
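The is_available() check is defined outside this hunk; given free_words_in_vs() above and the committed == actual_committed invariant asserted here, it presumably reduces to a bounds check against the committed region, something like:

    // Hypothetical sketch of is_available(); the real body is not in this hunk.
    bool VirtualSpaceNode::is_available(size_t words) {
      return words <= pointer_delta(end(), top(), sizeof(MetaWord));
    }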
 910 
 911 
 912 // Expand the virtual space (commit more of the reserved space)
 913 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
 914   size_t min_bytes = min_words * BytesPerWord;
 915   size_t preferred_bytes = preferred_words * BytesPerWord;
 916 
 917   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
 918 
 919   if (uncommitted < min_bytes) {


1214     // ensure lock-free iteration sees fully initialized node
1215     OrderAccess::storestore();
1216     link_vs(new_entry);
1217     return true;
1218   }
1219 }
1220 
1221 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1222   if (virtual_space_list() == NULL) {
1223       set_virtual_space_list(new_entry);
1224   } else {
1225     current_virtual_space()->set_next(new_entry);
1226   }
1227   set_current_virtual_space(new_entry);
1228   inc_reserved_words(new_entry->reserved_words());
1229   inc_committed_words(new_entry->committed_words());
1230   inc_virtual_space_count();
1231 #ifdef ASSERT
1232   new_entry->mangle();
1233 #endif
1234   if (TraceMetavirtualspaceAllocation && Verbose) {

1235     VirtualSpaceNode* vsl = current_virtual_space();
1236     vsl->print_on(gclog_or_tty);

1237   }
1238 }
1239 
1240 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1241                                       size_t min_words,
1242                                       size_t preferred_words) {
1243   size_t before = node->committed_words();
1244 
1245   bool result = node->expand_by(min_words, preferred_words);
1246 
1247   size_t after = node->committed_words();
1248 
1249   // after and before can be the same if the memory was pre-committed.
1250   assert(after >= before, "Inconsistency");
1251   inc_committed_words(after - before);
1252 
1253   return result;
1254 }
1255 
1256 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {


1313   // The expand amount is currently only determined by the requested sizes
1314   // and not how much committed memory is left in the current virtual space.
1315 
1316   size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
1317   size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
1318   if (min_word_size >= preferred_word_size) {
1319     // Can happen when humongous chunks are allocated.
1320     preferred_word_size = min_word_size;
1321   }
1322 
1323   bool expanded = expand_by(min_word_size, preferred_word_size);
1324   if (expanded) {
1325     next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1326     assert(next != NULL, "The allocation was expected to succeed after the expansion");
1327   }
1328 
1329   return next;
1330 }
1331 
1332 void VirtualSpaceList::print_on(outputStream* st) const {
1333   if (TraceMetadataChunkAllocation && Verbose) {
1334     VirtualSpaceListIterator iter(virtual_space_list());
1335     while (iter.repeat()) {
1336       VirtualSpaceNode* node = iter.get_next();
1337       node->print_on(st);
1338     }
1339   }
1340 }
1341 
1342 // MetaspaceGC methods
1343 
1344 // VM_CollectForMetadataAllocation is the vm operation used to GC.
1345 // Within the VM operation after the GC the attempt to allocate the metadata
1346 // should succeed.  If the GC did not free enough space for the metaspace
1347 // allocation, the HWM is increased so that another virtualspace will be
1348 // allocated for the metadata.  With the perm gen, increases in its size
1349 // were bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
1350 // metaspace policy uses those as the small and large steps for the HWM.
1351 //
1352 // After the GC the compute_new_size() for MetaspaceGC is called to
1353 // resize the capacity of the metaspaces.  The current implementation
1354 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio
1355 // that some GCs use to resize the Java heap.  New flags can be implemented
1356 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
1357 // free space is desirable in the metaspace capacity to decide how much
1358 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
1359 // free space is desirable in the metaspace capacity before decreasing


1480   // chunk free lists are included in committed_bytes() and the memory in an
1481   // un-fragmented chunk free list is available for future allocations.
1482   // However, if the chunk free lists become fragmented, then the memory may
1483   // not be available for future allocations and the memory is therefore "in use".
1484   // Including the chunk free lists in the definition of "in use" is therefore
1485   // necessary. Not including the chunk free lists can cause capacity_until_GC to
1486   // shrink below committed_bytes() and this has caused serious bugs in the past.
1487   const size_t used_after_gc = MetaspaceAux::committed_bytes();
1488   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1489 
1490   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1491   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1492 
1493   const double min_tmp = used_after_gc / maximum_used_percentage;
1494   size_t minimum_desired_capacity =
1495     (size_t)MIN2(min_tmp, double(max_uintx));
1496   // Don't shrink below the initial metaspace size
1497   minimum_desired_capacity = MAX2(minimum_desired_capacity,
1498                                   MetaspaceSize);
1499 
1500   if (PrintGCDetails && Verbose) {
1501     gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
1502     gclog_or_tty->print_cr("  "
1503                   "  minimum_free_percentage: %6.2f"
1504                   "  maximum_used_percentage: %6.2f",
1505                   minimum_free_percentage,
1506                   maximum_used_percentage);
1507     gclog_or_tty->print_cr("  "
1508                   "   used_after_gc       : %6.1fKB",
1509                   used_after_gc / (double) K);
1510   }
1511 
1512 
1513   size_t shrink_bytes = 0;
1514   if (capacity_until_GC < minimum_desired_capacity) {
1515     // If we have less capacity below the metaspace HWM, then
1516     // increment the HWM.
1517     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1518     expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
1519     // Don't expand unless it's significant
1520     if (expand_bytes >= MinMetaspaceExpansion) {
1521       size_t new_capacity_until_GC = 0;
1522       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1523       assert(succeeded, "Should always successfully increment HWM when at safepoint");
1524 
1525       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1526                                                new_capacity_until_GC,
1527                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
1528       if (PrintGCDetails && Verbose) {
1529         gclog_or_tty->print_cr("    expanding:"
1530                       "  minimum_desired_capacity: %6.1fKB"
1531                       "  expand_bytes: %6.1fKB"
1532                       "  MinMetaspaceExpansion: %6.1fKB"
1533                       "  new metaspace HWM:  %6.1fKB",
1534                       minimum_desired_capacity / (double) K,
1535                       expand_bytes / (double) K,
1536                       MinMetaspaceExpansion / (double) K,
1537                       new_capacity_until_GC / (double) K);
1538       }
1539     }
1540     return;
1541   }
1542 
1543   // No expansion, now see if we want to shrink
1544   // We would never want to shrink more than this
1545   assert(capacity_until_GC >= minimum_desired_capacity,
1546          SIZE_FORMAT " >= " SIZE_FORMAT,
1547          capacity_until_GC, minimum_desired_capacity);
1548   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1549 
1550   // Should shrinking be considered?
1551   if (MaxMetaspaceFreeRatio < 100) {
1552     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1553     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1554     const double max_tmp = used_after_gc / minimum_used_percentage;
1555     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1556     maximum_desired_capacity = MAX2(maximum_desired_capacity,
1557                                     MetaspaceSize);
1558     if (PrintGCDetails && Verbose) {
1559       gclog_or_tty->print_cr("  "
1560                              "  maximum_free_percentage: %6.2f"
1561                              "  minimum_used_percentage: %6.2f",
1562                              maximum_free_percentage,
1563                              minimum_used_percentage);
1564       gclog_or_tty->print_cr("  "
1565                              "  minimum_desired_capacity: %6.1fKB"
1566                              "  maximum_desired_capacity: %6.1fKB",
1567                              minimum_desired_capacity / (double) K,
1568                              maximum_desired_capacity / (double) K);
1569     }
1570 
1571     assert(minimum_desired_capacity <= maximum_desired_capacity,
1572            "sanity check");
1573 
1574     if (capacity_until_GC > maximum_desired_capacity) {
1575       // Capacity too large, compute shrinking size
1576       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1577       // We don't want to shrink all the way back to initSize if people call
1578       // System.gc(), because some programs do that between "phases" and then
1579       // we'd just have to grow the heap up again for the next phase.  So we
1580       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1581       // on the third call, and 100% by the fourth call.  But if we recompute
1582       // size without shrinking, it goes back to 0%.
1583       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1584 
1585       shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1586 
1587       assert(shrink_bytes <= max_shrink_bytes,
1588              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1589              shrink_bytes, max_shrink_bytes);
1590       if (current_shrink_factor == 0) {
1591         _shrink_factor = 10;
1592       } else {
1593         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1594       }
1595       if (PrintGCDetails && Verbose) {
1596         gclog_or_tty->print_cr("  "
1597                       "  shrinking:"
1598                       "  initSize: %.1fK"
1599                       "  maximum_desired_capacity: %.1fK",
1600                       MetaspaceSize / (double) K,
1601                       maximum_desired_capacity / (double) K);
1602         gclog_or_tty->print_cr("  "
1603                       "  shrink_bytes: %.1fK"
1604                       "  current_shrink_factor: %d"
1605                       "  new shrink factor: %d"
1606                       "  MinMetaspaceExpansion: %.1fK",
1607                       shrink_bytes / (double) K,
1608                       current_shrink_factor,
1609                       _shrink_factor,
1610                       MinMetaspaceExpansion / (double) K);
1611       }
1612     }
1613   }
1614 
1615   // Don't shrink unless it's significant
1616   if (shrink_bytes >= MinMetaspaceExpansion &&
1617       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1618     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1619     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1620                                              new_capacity_until_GC,
1621                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
1622   }
1623 }
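A worked example of the sizing math above, with assumed values: MinMetaspaceFreeRatio = 40 gives maximum_used_percentage = 0.60, so used_after_gc = 60 MB yields minimum_desired_capacity = 60 / 0.60 = 100 MB; if capacity_until_GC is only 80 MB, the HWM is raised by 20 MB (aligned up to the commit alignment). Conversely, with MaxMetaspaceFreeRatio = 70 and capacity_until_GC = 400 MB, maximum_desired_capacity = 60 / 0.30 = 200 MB, so up to 200 MB becomes eligible for shrinking, damped by the 0% -> 10% -> 40% -> 100% factor progression produced by MIN2(current_shrink_factor * 4, 100).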
1624 
1625 // Metadebug methods
1626 
1627 void Metadebug::init_allocation_fail_alot_count() {
1628   if (MetadataAllocationFailALot) {
1629     _allocation_fail_alot_count =
1630       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1631   }
1632 }
1633 
1634 #ifdef ASSERT
1635 bool Metadebug::test_metadata_failure() {
1636   if (MetadataAllocationFailALot &&
1637       Threads::is_vm_complete()) {
1638     if (_allocation_fail_alot_count > 0) {
1639       _allocation_fail_alot_count--;
1640     } else {
1641       if (TraceMetadataChunkAllocation && Verbose) {
1642         gclog_or_tty->print_cr("Metadata allocation failing for "
1643                                "MetadataAllocationFailALot");
1644       }
1645       init_allocation_fail_alot_count();
1646       return true;
1647     }
1648   }
1649   return false;
1650 }
1651 #endif
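Since os::random() yields a non-negative value at most max_jint, the expression in init_allocation_fail_alot_count() draws the countdown roughly uniformly from 1 to MetadataAllocationFailALotInterval, so an induced failure occurs about every (MetadataAllocationFailALotInterval + 1) / 2 metadata allocations on average, and only once the VM is fully booted (Threads::is_vm_complete()).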
1652 
1653 // ChunkManager methods
1654 
1655 size_t ChunkManager::free_chunks_total_words() {
1656   return _free_chunks_total;
1657 }
1658 
1659 size_t ChunkManager::free_chunks_total_bytes() {
1660   return free_chunks_total_words() * BytesPerWord;
1661 }
1662 
1663 size_t ChunkManager::free_chunks_count() {
1664 #ifdef ASSERT


1769 
1770 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1771   assert_lock_strong(SpaceManager::expand_lock());
1772 
1773   slow_locked_verify();
1774 
1775   Metachunk* chunk = NULL;
1776   if (list_index(word_size) != HumongousIndex) {
1777     ChunkList* free_list = find_free_chunks_list(word_size);
1778     assert(free_list != NULL, "Sanity check");
1779 
1780     chunk = free_list->head();
1781 
1782     if (chunk == NULL) {
1783       return NULL;
1784     }
1785 
1786     // Remove the chunk as the head of the list.
1787     free_list->remove_chunk(chunk);
1788 
1789     if (TraceMetadataChunkAllocation && Verbose) {
1790       gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1791                              PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1792                              p2i(free_list), p2i(chunk), chunk->word_size());
1793     }
1794   } else {
1795     chunk = humongous_dictionary()->get_chunk(
1796       word_size,
1797       FreeBlockDictionary<Metachunk>::atLeast);
1798 
1799     if (chunk == NULL) {
1800       return NULL;
1801     }
1802 
1803     if (TraceMetadataHumongousAllocation) {
1804       size_t waste = chunk->word_size() - word_size;
1805       gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1806                              SIZE_FORMAT " for requested size " SIZE_FORMAT
1807                              " waste " SIZE_FORMAT,
1808                              chunk->word_size(), word_size, waste);
1809     }
1810   }
1811 
1812   // Chunk is being removed from the chunks free list.
1813   dec_free_chunks_total(chunk->word_size());
1814 
1815   // Remove it from the links to this freelist
1816   chunk->set_next(NULL);
1817   chunk->set_prev(NULL);
1818 #ifdef ASSERT
1819   // Chunk is no longer on any freelist. Setting to false makes container_count_slow()
1820   // work.
1821   chunk->set_is_tagged_free(false);
1822 #endif
1823   chunk->container()->inc_container_count();
1824 
1825   slow_locked_verify();
1826   return chunk;
1827 }
1828 
1829 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1830   assert_lock_strong(SpaceManager::expand_lock());
1831   slow_locked_verify();
1832 
1833   // Take from the beginning of the list
1834   Metachunk* chunk = free_chunks_get(word_size);
1835   if (chunk == NULL) {
1836     return NULL;
1837   }
1838 
1839   assert((word_size <= chunk->word_size()) ||
1840          (list_index(chunk->word_size()) == HumongousIndex),
1841          "Non-humongous variable sized chunk");
1842   if (TraceMetadataChunkAllocation) {

1843     size_t list_count;
1844     if (list_index(word_size) < HumongousIndex) {
1845       ChunkList* list = find_free_chunks_list(word_size);
1846       list_count = list->count();
1847     } else {
1848       list_count = humongous_dictionary()->total_count();
1849     }
1850     gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1851                         PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1852                         p2i(this), p2i(chunk), chunk->word_size(), list_count);
1853     locked_print_free_chunks(gclog_or_tty);

1854   }
1855 
1856   return chunk;
1857 }
1858 
1859 void ChunkManager::print_on(outputStream* out) const {
1860   if (PrintFLSStatistics != 0) {
1861     const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
1862   }
1863 }
1864 
1865 // SpaceManager methods
1866 
1867 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1868                                            size_t* chunk_word_size,
1869                                            size_t* class_chunk_word_size) {
1870   switch (type) {
1871   case Metaspace::BootMetaspaceType:
1872     *chunk_word_size = Metaspace::first_chunk_word_size();
1873     *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1874     break;
1875   case Metaspace::ROMetaspaceType:
1876     *chunk_word_size = SharedReadOnlySize / wordSize;
1877     *class_chunk_word_size = ClassSpecializedChunk;
1878     break;
1879   case Metaspace::ReadWriteMetaspaceType:
1880     *chunk_word_size = SharedReadWriteSize / wordSize;
1881     *class_chunk_word_size = ClassSpecializedChunk;
1882     break;


2022       chunk_word_size = medium_chunk_size();
2023     }
2024   } else {
2025     chunk_word_size = medium_chunk_size();
2026   }
2027 
2028   // Might still need a humongous chunk.  Enforce
2029   // humongous allocation sizes to be aligned up to
2030   // the smallest chunk size.
2031   size_t if_humongous_sized_chunk =
2032     align_size_up(word_size + Metachunk::overhead(),
2033                   smallest_chunk_size());
2034   chunk_word_size =
2035     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2036 
2037   assert(!SpaceManager::is_humongous(word_size) ||
2038          chunk_word_size == if_humongous_sized_chunk,
2039          "Size calculation is wrong, word_size " SIZE_FORMAT
2040          " chunk_word_size " SIZE_FORMAT,
2041          word_size, chunk_word_size);
2042   if (TraceMetadataHumongousAllocation &&
2043       SpaceManager::is_humongous(word_size)) {
2044     gclog_or_tty->print_cr("Metadata humongous allocation:");
2045     gclog_or_tty->print_cr("  word_size " SIZE_FORMAT, word_size);
2046     gclog_or_tty->print_cr("  chunk_word_size " SIZE_FORMAT,
2047                            chunk_word_size);
2048     gclog_or_tty->print_cr("    chunk overhead " SIZE_FORMAT,
2049                            Metachunk::overhead());
2050   }
2051   return chunk_word_size;
2052 }
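For example (illustrative numbers): if smallest_chunk_size() is 128 words and Metachunk::overhead() is 4 words, a humongous request of 1000 words becomes align_size_up(1000 + 4, 128) = 1024 words, so the chunk carries at most one alignment granule of waste plus the header.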
2053 
2054 void SpaceManager::track_metaspace_memory_usage() {
2055   if (is_init_completed()) {
2056     if (is_class()) {
2057       MemoryService::track_compressed_class_memory_usage();
2058     }
2059     MemoryService::track_metaspace_memory_usage();
2060   }
2061 }
2062 
2063 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2064   assert(vs_list()->current_virtual_space() != NULL,
2065          "Should have been set");
2066   assert(current_chunk() == NULL ||
2067          current_chunk()->allocate(word_size) == NULL,
2068          "Don't need to expand");
2069   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2070 
2071   if (TraceMetadataChunkAllocation && Verbose) {
2072     size_t words_left = 0;
2073     size_t words_used = 0;
2074     if (current_chunk() != NULL) {
2075       words_left = current_chunk()->free_word_size();
2076       words_used = current_chunk()->used_word_size();
2077     }
2078     gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
2079                            " words " SIZE_FORMAT " words used " SIZE_FORMAT
2080                            " words left",
2081                             word_size, words_used, words_left);
2082   }
2083 
2084   // Get another chunk
2085   size_t grow_chunks_by_words = calc_chunk_size(word_size);
2086   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2087 
2088   MetaWord* mem = NULL;
2089 
2090   // If a chunk was available, add it to the in-use chunk list
2091   // and do an allocation from it.
2092   if (next != NULL) {
2093     // Add to this manager's list of chunks in use.
2094     add_chunk(next, false);
2095     mem = next->allocate(word_size);
2096   }
2097 
2098   // Track metaspace memory usage statistic.
2099   track_metaspace_memory_usage();
2100 


2152 void SpaceManager::inc_used_metrics(size_t words) {
2153   // Add to the per SpaceManager total
2154   Atomic::add_ptr(words, &_allocated_blocks_words);
2155   // Add to the global total
2156   MetaspaceAux::inc_used(mdtype(), words);
2157 }
2158 
2159 void SpaceManager::dec_total_from_size_metrics() {
2160   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2161   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2162   // Also deduct the overhead per Metachunk
2163   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2164 }
2165 
2166 void SpaceManager::initialize() {
2167   Metadebug::init_allocation_fail_alot_count();
2168   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2169     _chunks_in_use[i] = NULL;
2170   }
2171   _current_chunk = NULL;
2172   if (TraceMetadataChunkAllocation && Verbose) {
2173     gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, p2i(this));
2174   }
2175 }
2176 
2177 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
2178   if (chunks == NULL) {
2179     return;
2180   }
2181   ChunkList* list = free_chunks(index);
2182   assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
2183   assert_lock_strong(SpaceManager::expand_lock());
2184   Metachunk* cur = chunks;
2185 
2186   // This returns chunks one at a time.  If a new
2187   // class List can be created that is a base class
2188   // of FreeList then something like FreeList::prepend()
2189   // can be used in place of this loop
2190   while (cur != NULL) {
2191     assert(cur->container() != NULL, "Container should have been set");
2192     cur->container()->dec_container_count();
2193     // Capture the next link before it is changed
2194     // by the call to return_chunk_at_head();
2195     Metachunk* next = cur->next();
2196     DEBUG_ONLY(cur->set_is_tagged_free(true);)
2197     list->return_chunk_at_head(cur);
2198     cur = next;
2199   }
2200 }
2201 
2202 SpaceManager::~SpaceManager() {
2203   // This call takes this->_lock, which can't be done while holding expand_lock()
2204   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2205          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2206          " allocated_chunks_words() " SIZE_FORMAT,
2207          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2208 
2209   MutexLockerEx fcl(SpaceManager::expand_lock(),
2210                     Mutex::_no_safepoint_check_flag);
2211 
2212   chunk_manager()->slow_locked_verify();
2213 
2214   dec_total_from_size_metrics();
2215 
2216   if (TraceMetadataChunkAllocation && Verbose) {
2217     gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, p2i(this));
2218     locked_print_chunks_in_use_on(gclog_or_tty);


2219   }
2220 
2221   // Do not mangle freed Metachunks.  The chunk size inside Metachunks
2222   // is used during the freeing of VirtualSpaceNodes.
2223 
2224   // Have to update before the chunks_in_use lists are emptied
2225   // below.
2226   chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2227                                          sum_count_in_chunks_in_use());
2228 
2229   // Add all the chunks in use by this space manager
2230   // to the global list of free chunks.
2231 
2232   // Follow each list of chunks-in-use and add them to the
2233   // free lists.  Each list is NULL terminated.
2234 
2235   for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2236     if (TraceMetadataChunkAllocation && Verbose) {
2237       gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s chunks to freelist",
2238                              sum_count_in_chunks_in_use(i),
2239                              chunk_size_name(i));
2240     }
2241     Metachunk* chunks = chunks_in_use(i);
2242     chunk_manager()->return_chunks(i, chunks);
2243     set_chunks_in_use(i, NULL);
2244     if (TraceMetadataChunkAllocation && Verbose) {
2245       gclog_or_tty->print_cr("updated freelist count " SSIZE_FORMAT " %s",
2246                              chunk_manager()->free_chunks(i)->count(),
2247                              chunk_size_name(i));
2248     }
2249     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2250   }
2251 
2252   // The medium chunk case may be optimized by passing the head and
2253   // tail of the medium chunk list to add_at_head().  The tail is often
2254   // the current chunk but there are probably exceptions.
2255 
2256   // Humongous chunks
2257   if (TraceMetadataChunkAllocation && Verbose) {
2258     gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s humongous chunks to dictionary",
2259                             sum_count_in_chunks_in_use(HumongousIndex),
2260                             chunk_size_name(HumongousIndex));
2261     gclog_or_tty->print("Humongous chunk dictionary: ");
2262   }
2263   // Humongous chunks are never the current chunk.
2264   Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2265 
2266   while (humongous_chunks != NULL) {
2267 #ifdef ASSERT
2268     humongous_chunks->set_is_tagged_free(true);
2269 #endif
2270     if (TraceMetadataChunkAllocation && Verbose) {
2271       gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2272                           p2i(humongous_chunks),
2273                           humongous_chunks->word_size());
2274     }
2275     assert(humongous_chunks->word_size() == (size_t)
2276            align_size_up(humongous_chunks->word_size(),
2277                              smallest_chunk_size()),
2278            "Humongous chunk size is wrong: word size " SIZE_FORMAT
2279            " granularity " SIZE_FORMAT,
2280            humongous_chunks->word_size(), smallest_chunk_size());
2281     Metachunk* next_humongous_chunks = humongous_chunks->next();
2282     humongous_chunks->container()->dec_container_count();
2283     chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2284     humongous_chunks = next_humongous_chunks;
2285   }
2286   if (TraceMetadataChunkAllocation && Verbose) {
2287     gclog_or_tty->cr();
2288     gclog_or_tty->print_cr("updated dictionary count " SIZE_FORMAT " %s",
2289                      chunk_manager()->humongous_dictionary()->total_count(),
2290                      chunk_size_name(HumongousIndex));
2291   }
2292   chunk_manager()->slow_locked_verify();
2293 }
2294 
2295 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2296   switch (index) {
2297     case SpecializedIndex:
2298       return "Specialized";
2299     case SmallIndex:
2300       return "Small";
2301     case MediumIndex:
2302       return "Medium";
2303     case HumongousIndex:
2304       return "Humongous";
2305     default:
2306       return NULL;
2307   }
2308 }
2309 
2310 ChunkIndex ChunkManager::list_index(size_t size) {
2311   switch (size) {
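The remainder of list_index() is elided from this hunk. From the fixed chunk sizes and the indices used above, the dispatch is presumably on exact sizes, with everything else humongous; a hypothetical reconstruction:

    // Hypothetical sketch; only exact chunk sizes map to fixed-size freelists.
    case SpecializedChunk: return SpecializedIndex;
    case SmallChunk:
    case ClassSmallChunk:  return SmallIndex;
    case MediumChunk:
    case ClassMediumChunk: return MediumIndex;
    default:               return HumongousIndex;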


2357     // small, so small will be null.  Link this first chunk as the current
2358     // chunk.
2359     if (make_current) {
2360       // Set as the current chunk but otherwise treat as a humongous chunk.
2361       set_current_chunk(new_chunk);
2362     }
2363     // Link at head.  Only the null class loader metaspace (class and data
2364     // virtual space managers) keeps a humongous chunk as its _current_chunk;
2365     // since new humongous chunks are linked at the head, _current_chunk
2366     // will not point to the tail of the humongous chunks list.
2367     new_chunk->set_next(chunks_in_use(HumongousIndex));
2368     set_chunks_in_use(HumongousIndex, new_chunk);
2369 
2370     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2371   }
2372 
2373   // Add to the running sum of capacity
2374   inc_size_metrics(new_chunk->word_size());
2375 
2376   assert(new_chunk->is_empty(), "Not ready for reuse");
2377   if (TraceMetadataChunkAllocation && Verbose) {
2378     gclog_or_tty->print("SpaceManager::add_chunk: " SIZE_FORMAT ") ",
2379                         sum_count_in_chunks_in_use());
2380     new_chunk->print_on(gclog_or_tty);
2381     chunk_manager()->locked_print_free_chunks(gclog_or_tty);


2382   }
2383 }
2384 
2385 void SpaceManager::retire_current_chunk() {
2386   if (current_chunk() != NULL) {
2387     size_t remaining_words = current_chunk()->free_word_size();
2388     if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
2389       block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2390       inc_used_metrics(remaining_words);
2391     }
2392   }
2393 }
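retire_current_chunk() reuses the dark-matter threshold from BlockFreelist::get_block(): a tail smaller than TreeChunk<Metablock, FreeList<Metablock> >::min_size() is simply abandoned, while a larger tail is carved off with allocate() and handed to the block freelist; inc_used_metrics() then keeps the used counters consistent with that internal allocation.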
2394 
2395 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2396                                        size_t grow_chunks_by_words) {
2397   // Get a chunk from the chunk freelist
2398   Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2399 
2400   if (next == NULL) {
2401     next = vs_list()->get_new_chunk(word_size,
2402                                     grow_chunks_by_words,
2403                                     medium_chunk_bunch());
2404   }
2405 
2406   if (TraceMetadataHumongousAllocation && next != NULL &&

2407       SpaceManager::is_humongous(next->word_size())) {
2408     gclog_or_tty->print_cr("  new humongous chunk word size "
2409                            SIZE_FORMAT, next->word_size());
2410   }
2411 
2412   return next;
2413 }
2414 
2415 /*
2416  * The policy is to allocate up to _small_chunk_limit small chunks
2417  * after which only medium chunks are allocated.  This is done to
2418  * reduce fragmentation.  In some cases, this can result in a lot
2419  * of small chunks being allocated to the point where it's not
2420  * possible to expand.  If this happens, there may be no medium chunks
2421  * available and OOME would be thrown.  Instead of doing that,
2422  * if the allocation request size fits in a small chunk, an attempt
2423  * will be made to allocate a small chunk.
2424  */
2425 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
2426   size_t raw_word_size = get_raw_word_size(word_size);
2427 
2428   if (raw_word_size + Metachunk::overhead() > small_chunk_size()) {
2429     return NULL;


2554   uint i = 0;
2555   size_t used = 0;
2556   size_t capacity = 0;
2557 
2558   // Add up statistics for all chunks in this SpaceManager.
2559   for (ChunkIndex index = ZeroIndex;
2560        index < NumberOfInUseLists;
2561        index = next_chunk_index(index)) {
2562     for (Metachunk* curr = chunks_in_use(index);
2563          curr != NULL;
2564          curr = curr->next()) {
2565       out->print("%d) ", i++);
2566       curr->print_on(out);
2567       curr_total += curr->word_size();
2568       used += curr->used_word_size();
2569       capacity += curr->word_size();
2570       waste += curr->free_word_size() + curr->overhead();
2571     }
2572   }
2573 
2574   if (TraceMetadataChunkAllocation && Verbose) {
2575     block_freelists()->print_on(out);
2576   }
2577 
2578   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2579   // Free space isn't wasted.
2580   waste -= free;
2581 
2582   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2583                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2584                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2585 }
2586 
2587 #ifndef PRODUCT
2588 void SpaceManager::mangle_freed_chunks() {
2589   for (ChunkIndex index = ZeroIndex;
2590        index < NumberOfInUseLists;
2591        index = next_chunk_index(index)) {
2592     for (Metachunk* curr = chunks_in_use(index);
2593          curr != NULL;
2594          curr = curr->next()) {


2739 }
2740 
2741 size_t MetaspaceAux::free_chunks_total_bytes() {
2742   return free_chunks_total_words() * BytesPerWord;
2743 }
2744 
2745 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2746   return Metaspace::get_chunk_manager(mdtype) != NULL;
2747 }
2748 
2749 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
2750   if (!has_chunk_free_list(mdtype)) {
2751     return MetaspaceChunkFreeListSummary();
2752   }
2753 
2754   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
2755   return cm->chunk_free_list_summary();
2756 }
2757 
2758 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2759   gclog_or_tty->print(", [Metaspace:");
2760   if (PrintGCDetails && Verbose) {
2761     gclog_or_tty->print(" "  SIZE_FORMAT
2762                         "->" SIZE_FORMAT
2763                         "("  SIZE_FORMAT ")",
2764                         prev_metadata_used,
2765                         used_bytes(),
2766                         reserved_bytes());
2767   } else {
2768     gclog_or_tty->print(" "  SIZE_FORMAT "K"
2769                         "->" SIZE_FORMAT "K"
2770                         "("  SIZE_FORMAT "K)",
2771                         prev_metadata_used/K,
2772                         used_bytes()/K,
2773                         reserved_bytes()/K);
2774   }
2775 
2776   gclog_or_tty->print("]");
2777 }
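With the formats above, the non-Verbose branch emits a transition such as (illustrative values):

    , [Metaspace: 4821K->4821K(1056768K)]

i.e. used-before -> used-after (reserved), while the Verbose branch prints the same triple in raw bytes.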
2778 
2779 // This is printed when PrintGCDetails
2780 void MetaspaceAux::print_on(outputStream* out) {
2781   Metaspace::MetadataType nct = Metaspace::NonClassType;
2782 
2783   out->print_cr(" Metaspace       "
2784                 "used "      SIZE_FORMAT "K, "
2785                 "capacity "  SIZE_FORMAT "K, "
2786                 "committed " SIZE_FORMAT "K, "
2787                 "reserved "  SIZE_FORMAT "K",
2788                 used_bytes()/K,
2789                 capacity_bytes()/K,
2790                 committed_bytes()/K,
2791                 reserved_bytes()/K);
2792 
2793   if (Metaspace::using_class_space()) {
2794     Metaspace::MetadataType ct = Metaspace::ClassType;
2795     out->print_cr("  class space    "
2796                   "used "      SIZE_FORMAT "K, "


3116                                               compressed_class_space_size()));
3117       }
3118     }
3119   }
3120 
3121   // If we got here then the metaspace got allocated.
3122   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3123 
3124 #if INCLUDE_CDS
3125   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3126   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3127     FileMapInfo::stop_sharing_and_unmap(
3128         "Could not allocate metaspace at a compatible address");
3129   }
3130 #endif
3131   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3132                                   UseSharedSpaces ? (address)cds_base : 0);
3133 
3134   initialize_class_space(metaspace_rs);
3135 
3136   if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
3137     print_compressed_class_space(gclog_or_tty, requested_addr);


3138   }
3139 }
3140 
3141 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3142   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3143                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3144   if (_class_space_list != NULL) {
3145     address base = (address)_class_space_list->current_virtual_space()->bottom();
3146     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3147                  compressed_class_space_size(), p2i(base));
3148     if (requested_addr != 0) {
3149       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3150     }
3151     st->cr();
3152   }
3153 }
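Decoding a compressed class pointer is Klass* = narrow_klass_base + ((uintptr_t)narrow_klass << narrow_klass_shift), so a shift of 0 covers 4 GB of class space and a shift of LogKlassAlignmentInBytes (3) covers 32 GB; the base and shift printed here determine which mode the printed compressed class space is using.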
3154 
3155 // For UseCompressedClassPointers the class space is reserved above the top of
3156 // the Java heap.  The argument passed in is at the base of the compressed space.
3157 void Metaspace::initialize_class_space(ReservedSpace rs) {


3269     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3270 
3271     if (!_space_list->initialization_succeeded()) {
3272       vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3273     }
3274 
3275 #ifdef _LP64
3276     if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3277       vm_exit_during_initialization("Unable to dump shared archive.",
3278           err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3279                   SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3280                   "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(),
3281                   cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3282     }
3283 
3284     // Set the compressed klass pointer base so that decoding of these pointers works
3285     // properly when creating the shared archive.
3286     assert(UseCompressedOops && UseCompressedClassPointers,
3287       "UseCompressedOops and UseCompressedClassPointers must be set");
3288     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3289     if (TraceMetavirtualspaceAllocation && Verbose) {
1290       gclog_or_tty->print_cr("Setting narrow_klass_base to address: " PTR_FORMAT,
3291                              p2i(_space_list->current_virtual_space()->bottom()));
3292     }
3293 
3294     Universe::set_narrow_klass_shift(0);
3295 #endif // _LP64
3296 #endif // INCLUDE_CDS
3297   } else {
3298     // If using shared space, open the file that contains the shared space
3299     // and map in the memory before initializing the rest of metaspace (so
3300     // the addresses don't conflict)
3301     address cds_address = NULL;
3302     if (UseSharedSpaces) {
3303 #if INCLUDE_CDS
3304       FileMapInfo* mapinfo = new FileMapInfo();
3305 
3306       // Open the shared archive file, read and validate the header. If
3307       // initialization fails, shared spaces [UseSharedSpaces] are
3308       // disabled and the file is closed.
3309       // Also map in the spaces now.
3310       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3311         cds_total = FileMapInfo::shared_spaces_size();
3312         cds_address = (address)mapinfo->header()->region_addr(0);


3459 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3460   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3461   assert(delta_bytes > 0, "Must be");
3462 
3463   size_t before = 0;
3464   size_t after = 0;
3465   MetaWord* res;
3466   bool incremented;
3467 
3468   // Each thread increments the HWM at most once. Even if the thread fails to increment
3469   // the HWM, an allocation is still attempted. This is because another thread must then
3470   // have incremented the HWM and therefore the allocation might still succeed.
3471   do {
3472     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
3473     res = allocate(word_size, mdtype);
3474   } while (!incremented && res == NULL);
3475 
3476   if (incremented) {
3477     tracer()->report_gc_threshold(before, after,
3478                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3479     if (PrintGCDetails && Verbose) {
3480       gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
3481           " to " SIZE_FORMAT, before, after);
3482     }
3483   }
3484 
3485   return res;
3486 }
3487 
3488 // Space allocated in the Metaspace.  This may
3489 // be across several metadata virtual spaces.
3490 char* Metaspace::bottom() const {
3491   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3492   return (char*)vsm()->current_chunk()->bottom();
3493 }
3494 
3495 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3496   if (mdtype == ClassType) {
3497     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3498   } else {
3499     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3500   }
3501 }
3502 


3625     if (result == NULL) {
3626       report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3627     }
3628   }
3629 
3630   // Zero initialize.
3631   Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3632 
3633   return result;
3634 }
3635 
3636 size_t Metaspace::class_chunk_size(size_t word_size) {
3637   assert(using_class_space(), "Has to use class space");
3638   return class_vsm()->calc_chunk_size(word_size);
3639 }
3640 
3641 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3642   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3643 
3644   // If result is still null, we are out of memory.
3645   if (Verbose && TraceMetadataChunkAllocation) {
3646     gclog_or_tty->print_cr("Metaspace allocation failed for size "
3647         SIZE_FORMAT, word_size);


3648     if (loader_data->metaspace_or_null() != NULL) {
3649       loader_data->dump(gclog_or_tty);
3650     }
3651     MetaspaceAux::dump(gclog_or_tty);
3652   }
3653 
3654   bool out_of_compressed_class_space = false;
3655   if (is_class_space_allocation(mdtype)) {
3656     Metaspace* metaspace = loader_data->metaspace_non_null();
3657     out_of_compressed_class_space =
3658       MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3659       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3660       CompressedClassSpaceSize;
3661   }
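Illustrative numbers for the classification above: with CompressedClassSpaceSize = 1024 MB, 1020 MB of committed class space, and a prospective chunk of 8 MB, 1020 + 8 > 1024, so the failure is reported as "Compressed class space" rather than generic "Metaspace", steering users toward -XX:CompressedClassSpaceSize instead of -XX:MaxMetaspaceSize.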
3662 
3663   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3664   const char* space_string = out_of_compressed_class_space ?
3665     "Compressed class space" : "Metaspace";
3666 
3667   report_java_out_of_memory(space_string);
3668 
3669   if (JvmtiExport::should_post_resource_exhausted()) {
3670     JvmtiExport::post_resource_exhausted(
3671         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,




   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "gc/shared/collectedHeap.hpp"
  26 #include "gc/shared/collectorPolicy.hpp"
  27 #include "gc/shared/gcLocker.hpp"
  28 #include "logging/log.hpp"
  29 #include "memory/allocation.hpp"
  30 #include "memory/binaryTreeDictionary.hpp"
  31 #include "memory/filemap.hpp"
  32 #include "memory/freeList.hpp"
  33 #include "memory/metachunk.hpp"
  34 #include "memory/metaspace.hpp"
  35 #include "memory/metaspaceGCThresholdUpdater.hpp"
  36 #include "memory/metaspaceShared.hpp"
  37 #include "memory/metaspaceTracer.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.hpp"
  40 #include "runtime/atomic.inline.hpp"
  41 #include "runtime/globals.hpp"
  42 #include "runtime/init.hpp"
  43 #include "runtime/java.hpp"
  44 #include "runtime/mutex.hpp"
  45 #include "runtime/orderAccess.inline.hpp"
  46 #include "services/memTracker.hpp"
  47 #include "services/memoryService.hpp"
  48 #include "utilities/copy.hpp"


 795 }
 796 
 797 void VirtualSpaceNode::dec_container_count() {
 798   assert_lock_strong(SpaceManager::expand_lock());
 799   _container_count--;
 800 }
 801 
 802 #ifdef ASSERT
 803 void VirtualSpaceNode::verify_container_count() {
 804   assert(_container_count == container_count_slow(),
 805          "Inconsistency in container_count _container_count " UINTX_FORMAT
 806          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
 807 }
 808 #endif
 809 
 810 // BlockFreelist methods
 811 
 812 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()) {}
 813 
 814 BlockFreelist::~BlockFreelist() {
 815   LogHandle(gc, metaspace, freelist) log;
 816   if (log.is_trace()) {
 817     ResourceMark rm;
 818     dictionary()->print_free_lists(log.trace_stream());
 819   }
 820   delete _dictionary;
 821 }
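The unified-logging idiom used throughout the new version is worth calling out: materialize a LogHandle for the tag set, test the level cheaply, and only then pay for the ResourceMark and the stream. A minimal sketch of the pattern (node stands in for any object with a print_on() method):

    LogHandle(gc, metaspace, freelist) log;
    if (log.is_trace()) {     // cheap enabled-check before any work
      ResourceMark rm;        // print_on() may allocate resource memory
      node->print_on(log.trace_stream());
    }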
 822 
 823 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
 824   Metablock* free_chunk = ::new (p) Metablock(word_size);
 825   dictionary()->return_chunk(free_chunk);
 826 }
 827 
 828 MetaWord* BlockFreelist::get_block(size_t word_size) {
 829   if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
 830     // Dark matter.  Too small for dictionary.
 831     return NULL;
 832   }
 833 
 834   Metablock* free_block =
 835     dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
 836   if (free_block == NULL) {
 837     return NULL;
 838   }


 878 
 879 size_t VirtualSpaceNode::free_words_in_vs() const {
 880   return pointer_delta(end(), top(), sizeof(MetaWord));
 881 }
 882 
 883 // Allocates the chunk from the virtual space only.
 884 // This interface is also used internally for debugging.  Not all
 885 // chunks removed here are necessarily used for allocation.
 886 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
 887   // Bottom of the new chunk
 888   MetaWord* chunk_limit = top();
 889   assert(chunk_limit != NULL, "Not safe to call this method");
 890 
 891   // The virtual spaces are always expanded by the
 892   // commit granularity to enforce the following condition.
 893   // Without this the is_available check will not work correctly.
 894   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
 895       "The committed memory doesn't match the expanded memory.");
 896 
 897   if (!is_available(chunk_word_size)) {
 898     LogHandle(gc, metaspace, freelist) log;
 899     log.debug("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
 900     // Dump some information about the virtual space that is nearly full
 901     print_on(log.debug_stream());

 902     return NULL;
 903   }
 904 
 905   // Take the space  (bump top on the current virtual space).
 906   inc_top(chunk_word_size);
 907 
 908   // Initialize the chunk
 909   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
 910   return result;
 911 }
 912 
 913 
 914 // Expand the virtual space (commit more of the reserved space)
 915 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
 916   size_t min_bytes = min_words * BytesPerWord;
 917   size_t preferred_bytes = preferred_words * BytesPerWord;
 918 
 919   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
 920 
 921   if (uncommitted < min_bytes) {


1216     // ensure lock-free iteration sees fully initialized node
1217     OrderAccess::storestore();
1218     link_vs(new_entry);
1219     return true;
1220   }
1221 }
1222 
1223 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1224   if (virtual_space_list() == NULL) {
1225       set_virtual_space_list(new_entry);
1226   } else {
1227     current_virtual_space()->set_next(new_entry);
1228   }
1229   set_current_virtual_space(new_entry);
1230   inc_reserved_words(new_entry->reserved_words());
1231   inc_committed_words(new_entry->committed_words());
1232   inc_virtual_space_count();
1233 #ifdef ASSERT
1234   new_entry->mangle();
1235 #endif
1236   LogHandle(gc, metaspace) log;
1237   if (log.is_develop()) {
1238     VirtualSpaceNode* vsl = current_virtual_space();
1239     ResourceMark rm;
1240     vsl->print_on(log.develop_stream());
1241   }
1242 }
1243 
1244 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1245                                       size_t min_words,
1246                                       size_t preferred_words) {
1247   size_t before = node->committed_words();
1248 
1249   bool result = node->expand_by(min_words, preferred_words);
1250 
1251   size_t after = node->committed_words();
1252 
1253   // after and before can be the same if the memory was pre-committed.
1254   assert(after >= before, "Inconsistency");
1255   inc_committed_words(after - before);
1256 
1257   return result;
1258 }
1259 
1260 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {


1317   // The expand amount is currently only determined by the requested sizes
1318   // and not how much committed memory is left in the current virtual space.
1319 
1320   size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
1321   size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
1322   if (min_word_size >= preferred_word_size) {
1323     // Can happen when humongous chunks are allocated.
1324     preferred_word_size = min_word_size;
1325   }
1326 
1327   bool expanded = expand_by(min_word_size, preferred_word_size);
1328   if (expanded) {
1329     next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1330     assert(next != NULL, "The allocation was expected to succeed after the expansion");
1331   }
1332 
1333   return next;
1334 }
1335 
1336 void VirtualSpaceList::print_on(outputStream* st) const {

1337   VirtualSpaceListIterator iter(virtual_space_list());
1338   while (iter.repeat()) {
1339     VirtualSpaceNode* node = iter.get_next();
1340     node->print_on(st);
1341   }

1342 }
1343 
1344 // MetaspaceGC methods
1345 
1346 // VM_CollectForMetadataAllocation is the vm operation used to GC.
1347 // Within the VM operation after the GC the attempt to allocate the metadata
1348 // should succeed.  If the GC did not free enough space for the metaspace
1349 // allocation, the HWM is increased so that another virtualspace will be
1350 // allocated for the metadata.  With the perm gen, increases in its size
1351 // were bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
1352 // metaspace policy uses those as the small and large steps for the HWM.
1353 //
1354 // After the GC the compute_new_size() for MetaspaceGC is called to
1355 // resize the capacity of the metaspaces.  The current implementation
1356 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio
1357 // that some GCs use to resize the Java heap.  New flags can be implemented
1358 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
1359 // free space is desirable in the metaspace capacity to decide how much
1360 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
1361 // free space is desirable in the metaspace capacity before decreasing


1482   // chunk free lists are included in committed_bytes() and the memory in an
1483   // un-fragmented chunk free list is available for future allocations.
1484   // However, if the chunk free lists become fragmented, then the memory may
1485   // not be available for future allocations and the memory is therefore "in use".
1486   // Including the chunk free lists in the definition of "in use" is therefore
1487   // necessary. Not including the chunk free lists can cause capacity_until_GC to
1488   // shrink below committed_bytes() and this has caused serious bugs in the past.
1489   const size_t used_after_gc = MetaspaceAux::committed_bytes();
1490   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1491 
1492   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1493   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1494 
1495   const double min_tmp = used_after_gc / maximum_used_percentage;
1496   size_t minimum_desired_capacity =
1497     (size_t)MIN2(min_tmp, double(max_uintx));
1498   // Don't let the minimum desired capacity fall below the initial metaspace size
1499   minimum_desired_capacity = MAX2(minimum_desired_capacity,
1500                                   MetaspaceSize);
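       // Worked example (illustrative values): with MinMetaspaceFreeRatio = 40,
       // maximum_used_percentage = 0.60; if used_after_gc is 60M, then
       // minimum_desired_capacity = 60M / 0.60 = 100M, i.e. the smallest
       // capacity at which 40% would still be free.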
1501 
1502   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
1503   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
1504                 minimum_free_percentage, maximum_used_percentage);
1505   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);







1506 
1507 
1508   size_t shrink_bytes = 0;
1509   if (capacity_until_GC < minimum_desired_capacity) {
1510     // If the capacity below the metaspace HWM is less than the
1511     // minimum desired capacity, increase the HWM.
1512     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1513     expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
1514     // Don't expand unless it's significant
1515     if (expand_bytes >= MinMetaspaceExpansion) {
1516       size_t new_capacity_until_GC = 0;
1517       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1518       assert(succeeded, "Should always successfully increment HWM when at safepoint");
1519 
1520       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1521                                                new_capacity_until_GC,
1522                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
1523       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",





1524                                minimum_desired_capacity / (double) K,
1525                                expand_bytes / (double) K,
1526                                MinMetaspaceExpansion / (double) K,
1527                                new_capacity_until_GC / (double) K);
1528     }

1529     return;
1530   }
1531 
1532   // No expansion; now see if we want to shrink
1533   // We would never want to shrink more than this
1534   assert(capacity_until_GC >= minimum_desired_capacity,
1535          SIZE_FORMAT " >= " SIZE_FORMAT,
1536          capacity_until_GC, minimum_desired_capacity);
1537   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
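       // E.g. (illustrative): with capacity_until_GC = 160M and
       // minimum_desired_capacity = 100M, at most 60M can ever be shrunk away.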
1538 
1539   // Should shrinking be considered?
1540   if (MaxMetaspaceFreeRatio < 100) {
1541     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1542     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1543     const double max_tmp = used_after_gc / minimum_used_percentage;
1544     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1545     maximum_desired_capacity = MAX2(maximum_desired_capacity,
1546                                     MetaspaceSize);
1547     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
1548                              maximum_free_percentage, minimum_used_percentage);
1549     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
1550                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);








1551 
1552     assert(minimum_desired_capacity <= maximum_desired_capacity,
1553            "sanity check");
1554 
1555     if (capacity_until_GC > maximum_desired_capacity) {
1556       // Capacity too large, compute shrinking size
1557       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1558       // We don't want to shrink all the way back to initSize if people call
1559       // System.gc(), because some programs do that between "phases" and then
1560       // we'd just have to grow the heap up again for the next phase.  So we
1561       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1562       // on the third call, and 100% by the fourth call.  But if we recompute
1563       // size without shrinking, it goes back to 0%.
1564       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1565 
1566       shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1567 
1568       assert(shrink_bytes <= max_shrink_bytes,
1569              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1570              shrink_bytes, max_shrink_bytes);
1571       if (current_shrink_factor == 0) {
1572         _shrink_factor = 10;
1573       } else {
1574         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1575       }
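           // Per the damping described above, successive shrink decisions see
           // factors 0, 10, 40, 100 (10 * 4 = 40; 40 * 4 = 160, capped at 100),
           // so an excess of, say, 50M is shrunk by 0M, 5M, 20M and then 50M.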
1576       log_trace(gc, metaspace)("    shrinking:  initSize: %.1fK  maximum_desired_capacity: %.1fK",
1577                     MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
1578       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
1579                     shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);













1580     }
1581   }
1582 
1583   // Don't shrink unless it's significant
1584   if (shrink_bytes >= MinMetaspaceExpansion &&
1585       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1586     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1587     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1588                                              new_capacity_until_GC,
1589                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
1590   }
1591 }
1592 
1593 // Metadebug methods
1594 
1595 void Metadebug::init_allocation_fail_alot_count() {
1596   if (MetadataAllocationFailALot) {
1597     _allocation_fail_alot_count =
1598       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
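         // E.g., assuming MetadataAllocationFailALotInterval = 1000
         // (illustrative), os::random() / (max_jint + 1.0) is uniform in
         // [0, 1), so the computed count is a random value in [1, 1000].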
1599   }
1600 }
1601 
1602 #ifdef ASSERT
1603 bool Metadebug::test_metadata_failure() {
1604   if (MetadataAllocationFailALot &&
1605       Threads::is_vm_complete()) {
1606     if (_allocation_fail_alot_count > 0) {
1607       _allocation_fail_alot_count--;
1608     } else {
1609       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");



1610       init_allocation_fail_alot_count();
1611       return true;
1612     }
1613   }
1614   return false;
1615 }
1616 #endif
1617 
1618 // ChunkManager methods
1619 
1620 size_t ChunkManager::free_chunks_total_words() {
1621   return _free_chunks_total;
1622 }
1623 
1624 size_t ChunkManager::free_chunks_total_bytes() {
1625   return free_chunks_total_words() * BytesPerWord;
1626 }
1627 
1628 size_t ChunkManager::free_chunks_count() {
1629 #ifdef ASSERT


1734 
1735 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1736   assert_lock_strong(SpaceManager::expand_lock());
1737 
1738   slow_locked_verify();
1739 
1740   Metachunk* chunk = NULL;
1741   if (list_index(word_size) != HumongousIndex) {
1742     ChunkList* free_list = find_free_chunks_list(word_size);
1743     assert(free_list != NULL, "Sanity check");
1744 
1745     chunk = free_list->head();
1746 
1747     if (chunk == NULL) {
1748       return NULL;
1749     }
1750 
1751     // Remove the chunk as the head of the list.
1752     free_list->remove_chunk(chunk);
1753 
1754     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,


1755                                        p2i(free_list), p2i(chunk), chunk->word_size());

1756   } else {
1757     chunk = humongous_dictionary()->get_chunk(
1758       word_size,
1759       FreeBlockDictionary<Metachunk>::atLeast);
1760 
1761     if (chunk == NULL) {
1762       return NULL;
1763     }
1764 
1765     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
1766                                     chunk->word_size(), word_size, chunk->word_size() - word_size);





1767   }
1768 
1769   // Chunk is being removed from the chunks free list.
1770   dec_free_chunks_total(chunk->word_size());
1771 
1772   // Remove it from the links to this freelist
1773   chunk->set_next(NULL);
1774   chunk->set_prev(NULL);
1775 #ifdef ASSERT
1776   // Chunk is no longer on any freelist. Setting it to false makes
1777   // container_count_slow() work.
1778   chunk->set_is_tagged_free(false);
1779 #endif
1780   chunk->container()->inc_container_count();
1781 
1782   slow_locked_verify();
1783   return chunk;
1784 }
1785 
1786 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1787   assert_lock_strong(SpaceManager::expand_lock());
1788   slow_locked_verify();
1789 
1790   // Take from the beginning of the list
1791   Metachunk* chunk = free_chunks_get(word_size);
1792   if (chunk == NULL) {
1793     return NULL;
1794   }
1795 
1796   assert((word_size <= chunk->word_size()) ||
1797          (list_index(chunk->word_size()) == HumongousIndex),
1798          "Non-humongous variable sized chunk");
1799   LogHandle(gc, metaspace, freelist) log;
1800   if (log.is_debug()) {
1801     size_t list_count;
1802     if (list_index(word_size) < HumongousIndex) {
1803       ChunkList* list = find_free_chunks_list(word_size);
1804       list_count = list->count();
1805     } else {
1806       list_count = humongous_dictionary()->total_count();
1807     }
1808     log.debug("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",

1809                p2i(this), p2i(chunk), chunk->word_size(), list_count);
1810     ResourceMark rm;
1811     locked_print_free_chunks(log.debug_stream());
1812   }
1813 
1814   return chunk;
1815 }
1816 
1817 void ChunkManager::print_on(outputStream* out) const {
1818   const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics(out);


1819 }
1820 
1821 // SpaceManager methods
1822 
1823 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1824                                            size_t* chunk_word_size,
1825                                            size_t* class_chunk_word_size) {
1826   switch (type) {
1827   case Metaspace::BootMetaspaceType:
1828     *chunk_word_size = Metaspace::first_chunk_word_size();
1829     *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1830     break;
1831   case Metaspace::ROMetaspaceType:
1832     *chunk_word_size = SharedReadOnlySize / wordSize;
1833     *class_chunk_word_size = ClassSpecializedChunk;
1834     break;
1835   case Metaspace::ReadWriteMetaspaceType:
1836     *chunk_word_size = SharedReadWriteSize / wordSize;
1837     *class_chunk_word_size = ClassSpecializedChunk;
1838     break;


1978       chunk_word_size = medium_chunk_size();
1979     }
1980   } else {
1981     chunk_word_size = medium_chunk_size();
1982   }
1983 
1984   // Might still need a humongous chunk.  Enforce
1985   // humongous allocation sizes to be aligned up to
1986   // the smallest chunk size.
1987   size_t if_humongous_sized_chunk =
1988     align_size_up(word_size + Metachunk::overhead(),
1989                   smallest_chunk_size());
1990   chunk_word_size =
1991     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
1992 
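       // Worked example (sizes are illustrative assumptions): if
       // smallest_chunk_size() were 128 words and Metachunk::overhead() were
       // 4 words, a humongous request of 1000 words would become
       // align_size_up(1004, 128) = 1024 words.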
1993   assert(!SpaceManager::is_humongous(word_size) ||
1994          chunk_word_size == if_humongous_sized_chunk,
1995          "Size calculation is wrong, word_size " SIZE_FORMAT
1996          " chunk_word_size " SIZE_FORMAT,
1997          word_size, chunk_word_size);
1998   LogHandle(gc, metaspace, alloc) log;
1999   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
2000     log.debug("Metadata humongous allocation:");
2001     log.debug("  word_size " SIZE_FORMAT, word_size);
2002     log.debug("  chunk_word_size " SIZE_FORMAT, chunk_word_size);
2003     log.debug("    chunk overhead " SIZE_FORMAT, Metachunk::overhead());


2004   }
2005   return chunk_word_size;
2006 }
2007 
2008 void SpaceManager::track_metaspace_memory_usage() {
2009   if (is_init_completed()) {
2010     if (is_class()) {
2011       MemoryService::track_compressed_class_memory_usage();
2012     }
2013     MemoryService::track_metaspace_memory_usage();
2014   }
2015 }
2016 
2017 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2018   assert(vs_list()->current_virtual_space() != NULL,
2019          "Should have been set");
2020   assert(current_chunk() == NULL ||
2021          current_chunk()->allocate(word_size) == NULL,
2022          "Don't need to expand");
2023   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2024 
2025   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2026     size_t words_left = 0;
2027     size_t words_used = 0;
2028     if (current_chunk() != NULL) {
2029       words_left = current_chunk()->free_word_size();
2030       words_used = current_chunk()->used_word_size();
2031     }
2032     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",


2033                                        word_size, words_used, words_left);
2034   }
2035 
2036   // Get another chunk
2037   size_t grow_chunks_by_words = calc_chunk_size(word_size);
2038   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2039 
2040   MetaWord* mem = NULL;
2041 
2042   // If a chunk was available, add it to the in-use chunk list
2043   // and do an allocation from it.
2044   if (next != NULL) {
2045     // Add to this manager's list of chunks in use.
2046     add_chunk(next, false);
2047     mem = next->allocate(word_size);
2048   }
2049 
2050   // Track metaspace memory usage statistic.
2051   track_metaspace_memory_usage();
2052 


2104 void SpaceManager::inc_used_metrics(size_t words) {
2105   // Add to the per SpaceManager total
2106   Atomic::add_ptr(words, &_allocated_blocks_words);
2107   // Add to the global total
2108   MetaspaceAux::inc_used(mdtype(), words);
2109 }
2110 
2111 void SpaceManager::dec_total_from_size_metrics() {
2112   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2113   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2114   // Also deduct the overhead per Metachunk
2115   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2116 }
2117 
2118 void SpaceManager::initialize() {
2119   Metadebug::init_allocation_fail_alot_count();
2120   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2121     _chunks_in_use[i] = NULL;
2122   }
2123   _current_chunk = NULL;
2124   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));


2125 }
2126 
2127 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
2128   if (chunks == NULL) {
2129     return;
2130   }
2131   ChunkList* list = free_chunks(index);
2132   assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
2133   assert_lock_strong(SpaceManager::expand_lock());
2134   Metachunk* cur = chunks;
2135 
2136   // This returns chunks one at a time.  If a new
2137   // class List, a base class of FreeList, were created,
2138   // then something like FreeList::prepend() could be
2139   // used in place of this loop.
2140   while (cur != NULL) {
2141     assert(cur->container() != NULL, "Container should have been set");
2142     cur->container()->dec_container_count();
2143     // Capture the next link before it is changed
2144     // by the call to return_chunk_at_head();


2146     DEBUG_ONLY(cur->set_is_tagged_free(true);)
2147     list->return_chunk_at_head(cur);
2148     cur = next;
2149   }
2150 }
2151 
2152 SpaceManager::~SpaceManager() {
2153   // This call takes this->_lock, which can't be done while holding the expand_lock()
2154   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2155          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2156          " allocated_chunks_words() " SIZE_FORMAT,
2157          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2158 
2159   MutexLockerEx fcl(SpaceManager::expand_lock(),
2160                     Mutex::_no_safepoint_check_flag);
2161 
2162   chunk_manager()->slow_locked_verify();
2163 
2164   dec_total_from_size_metrics();
2165 
2166   LogHandle(gc, metaspace, freelist) log;
2167   if (log.is_trace()) {
2168     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
2169     ResourceMark rm;
2170     locked_print_chunks_in_use_on(log.trace_stream());
2171   }
2172 
2173   // Do not mangle freed Metachunks.  The chunk size inside Metachunks
2174   // is used during the freeing of VirtualSpaceNodes.
2175 
2176   // Have to update before the chunks_in_use lists are emptied
2177   // below.
2178   chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2179                                          sum_count_in_chunks_in_use());
2180 
2181   // Add all the chunks in use by this space manager
2182   // to the global list of free chunks.
2183 
2184   // Follow each list of chunks-in-use and add them to the
2185   // free lists.  Each list is NULL terminated.
2186 
2187   for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2188     log.trace("returned " SIZE_FORMAT " %s chunks to freelist", sum_count_in_chunks_in_use(i), chunk_size_name(i));




2189     Metachunk* chunks = chunks_in_use(i);
2190     chunk_manager()->return_chunks(i, chunks);
2191     set_chunks_in_use(i, NULL);
2192     log.trace("updated freelist count " SSIZE_FORMAT " %s", chunk_manager()->free_chunks(i)->count(), chunk_size_name(i));




2193     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2194   }
2195 
2196   // The medium chunk case may be optimized by passing the head and
2197   // tail of the medium chunk list to add_at_head().  The tail is often
2198   // the current chunk but there are probably exceptions.
2199 
2200   // Humongous chunks
2201   log.trace("returned " SIZE_FORMAT " %s humongous chunks to dictionary",
2202             sum_count_in_chunks_in_use(HumongousIndex), chunk_size_name(HumongousIndex));
2203   log.trace("Humongous chunk dictionary: ");



2204   // Humongous chunks are never the current chunk.
2205   Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2206 
2207   while (humongous_chunks != NULL) {
2208 #ifdef ASSERT
2209     humongous_chunks->set_is_tagged_free(true);
2210 #endif
2211     log.trace(PTR_FORMAT " (" SIZE_FORMAT ") ", p2i(humongous_chunks), humongous_chunks->word_size());




2212     assert(humongous_chunks->word_size() == (size_t)
2213            align_size_up(humongous_chunks->word_size(),
2214                              smallest_chunk_size()),
2215            "Humongous chunk size is wrong: word size " SIZE_FORMAT
2216            " granularity " SIZE_FORMAT,
2217            humongous_chunks->word_size(), smallest_chunk_size());
2218     Metachunk* next_humongous_chunks = humongous_chunks->next();
2219     humongous_chunks->container()->dec_container_count();
2220     chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2221     humongous_chunks = next_humongous_chunks;
2222   }
2223   log.trace("updated dictionary count " SIZE_FORMAT " %s", chunk_manager()->humongous_dictionary()->total_count(), chunk_size_name(HumongousIndex));





2224   chunk_manager()->slow_locked_verify();
2225 }
2226 
2227 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2228   switch (index) {
2229     case SpecializedIndex:
2230       return "Specialized";
2231     case SmallIndex:
2232       return "Small";
2233     case MediumIndex:
2234       return "Medium";
2235     case HumongousIndex:
2236       return "Humongous";
2237     default:
2238       return NULL;
2239   }
2240 }
2241 
2242 ChunkIndex ChunkManager::list_index(size_t size) {
2243   switch (size) {


2289     // small, so small will be null.  Link this first chunk as the current
2290     // chunk.
2291     if (make_current) {
2292       // Set as the current chunk but otherwise treat as a humongous chunk.
2293       set_current_chunk(new_chunk);
2294     }
2295     // Link at head.  The _current_chunk only points to a humongous chunk
2296     // for the null class loader metaspace (class and data virtual space
2297     // managers), so it will not point to the tail
2298     // of the humongous chunks list.
2299     new_chunk->set_next(chunks_in_use(HumongousIndex));
2300     set_chunks_in_use(HumongousIndex, new_chunk);
2301 
2302     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2303   }
2304 
2305   // Add to the running sum of capacity
2306   inc_size_metrics(new_chunk->word_size());
2307 
2308   assert(new_chunk->is_empty(), "Not ready for reuse");
2309   LogHandle(gc, metaspace, freelist) log;
2310   if (log.is_trace()) {
2311     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT, sum_count_in_chunks_in_use());
2312     ResourceMark rm;
2313     outputStream* out = log.trace_stream();
2314     new_chunk->print_on(out);
2315     chunk_manager()->locked_print_free_chunks(out);
2316   }
2317 }
2318 
2319 void SpaceManager::retire_current_chunk() {
2320   if (current_chunk() != NULL) {
2321     size_t remaining_words = current_chunk()->free_word_size();
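         // Remnants smaller than the tree dictionary's minimum chunk size
         // cannot be tracked by the block freelist and are simply left unused.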
2322     if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
2323       block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2324       inc_used_metrics(remaining_words);
2325     }
2326   }
2327 }
2328 
2329 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2330                                        size_t grow_chunks_by_words) {
2331   // Get a chunk from the chunk freelist
2332   Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2333 
2334   if (next == NULL) {
2335     next = vs_list()->get_new_chunk(word_size,
2336                                     grow_chunks_by_words,
2337                                     medium_chunk_bunch());
2338   }
2339 
2340   LogHandle(gc, metaspace, alloc) log;
2341   if (log.is_debug() && next != NULL &&
2342       SpaceManager::is_humongous(next->word_size())) {
2343     log.debug("  new humongous chunk word size " SIZE_FORMAT, next->word_size());

2344   }
2345 
2346   return next;
2347 }
2348 
2349 /*
2350  * The policy is to allocate up to _small_chunk_limit small chunks
2351  * after which only medium chunks are allocated.  This is done to
2352  * reduce fragmentation.  In some cases, this can result in a lot
2353  * of small chunks being allocated to the point where it's not
2354  * possible to expand.  If this happens, there may be no medium chunks
2355  * available and OOME would be thrown.  Instead of doing that,
2356  * if the allocation request size fits in a small chunk, an attempt
2357  * will be made to allocate a small chunk.
2358  */
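     // For instance (illustrative numbers): with a small chunk size of 512
     // words and a _small_chunk_limit of 4, a SpaceManager that has already
     // taken 4 small chunks would normally grow with medium chunks; this
     // fallback still tries one more small chunk when the request itself
     // fits in one.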
2359 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
2360   size_t raw_word_size = get_raw_word_size(word_size);
2361 
2362   if (raw_word_size + Metachunk::overhead() > small_chunk_size()) {
2363     return NULL;


2488   uint i = 0;
2489   size_t used = 0;
2490   size_t capacity = 0;
2491 
2492   // Add up statistics for all chunks in this SpaceManager.
2493   for (ChunkIndex index = ZeroIndex;
2494        index < NumberOfInUseLists;
2495        index = next_chunk_index(index)) {
2496     for (Metachunk* curr = chunks_in_use(index);
2497          curr != NULL;
2498          curr = curr->next()) {
2499       out->print("%d) ", i++);
2500       curr->print_on(out);
2501       curr_total += curr->word_size();
2502       used += curr->used_word_size();
2503       capacity += curr->word_size();
2504       waste += curr->free_word_size() + curr->overhead();
2505     }
2506   }
2507 
2508   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2509     block_freelists()->print_on(out);
2510   }
2511 
2512   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2513   // Free space isn't wasted.
2514   waste -= free;
2515 
2516   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2517                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2518                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2519 }
2520 
2521 #ifndef PRODUCT
2522 void SpaceManager::mangle_freed_chunks() {
2523   for (ChunkIndex index = ZeroIndex;
2524        index < NumberOfInUseLists;
2525        index = next_chunk_index(index)) {
2526     for (Metachunk* curr = chunks_in_use(index);
2527          curr != NULL;
2528          curr = curr->next()) {


2673 }
2674 
2675 size_t MetaspaceAux::free_chunks_total_bytes() {
2676   return free_chunks_total_words() * BytesPerWord;
2677 }
2678 
2679 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2680   return Metaspace::get_chunk_manager(mdtype) != NULL;
2681 }
2682 
2683 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
2684   if (!has_chunk_free_list(mdtype)) {
2685     return MetaspaceChunkFreeListSummary();
2686   }
2687 
2688   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
2689   return cm->chunk_free_list_summary();
2690 }
2691 
2692 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2693   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
2694       prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
















2695 }
2696 
2697 // This is printed when PrintGCDetails is enabled
2698 void MetaspaceAux::print_on(outputStream* out) {
2699   Metaspace::MetadataType nct = Metaspace::NonClassType;
2700 
2701   out->print_cr(" Metaspace       "
2702                 "used "      SIZE_FORMAT "K, "
2703                 "capacity "  SIZE_FORMAT "K, "
2704                 "committed " SIZE_FORMAT "K, "
2705                 "reserved "  SIZE_FORMAT "K",
2706                 used_bytes()/K,
2707                 capacity_bytes()/K,
2708                 committed_bytes()/K,
2709                 reserved_bytes()/K);
2710 
2711   if (Metaspace::using_class_space()) {
2712     Metaspace::MetadataType ct = Metaspace::ClassType;
2713     out->print_cr("  class space    "
2714                   "used "      SIZE_FORMAT "K, "


3034                                               compressed_class_space_size()));
3035       }
3036     }
3037   }
3038 
3039   // If we got here then the metaspace was allocated.
3040   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3041 
3042 #if INCLUDE_CDS
3043   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3044   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3045     FileMapInfo::stop_sharing_and_unmap(
3046         "Could not allocate metaspace at a compatible address");
3047   }
3048 #endif
3049   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3050                                   UseSharedSpaces ? (address)cds_base : 0);
3051 
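       // With the base and shift established above, a narrow (compressed)
       // class pointer conceptually decodes as:
       //   Klass* k = (Klass*)(narrow_klass_base + ((uintptr_t)nk << narrow_klass_shift));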
3052   initialize_class_space(metaspace_rs);
3053 
3054   LogHandle(gc, metaspace, init) log;
3055   if (log.is_develop()) {
3056     ResourceMark rm;
3057     print_compressed_class_space(log.develop_stream(), requested_addr);
3058   }
3059 }
3060 
3061 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3062   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3063                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3064   if (_class_space_list != NULL) {
3065     address base = (address)_class_space_list->current_virtual_space()->bottom();
3066     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3067                  compressed_class_space_size(), p2i(base));
3068     if (requested_addr != 0) {
3069       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3070     }
3071     st->cr();
3072   }
3073 }
3074 
3075 // For UseCompressedClassPointers the class space is reserved above the top of
3076 // the Java heap.  The argument passed in is at the base of the compressed space.
3077 void Metaspace::initialize_class_space(ReservedSpace rs) {


3189     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3190 
3191     if (!_space_list->initialization_succeeded()) {
3192       vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3193     }
3194 
3195 #ifdef _LP64
3196     if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3197       vm_exit_during_initialization("Unable to dump shared archive.",
3198           err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3199                   SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3200                   "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(),
3201                   cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3202     }
3203 
3204     // Set the compressed klass pointer base so that decoding of these pointers works
3205     // properly when creating the shared archive.
3206     assert(UseCompressedOops && UseCompressedClassPointers,
3207       "UseCompressedOops and UseCompressedClassPointers must be set");
3208     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3209     log_develop(gc, metaspace)("Setting narrow_klass_base to Address: " PTR_FORMAT,

3210                                p2i(_space_list->current_virtual_space()->bottom()));

3211 
3212     Universe::set_narrow_klass_shift(0);
3213 #endif // _LP64
3214 #endif // INCLUDE_CDS
3215   } else {
3216     // If using shared space, open the file that contains the shared space
3217     // and map in the memory before initializing the rest of metaspace (so
3218     // the addresses don't conflict)
3219     address cds_address = NULL;
3220     if (UseSharedSpaces) {
3221 #if INCLUDE_CDS
3222       FileMapInfo* mapinfo = new FileMapInfo();
3223 
3224       // Open the shared archive file, read and validate the header. If
3225       // initialization fails, shared spaces [UseSharedSpaces] are
3226       // disabled and the file is closed.
3227       // Also map in the spaces now.
3228       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3229         cds_total = FileMapInfo::shared_spaces_size();
3230         cds_address = (address)mapinfo->header()->region_addr(0);


3377 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3378   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3379   assert(delta_bytes > 0, "Must be");
3380 
3381   size_t before = 0;
3382   size_t after = 0;
3383   MetaWord* res;
3384   bool incremented;
3385 
3386   // Each thread increments the HWM at most once. Even if the thread fails to increment
3387   // the HWM, an allocation is still attempted. This is because another thread must then
3388   // have incremented the HWM and therefore the allocation might still succeed.
3389   do {
3390     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
3391     res = allocate(word_size, mdtype);
3392   } while (!incremented && res == NULL);
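       // Example of an interleaving the loop tolerates: thread A fails to
       // increment the HWM because thread B won the race, but B's increment
       // may have made room, so A's allocate() can still succeed; A only
       // loops again when both the increment and the allocation failed.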
3393 
3394   if (incremented) {
3395     tracer()->report_gc_threshold(before, after,
3396                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3397     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);



3398   }
3399 
3400   return res;
3401 }
3402 
3403 // Space allocated in the Metaspace.  This may
3404 // be across several metadata virtual spaces.
3405 char* Metaspace::bottom() const {
3406   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3407   return (char*)vsm()->current_chunk()->bottom();
3408 }
3409 
3410 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3411   if (mdtype == ClassType) {
3412     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3413   } else {
3414     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3415   }
3416 }
3417 


3540     if (result == NULL) {
3541       report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3542     }
3543   }
3544 
3545   // Zero initialize.
3546   Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3547 
3548   return result;
3549 }
3550 
3551 size_t Metaspace::class_chunk_size(size_t word_size) {
3552   assert(using_class_space(), "Has to use class space");
3553   return class_vsm()->calc_chunk_size(word_size);
3554 }
3555 
3556 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3557   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3558 
3559   // If result is still null, we are out of memory.
3560   LogHandle(gc, metaspace, freelist) log;
3561   if (log.is_trace()) {
3562     log.trace("Metaspace allocation failed for size " SIZE_FORMAT, word_size);
3563     ResourceMark rm;
3564     outputStream* out = log.trace_stream();
3565     if (loader_data->metaspace_or_null() != NULL) {
3566       loader_data->dump(out);
3567     }
3568     MetaspaceAux::dump(out);
3569   }
3570 
3571   bool out_of_compressed_class_space = false;
3572   if (is_class_space_allocation(mdtype)) {
3573     Metaspace* metaspace = loader_data->metaspace_non_null();
3574     out_of_compressed_class_space =
3575       MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3576       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3577       CompressedClassSpaceSize;
3578   }
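       // I.e., the failure is classified as a compressed class space OOM when
       // the committed class space plus the chunk needed for this request
       // would exceed CompressedClassSpaceSize.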
3579 
3580   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3581   const char* space_string = out_of_compressed_class_space ?
3582     "Compressed class space" : "Metaspace";
3583 
3584   report_java_out_of_memory(space_string);
3585 
3586   if (JvmtiExport::should_post_resource_exhausted()) {
3587     JvmtiExport::post_resource_exhausted(
3588         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,


< prev index next >