src/share/vm/memory/metaspace.cpp

   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "gc/shared/collectedHeap.hpp"
  26 #include "gc/shared/collectorPolicy.hpp"
  27 #include "gc/shared/gcLocker.hpp"

  28 #include "memory/allocation.hpp"
  29 #include "memory/binaryTreeDictionary.hpp"
  30 #include "memory/filemap.hpp"
  31 #include "memory/freeList.hpp"
  32 #include "memory/metachunk.hpp"
  33 #include "memory/metaspace.hpp"
  34 #include "memory/metaspaceGCThresholdUpdater.hpp"
  35 #include "memory/metaspaceShared.hpp"
  36 #include "memory/metaspaceTracer.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "memory/universe.hpp"
  39 #include "runtime/atomic.inline.hpp"
  40 #include "runtime/globals.hpp"
  41 #include "runtime/init.hpp"
  42 #include "runtime/java.hpp"
  43 #include "runtime/mutex.hpp"
  44 #include "runtime/orderAccess.inline.hpp"
  45 #include "services/memTracker.hpp"
  46 #include "services/memoryService.hpp"
  47 #include "utilities/copy.hpp"


 794 }
 795 
 796 void VirtualSpaceNode::dec_container_count() {
 797   assert_lock_strong(SpaceManager::expand_lock());
 798   _container_count--;
 799 }
 800 
 801 #ifdef ASSERT
 802 void VirtualSpaceNode::verify_container_count() {
 803   assert(_container_count == container_count_slow(),
 804          "Inconsistency in container_count _container_count " UINTX_FORMAT
 805          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
 806 }
 807 #endif
 808 
 809 // BlockFreelist methods
 810 
 811 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()) {}
 812 
 813 BlockFreelist::~BlockFreelist() {
 814   if (Verbose && TraceMetadataChunkAllocation) {
 815     dictionary()->print_free_lists(gclog_or_tty);


 816   }
 817   delete _dictionary;
 818 }
 819 
 820 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
 821   Metablock* free_chunk = ::new (p) Metablock(word_size);
 822   dictionary()->return_chunk(free_chunk);
 823 }
 824 
 825 MetaWord* BlockFreelist::get_block(size_t word_size) {
 826   if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
 827     // Dark matter.  Too small for dictionary.
 828     return NULL;
 829   }
 830 
 831   Metablock* free_block =
 832     dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
 833   if (free_block == NULL) {
 834     return NULL;
 835   }


 875 
 876 size_t VirtualSpaceNode::free_words_in_vs() const {
 877   return pointer_delta(end(), top(), sizeof(MetaWord));
 878 }
 879 
 880 // Allocates the chunk from the virtual space only.
 881 // This interface is also used internally for debugging.  Not all
 882 // chunks removed here are necessarily used for allocation.
 883 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
 884   // Bottom of the new chunk
 885   MetaWord* chunk_limit = top();
 886   assert(chunk_limit != NULL, "Not safe to call this method");
 887 
 888   // The virtual spaces are always expanded by the
 889   // commit granularity to enforce the following condition.
 890   // Without this the is_available check will not work correctly.
 891   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
 892       "The committed memory doesn't match the expanded memory.");
 893 
 894   if (!is_available(chunk_word_size)) {
 895     if (TraceMetadataChunkAllocation) {
 896       gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
 897       // Dump some information about the virtual space that is nearly full
 898       print_on(gclog_or_tty);
 899     }
 900     return NULL;
 901   }
 902 
 903   // Take the space  (bump top on the current virtual space).
 904   inc_top(chunk_word_size);
 905 
 906   // Initialize the chunk
 907   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
 908   return result;
 909 }
 910 
 911 
 912 // Expand the virtual space (commit more of the reserved space)
 913 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
 914   size_t min_bytes = min_words * BytesPerWord;
 915   size_t preferred_bytes = preferred_words * BytesPerWord;
 916 
 917   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
 918 
 919   if (uncommitted < min_bytes) {


1214     // ensure lock-free iteration sees fully initialized node
1215     OrderAccess::storestore();
1216     link_vs(new_entry);
1217     return true;
1218   }
1219 }
1220 
1221 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1222   if (virtual_space_list() == NULL) {
1223       set_virtual_space_list(new_entry);
1224   } else {
1225     current_virtual_space()->set_next(new_entry);
1226   }
1227   set_current_virtual_space(new_entry);
1228   inc_reserved_words(new_entry->reserved_words());
1229   inc_committed_words(new_entry->committed_words());
1230   inc_virtual_space_count();
1231 #ifdef ASSERT
1232   new_entry->mangle();
1233 #endif
1234   if (TraceMetavirtualspaceAllocation && Verbose) {

1235     VirtualSpaceNode* vsl = current_virtual_space();
1236     vsl->print_on(gclog_or_tty);

1237   }
1238 }
1239 
1240 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1241                                       size_t min_words,
1242                                       size_t preferred_words) {
1243   size_t before = node->committed_words();
1244 
1245   bool result = node->expand_by(min_words, preferred_words);
1246 
1247   size_t after = node->committed_words();
1248 
1249   // after and before can be the same if the memory was pre-committed.
1250   assert(after >= before, "Inconsistency");
1251   inc_committed_words(after - before);
1252 
1253   return result;
1254 }
1255 
1256 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {


1313   // The expand amount is currently only determined by the requested sizes
1314   // and not how much committed memory is left in the current virtual space.
1315 
1316   size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
1317   size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
1318   if (min_word_size >= preferred_word_size) {
1319     // Can happen when humongous chunks are allocated.
1320     preferred_word_size = min_word_size;
1321   }
1322 
1323   bool expanded = expand_by(min_word_size, preferred_word_size);
1324   if (expanded) {
1325     next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1326     assert(next != NULL, "The allocation was expected to succeed after the expansion");
1327   }
1328 
1329    return next;
1330 }
1331 
1332 void VirtualSpaceList::print_on(outputStream* st) const {
1333   if (TraceMetadataChunkAllocation && Verbose) {
1334     VirtualSpaceListIterator iter(virtual_space_list());
1335     while (iter.repeat()) {
1336       VirtualSpaceNode* node = iter.get_next();
1337       node->print_on(st);
1338     }
1339   }
1340 }
1341 
1342 // MetaspaceGC methods
1343 
1344 // VM_CollectForMetadataAllocation is the vm operation used to GC.
1345 // Within the VM operation after the GC the attempt to allocate the metadata
1346 // should succeed.  If the GC did not free enough space for the metaspace
1347 // allocation, the HWM is increased so that another virtualspace will be
1348 // allocated for the metadata.  With perm gen the increase in the perm
1349 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
1350 // metaspace policy uses those as the small and large steps for the HWM.
1351 //
1352 // After the GC the compute_new_size() for MetaspaceGC is called to
1353 // resize the capacity of the metaspaces.  The current implementation
1354 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
1355 // to resize the Java heap by some GC's.  New flags can be implemented
1356 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
1357 // free space is desirable in the metaspace capacity to decide how much
1358 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
1359 // free space is desirable in the metaspace capacity before decreasing


1480   // chunk free lists are included in committed_bytes() and the memory in an
1481   // un-fragmented chunk free list is available for future allocations.
1482   // However, if the chunk free lists becomes fragmented, then the memory may
1483   // not be available for future allocations and the memory is therefore "in use".
1484   // Including the chunk free lists in the definition of "in use" is therefore
1485   // necessary. Not including the chunk free lists can cause capacity_until_GC to
1486   // shrink below committed_bytes() and this has caused serious bugs in the past.
1487   const size_t used_after_gc = MetaspaceAux::committed_bytes();
1488   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1489 
1490   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1491   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1492 
1493   const double min_tmp = used_after_gc / maximum_used_percentage;
1494   size_t minimum_desired_capacity =
1495     (size_t)MIN2(min_tmp, double(max_uintx));
1496   // Don't shrink less than the initial generation size
1497   minimum_desired_capacity = MAX2(minimum_desired_capacity,
1498                                   MetaspaceSize);
1499 
1500   if (PrintGCDetails && Verbose) {
1501     gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
1502     gclog_or_tty->print_cr("  "
1503                   "  minimum_free_percentage: %6.2f"
1504                   "  maximum_used_percentage: %6.2f",
1505                   minimum_free_percentage,
1506                   maximum_used_percentage);
1507     gclog_or_tty->print_cr("  "
1508                   "   used_after_gc       : %6.1fKB",
1509                   used_after_gc / (double) K);
1510   }
1511 
1512 
1513   size_t shrink_bytes = 0;
1514   if (capacity_until_GC < minimum_desired_capacity) {
1515     // If we have less capacity below the metaspace HWM, then
1516     // increment the HWM.
1517     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1518     expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
1519     // Don't expand unless it's significant
1520     if (expand_bytes >= MinMetaspaceExpansion) {
1521       size_t new_capacity_until_GC = 0;
1522       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1523       assert(succeeded, "Should always successfully increment HWM when at safepoint");
1524 
1525       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1526                                                new_capacity_until_GC,
1527                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
1528       if (PrintGCDetails && Verbose) {
1529         gclog_or_tty->print_cr("    expanding:"
1530                       "  minimum_desired_capacity: %6.1fKB"
1531                       "  expand_bytes: %6.1fKB"
1532                       "  MinMetaspaceExpansion: %6.1fKB"
1533                       "  new metaspace HWM:  %6.1fKB",
1534                       minimum_desired_capacity / (double) K,
1535                       expand_bytes / (double) K,
1536                       MinMetaspaceExpansion / (double) K,
1537                       new_capacity_until_GC / (double) K);
1538       }
1539     }
1540     return;
1541   }
1542 
1543   // No expansion, now see if we want to shrink
1544   // We would never want to shrink more than this
1545   assert(capacity_until_GC >= minimum_desired_capacity,
1546          SIZE_FORMAT " >= " SIZE_FORMAT,
1547          capacity_until_GC, minimum_desired_capacity);
1548   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1549 
1550   // Should shrinking be considered?
1551   if (MaxMetaspaceFreeRatio < 100) {
1552     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1553     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1554     const double max_tmp = used_after_gc / minimum_used_percentage;
1555     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1556     maximum_desired_capacity = MAX2(maximum_desired_capacity,
1557                                     MetaspaceSize);
1558     if (PrintGCDetails && Verbose) {
1559       gclog_or_tty->print_cr("  "
1560                              "  maximum_free_percentage: %6.2f"
1561                              "  minimum_used_percentage: %6.2f",
1562                              maximum_free_percentage,
1563                              minimum_used_percentage);
1564       gclog_or_tty->print_cr("  "
1565                              "  minimum_desired_capacity: %6.1fKB"
1566                              "  maximum_desired_capacity: %6.1fKB",
1567                              minimum_desired_capacity / (double) K,
1568                              maximum_desired_capacity / (double) K);
1569     }
1570 
1571     assert(minimum_desired_capacity <= maximum_desired_capacity,
1572            "sanity check");
1573 
1574     if (capacity_until_GC > maximum_desired_capacity) {
1575       // Capacity too large, compute shrinking size
1576       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1577       // We don't want shrink all the way back to initSize if people call
1578       // System.gc(), because some programs do that between "phases" and then
1579       // we'd just have to grow the heap up again for the next phase.  So we
1580       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1581       // on the third call, and 100% by the fourth call.  But if we recompute
1582       // size without shrinking, it goes back to 0%.
1583       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1584 
1585       shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1586 
1587       assert(shrink_bytes <= max_shrink_bytes,
1588              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1589              shrink_bytes, max_shrink_bytes);
1590       if (current_shrink_factor == 0) {
1591         _shrink_factor = 10;
1592       } else {
1593         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1594       }
1595       if (PrintGCDetails && Verbose) {
1596         gclog_or_tty->print_cr("  "
1597                       "  shrinking:"
1598                       "  initSize: %.1fK"
1599                       "  maximum_desired_capacity: %.1fK",
1600                       MetaspaceSize / (double) K,
1601                       maximum_desired_capacity / (double) K);
1602         gclog_or_tty->print_cr("  "
1603                       "  shrink_bytes: %.1fK"
1604                       "  current_shrink_factor: %d"
1605                       "  new shrink factor: %d"
1606                       "  MinMetaspaceExpansion: %.1fK",
1607                       shrink_bytes / (double) K,
1608                       current_shrink_factor,
1609                       _shrink_factor,
1610                       MinMetaspaceExpansion / (double) K);
1611       }
1612     }
1613   }
1614 
1615   // Don't shrink unless it's significant
1616   if (shrink_bytes >= MinMetaspaceExpansion &&
1617       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1618     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1619     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1620                                              new_capacity_until_GC,
1621                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
1622   }
1623 }
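
Reviewer's note: the sizing policy above reduces to two ratio-driven capacity targets plus a damped shrink of the excess. The following standalone sketch is illustrative only; the flag values are made up and none of these helper names exist in HotSpot.

#include <algorithm>
#include <cstdio>

// Illustrative sketch of the compute_new_size() arithmetic, not part of this patch.
// "used" stands in for MetaspaceAux::committed_bytes(); the ratios mimic the
// MinMetaspaceFreeRatio / MaxMetaspaceFreeRatio flags.
int main() {
  const double used           = 60.0 * 1024 * 1024;  // assume 60 MB committed after GC
  const double min_free_ratio = 40.0;                // hypothetical -XX:MinMetaspaceFreeRatio
  const double max_free_ratio = 70.0;                // hypothetical -XX:MaxMetaspaceFreeRatio

  const double minimum_desired = used / (1.0 - min_free_ratio / 100.0);  // expand the HWM up to at least this
  const double maximum_desired = used / (1.0 - max_free_ratio / 100.0);  // shrink the HWM down toward this
  printf("minimum desired: %.1f MB, maximum desired: %.1f MB\n",
         minimum_desired / (1024 * 1024), maximum_desired / (1024 * 1024));

  // Damped shrinking of the excess above maximum_desired on successive calls:
  // 0% on the first call, then 10%, 40% and 100% (the _shrink_factor progression).
  unsigned shrink_factor = 0;
  for (int call = 1; call <= 4; call++) {
    printf("call %d shrinks %u%% of the excess\n", call, shrink_factor);
    shrink_factor = (shrink_factor == 0) ? 10 : std::min(shrink_factor * 4, 100u);
  }
  return 0;
}
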
1624 
1625 // Metadebug methods
1626 
1627 void Metadebug::init_allocation_fail_alot_count() {
1628   if (MetadataAllocationFailALot) {
1629     _allocation_fail_alot_count =
1630       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1631   }
1632 }
1633 
1634 #ifdef ASSERT
1635 bool Metadebug::test_metadata_failure() {
1636   if (MetadataAllocationFailALot &&
1637       Threads::is_vm_complete()) {
1638     if (_allocation_fail_alot_count > 0) {
1639       _allocation_fail_alot_count--;
1640     } else {
1641       if (TraceMetadataChunkAllocation && Verbose) {
1642         gclog_or_tty->print_cr("Metadata allocation failing for "
1643                                "MetadataAllocationFailALot");
1644       }
1645       init_allocation_fail_alot_count();
1646       return true;
1647     }
1648   }
1649   return false;
1650 }
1651 #endif
1652 
1653 // ChunkManager methods
1654 
1655 size_t ChunkManager::free_chunks_total_words() {
1656   return _free_chunks_total;
1657 }
1658 
1659 size_t ChunkManager::free_chunks_total_bytes() {
1660   return free_chunks_total_words() * BytesPerWord;
1661 }
1662 
1663 size_t ChunkManager::free_chunks_count() {
1664 #ifdef ASSERT


1769 
1770 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1771   assert_lock_strong(SpaceManager::expand_lock());
1772 
1773   slow_locked_verify();
1774 
1775   Metachunk* chunk = NULL;
1776   if (list_index(word_size) != HumongousIndex) {
1777     ChunkList* free_list = find_free_chunks_list(word_size);
1778     assert(free_list != NULL, "Sanity check");
1779 
1780     chunk = free_list->head();
1781 
1782     if (chunk == NULL) {
1783       return NULL;
1784     }
1785 
1786     // Remove the chunk as the head of the list.
1787     free_list->remove_chunk(chunk);
1788 
1789     if (TraceMetadataChunkAllocation && Verbose) {
1790       gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1791                              PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1792                              p2i(free_list), p2i(chunk), chunk->word_size());
1793     }
1794   } else {
1795     chunk = humongous_dictionary()->get_chunk(
1796       word_size,
1797       FreeBlockDictionary<Metachunk>::atLeast);
1798 
1799     if (chunk == NULL) {
1800       return NULL;
1801     }
1802 
1803     if (TraceMetadataHumongousAllocation) {
1804       size_t waste = chunk->word_size() - word_size;
1805       gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1806                              SIZE_FORMAT " for requested size " SIZE_FORMAT
1807                              " waste " SIZE_FORMAT,
1808                              chunk->word_size(), word_size, waste);
1809     }
1810   }
1811 
1812   // Chunk is being removed from the chunks free list.
1813   dec_free_chunks_total(chunk->word_size());
1814 
1815   // Remove it from the links to this freelist
1816   chunk->set_next(NULL);
1817   chunk->set_prev(NULL);
1818 #ifdef ASSERT
1819   // Chunk is no longer on any freelist. Setting to false makes container_count_slow()
1820   // work.
1821   chunk->set_is_tagged_free(false);
1822 #endif
1823   chunk->container()->inc_container_count();
1824 
1825   slow_locked_verify();
1826   return chunk;
1827 }
1828 
1829 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1830   assert_lock_strong(SpaceManager::expand_lock());
1831   slow_locked_verify();
1832 
1833   // Take from the beginning of the list
1834   Metachunk* chunk = free_chunks_get(word_size);
1835   if (chunk == NULL) {
1836     return NULL;
1837   }
1838 
1839   assert((word_size <= chunk->word_size()) ||
1840          list_index(chunk->word_size()) == HumongousIndex,
1841          "Non-humongous variable sized chunk");
1842   if (TraceMetadataChunkAllocation) {

1843     size_t list_count;
1844     if (list_index(word_size) < HumongousIndex) {
1845       ChunkList* list = find_free_chunks_list(word_size);
1846       list_count = list->count();
1847     } else {
1848       list_count = humongous_dictionary()->total_count();
1849     }
1850     gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1851                         PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1852                         p2i(this), p2i(chunk), chunk->word_size(), list_count);
1853     locked_print_free_chunks(gclog_or_tty);

1854   }
1855 
1856   return chunk;
1857 }
1858 
1859 void ChunkManager::print_on(outputStream* out) const {
1860   if (PrintFLSStatistics != 0) {
1861     const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
1862   }
1863 }
1864 
1865 // SpaceManager methods
1866 
1867 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1868                                            size_t* chunk_word_size,
1869                                            size_t* class_chunk_word_size) {
1870   switch (type) {
1871   case Metaspace::BootMetaspaceType:
1872     *chunk_word_size = Metaspace::first_chunk_word_size();
1873     *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1874     break;
1875   case Metaspace::ROMetaspaceType:
1876     *chunk_word_size = SharedReadOnlySize / wordSize;
1877     *class_chunk_word_size = ClassSpecializedChunk;
1878     break;
1879   case Metaspace::ReadWriteMetaspaceType:
1880     *chunk_word_size = SharedReadWriteSize / wordSize;
1881     *class_chunk_word_size = ClassSpecializedChunk;
1882     break;


2022       chunk_word_size = medium_chunk_size();
2023     }
2024   } else {
2025     chunk_word_size = medium_chunk_size();
2026   }
2027 
2028   // Might still need a humongous chunk.  Enforce
2029   // humongous allocation sizes to be aligned up to
2030   // the smallest chunk size.
2031   size_t if_humongous_sized_chunk =
2032     align_size_up(word_size + Metachunk::overhead(),
2033                   smallest_chunk_size());
2034   chunk_word_size =
2035     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2036 
2037   assert(!SpaceManager::is_humongous(word_size) ||
2038          chunk_word_size == if_humongous_sized_chunk,
2039          "Size calculation is wrong, word_size " SIZE_FORMAT
2040          " chunk_word_size " SIZE_FORMAT,
2041          word_size, chunk_word_size);
2042   if (TraceMetadataHumongousAllocation &&
2043       SpaceManager::is_humongous(word_size)) {
2044     gclog_or_tty->print_cr("Metadata humongous allocation:");
2045     gclog_or_tty->print_cr("  word_size " PTR_FORMAT, word_size);
2046     gclog_or_tty->print_cr("  chunk_word_size " PTR_FORMAT,
2047                            chunk_word_size);
2048     gclog_or_tty->print_cr("    chunk overhead " PTR_FORMAT,
2049                            Metachunk::overhead());
2050   }
2051   return chunk_word_size;
2052 }
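
Reviewer's note: for a humongous request the chunk size computed above is simply the request plus the Metachunk header, rounded up to the smallest chunk size. A hedged sketch of that rounding, using invented constants rather than the real HotSpot values:

#include <cstddef>
#include <cstdio>

// align_size_up() as used in calc_chunk_size(); alignment must be a power of two.
static size_t align_size_up_sketch(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t overhead_words       = 8;    // assumed Metachunk::overhead(), in words
  const size_t smallest_chunk_words = 128;  // assumed smallest_chunk_size(), in words
  const size_t word_size            = 3000; // a humongous request, in words

  size_t if_humongous = align_size_up_sketch(word_size + overhead_words, smallest_chunk_words);
  printf("humongous chunk: %zu words\n", if_humongous);  // 3008 rounds up to 3072
  return 0;
}
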
2053 
2054 void SpaceManager::track_metaspace_memory_usage() {
2055   if (is_init_completed()) {
2056     if (is_class()) {
2057       MemoryService::track_compressed_class_memory_usage();
2058     }
2059     MemoryService::track_metaspace_memory_usage();
2060   }
2061 }
2062 
2063 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2064   assert(vs_list()->current_virtual_space() != NULL,
2065          "Should have been set");
2066   assert(current_chunk() == NULL ||
2067          current_chunk()->allocate(word_size) == NULL,
2068          "Don't need to expand");
2069   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2070 
2071   if (TraceMetadataChunkAllocation && Verbose) {
2072     size_t words_left = 0;
2073     size_t words_used = 0;
2074     if (current_chunk() != NULL) {
2075       words_left = current_chunk()->free_word_size();
2076       words_used = current_chunk()->used_word_size();
2077     }
2078     gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
2079                            " words " SIZE_FORMAT " words used " SIZE_FORMAT
2080                            " words left",
2081                             word_size, words_used, words_left);
2082   }
2083 
2084   // Get another chunk
2085   size_t grow_chunks_by_words = calc_chunk_size(word_size);
2086   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2087 
2088   MetaWord* mem = NULL;
2089 
2090   // If a chunk was available, add it to the in-use chunk list
2091   // and do an allocation from it.
2092   if (next != NULL) {
2093     // Add to this manager's list of chunks in use.
2094     add_chunk(next, false);
2095     mem = next->allocate(word_size);
2096   }
2097 
2098   // Track metaspace memory usage statistic.
2099   track_metaspace_memory_usage();
2100 


2152 void SpaceManager::inc_used_metrics(size_t words) {
2153   // Add to the per SpaceManager total
2154   Atomic::add_ptr(words, &_allocated_blocks_words);
2155   // Add to the global total
2156   MetaspaceAux::inc_used(mdtype(), words);
2157 }
2158 
2159 void SpaceManager::dec_total_from_size_metrics() {
2160   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2161   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2162   // Also deduct the overhead per Metachunk
2163   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2164 }
2165 
2166 void SpaceManager::initialize() {
2167   Metadebug::init_allocation_fail_alot_count();
2168   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2169     _chunks_in_use[i] = NULL;
2170   }
2171   _current_chunk = NULL;
2172   if (TraceMetadataChunkAllocation && Verbose) {
2173     gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, p2i(this));
2174   }
2175 }
2176 
2177 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
2178   if (chunks == NULL) {
2179     return;
2180   }
2181   ChunkList* list = free_chunks(index);
2182   assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
2183   assert_lock_strong(SpaceManager::expand_lock());
2184   Metachunk* cur = chunks;
2185 
2186   // This returns chunks one at a time.  If a new
2187   // class List can be created that is a base class
2188   // of FreeList then something like FreeList::prepend()
2189   // can be used in place of this loop
2190   while (cur != NULL) {
2191     assert(cur->container() != NULL, "Container should have been set");
2192     cur->container()->dec_container_count();
2193     // Capture the next link before it is changed
2194     // by the call to return_chunk_at_head();


2196     DEBUG_ONLY(cur->set_is_tagged_free(true);)
2197     list->return_chunk_at_head(cur);
2198     cur = next;
2199   }
2200 }
2201 
2202 SpaceManager::~SpaceManager() {
2203   // This call takes this->_lock, which can't be done while holding expand_lock()
2204   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2205          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2206          " allocated_chunks_words() " SIZE_FORMAT,
2207          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2208 
2209   MutexLockerEx fcl(SpaceManager::expand_lock(),
2210                     Mutex::_no_safepoint_check_flag);
2211 
2212   chunk_manager()->slow_locked_verify();
2213 
2214   dec_total_from_size_metrics();
2215 
2216   if (TraceMetadataChunkAllocation && Verbose) {
2217     gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, p2i(this));
2218     locked_print_chunks_in_use_on(gclog_or_tty);


2219   }
2220 
2221   // Do not mangle freed Metachunks.  The chunk size inside Metachunks
2222   // is used during the freeing of VirtualSpaceNodes.
2223 
2224   // Have to update before the chunks_in_use lists are emptied
2225   // below.
2226   chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2227                                          sum_count_in_chunks_in_use());
2228 
2229   // Add all the chunks in use by this space manager
2230   // to the global list of free chunks.
2231 
2232   // Follow each list of chunks-in-use and add them to the
2233   // free lists.  Each list is NULL terminated.
2234 
2235   for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2236     if (TraceMetadataChunkAllocation && Verbose) {
2237       gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s chunks to freelist",
2238                              sum_count_in_chunks_in_use(i),
2239                              chunk_size_name(i));
2240     }
2241     Metachunk* chunks = chunks_in_use(i);
2242     chunk_manager()->return_chunks(i, chunks);
2243     set_chunks_in_use(i, NULL);
2244     if (TraceMetadataChunkAllocation && Verbose) {
2245       gclog_or_tty->print_cr("updated freelist count " SSIZE_FORMAT " %s",
2246                              chunk_manager()->free_chunks(i)->count(),
2247                              chunk_size_name(i));
2248     }
2249     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2250   }
2251 
2252   // The medium chunk case may be optimized by passing the head and
2253   // tail of the medium chunk list to add_at_head().  The tail is often
2254   // the current chunk but there are probably exceptions.
2255 
2256   // Humongous chunks
2257   if (TraceMetadataChunkAllocation && Verbose) {
2258     gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s humongous chunks to dictionary",
2259                             sum_count_in_chunks_in_use(HumongousIndex),
2260                             chunk_size_name(HumongousIndex));
2261     gclog_or_tty->print("Humongous chunk dictionary: ");
2262   }
2263   // Humongous chunks are never the current chunk.
2264   Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2265 
2266   while (humongous_chunks != NULL) {
2267 #ifdef ASSERT
2268     humongous_chunks->set_is_tagged_free(true);
2269 #endif
2270     if (TraceMetadataChunkAllocation && Verbose) {
2271       gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2272                           p2i(humongous_chunks),
2273                           humongous_chunks->word_size());
2274     }
2275     assert(humongous_chunks->word_size() == (size_t)
2276            align_size_up(humongous_chunks->word_size(),
2277                              smallest_chunk_size()),
2278            "Humongous chunk size is wrong: word size " SIZE_FORMAT
2279            " granularity " SIZE_FORMAT,
2280            humongous_chunks->word_size(), smallest_chunk_size());
2281     Metachunk* next_humongous_chunks = humongous_chunks->next();
2282     humongous_chunks->container()->dec_container_count();
2283     chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2284     humongous_chunks = next_humongous_chunks;
2285   }
2286   if (TraceMetadataChunkAllocation && Verbose) {
2287     gclog_or_tty->cr();
2288     gclog_or_tty->print_cr("updated dictionary count " SIZE_FORMAT " %s",
2289                      chunk_manager()->humongous_dictionary()->total_count(),
2290                      chunk_size_name(HumongousIndex));
2291   }
2292   chunk_manager()->slow_locked_verify();
2293 }
2294 
2295 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2296   switch (index) {
2297     case SpecializedIndex:
2298       return "Specialized";
2299     case SmallIndex:
2300       return "Small";
2301     case MediumIndex:
2302       return "Medium";
2303     case HumongousIndex:
2304       return "Humongous";
2305     default:
2306       return NULL;
2307   }
2308 }
2309 
2310 ChunkIndex ChunkManager::list_index(size_t size) {
2311   switch (size) {


2357     // small, so small will be null.  Link this first chunk as the current
2358     // chunk.
2359     if (make_current) {
2360       // Set as the current chunk but otherwise treat as a humongous chunk.
2361       set_current_chunk(new_chunk);
2362     }
2363     // Link at head.  The _current_chunk only points to a humongous chunk for
2364     // the null class loader metaspace (class and data virtual space managers),
2365     // so it will not point to the tail
2366     // of the humongous chunks list.
2367     new_chunk->set_next(chunks_in_use(HumongousIndex));
2368     set_chunks_in_use(HumongousIndex, new_chunk);
2369 
2370     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2371   }
2372 
2373   // Add to the running sum of capacity
2374   inc_size_metrics(new_chunk->word_size());
2375 
2376   assert(new_chunk->is_empty(), "Not ready for reuse");
2377   if (TraceMetadataChunkAllocation && Verbose) {
2378     gclog_or_tty->print("SpaceManager::add_chunk: " SIZE_FORMAT ") ",
2379                         sum_count_in_chunks_in_use());
2380     new_chunk->print_on(gclog_or_tty);
2381     chunk_manager()->locked_print_free_chunks(gclog_or_tty);


2382   }
2383 }
2384 
2385 void SpaceManager::retire_current_chunk() {
2386   if (current_chunk() != NULL) {
2387     size_t remaining_words = current_chunk()->free_word_size();
2388     if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
2389       block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2390       inc_used_metrics(remaining_words);
2391     }
2392   }
2393 }
2394 
2395 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2396                                        size_t grow_chunks_by_words) {
2397   // Get a chunk from the chunk freelist
2398   Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2399 
2400   if (next == NULL) {
2401     next = vs_list()->get_new_chunk(word_size,
2402                                     grow_chunks_by_words,
2403                                     medium_chunk_bunch());
2404   }
2405 
2406   if (TraceMetadataHumongousAllocation && next != NULL &&

2407       SpaceManager::is_humongous(next->word_size())) {
2408     gclog_or_tty->print_cr("  new humongous chunk word size "
2409                            PTR_FORMAT, next->word_size());
2410   }
2411 
2412   return next;
2413 }
2414 
2415 /*
2416  * The policy is to allocate up to _small_chunk_limit small chunks
2417  * after which only medium chunks are allocated.  This is done to
2418  * reduce fragmentation.  In some cases, this can result in a lot
2419  * of small chunks being allocated to the point where it's not
2420  * possible to expand.  If this happens, there may be no medium chunks
2421  * available and OOME would be thrown.  Instead of doing that,
2422  * if the allocation request size fits in a small chunk, an attempt
2423  * will be made to allocate a small chunk.
2424  */
2425 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
2426   size_t raw_word_size = get_raw_word_size(word_size);
2427 
2428   if (raw_word_size + Metachunk::overhead() > small_chunk_size()) {
2429     return NULL;


2554   uint i = 0;
2555   size_t used = 0;
2556   size_t capacity = 0;
2557 
2558   // Add up statistics for all chunks in this SpaceManager.
2559   for (ChunkIndex index = ZeroIndex;
2560        index < NumberOfInUseLists;
2561        index = next_chunk_index(index)) {
2562     for (Metachunk* curr = chunks_in_use(index);
2563          curr != NULL;
2564          curr = curr->next()) {
2565       out->print("%d) ", i++);
2566       curr->print_on(out);
2567       curr_total += curr->word_size();
2568       used += curr->used_word_size();
2569       capacity += curr->word_size();
2570       waste += curr->free_word_size() + curr->overhead();
2571     }
2572   }
2573 
2574   if (TraceMetadataChunkAllocation && Verbose) {
2575     block_freelists()->print_on(out);
2576   }
2577 
2578   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2579   // Free space isn't wasted.
2580   waste -= free;
2581 
2582   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2583                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2584                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2585 }
2586 
2587 #ifndef PRODUCT
2588 void SpaceManager::mangle_freed_chunks() {
2589   for (ChunkIndex index = ZeroIndex;
2590        index < NumberOfInUseLists;
2591        index = next_chunk_index(index)) {
2592     for (Metachunk* curr = chunks_in_use(index);
2593          curr != NULL;
2594          curr = curr->next()) {


2739 }
2740 
2741 size_t MetaspaceAux::free_chunks_total_bytes() {
2742   return free_chunks_total_words() * BytesPerWord;
2743 }
2744 
2745 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2746   return Metaspace::get_chunk_manager(mdtype) != NULL;
2747 }
2748 
2749 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
2750   if (!has_chunk_free_list(mdtype)) {
2751     return MetaspaceChunkFreeListSummary();
2752   }
2753 
2754   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
2755   return cm->chunk_free_list_summary();
2756 }
2757 
2758 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2759   gclog_or_tty->print(", [Metaspace:");
2760   if (PrintGCDetails && Verbose) {
2761     gclog_or_tty->print(" "  SIZE_FORMAT
2762                         "->" SIZE_FORMAT
2763                         "("  SIZE_FORMAT ")",
2764                         prev_metadata_used,
2765                         used_bytes(),
2766                         reserved_bytes());
2767   } else {
2768     gclog_or_tty->print(" "  SIZE_FORMAT "K"
2769                         "->" SIZE_FORMAT "K"
2770                         "("  SIZE_FORMAT "K)",
2771                         prev_metadata_used/K,
2772                         used_bytes()/K,
2773                         reserved_bytes()/K);
2774   }
2775 
2776   gclog_or_tty->print("]");
2777 }
2778 
2779 // This is printed when PrintGCDetails
2780 void MetaspaceAux::print_on(outputStream* out) {
2781   Metaspace::MetadataType nct = Metaspace::NonClassType;
2782 
2783   out->print_cr(" Metaspace       "
2784                 "used "      SIZE_FORMAT "K, "
2785                 "capacity "  SIZE_FORMAT "K, "
2786                 "committed " SIZE_FORMAT "K, "
2787                 "reserved "  SIZE_FORMAT "K",
2788                 used_bytes()/K,
2789                 capacity_bytes()/K,
2790                 committed_bytes()/K,
2791                 reserved_bytes()/K);
2792 
2793   if (Metaspace::using_class_space()) {
2794     Metaspace::MetadataType ct = Metaspace::ClassType;
2795     out->print_cr("  class space    "
2796                   "used "      SIZE_FORMAT "K, "
2797                   "capacity "  SIZE_FORMAT "K, "
2798                   "committed " SIZE_FORMAT "K, "
2799                   "reserved "  SIZE_FORMAT "K",


3116                                               compressed_class_space_size()));
3117       }
3118     }
3119   }
3120 
3121   // If we got here then the metaspace got allocated.
3122   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3123 
3124 #if INCLUDE_CDS
3125   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3126   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3127     FileMapInfo::stop_sharing_and_unmap(
3128         "Could not allocate metaspace at a compatible address");
3129   }
3130 #endif
3131   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3132                                   UseSharedSpaces ? (address)cds_base : 0);
3133 
3134   initialize_class_space(metaspace_rs);
3135 
3136   if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
3137     print_compressed_class_space(gclog_or_tty, requested_addr);


3138   }
3139 }
3140 
3141 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3142   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3143                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3144   if (_class_space_list != NULL) {
3145     address base = (address)_class_space_list->current_virtual_space()->bottom();
3146     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3147                  compressed_class_space_size(), p2i(base));
3148     if (requested_addr != 0) {
3149       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3150     }
3151     st->cr();
3152   }
3153 }
3154 
3155 // For UseCompressedClassPointers the class space is reserved above the top of
3156 // the Java heap.  The argument passed in is at the base of the compressed space.
3157 void Metaspace::initialize_class_space(ReservedSpace rs) {


3239     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3240 
3241     if (!_space_list->initialization_succeeded()) {
3242       vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3243     }
3244 
3245 #ifdef _LP64
3246     if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3247       vm_exit_during_initialization("Unable to dump shared archive.",
3248           err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3249                   SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3250                   "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(),
3251                   cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3252     }
3253 
3254     // Set the compressed klass pointer base so that decoding of these pointers works
3255     // properly when creating the shared archive.
3256     assert(UseCompressedOops && UseCompressedClassPointers,
3257       "UseCompressedOops and UseCompressedClassPointers must be set");
3258     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3259     if (TraceMetavirtualspaceAllocation && Verbose) {
3260       gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
3261                              p2i(_space_list->current_virtual_space()->bottom()));
3262     }
3263 
3264     Universe::set_narrow_klass_shift(0);
3265 #endif // _LP64
3266 #endif // INCLUDE_CDS
3267   } else {
3268     // If using shared space, open the file that contains the shared space
3269     // and map in the memory before initializing the rest of metaspace (so
3270     // the addresses don't conflict)
3271     address cds_address = NULL;
3272     if (UseSharedSpaces) {
3273 #if INCLUDE_CDS
3274       FileMapInfo* mapinfo = new FileMapInfo();
3275 
3276       // Open the shared archive file, read and validate the header. If
3277       // initialization fails, shared spaces [UseSharedSpaces] are
3278       // disabled and the file is closed.
3279       // Map in spaces now also
3280       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3281         cds_total = FileMapInfo::shared_spaces_size();
3282         cds_address = (address)mapinfo->header()->region_addr(0);


3429 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3430   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3431   assert(delta_bytes > 0, "Must be");
3432 
3433   size_t before = 0;
3434   size_t after = 0;
3435   MetaWord* res;
3436   bool incremented;
3437 
3438   // Each thread increments the HWM at most once. Even if the thread fails to increment
3439   // the HWM, an allocation is still attempted. This is because another thread must then
3440   // have incremented the HWM and therefore the allocation might still succeed.
3441   do {
3442     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
3443     res = allocate(word_size, mdtype);
3444   } while (!incremented && res == NULL);
3445 
3446   if (incremented) {
3447     tracer()->report_gc_threshold(before, after,
3448                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3449     if (PrintGCDetails && Verbose) {
3450       gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
3451           " to " SIZE_FORMAT, before, after);
3452     }
3453   }
3454 
3455   return res;
3456 }
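
Reviewer's note: the loop above implements a simple protocol in which a thread raises the GC high-water mark at most once but keeps retrying the allocation, since a competing thread's raise may already have made room. A hedged, self-contained sketch of that shape (the helpers are stand-ins, not HotSpot functions):

#include <cstddef>
#include <cstdlib>

// Stand-ins for MetaspaceGC::inc_capacity_until_GC() and Metaspace::allocate().
static bool  try_raise_hwm(size_t /*delta_bytes*/) { return true; }               // stub: pretend the CAS won
static void* try_allocate(size_t word_size)        { return malloc(word_size); }  // stub allocation

void* expand_and_allocate_sketch(size_t word_size, size_t delta_bytes) {
  bool  incremented;
  void* result;
  do {
    // Once our own increment succeeds the loop exits, so each thread raises
    // the HWM at most once; losing the race still warrants another allocation
    // attempt because the winner's raise may have created room.
    incremented = try_raise_hwm(delta_bytes);
    result      = try_allocate(word_size);
  } while (!incremented && result == NULL);
  return result;
}
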
3457 
3458 // Space allocated in the Metaspace.  This may
3459 // be across several metadata virtual spaces.
3460 char* Metaspace::bottom() const {
3461   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3462   return (char*)vsm()->current_chunk()->bottom();
3463 }
3464 
3465 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3466   if (mdtype == ClassType) {
3467     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3468   } else {
3469     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3470   }
3471 }
3472 


3595     if (result == NULL) {
3596       report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3597     }
3598   }
3599 
3600   // Zero initialize.
3601   Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3602 
3603   return result;
3604 }
3605 
3606 size_t Metaspace::class_chunk_size(size_t word_size) {
3607   assert(using_class_space(), "Has to use class space");
3608   return class_vsm()->calc_chunk_size(word_size);
3609 }
3610 
3611 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3612   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3613 
3614   // If result is still null, we are out of memory.
3615   if (Verbose && TraceMetadataChunkAllocation) {
3616     gclog_or_tty->print_cr("Metaspace allocation failed for size "
3617         SIZE_FORMAT, word_size);


3618     if (loader_data->metaspace_or_null() != NULL) {
3619       loader_data->dump(gclog_or_tty);
3620     }
3621     MetaspaceAux::dump(gclog_or_tty);
3622   }
3623 
3624   bool out_of_compressed_class_space = false;
3625   if (is_class_space_allocation(mdtype)) {
3626     Metaspace* metaspace = loader_data->metaspace_non_null();
3627     out_of_compressed_class_space =
3628       MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3629       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3630       CompressedClassSpaceSize;
3631   }
3632 
3633   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3634   const char* space_string = out_of_compressed_class_space ?
3635     "Compressed class space" : "Metaspace";
3636 
3637   report_java_out_of_memory(space_string);
3638 
3639   if (JvmtiExport::should_post_resource_exhausted()) {
3640     JvmtiExport::post_resource_exhausted(
3641         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,




   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "gc/shared/collectedHeap.hpp"
  26 #include "gc/shared/collectorPolicy.hpp"
  27 #include "gc/shared/gcLocker.hpp"
  28 #include "logging/log.hpp"
  29 #include "memory/allocation.hpp"
  30 #include "memory/binaryTreeDictionary.hpp"
  31 #include "memory/filemap.hpp"
  32 #include "memory/freeList.hpp"
  33 #include "memory/metachunk.hpp"
  34 #include "memory/metaspace.hpp"
  35 #include "memory/metaspaceGCThresholdUpdater.hpp"
  36 #include "memory/metaspaceShared.hpp"
  37 #include "memory/metaspaceTracer.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.hpp"
  40 #include "runtime/atomic.inline.hpp"
  41 #include "runtime/globals.hpp"
  42 #include "runtime/init.hpp"
  43 #include "runtime/java.hpp"
  44 #include "runtime/mutex.hpp"
  45 #include "runtime/orderAccess.inline.hpp"
  46 #include "services/memTracker.hpp"
  47 #include "services/memoryService.hpp"
  48 #include "utilities/copy.hpp"


 795 }
 796 
 797 void VirtualSpaceNode::dec_container_count() {
 798   assert_lock_strong(SpaceManager::expand_lock());
 799   _container_count--;
 800 }
 801 
 802 #ifdef ASSERT
 803 void VirtualSpaceNode::verify_container_count() {
 804   assert(_container_count == container_count_slow(),
 805          "Inconsistency in container_count _container_count " UINTX_FORMAT
 806          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
 807 }
 808 #endif
 809 
 810 // BlockFreelist methods
 811 
 812 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()) {}
 813 
 814 BlockFreelist::~BlockFreelist() {
 815   LogHandle(gc, metaspace, freelist) log;
 816   if (log.is_trace()) {
 817     ResourceMark rm;
 818     dictionary()->print_free_lists(log.trace_stream());
 819   }
 820   delete _dictionary;
 821 }
 822 
 823 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
 824   Metablock* free_chunk = ::new (p) Metablock(word_size);
 825   dictionary()->return_chunk(free_chunk);
 826 }
 827 
 828 MetaWord* BlockFreelist::get_block(size_t word_size) {
 829   if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
 830     // Dark matter.  Too small for dictionary.
 831     return NULL;
 832   }
 833 
 834   Metablock* free_block =
 835     dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
 836   if (free_block == NULL) {
 837     return NULL;
 838   }


 878 
 879 size_t VirtualSpaceNode::free_words_in_vs() const {
 880   return pointer_delta(end(), top(), sizeof(MetaWord));
 881 }
 882 
 883 // Allocates the chunk from the virtual space only.
 884 // This interface is also used internally for debugging.  Not all
 885 // chunks removed here are necessarily used for allocation.
 886 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
 887   // Bottom of the new chunk
 888   MetaWord* chunk_limit = top();
 889   assert(chunk_limit != NULL, "Not safe to call this method");
 890 
 891   // The virtual spaces are always expanded by the
 892   // commit granularity to enforce the following condition.
 893   // Without this the is_available check will not work correctly.
 894   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
 895       "The committed memory doesn't match the expanded memory.");
 896 
 897   if (!is_available(chunk_word_size)) {
 898     LogHandle(gc, metaspace, freelist) log;
 899     log.debug("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
 900     // Dump some information about the virtual space that is nearly full
 901     ResourceMark rm;
 902     print_on(log.debug_stream());
 903     return NULL;
 904   }
 905 
 906   // Take the space  (bump top on the current virtual space).
 907   inc_top(chunk_word_size);
 908 
 909   // Initialize the chunk
 910   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
 911   return result;
 912 }
 913 
 914 
 915 // Expand the virtual space (commit more of the reserved space)
 916 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
 917   size_t min_bytes = min_words * BytesPerWord;
 918   size_t preferred_bytes = preferred_words * BytesPerWord;
 919 
 920   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
 921 
 922   if (uncommitted < min_bytes) {


1217     // ensure lock-free iteration sees fully initialized node
1218     OrderAccess::storestore();
1219     link_vs(new_entry);
1220     return true;
1221   }
1222 }
1223 
1224 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1225   if (virtual_space_list() == NULL) {
1226       set_virtual_space_list(new_entry);
1227   } else {
1228     current_virtual_space()->set_next(new_entry);
1229   }
1230   set_current_virtual_space(new_entry);
1231   inc_reserved_words(new_entry->reserved_words());
1232   inc_committed_words(new_entry->committed_words());
1233   inc_virtual_space_count();
1234 #ifdef ASSERT
1235   new_entry->mangle();
1236 #endif
1237   if (develop_log_is_enabled(Trace, gc, metaspace)) {
1238     LogHandle(gc, metaspace) log;
1239     VirtualSpaceNode* vsl = current_virtual_space();
1240     ResourceMark rm;
1241     vsl->print_on(log.trace_stream());
1242   }
1243 }
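
Reviewer's summary of the conversion idiom used throughout the new version of this file (shown above for link_vs); this recaps the patch's own lines rather than adding code:

// Before:
//   if (TraceMetavirtualspaceAllocation && Verbose) {
//     vsl->print_on(gclog_or_tty);
//   }
//
// After:
//   if (develop_log_is_enabled(Trace, gc, metaspace)) {
//     LogHandle(gc, metaspace) log;
//     ResourceMark rm;                       // print_on() may use resource-area memory
//     vsl->print_on(log.trace_stream());
//   }
//
// Simple messages go straight through log_trace(gc, metaspace)("...", args),
// as in compute_new_size() further down.
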
1244 
1245 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1246                                       size_t min_words,
1247                                       size_t preferred_words) {
1248   size_t before = node->committed_words();
1249 
1250   bool result = node->expand_by(min_words, preferred_words);
1251 
1252   size_t after = node->committed_words();
1253 
1254   // after and before can be the same if the memory was pre-committed.
1255   assert(after >= before, "Inconsistency");
1256   inc_committed_words(after - before);
1257 
1258   return result;
1259 }
1260 
1261 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {


1318   // The expand amount is currently only determined by the requested sizes
1319   // and not how much committed memory is left in the current virtual space.
1320 
1321   size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
1322   size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
1323   if (min_word_size >= preferred_word_size) {
1324     // Can happen when humongous chunks are allocated.
1325     preferred_word_size = min_word_size;
1326   }
1327 
1328   bool expanded = expand_by(min_word_size, preferred_word_size);
1329   if (expanded) {
1330     next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1331     assert(next != NULL, "The allocation was expected to succeed after the expansion");
1332   }
1333 
1334    return next;
1335 }
1336 
1337 void VirtualSpaceList::print_on(outputStream* st) const {

1338   VirtualSpaceListIterator iter(virtual_space_list());
1339   while (iter.repeat()) {
1340     VirtualSpaceNode* node = iter.get_next();
1341     node->print_on(st);
1342   }

1343 }
1344 
1345 // MetaspaceGC methods
1346 
1347 // VM_CollectForMetadataAllocation is the vm operation used to GC.
1348 // Within the VM operation after the GC the attempt to allocate the metadata
1349 // should succeed.  If the GC did not free enough space for the metaspace
1350 // allocation, the HWM is increased so that another virtualspace will be
1351 // allocated for the metadata.  With perm gen the increase in the perm
1352 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
1353 // metaspace policy uses those as the small and large steps for the HWM.
1354 //
1355 // After the GC the compute_new_size() for MetaspaceGC is called to
1356 // resize the capacity of the metaspaces.  The current implementation
1357 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
1358 // to resize the Java heap by some GC's.  New flags can be implemented
1359 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
1360 // free space is desirable in the metaspace capacity to decide how much
1361 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
1362 // free space is desirable in the metaspace capacity before decreasing


1483   // chunk free lists are included in committed_bytes() and the memory in an
1484   // un-fragmented chunk free list is available for future allocations.
1485   // However, if the chunk free lists becomes fragmented, then the memory may
1486   // not be available for future allocations and the memory is therefore "in use".
1487   // Including the chunk free lists in the definition of "in use" is therefore
1488   // necessary. Not including the chunk free lists can cause capacity_until_GC to
1489   // shrink below committed_bytes() and this has caused serious bugs in the past.
1490   const size_t used_after_gc = MetaspaceAux::committed_bytes();
1491   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1492 
1493   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1494   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1495 
1496   const double min_tmp = used_after_gc / maximum_used_percentage;
1497   size_t minimum_desired_capacity =
1498     (size_t)MIN2(min_tmp, double(max_uintx));
1499   // Don't shrink below the initial metaspace size (MetaspaceSize)
1500   minimum_desired_capacity = MAX2(minimum_desired_capacity,
1501                                   MetaspaceSize);
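  // Worked example with illustrative numbers (not taken from this change):
  // if used_after_gc is 60 MB and MinMetaspaceFreeRatio is 40, then
  // maximum_used_percentage is 0.60 and minimum_desired_capacity is
  // 60 MB / 0.60 = 100 MB, which is then raised to at least MetaspaceSize.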
1502 
1503   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
1504   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
1505                            minimum_free_percentage, maximum_used_percentage);
1506   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);







1507 
1508 
1509   size_t shrink_bytes = 0;
1510   if (capacity_until_GC < minimum_desired_capacity) {
1511     // If the capacity below the metaspace HWM is less than the minimum
1512     // desired capacity, increment the HWM.
1513     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1514     expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
1515     // Don't expand unless it's significant
1516     if (expand_bytes >= MinMetaspaceExpansion) {
1517       size_t new_capacity_until_GC = 0;
1518       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1519       assert(succeeded, "Should always successfully increment HWM when at safepoint");
1520 
1521       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1522                                                new_capacity_until_GC,
1523                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
1524       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",





1525                                minimum_desired_capacity / (double) K,
1526                                expand_bytes / (double) K,
1527                                MinMetaspaceExpansion / (double) K,
1528                                new_capacity_until_GC / (double) K);
1529     }

1530     return;
1531   }
1532 
1533   // No expansion, now see if we want to shrink
1534   // We would never want to shrink more than this
1535   assert(capacity_until_GC >= minimum_desired_capacity,
1536          SIZE_FORMAT " >= " SIZE_FORMAT,
1537          capacity_until_GC, minimum_desired_capacity);
1538   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1539 
1540   // Should shrinking be considered?
1541   if (MaxMetaspaceFreeRatio < 100) {
1542     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1543     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1544     const double max_tmp = used_after_gc / minimum_used_percentage;
1545     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1546     maximum_desired_capacity = MAX2(maximum_desired_capacity,
1547                                     MetaspaceSize);
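    // Illustrative counterpart (assumed values): with used_after_gc at 60 MB and
    // MaxMetaspaceFreeRatio at 70, minimum_used_percentage is 0.30, so
    // maximum_desired_capacity is 60 MB / 0.30 = 200 MB, again clamped to at
    // least MetaspaceSize.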
1548     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
1549                              maximum_free_percentage, minimum_used_percentage);
1550     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
1551                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);








1552 
1553     assert(minimum_desired_capacity <= maximum_desired_capacity,
1554            "sanity check");
1555 
1556     if (capacity_until_GC > maximum_desired_capacity) {
1557       // Capacity too large, compute shrinking size
1558       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1559       // We don't want to shrink all the way back to initSize if people call
1560       // System.gc(), because some programs do that between "phases" and then
1561       // we'd just have to grow the heap up again for the next phase.  So we
1562       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1563       // on the third call, and 100% by the fourth call.  But if we recompute
1564       // size without shrinking, it goes back to 0%.
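      // For illustration only (sizes assumed): if the undamped shrink would be
      // 4 MB, successive recomputations release roughly 0, 410 KB, 1.6 MB and
      // then the full 4 MB, since _shrink_factor steps through 0, 10, 40, 100.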
1565       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1566 
1567       shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1568 
1569       assert(shrink_bytes <= max_shrink_bytes,
1570              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1571              shrink_bytes, max_shrink_bytes);
1572       if (current_shrink_factor == 0) {
1573         _shrink_factor = 10;
1574       } else {
1575         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1576       }
1577       log_trace(gc, metaspace)("    shrinking:  initSize: %.1fK  maximum_desired_capacity: %.1fK",
1578                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
1579       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
1580                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);













1581     }
1582   }
1583 
1584   // Don't shrink unless it's significant
1585   if (shrink_bytes >= MinMetaspaceExpansion &&
1586       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1587     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1588     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1589                                              new_capacity_until_GC,
1590                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
1591   }
1592 }
1593 
1594 // Metadebug methods
1595 
1596 void Metadebug::init_allocation_fail_alot_count() {
1597   if (MetadataAllocationFailALot) {
1598     _allocation_fail_alot_count =
1599       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
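    // Roughly speaking (illustrative, assuming the default interval of 1000):
    // os::random()/(max_jint+1.0) is in [0, 1), so the computed count lands in
    // the range [1, MetadataAllocationFailALotInterval].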
1600   }
1601 }
1602 
1603 #ifdef ASSERT
1604 bool Metadebug::test_metadata_failure() {
1605   if (MetadataAllocationFailALot &&
1606       Threads::is_vm_complete()) {
1607     if (_allocation_fail_alot_count > 0) {
1608       _allocation_fail_alot_count--;
1609     } else {
1610       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");



1611       init_allocation_fail_alot_count();
1612       return true;
1613     }
1614   }
1615   return false;
1616 }
1617 #endif
1618 
1619 // ChunkManager methods
1620 
1621 size_t ChunkManager::free_chunks_total_words() {
1622   return _free_chunks_total;
1623 }
1624 
1625 size_t ChunkManager::free_chunks_total_bytes() {
1626   return free_chunks_total_words() * BytesPerWord;
1627 }
1628 
1629 size_t ChunkManager::free_chunks_count() {
1630 #ifdef ASSERT


1735 
1736 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1737   assert_lock_strong(SpaceManager::expand_lock());
1738 
1739   slow_locked_verify();
1740 
1741   Metachunk* chunk = NULL;
1742   if (list_index(word_size) != HumongousIndex) {
1743     ChunkList* free_list = find_free_chunks_list(word_size);
1744     assert(free_list != NULL, "Sanity check");
1745 
1746     chunk = free_list->head();
1747 
1748     if (chunk == NULL) {
1749       return NULL;
1750     }
1751 
1752     // Remove the chunk as the head of the list.
1753     free_list->remove_chunk(chunk);
1754 
1755     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,


1756                                        p2i(free_list), p2i(chunk), chunk->word_size());

1757   } else {
1758     chunk = humongous_dictionary()->get_chunk(
1759       word_size,
1760       FreeBlockDictionary<Metachunk>::atLeast);
1761 
1762     if (chunk == NULL) {
1763       return NULL;
1764     }
1765 
1766     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
1767                                     chunk->word_size(), word_size, chunk->word_size() - word_size);





1768   }
1769 
1770   // Chunk is being removed from the chunks free list.
1771   dec_free_chunks_total(chunk->word_size());
1772 
1773   // Remove it from the links to this freelist
1774   chunk->set_next(NULL);
1775   chunk->set_prev(NULL);
1776 #ifdef ASSERT
1777   // Chunk is no longer on any freelist. Setting to false makes container_count_slow()
1778   // work.
1779   chunk->set_is_tagged_free(false);
1780 #endif
1781   chunk->container()->inc_container_count();
1782 
1783   slow_locked_verify();
1784   return chunk;
1785 }
1786 
1787 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1788   assert_lock_strong(SpaceManager::expand_lock());
1789   slow_locked_verify();
1790 
1791   // Take from the beginning of the list
1792   Metachunk* chunk = free_chunks_get(word_size);
1793   if (chunk == NULL) {
1794     return NULL;
1795   }
1796 
1797   assert((word_size <= chunk->word_size()) ||
1798          (list_index(chunk->word_size()) == HumongousIndex),
1799          "Non-humongous variable sized chunk");
1800   LogHandle(gc, metaspace, freelist) log;
1801   if (log.is_debug()) {
1802     size_t list_count;
1803     if (list_index(word_size) < HumongousIndex) {
1804       ChunkList* list = find_free_chunks_list(word_size);
1805       list_count = list->count();
1806     } else {
1807       list_count = humongous_dictionary()->total_count();
1808     }
1809     log.debug("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",

1810                p2i(this), p2i(chunk), chunk->word_size(), list_count);
1811     ResourceMark rm;
1812     locked_print_free_chunks(log.debug_stream());
1813   }
1814 
1815   return chunk;
1816 }
1817 
1818 void ChunkManager::print_on(outputStream* out) const {
1819   const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics(out);


1820 }
1821 
1822 // SpaceManager methods
1823 
1824 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1825                                            size_t* chunk_word_size,
1826                                            size_t* class_chunk_word_size) {
1827   switch (type) {
1828   case Metaspace::BootMetaspaceType:
1829     *chunk_word_size = Metaspace::first_chunk_word_size();
1830     *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1831     break;
1832   case Metaspace::ROMetaspaceType:
1833     *chunk_word_size = SharedReadOnlySize / wordSize;
1834     *class_chunk_word_size = ClassSpecializedChunk;
1835     break;
1836   case Metaspace::ReadWriteMetaspaceType:
1837     *chunk_word_size = SharedReadWriteSize / wordSize;
1838     *class_chunk_word_size = ClassSpecializedChunk;
1839     break;


1979       chunk_word_size = medium_chunk_size();
1980     }
1981   } else {
1982     chunk_word_size = medium_chunk_size();
1983   }
1984 
1985   // Might still need a humongous chunk.  Enforce
1986   // humongous allocation sizes to be aligned up to
1987   // the smallest chunk size.
1988   size_t if_humongous_sized_chunk =
1989     align_size_up(word_size + Metachunk::overhead(),
1990                   smallest_chunk_size());
1991   chunk_word_size =
1992     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
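  // Illustrative sizing (assumed numbers): a 70000-word humongous request plus a
  // few words of Metachunk overhead, aligned up to a 128-word smallest chunk
  // size, would yield a chunk_word_size of 70016 words.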
1993 
1994   assert(!SpaceManager::is_humongous(word_size) ||
1995          chunk_word_size == if_humongous_sized_chunk,
1996          "Size calculation is wrong, word_size " SIZE_FORMAT
1997          " chunk_word_size " SIZE_FORMAT,
1998          word_size, chunk_word_size);
1999   LogHandle(gc, metaspace, alloc) log;
2000   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
2001     log.debug("Metadata humongous allocation:");
2002     log.debug("  word_size " PTR_FORMAT, word_size);
2003     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
2004     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());


2005   }
2006   return chunk_word_size;
2007 }
2008 
2009 void SpaceManager::track_metaspace_memory_usage() {
2010   if (is_init_completed()) {
2011     if (is_class()) {
2012       MemoryService::track_compressed_class_memory_usage();
2013     }
2014     MemoryService::track_metaspace_memory_usage();
2015   }
2016 }
2017 
2018 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2019   assert(vs_list()->current_virtual_space() != NULL,
2020          "Should have been set");
2021   assert(current_chunk() == NULL ||
2022          current_chunk()->allocate(word_size) == NULL,
2023          "Don't need to expand");
2024   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2025 
2026   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2027     size_t words_left = 0;
2028     size_t words_used = 0;
2029     if (current_chunk() != NULL) {
2030       words_left = current_chunk()->free_word_size();
2031       words_used = current_chunk()->used_word_size();
2032     }
2033     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",


2034                                        word_size, words_used, words_left);
2035   }
2036 
2037   // Get another chunk
2038   size_t grow_chunks_by_words = calc_chunk_size(word_size);
2039   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2040 
2041   MetaWord* mem = NULL;
2042 
2043   // If a chunk was available, add it to the in-use chunk list
2044   // and do an allocation from it.
2045   if (next != NULL) {
2046     // Add to this manager's list of chunks in use.
2047     add_chunk(next, false);
2048     mem = next->allocate(word_size);
2049   }
2050 
2051   // Track metaspace memory usage statistic.
2052   track_metaspace_memory_usage();
2053 


2105 void SpaceManager::inc_used_metrics(size_t words) {
2106   // Add to the per SpaceManager total
2107   Atomic::add_ptr(words, &_allocated_blocks_words);
2108   // Add to the global total
2109   MetaspaceAux::inc_used(mdtype(), words);
2110 }
2111 
2112 void SpaceManager::dec_total_from_size_metrics() {
2113   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2114   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2115   // Also deduct the overhead per Metachunk
2116   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2117 }
2118 
2119 void SpaceManager::initialize() {
2120   Metadebug::init_allocation_fail_alot_count();
2121   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2122     _chunks_in_use[i] = NULL;
2123   }
2124   _current_chunk = NULL;
2125   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));


2126 }
2127 
2128 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
2129   if (chunks == NULL) {
2130     return;
2131   }
2132   ChunkList* list = free_chunks(index);
2133   assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
2134   assert_lock_strong(SpaceManager::expand_lock());
2135   Metachunk* cur = chunks;
2136 
2137   // This returns chunks one at a time.  If a new
2138   // class List can be created that is a base class
2139   // of FreeList then something like FreeList::prepend()
2140   // can be used in place of this loop
2141   // can be used in place of this loop.
2142     assert(cur->container() != NULL, "Container should have been set");
2143     cur->container()->dec_container_count();
2144     // Capture the next link before it is changed
2145     // by the call to return_chunk_at_head();
2146     Metachunk* next = cur->next();

2147     DEBUG_ONLY(cur->set_is_tagged_free(true);)
2148     list->return_chunk_at_head(cur);
2149     cur = next;
2150   }
2151 }
2152 
2153 SpaceManager::~SpaceManager() {
2154   // This call uses this->_lock, which can't be done while holding expand_lock()
2155   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2156          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2157          " allocated_chunks_words() " SIZE_FORMAT,
2158          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2159 
2160   MutexLockerEx fcl(SpaceManager::expand_lock(),
2161                     Mutex::_no_safepoint_check_flag);
2162 
2163   chunk_manager()->slow_locked_verify();
2164 
2165   dec_total_from_size_metrics();
2166 
2167   LogHandle(gc, metaspace, freelist) log;
2168   if (log.is_trace()) {
2169     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
2170     ResourceMark rm;
2171     locked_print_chunks_in_use_on(log.trace_stream());
2172   }
2173 
2174   // Do not mangle freed Metachunks.  The chunk size inside Metachunks
2175   // is needed during the freeing of VirtualSpaceNodes.
2176 
2177   // Have to update before the chunks_in_use lists are emptied
2178   // below.
2179   chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2180                                          sum_count_in_chunks_in_use());
2181 
2182   // Add all the chunks in use by this space manager
2183   // to the global list of free chunks.
2184 
2185   // Follow each list of chunks-in-use and add them to the
2186   // free lists.  Each list is NULL terminated.
2187 
2188   for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2189     log.trace("returned " SIZE_FORMAT " %s chunks to freelist", sum_count_in_chunks_in_use(i), chunk_size_name(i));




2190     Metachunk* chunks = chunks_in_use(i);
2191     chunk_manager()->return_chunks(i, chunks);
2192     set_chunks_in_use(i, NULL);
2193     log.trace("updated freelist count " SSIZE_FORMAT " %s", chunk_manager()->free_chunks(i)->count(), chunk_size_name(i));




2194     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2195   }
2196 
2197   // The medium chunk case may be optimized by passing the head and
2198   // tail of the medium chunk list to add_at_head().  The tail is often
2199   // the current chunk but there are probably exceptions.
2200 
2201   // Humongous chunks
2202   log.trace("returned " SIZE_FORMAT " %s humongous chunks to dictionary",
2203             sum_count_in_chunks_in_use(HumongousIndex), chunk_size_name(HumongousIndex));
2204   log.trace("Humongous chunk dictionary: ");



2205   // Humongous chunks are never the current chunk.
2206   Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2207 
2208   while (humongous_chunks != NULL) {
2209 #ifdef ASSERT
2210     humongous_chunks->set_is_tagged_free(true);
2211 #endif
2212     log.trace(PTR_FORMAT " (" SIZE_FORMAT ") ", p2i(humongous_chunks), humongous_chunks->word_size());




2213     assert(humongous_chunks->word_size() == (size_t)
2214            align_size_up(humongous_chunks->word_size(),
2215                              smallest_chunk_size()),
2216            "Humongous chunk size is wrong: word size " SIZE_FORMAT
2217            " granularity " SIZE_FORMAT,
2218            humongous_chunks->word_size(), smallest_chunk_size());
2219     Metachunk* next_humongous_chunks = humongous_chunks->next();
2220     humongous_chunks->container()->dec_container_count();
2221     chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2222     humongous_chunks = next_humongous_chunks;
2223   }
2224   log.trace("updated dictionary count " SIZE_FORMAT " %s", chunk_manager()->humongous_dictionary()->total_count(), chunk_size_name(HumongousIndex));





2225   chunk_manager()->slow_locked_verify();
2226 }
2227 
2228 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2229   switch (index) {
2230     case SpecializedIndex:
2231       return "Specialized";
2232     case SmallIndex:
2233       return "Small";
2234     case MediumIndex:
2235       return "Medium";
2236     case HumongousIndex:
2237       return "Humongous";
2238     default:
2239       return NULL;
2240   }
2241 }
2242 
2243 ChunkIndex ChunkManager::list_index(size_t size) {
2244   switch (size) {


2290     // small, so small will be null.  Link this first chunk as the current
2291     // chunk.
2292     if (make_current) {
2293       // Set as the current chunk but otherwise treat as a humongous chunk.
2294       set_current_chunk(new_chunk);
2295     }
2296     // Link at head.  The _current_chunk only points to a humongous chunk for
2297     // the null class loader metaspace (the class and data virtual space managers);
2298     // humongous chunks are otherwise only linked at the head, so _current_chunk
2299     // will not point to the tail of the humongous chunks list.
2300     new_chunk->set_next(chunks_in_use(HumongousIndex));
2301     set_chunks_in_use(HumongousIndex, new_chunk);
2302 
2303     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2304   }
2305 
2306   // Add to the running sum of capacity
2307   inc_size_metrics(new_chunk->word_size());
2308 
2309   assert(new_chunk->is_empty(), "Not ready for reuse");
2310   LogHandle(gc, metaspace, freelist) log;
2311   if (log.is_trace()) {
2312     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT ") ", sum_count_in_chunks_in_use());
2313     ResourceMark rm;
2314     outputStream* out = log.trace_stream();
2315     new_chunk->print_on(out);
2316     chunk_manager()->locked_print_free_chunks(out);
2317   }
2318 }
2319 
2320 void SpaceManager::retire_current_chunk() {
2321   if (current_chunk() != NULL) {
2322     size_t remaining_words = current_chunk()->free_word_size();
2323     if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
2324       block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2325       inc_used_metrics(remaining_words);
2326     }
2327   }
2328 }
2329 
2330 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2331                                        size_t grow_chunks_by_words) {
2332   // Get a chunk from the chunk freelist
2333   Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2334 
2335   if (next == NULL) {
2336     next = vs_list()->get_new_chunk(word_size,
2337                                     grow_chunks_by_words,
2338                                     medium_chunk_bunch());
2339   }
2340 
2341   LogHandle(gc, metaspace, alloc) log;
2342   if (log.is_debug() && next != NULL &&
2343       SpaceManager::is_humongous(next->word_size())) {
2344     log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());

2345   }
2346 
2347   return next;
2348 }
2349 
2350 /*
2351  * The policy is to allocate up to _small_chunk_limit small chunks
2352  * after which only medium chunks are allocated.  This is done to
2353  * reduce fragmentation.  In some cases, this can result in a lot
2354  * of small chunks being allocated to the point where it's not
2355  * possible to expand.  If this happens, there may be no medium chunks
2356  * available and OOME would be thrown.  Instead of doing that,
2357  * if the allocation request size fits in a small chunk, an attempt
2358  * will be made to allocate a small chunk.
2359  */
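// As a concrete (hypothetical) illustration: if _small_chunk_limit were 4, a
// SpaceManager already holding four small chunks is normally served medium
// chunks; only when such a medium chunk cannot be obtained even after expansion
// does this path fall back to one more small chunk instead of reporting OOME.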
2360 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
2361   size_t raw_word_size = get_raw_word_size(word_size);
2362 
2363   if (raw_word_size + Metachunk::overhead() > small_chunk_size()) {
2364     return NULL;


2489   uint i = 0;
2490   size_t used = 0;
2491   size_t capacity = 0;
2492 
2493   // Add up statistics for all chunks in this SpaceManager.
2494   for (ChunkIndex index = ZeroIndex;
2495        index < NumberOfInUseLists;
2496        index = next_chunk_index(index)) {
2497     for (Metachunk* curr = chunks_in_use(index);
2498          curr != NULL;
2499          curr = curr->next()) {
2500       out->print("%d) ", i++);
2501       curr->print_on(out);
2502       curr_total += curr->word_size();
2503       used += curr->used_word_size();
2504       capacity += curr->word_size();
2505       waste += curr->free_word_size() + curr->overhead();
2506     }
2507   }
2508 
2509   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2510     block_freelists()->print_on(out);
2511   }
2512 
2513   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2514   // Free space isn't wasted.
2515   waste -= free;
2516 
2517   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
2518                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2519                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2520 }
2521 
2522 #ifndef PRODUCT
2523 void SpaceManager::mangle_freed_chunks() {
2524   for (ChunkIndex index = ZeroIndex;
2525        index < NumberOfInUseLists;
2526        index = next_chunk_index(index)) {
2527     for (Metachunk* curr = chunks_in_use(index);
2528          curr != NULL;
2529          curr = curr->next()) {


2674 }
2675 
2676 size_t MetaspaceAux::free_chunks_total_bytes() {
2677   return free_chunks_total_words() * BytesPerWord;
2678 }
2679 
2680 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2681   return Metaspace::get_chunk_manager(mdtype) != NULL;
2682 }
2683 
2684 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
2685   if (!has_chunk_free_list(mdtype)) {
2686     return MetaspaceChunkFreeListSummary();
2687   }
2688 
2689   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
2690   return cm->chunk_free_list_summary();
2691 }
2692 
2693 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2694   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
2695                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
















2696 }
2697 

2698 void MetaspaceAux::print_on(outputStream* out) {
2699   Metaspace::MetadataType nct = Metaspace::NonClassType;
2700 
2701   out->print_cr(" Metaspace       "
2702                 "used "      SIZE_FORMAT "K, "
2703                 "capacity "  SIZE_FORMAT "K, "
2704                 "committed " SIZE_FORMAT "K, "
2705                 "reserved "  SIZE_FORMAT "K",
2706                 used_bytes()/K,
2707                 capacity_bytes()/K,
2708                 committed_bytes()/K,
2709                 reserved_bytes()/K);
2710 
2711   if (Metaspace::using_class_space()) {
2712     Metaspace::MetadataType ct = Metaspace::ClassType;
2713     out->print_cr("  class space    "
2714                   "used "      SIZE_FORMAT "K, "
2715                   "capacity "  SIZE_FORMAT "K, "
2716                   "committed " SIZE_FORMAT "K, "
2717                   "reserved "  SIZE_FORMAT "K",


3034                                               compressed_class_space_size()));
3035       }
3036     }
3037   }
3038 
3039   // If we got here then the metaspace got allocated.
3040   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3041 
3042 #if INCLUDE_CDS
3043   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3044   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3045     FileMapInfo::stop_sharing_and_unmap(
3046         "Could not allocate metaspace at a compatible address");
3047   }
3048 #endif
3049   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3050                                   UseSharedSpaces ? (address)cds_base : 0);
3051 
3052   initialize_class_space(metaspace_rs);
3053 
3054   if (develop_log_is_enabled(Trace, gc, metaspace)) {
3055     LogHandle(gc, metaspace) log;
3056     ResourceMark rm;
3057     print_compressed_class_space(log.trace_stream(), requested_addr);
3058   }
3059 }
3060 
3061 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3062   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3063                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3064   if (_class_space_list != NULL) {
3065     address base = (address)_class_space_list->current_virtual_space()->bottom();
3066     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3067                  compressed_class_space_size(), p2i(base));
3068     if (requested_addr != 0) {
3069       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3070     }
3071     st->cr();
3072   }
3073 }
3074 
3075 // For UseCompressedClassPointers the class space is reserved above the top of
3076 // the Java heap.  The argument passed in is at the base of the compressed space.
3077 void Metaspace::initialize_class_space(ReservedSpace rs) {


3159     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3160 
3161     if (!_space_list->initialization_succeeded()) {
3162       vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3163     }
3164 
3165 #ifdef _LP64
3166     if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3167       vm_exit_during_initialization("Unable to dump shared archive.",
3168           err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3169                   SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3170                   "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(),
3171                   cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3172     }
3173 
3174     // Set the compressed klass pointer base so that decoding of these pointers works
3175     // properly when creating the shared archive.
3176     assert(UseCompressedOops && UseCompressedClassPointers,
3177       "UseCompressedOops and UseCompressedClassPointers must be set");
3178     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3179     log_develop_trace(gc, metaspace)("Setting_narrow_klass_base to Address: " PTR_FORMAT,

3180                                      p2i(_space_list->current_virtual_space()->bottom()));

3181 
3182     Universe::set_narrow_klass_shift(0);
3183 #endif // _LP64
3184 #endif // INCLUDE_CDS
3185   } else {
3186     // If using shared space, open the file that contains the shared space
3187     // and map in the memory before initializing the rest of metaspace (so
3188     // the addresses don't conflict)
3189     address cds_address = NULL;
3190     if (UseSharedSpaces) {
3191 #if INCLUDE_CDS
3192       FileMapInfo* mapinfo = new FileMapInfo();
3193 
3194       // Open the shared archive file, read and validate the header. If
3195       // initialization fails, shared spaces [UseSharedSpaces] are
3196       // disabled and the file is closed.
3197       // Map in spaces now also
3198       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3199         cds_total = FileMapInfo::shared_spaces_size();
3200         cds_address = (address)mapinfo->header()->region_addr(0);


3347 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3348   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3349   assert(delta_bytes > 0, "Must be");
3350 
3351   size_t before = 0;
3352   size_t after = 0;
3353   MetaWord* res;
3354   bool incremented;
3355 
3356   // Each thread increments the HWM at most once. Even if the thread fails to increment
3357   // the HWM, an allocation is still attempted. This is because another thread must then
3358   // have incremented the HWM and therefore the allocation might still succeed.
3359   do {
3360     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
3361     res = allocate(word_size, mdtype);
3362   } while (!incremented && res == NULL);
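  // Illustrative interleaving (assumed scenario): if this thread loses the race
  // to raise the HWM, another thread has already raised it, so the allocation
  // attempted here may still succeed against that new capacity; the loop retries
  // until either this thread's own increment succeeds or an allocation succeeds.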
3363 
3364   if (incremented) {
3365     tracer()->report_gc_threshold(before, after,
3366                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3367     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);



3368   }
3369 
3370   return res;
3371 }
3372 
3373 // Space allocated in the Metaspace.  This may
3374 // be across several metadata virtual spaces.
3375 char* Metaspace::bottom() const {
3376   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3377   return (char*)vsm()->current_chunk()->bottom();
3378 }
3379 
3380 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3381   if (mdtype == ClassType) {
3382     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3383   } else {
3384     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3385   }
3386 }
3387 


3510     if (result == NULL) {
3511       report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3512     }
3513   }
3514 
3515   // Zero initialize.
3516   Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3517 
3518   return result;
3519 }
3520 
3521 size_t Metaspace::class_chunk_size(size_t word_size) {
3522   assert(using_class_space(), "Has to use class space");
3523   return class_vsm()->calc_chunk_size(word_size);
3524 }
3525 
3526 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3527   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3528 
3529   // If result is still null, we are out of memory.
3530   LogHandle(gc, metaspace, freelist) log;
3531   if (log.is_trace()) {
3532     log.trace("Metaspace allocation failed for size " SIZE_FORMAT, word_size);
3533     ResourceMark rm;
3534     outputStream* out = log.trace_stream();
3535     if (loader_data->metaspace_or_null() != NULL) {
3536       loader_data->dump(out);
3537     }
3538     MetaspaceAux::dump(out);
3539   }
3540 
3541   bool out_of_compressed_class_space = false;
3542   if (is_class_space_allocation(mdtype)) {
3543     Metaspace* metaspace = loader_data->metaspace_non_null();
3544     out_of_compressed_class_space =
3545       MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3546       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3547       CompressedClassSpaceSize;
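    // Illustrative check (hypothetical numbers): with 900 MB committed in the
    // class space, a request whose chunk adds another 200 MB, and
    // CompressedClassSpaceSize of 1 GB, the sum exceeds the limit and the OOME
    // is attributed to "Compressed class space" rather than "Metaspace".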
3548   }
3549 
3550   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3551   const char* space_string = out_of_compressed_class_space ?
3552     "Compressed class space" : "Metaspace";
3553 
3554   report_java_out_of_memory(space_string);
3555 
3556   if (JvmtiExport::should_post_resource_exhausted()) {
3557     JvmtiExport::post_resource_exhausted(
3558         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,


< prev index next >