src/share/vm/memory/metaspace.cpp

 445 
 446   // In preparation for deleting this node, remove all the chunks
 447   // in the node from any freelist.
 448   void purge(ChunkManager* chunk_manager);
 449 
 450   // If an allocation doesn't fit in the current node a new node is created.
 451   // Allocate chunks out of the remaining committed space in this node
 452   // to avoid wasting that memory.
 453   // This always adds up because all the chunk sizes are multiples of
 454   // the smallest chunk size.
 455   void retire(ChunkManager* chunk_manager);
 456 
 457 #ifdef ASSERT
 458   // Debug support
 459   void mangle();
 460 #endif
 461 
 462   void print_on(outputStream* st) const;
 463 };
 464 
 465 #define assert_is_ptr_aligned(ptr, alignment) \
 466   assert(is_ptr_aligned(ptr, alignment),      \
 467          PTR_FORMAT " is not aligned to "     \
 468          SIZE_FORMAT, p2i(ptr), alignment)
 469 
 470 #define assert_is_size_aligned(size, alignment) \
 471   assert(is_size_aligned(size, alignment),      \
 472          SIZE_FORMAT " is not aligned to "      \
 473          SIZE_FORMAT, size, alignment)
 474 
 475 
 476 // Decide if large pages should be committed when the memory is reserved.
 477 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 478   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 479     size_t words = bytes / BytesPerWord;
 480     bool is_class = false; // We never reserve large pages for the class space.
 481     if (MetaspaceGC::can_expand(words, is_class) &&
 482         MetaspaceGC::allowed_expansion() >= words) {
 483       return true;
 484     }
 485   }
 486 
 487   return false;
 488 }
 489 
 490 // bytes is the size of the associated VirtualSpace.
 491 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
 492   assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
 493 
 494 #if INCLUDE_CDS
 495   // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
 496   // a configurable address, generally at the top of the Java heap so other
 497   // memory addresses don't conflict.
 498   if (DumpSharedSpaces) {
 499     bool large_pages = false; // No large pages when dumping the CDS archive.
 500     char* shared_base = align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
 501 
 502     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
 503     if (_rs.is_reserved()) {
 504       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
 505     } else {
 506       // Get a mmap region anywhere if the SharedBaseAddress fails.
 507       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 508     }
 509     if (!_rs.is_reserved()) {
 510       vm_exit_during_initialization("Unable to allocate memory for shared space",
 511         err_msg(SIZE_FORMAT " bytes.", bytes));
 512     }
 513     MetaspaceShared::initialize_shared_rs(&_rs);
 514   } else
 515 #endif
 516   {
 517     bool large_pages = should_commit_large_pages_when_reserving(bytes);
 518 
 519     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 520   }
 521 
 522   if (_rs.is_reserved()) {
 523     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 524     assert(_rs.size() != 0, "Catch if we get a 0 size");
 525     assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
 526     assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
 527 
 528     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 529   }
 530 }
 531 
 532 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 533   Metachunk* chunk = first_chunk();
 534   Metachunk* invalid_chunk = (Metachunk*) top();
 535   while (chunk < invalid_chunk ) {
 536     assert(chunk->is_tagged_free(), "Should be tagged free");
 537     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 538     chunk_manager->remove_chunk(chunk);
 539     assert(chunk->next() == NULL &&
 540            chunk->prev() == NULL,
 541            "Was not removed from its list");
 542     chunk = (Metachunk*) next;
 543   }
 544 }
 545 
 546 #ifdef ASSERT


 846   void track_metaspace_memory_usage();
 847 
 848   // debugging support.
 849 
 850   void dump(outputStream* const out) const;
 851   void print_on(outputStream* st) const;
 852   void locked_print_chunks_in_use_on(outputStream* st) const;
 853 
 854   void verify();
 855   void verify_chunk_size(Metachunk* chunk);
 856 #ifdef ASSERT
 857   void verify_allocated_blocks_words();
 858 #endif
 859 
 860   // This adjusts the given size to be at least the minimum allocation size in
 861   // words for data in metaspace.  Essentially the minimum size is currently 3 words.
 862   size_t get_allocation_word_size(size_t word_size) {
 863     size_t byte_size = word_size * BytesPerWord;
 864 
 865     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
 866     raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());
 867 
 868     size_t raw_word_size = raw_bytes_size / BytesPerWord;
 869     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
 870 
 871     return raw_word_size;
 872   }
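A worked example of the rounding above, assuming a 64-bit VM (BytesPerWord == 8), sizeof(Metablock) == 24 and an 8-byte Metachunk::object_alignment() (assumed values, consistent with the 3-word minimum mentioned in the comment):

    word_size = 1  ->  byte_size =  8  ->  MAX2(8, 24)  = 24  ->  raw_word_size = 3
    word_size = 3  ->  byte_size = 24  ->  MAX2(24, 24) = 24  ->  raw_word_size = 3
    word_size = 5  ->  byte_size = 40  ->  MAX2(40, 24) = 40  ->  raw_word_size = 5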
 873 };
 874 
 875 uint const SpaceManager::_small_chunk_limit = 4;
 876 
 877 const char* SpaceManager::_expand_lock_name =
 878   "SpaceManager chunk allocation lock";
 879 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
 880 Mutex* const SpaceManager::_expand_lock =
 881   new Mutex(SpaceManager::_expand_lock_rank,
 882             SpaceManager::_expand_lock_name,
 883             Mutex::_allow_vm_block_flag,
 884             Monitor::_safepoint_check_never);
 885 
 886 void VirtualSpaceNode::inc_container_count() {


1051 }
1052 
1053 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1054   assert_lock_strong(SpaceManager::expand_lock());
1055   Metachunk* result = take_from_committed(chunk_word_size);
1056   if (result != NULL) {
1057     inc_container_count();
1058   }
1059   return result;
1060 }
1061 
1062 bool VirtualSpaceNode::initialize() {
1063 
1064   if (!_rs.is_reserved()) {
1065     return false;
1066   }
1067 
1068   // These are necessary restrictions to make sure that the virtual space always
1069   // grows in steps of Metaspace::commit_alignment(). If both base and size are
1070   // aligned only the middle alignment of the VirtualSpace is used.
1071   assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
1072   assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
1073 
1074   // ReservedSpaces marked as special will have the entire memory
1075   // pre-committed. Setting a committed size will make sure that
1076   // committed_size and actual_committed_size agree.
1077   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1078 
1079   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1080                                             Metaspace::commit_alignment());
1081   if (result) {
1082     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1083         "Checking that the pre-committed memory was registered by the VirtualSpace");
1084 
1085     set_top((MetaWord*)virtual_space()->low());
1086     set_reserved(MemRegion((HeapWord*)_rs.base(),
1087                  (HeapWord*)(_rs.base() + _rs.size())));
1088 
1089     assert(reserved()->start() == (HeapWord*) _rs.base(),
1090            "Reserved start was not set properly " PTR_FORMAT
1091            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1092     assert(reserved()->word_size() == _rs.size() / BytesPerWord,


1306 }
1307 
1308 // Allocate another meta virtual space and add it to the list.
1309 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
1310   assert_lock_strong(SpaceManager::expand_lock());
1311 
1312   if (is_class()) {
1313     assert(false, "We currently don't support more than one VirtualSpace for"
1314                   " the compressed class space. The initialization of the"
1315                   " CCS uses another code path and should not hit this path.");
1316     return false;
1317   }
1318 
1319   if (vs_word_size == 0) {
1320     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
1321     return false;
1322   }
1323 
1324   // Reserve the space
1325   size_t vs_byte_size = vs_word_size * BytesPerWord;
1326   assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());
1327 
1328   // Allocate the meta virtual space and initialize it.
1329   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
1330   if (!new_entry->initialize()) {
1331     delete new_entry;
1332     return false;
1333   } else {
1334     assert(new_entry->reserved_words() == vs_word_size,
1335         "Reserved memory size differs from requested memory size");
1336     // ensure lock-free iteration sees fully initialized node
1337     OrderAccess::storestore();
1338     link_vs(new_entry);
1339     return true;
1340   }
1341 }
1342 
1343 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1344   if (virtual_space_list() == NULL) {
1345       set_virtual_space_list(new_entry);
1346   } else {


1361   }
1362 }
1363 
1364 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1365                                       size_t min_words,
1366                                       size_t preferred_words) {
1367   size_t before = node->committed_words();
1368 
1369   bool result = node->expand_by(min_words, preferred_words);
1370 
1371   size_t after = node->committed_words();
1372 
1373   // after and before can be the same if the memory was pre-committed.
1374   assert(after >= before, "Inconsistency");
1375   inc_committed_words(after - before);
1376 
1377   return result;
1378 }
1379 
1380 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
1381   assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
1382   assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
1383   assert(min_words <= preferred_words, "Invalid arguments");
1384 
1385   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
1386     return  false;
1387   }
1388 
1389   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
1390   if (allowed_expansion_words < min_words) {
1391     return false;
1392   }
1393 
1394   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
1395 
1396   // Commit more memory from the current virtual space.
1397   bool vs_expanded = expand_node_by(current_virtual_space(),
1398                                     min_words,
1399                                     max_expansion_words);
1400   if (vs_expanded) {
1401     return true;
1402   }
1403   retire_current_virtual_space();
1404 
1405   // Get another virtual space.
1406   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
1407   grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
1408 
1409   if (create_new_virtual_space(grow_vs_words)) {
1410     if (current_virtual_space()->is_pre_committed()) {
1411       // The memory was pre-committed, so we are done here.
1412       assert(min_words <= current_virtual_space()->committed_words(),
1413           "The new VirtualSpace was pre-committed, so it"
1414           " should be large enough to fit the alloc request.");
1415       return true;
1416     }
1417 
1418     return expand_node_by(current_virtual_space(),
1419                           min_words,
1420                           max_expansion_words);
1421   }
1422 
1423   return false;
1424 }
1425 
1426 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
1427 
1428   // Allocate a chunk out of the current virtual space.
1429   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
1430 
1431   if (next != NULL) {
1432     return next;
1433   }
1434 
1435   // The expand amount is currently only determined by the requested sizes
1436   // and not how much committed memory is left in the current virtual space.
1437 
1438   size_t min_word_size       = align_size_up(chunk_word_size,              Metaspace::commit_alignment_words());
1439   size_t preferred_word_size = align_size_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
1440   if (min_word_size >= preferred_word_size) {
1441     // Can happen when humongous chunks are allocated.
1442     preferred_word_size = min_word_size;
1443   }
1444 
1445   bool expanded = expand_by(min_word_size, preferred_word_size);
1446   if (expanded) {
1447     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
1448     assert(next != NULL, "The allocation was expected to succeed after the expansion");
1449   }
1450 
1451    return next;
1452 }
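For illustration, how the two expansion sizes relate, assuming Metaspace::commit_alignment_words() == 8K words (an assumed value):

    chunk_word_size = 1K,   suggested granularity = 64K  ->  expand_by(min = 8K,   preferred = 64K)
    chunk_word_size = 100K, suggested granularity = 64K  ->  expand_by(min = 104K, preferred = 104K)

The second line shows the clamp above: once the aligned minimum exceeds the preferred size, both arguments collapse to the minimum. This is the humongous-chunk case mentioned in the comment.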
1453 
1454 void VirtualSpaceList::print_on(outputStream* st) const {
1455   VirtualSpaceListIterator iter(virtual_space_list());
1456   while (iter.repeat()) {
1457     VirtualSpaceNode* node = iter.get_next();
1458     node->print_on(st);
1459   }


1471 //
1472 // After the GC the compute_new_size() for MetaspaceGC is called to
1473 // resize the capacity of the metaspaces.  The current implementation
1474 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
1475 // to resize the Java heap by some GC's.  New flags can be implemented
1476 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
1477 // free space is desirable in the metaspace capacity to decide how much
1478 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
1479 // free space is desirable in the metaspace capacity before decreasing
1480 // the HWM.
1481 
1482 // Calculate the amount to increase the high water mark (HWM).
1483 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1484 // another expansion is not requested too soon.  If that is not
1485 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
1486 // If that is still not enough, expand by the size of the allocation
1487 // plus some.
1488 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
1489   size_t min_delta = MinMetaspaceExpansion;
1490   size_t max_delta = MaxMetaspaceExpansion;
1491   size_t delta = align_size_up(bytes, Metaspace::commit_alignment());
1492 
1493   if (delta <= min_delta) {
1494     delta = min_delta;
1495   } else if (delta <= max_delta) {
1496     // Don't want to hit the high water mark on the next
1497     // allocation so make the delta greater than just enough
1498     // for this allocation.
1499     delta = max_delta;
1500   } else {
1501     // This allocation is large but the next ones are probably not
1502     // so increase by the minimum.
1503     delta = delta + min_delta;
1504   }
1505 
1506   assert_is_size_aligned(delta, Metaspace::commit_alignment());
1507 
1508   return delta;
1509 }
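A worked example of the three branches, assuming Metaspace::commit_alignment() == 64K and MinMetaspaceExpansion / MaxMetaspaceExpansion of 256K / 4M (illustrative values, not necessarily the configured ones):

    bytes = 10K  ->  aligned to 64K,  64K <= 256K      ->  delta = 256K (MinMetaspaceExpansion)
    bytes = 1M   ->  aligned to 1M,   256K < 1M <= 4M  ->  delta = 4M   (MaxMetaspaceExpansion)
    bytes = 8M   ->  aligned to 8M,   8M > 4M          ->  delta = 8M + 256K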
1510 
1511 size_t MetaspaceGC::capacity_until_GC() {
1512   size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
1513   assert(value >= MetaspaceSize, "Not initialized properly?");
1514   return value;
1515 }
1516 
1517 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
1518   assert_is_size_aligned(v, Metaspace::commit_alignment());
1519 
1520   size_t capacity_until_GC = (size_t) _capacity_until_GC;
1521   size_t new_value = capacity_until_GC + v;
1522 
1523   if (new_value < capacity_until_GC) {
1524     // The addition wrapped around, set new_value to aligned max value.
1525     new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
1526   }
1527 
1528   intptr_t expected = (intptr_t) capacity_until_GC;
1529   intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);
1530 
1531   if (expected != actual) {
1532     return false;
1533   }
1534 
1535   if (new_cap_until_GC != NULL) {
1536     *new_cap_until_GC = new_value;
1537   }
1538   if (old_cap_until_GC != NULL) {
1539     *old_cap_until_GC = capacity_until_GC;
1540   }
1541   return true;
1542 }
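A return value of false only means another thread updated _capacity_until_GC concurrently, so a caller that needs a raised HWM typically retries. A minimal sketch of such a caller, assuming a hypothetical try_allocate() helper; it mirrors, but does not reproduce, the retry in expand_and_allocate() further down in this file:

    size_t before = 0;
    size_t after  = 0;
    MetaWord* res = NULL;
    bool incremented;
    do {
      // Try to raise the HWM; on CAS failure another thread changed it, so just
      // retry the allocation against the (possibly already raised) capacity.
      incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
      res = try_allocate(word_size);   // hypothetical helper, not from this file
    } while (!incremented && res == NULL);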
1543 
1544 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
1545   assert_is_size_aligned(v, Metaspace::commit_alignment());
1546 
1547   return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
1548 }
1549 
1550 void MetaspaceGC::initialize() {
1551   // Set the high-water mark to MaxMetaspaceSize during VM initialization since
1552   // we can't do a GC during initialization.
1553   _capacity_until_GC = MaxMetaspaceSize;
1554 }
1555 
1556 void MetaspaceGC::post_initialize() {
1557   // Reset the high-water mark once the VM initialization is done.
1558   _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
1559 }
1560 
1561 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
1562   // Check if the compressed class space is full.
1563   if (is_class && Metaspace::using_class_space()) {
1564     size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
1565     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {


1611   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1612 
1613   const double min_tmp = used_after_gc / maximum_used_percentage;
1614   size_t minimum_desired_capacity =
1615     (size_t)MIN2(min_tmp, double(max_uintx));
1616   // Don't shrink less than the initial generation size
1617   minimum_desired_capacity = MAX2(minimum_desired_capacity,
1618                                   MetaspaceSize);
1619 
1620   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
1621   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
1622                            minimum_free_percentage, maximum_used_percentage);
1623   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
1624 
1625 
1626   size_t shrink_bytes = 0;
1627   if (capacity_until_GC < minimum_desired_capacity) {
1628     // The capacity below the metaspace HWM is less than the minimum
1629     // desired capacity, so increase the HWM.
1630     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1631     expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
1632     // Don't expand unless it's significant
1633     if (expand_bytes >= MinMetaspaceExpansion) {
1634       size_t new_capacity_until_GC = 0;
1635       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1636       assert(succeeded, "Should always successfully increment HWM when at safepoint");
1637 
1638       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1639                                                new_capacity_until_GC,
1640                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
1641       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
1642                                minimum_desired_capacity / (double) K,
1643                                expand_bytes / (double) K,
1644                                MinMetaspaceExpansion / (double) K,
1645                                new_capacity_until_GC / (double) K);
1646     }
1647     return;
1648   }
1649 
1650   // No expansion, now see if we want to shrink
1651   // We would never want to shrink more than this


1664                                     MetaspaceSize);
1665     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
1666                              maximum_free_percentage, minimum_used_percentage);
1667     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
1668                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
1669 
1670     assert(minimum_desired_capacity <= maximum_desired_capacity,
1671            "sanity check");
1672 
1673     if (capacity_until_GC > maximum_desired_capacity) {
1674       // Capacity too large, compute shrinking size
1675       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1676       // We don't want to shrink all the way back to initSize if people call
1677       // System.gc(), because some programs do that between "phases" and then
1678       // we'd just have to grow the metaspace again for the next phase.  So we
1679       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1680       // on the third call, and 100% by the fourth call.  But if we recompute
1681       // size without shrinking, it goes back to 0%.
1682       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1683 
1684       shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1685 
1686       assert(shrink_bytes <= max_shrink_bytes,
1687              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1688              shrink_bytes, max_shrink_bytes);
1689       if (current_shrink_factor == 0) {
1690         _shrink_factor = 10;
1691       } else {
1692         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1693       }
1694       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
1695                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
1696       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
1697                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
1698     }
1699   }
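The damping above makes consecutive shrink-eligible recomputations ramp up: each call uses the factor left by the previous one, and MIN2(current_shrink_factor * 4, 100) caps it at 100%. With an illustrative 100M excess over maximum_desired_capacity:

    call 1: current factor   0  ->  shrink_bytes =    0,  next factor  10
    call 2: current factor  10  ->  shrink_bytes ~  10M,  next factor  40
    call 3: current factor  40  ->  shrink_bytes ~  40M,  next factor 100
    call 4: current factor 100  ->  shrink_bytes ~ 100M

As the comment notes, a recomputation that decides not to shrink resets the factor to 0.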
1700 
1701   // Don't shrink unless it's significant
1702   if (shrink_bytes >= MinMetaspaceExpansion &&
1703       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1704     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);


2223 size_t SpaceManager::calc_chunk_size(size_t word_size) {
2224 
2225   // Decide between a small chunk and a medium chunk.  Up to
2226   // _small_chunk_limit small chunks can be allocated.
2227   // After that a medium chunk is preferred.
2228   size_t chunk_word_size;
2229   if (chunks_in_use(MediumIndex) == NULL &&
2230       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2231     chunk_word_size = (size_t) small_chunk_size();
2232     if (word_size + Metachunk::overhead() > small_chunk_size()) {
2233       chunk_word_size = medium_chunk_size();
2234     }
2235   } else {
2236     chunk_word_size = medium_chunk_size();
2237   }
2238 
2239   // Might still need a humongous chunk.  Enforce
2240   // humongous allocation sizes to be aligned up to
2241   // the smallest chunk size.
2242   size_t if_humongous_sized_chunk =
2243     align_size_up(word_size + Metachunk::overhead(),
2244                   smallest_chunk_size());
2245   chunk_word_size =
2246     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2247 
2248   assert(!SpaceManager::is_humongous(word_size) ||
2249          chunk_word_size == if_humongous_sized_chunk,
2250          "Size calculation is wrong, word_size " SIZE_FORMAT
2251          " chunk_word_size " SIZE_FORMAT,
2252          word_size, chunk_word_size);
2253   Log(gc, metaspace, alloc) log;
2254   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
2255     log.debug("Metadata humongous allocation:");
2256     log.debug("  word_size " PTR_FORMAT, word_size);
2257     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
2258     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
2259   }
2260   return chunk_word_size;
2261 }
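A rough example of the selection above, assuming specialized/small/medium chunk sizes of 128/512/8K words and a Metachunk::overhead() of a few words (assumed values):

    word_size = 100,   no medium chunk yet, < 4 small chunks in use  ->  small chunk  (512 words)
    word_size = 600,   overhead pushes it past 512                   ->  medium chunk (8K words)
    word_size = 20000, humongous                                     ->  align_up(20000 + overhead, 128) ~ 20096 words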
2262 
2263 void SpaceManager::track_metaspace_memory_usage() {


3082 
3083 #if INCLUDE_CDS
3084 // Return TRUE if the specified metaspace_base and cds_base are close enough
3085 // to work with compressed klass pointers.
3086 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
3087   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
3088   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3089   address lower_base = MIN2((address)metaspace_base, cds_base);
3090   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
3091                                 (address)(metaspace_base + compressed_class_space_size()));
3092   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
3093 }
3094 #endif
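For illustration, the check accepts any layout where the CDS archive and the compressed class space both fit within one unscaled-encoding range starting at the lower of the two bases; assuming UnscaledClassSpaceMax == 4G and a 1G compressed class space (assumed values):

    cds_base = 0x800000000, shared_spaces_size = 100M, metaspace_base = cds_base + 128M
        ->  span = (metaspace_base + 1G) - cds_base ~ 1.1G  <= 4G  ->  true
    metaspace_base = cds_base + 5G
        ->  span ~ 6G  > 4G                                        ->  false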
3095 
3096 // Try to allocate the metaspace at the requested addr.
3097 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
3098   assert(using_class_space(), "called improperly");
3099   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3100   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
3101          "Metaspace size is too big");
3102   assert_is_ptr_aligned(requested_addr, _reserve_alignment);
3103   assert_is_ptr_aligned(cds_base, _reserve_alignment);
3104   assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
3105 
3106   // Don't use large pages for the class space.
3107   bool large_pages = false;
3108 
3109 #if !(defined(AARCH64) || defined(AIX))
3110   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
3111                                              _reserve_alignment,
3112                                              large_pages,
3113                                              requested_addr);
3114 #else // AARCH64 || AIX
3115   ReservedSpace metaspace_rs;
3116 
3117   // Our compressed klass pointers may fit nicely into the lower 32
3118   // bits.
3119   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
3120     metaspace_rs = ReservedSpace(compressed_class_space_size(),
3121                                  _reserve_alignment,
3122                                  large_pages,
3123                                  requested_addr);
3124   }
3125 
3126   if (! metaspace_rs.is_reserved()) {
3127     // Aarch64: Try to align metaspace so that we can decode a compressed
3128     // klass with a single MOVK instruction.  We can do this iff the
3129     // compressed class base is a multiple of 4G.
3130     // Aix: Search for a place where we can find memory. If we need to load
3131     // the base, 4G alignment is helpful, too.
3132     size_t increment = AARCH64_ONLY(4*)G;
3133     for (char *a = align_ptr_up(requested_addr, increment);
3134          a < (char*)(1024*G);
3135          a += increment) {
3136       if (a == (char *)(32*G)) {
3137         // Go faster from here on. Zero-based is no longer possible.
3138         increment = 4*G;
3139       }
3140 
3141 #if INCLUDE_CDS
3142       if (UseSharedSpaces
3143           && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
3144         // We failed to find an aligned base that will reach.  Fall
3145         // back to using our requested addr.
3146         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3147                                      _reserve_alignment,
3148                                      large_pages,
3149                                      requested_addr);
3150         break;
3151       }
3152 #endif
3153 
3154       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3155                                    _reserve_alignment,
3156                                    large_pages,
3157                                    a);
3158       if (metaspace_rs.is_reserved())
3159         break;
3160     }
3161   }
3162 
3163 #endif // AARCH64 || AIX
3164 
3165   if (!metaspace_rs.is_reserved()) {
3166 #if INCLUDE_CDS
3167     if (UseSharedSpaces) {
3168       size_t increment = align_size_up(1*G, _reserve_alignment);
3169 
3170       // Keep trying to allocate the metaspace, increasing the requested_addr
3171       // by 1GB each time, until we reach an address that will no longer allow
3172       // use of CDS with compressed klass pointers.
3173       char *addr = requested_addr;
3174       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3175              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3176         addr = addr + increment;
3177         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3178                                      _reserve_alignment, large_pages, addr);
3179       }
3180     }
3181 #endif
3182     // If no successful allocation then try to allocate the space anywhere.  If
3183     // that fails then OOM doom.  At this point we cannot try allocating the
3184     // metaspace as if UseCompressedClassPointers is off because too much
3185     // initialization has happened that depends on UseCompressedClassPointers.
3186     // So, UseCompressedClassPointers cannot be turned off at this point.
3187     if (!metaspace_rs.is_reserved()) {
3188       metaspace_rs = ReservedSpace(compressed_class_space_size(),


3252     // Using large pages when dumping the shared archive is currently not implemented.
3253     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3254   }
3255 
3256   size_t page_size = os::vm_page_size();
3257   if (UseLargePages && UseLargePagesInMetaspace) {
3258     page_size = os::large_page_size();
3259   }
3260 
3261   _commit_alignment  = page_size;
3262   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
3263 
3264   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this would
3265   // lose track of whether MaxMetaspaceSize was set on the command line or not.
3266   // This information is needed later to conform to the specification of the
3267   // java.lang.management.MemoryUsage API.
3268   //
3269   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3270   // globals.hpp to the aligned value, but this is not possible, since the
3271   // alignment depends on other flags being parsed.
3272   MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3273 
3274   if (MetaspaceSize > MaxMetaspaceSize) {
3275     MetaspaceSize = MaxMetaspaceSize;
3276   }
3277 
3278   MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
3279 
3280   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3281 
3282   MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3283   MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3284 
3285   CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3286   set_compressed_class_space_size(CompressedClassSpaceSize);
3287 }
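For illustration, with a 4K vm_page_size() and a 64K vm_allocation_granularity() (values typical of Windows, assumed here) and 2M large pages where applicable:

    small pages                    ->  _commit_alignment = 4K,  _reserve_alignment = MAX2(4K, 64K) = 64K
    2M large pages in metaspace    ->  _commit_alignment = 2M,  _reserve_alignment = MAX2(2M, 64K) = 2M

MetaspaceSize, MaxMetaspaceSize, the expansion flags and CompressedClassSpaceSize are then aligned down to these granularities, as done above.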
3288 
3289 void Metaspace::global_initialize() {
3290   MetaspaceGC::initialize();
3291 
3292   // Initialize the alignment for shared spaces.
3293   int max_alignment = os::vm_allocation_granularity();
3294   size_t cds_total = 0;
3295 
3296   MetaspaceShared::set_max_alignment(max_alignment);
3297 
3298   if (DumpSharedSpaces) {
3299 #if INCLUDE_CDS
3300     MetaspaceShared::estimate_regions_size();
3301 
3302     SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
3303     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
3304     SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
3305     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
3306 
3307     // Initialize with the sum of the shared space sizes.  The read-only
3308     // and read write metaspace chunks will be allocated out of this and the
3309     // remainder is the misc code and data chunks.
3310     cds_total = FileMapInfo::shared_spaces_size();
3311     cds_total = align_size_up(cds_total, _reserve_alignment);
3312     _space_list = new VirtualSpaceList(cds_total/wordSize);
3313     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3314 
3315     if (!_space_list->initialization_succeeded()) {
3316       vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3317     }
3318 
3319 #ifdef _LP64
3320     if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3321       vm_exit_during_initialization("Unable to dump shared archive.",
3322           err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3323                   SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3324                   "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(),
3325                   cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3326     }
3327 
3328     // Set the compressed klass pointer base so that decoding of these pointers works
3329     // properly when creating the shared archive.
3330     assert(UseCompressedOops && UseCompressedClassPointers,
3331       "UseCompressedOops and UseCompressedClassPointers must be set");


3338 #endif // INCLUDE_CDS
3339   } else {
3340 #if INCLUDE_CDS
3341     if (UseSharedSpaces) {
3342       // If using shared space, open the file that contains the shared space
3343       // and map in the memory before initializing the rest of metaspace (so
3344       // the addresses don't conflict)
3345       address cds_address = NULL;
3346       FileMapInfo* mapinfo = new FileMapInfo();
3347 
3348       // Open the shared archive file, read and validate the header. If
3349       // initialization fails, shared spaces [UseSharedSpaces] are
3350       // disabled and the file is closed.
3351       // Map in spaces now also
3352       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3353         cds_total = FileMapInfo::shared_spaces_size();
3354         cds_address = (address)mapinfo->header()->region_addr(0);
3355 #ifdef _LP64
3356         if (using_class_space()) {
3357           char* cds_end = (char*)(cds_address + cds_total);
3358           cds_end = align_ptr_up(cds_end, _reserve_alignment);
3359           // If UseCompressedClassPointers is set then allocate the metaspace area
3360           // above the heap and above the CDS area (if it exists).
3361           allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3362           // Map the shared string space after the compressed class pointer
3363           // setup, because the string space relies on that setup to work.
3364           mapinfo->map_string_regions();
3365         }
3366 #endif // _LP64
3367       } else {
3368         assert(!mapinfo->is_open() && !UseSharedSpaces,
3369                "archive file not closed or shared spaces not disabled.");
3370       }
3371     }
3372 #endif // INCLUDE_CDS
3373 
3374 #ifdef _LP64
3375     if (!UseSharedSpaces && using_class_space()) {
3376       char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3377       allocate_metaspace_compressed_klass_ptrs(base, 0);
3378     }
3379 #endif // _LP64
3380 
3381     // Initialize these before initializing the VirtualSpaceList
3382     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3383     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3384     // Make the first class chunk bigger than a medium chunk so it's not put
3385     // on the medium chunk list.  The next chunk will be small and progress
3386     // from there.  This size was calculated by running -version.
3387     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3388                                        (CompressedClassSpaceSize/BytesPerWord)*2);
3389     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3390     // Arbitrarily set the initial virtual space to a multiple
3391     // of the boot class loader size.
3392     size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3393     word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3394 
3395     // Initialize the list of virtual spaces.
3396     _space_list = new VirtualSpaceList(word_size);
3397     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3398 
3399     if (!_space_list->initialization_succeeded()) {
3400       vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3401     }
3402   }
3403 
3404   _tracer = new MetaspaceTracer();
3405 }
3406 
3407 void Metaspace::post_initialize() {
3408   MetaspaceGC::post_initialize();
3409 }
3410 
3411 void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) {
3412   Metachunk* chunk = get_initialization_chunk(type, mdtype);
3413   if (chunk != NULL) {


3455     // Allocate SpaceManager for classes.
3456     _class_vsm = new SpaceManager(ClassType, lock);
3457   }
3458 
3459   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3460 
3461   // Allocate chunk for metadata objects
3462   initialize_first_chunk(type, NonClassType);
3463 
3464   // Allocate chunk for class metadata objects
3465   if (using_class_space()) {
3466     initialize_first_chunk(type, ClassType);
3467   }
3468 
3469   _alloc_record_head = NULL;
3470   _alloc_record_tail = NULL;
3471 }
3472 
3473 size_t Metaspace::align_word_size_up(size_t word_size) {
3474   size_t byte_size = word_size * wordSize;
3475   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3476 }
3477 
3478 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3479   // DumpSharedSpaces doesn't use class metadata area (yet)
3480   // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3481   if (is_class_space_allocation(mdtype)) {
3482     return  class_vsm()->allocate(word_size);
3483   } else {
3484     return  vsm()->allocate(word_size);
3485   }
3486 }
3487 
3488 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3489   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3490   assert(delta_bytes > 0, "Must be");
3491 
3492   size_t before = 0;
3493   size_t after = 0;
3494   MetaWord* res;
3495   bool incremented;


4126   static int get_random_position() {
4127     return os::random() % num_chunks;
4128   }
4129 
4130   // Asserts that ChunkManager counters match expectations.
4131   void assert_counters() {
4132     assert(_vsn.container_count() == num_chunks - _chunks_in_chunkmanager, "vsn counter mismatch.");
4133     assert(_cm.free_chunks_count() == _chunks_in_chunkmanager, "cm counter mismatch.");
4134     assert(_cm.free_chunks_total_words() == _words_in_chunkmanager, "cm counter mismatch.");
4135   }
4136 
4137   // Get a random chunk size. Equal chance to get spec/med/small chunk size or
4138   // a humongous chunk size. The latter itself is random in the range of [med+spec..4*med).
4139   size_t get_random_chunk_size() {
4140     const size_t sizes [] = { SpecializedChunk, SmallChunk, MediumChunk };
4141     const int rand = os::random() % 4;
4142     if (rand < 3) {
4143       return sizes[rand];
4144     } else {
4145       // Note: this affects the max. size of space (see _vsn initialization in ctor).
4146       return align_size_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk);
4147     }
4148   }
4149 
4150   // Starting at pool index <start>+1, find the next chunk tagged as either free or in use, depending
4151   // on <is_free>. Search wraps. Returns its position, or -1 if no matching chunk was found.
4152   int next_matching_chunk(int start, bool is_free) const {
4153     assert(start >= 0 && start < num_chunks, "invalid parameter");
4154     int pos = start;
4155     do {
4156       if (++pos == num_chunks) {
4157         pos = 0;
4158       }
4159       if (_pool[pos]->is_tagged_free() == is_free) {
4160         return pos;
4161       }
4162     } while (pos != start);
4163     return -1;
4164   }
4165 
4166   // A structure to keep information about a chunk list including which


4273     }
4274     // Before the chunks are returned, they should be tagged in use.
4275     for (int i = 0; i < aChunkList.num; i ++) {
4276       assert(!aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4277     }
4278     _cm.return_chunk_list(aChunkList.index, aChunkList.head);
4279     _chunks_in_chunkmanager += aChunkList.num;
4280     _words_in_chunkmanager += aChunkList.size;
4281     // After all chunks are returned, check that they are now tagged free.
4282     for (int i = 0; i < aChunkList.num; i ++) {
4283       assert(aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4284     }
4285     assert_counters();
4286     _cm.locked_verify();
4287     return aChunkList.num;
4288   }
4289 
4290 public:
4291 
4292   ChunkManagerReturnTestImpl()
4293     : _vsn(align_size_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
4294     , _cm(SpecializedChunk, SmallChunk, MediumChunk)
4295     , _chunks_in_chunkmanager(0)
4296     , _words_in_chunkmanager(0)
4297   {
4298     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4299     // Allocate virtual space and allocate random chunks. Keep these chunks in the _pool. These chunks are
4300     // "in use", because they have not yet been added to any chunk manager.
4301     _vsn.initialize();
4302     _vsn.expand_by(_vsn.reserved_words(), _vsn.reserved_words());
4303     for (int i = 0; i < num_chunks; i ++) {
4304       const size_t size = get_random_chunk_size();
4305       _pool[i] = _vsn.get_chunk_vs(size);
4306       assert(_pool[i] != NULL, "allocation failed");
4307     }
4308     assert_counters();
4309     _cm.locked_verify();
4310   }
4311 
4312   // Test entry point.
4313   // Return some chunks to the chunk manager (return phase). Take some chunks out (take phase). Repeat.




 445 
 446   // In preparation for deleting this node, remove all the chunks
 447   // in the node from any freelist.
 448   void purge(ChunkManager* chunk_manager);
 449 
 450   // If an allocation doesn't fit in the current node a new node is created.
 451   // Allocate chunks out of the remaining committed space in this node
 452   // to avoid wasting that memory.
 453   // This always adds up because all the chunk sizes are multiples of
 454   // the smallest chunk size.
 455   void retire(ChunkManager* chunk_manager);
 456 
 457 #ifdef ASSERT
 458   // Debug support
 459   void mangle();
 460 #endif
 461 
 462   void print_on(outputStream* st) const;
 463 };
 464 
 465 #define assert_is_aligned(value, alignment)                  \
 466   assert(is_aligned((value), (alignment)),                   \
 467          SIZE_FORMAT_HEX " is not aligned to "               \
 468          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
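The unified assert_is_aligned macro replaces the separate pointer and size variants from the old version of this file; the (size_t)(uintptr_t) cast is what lets one macro print either kind of argument with SIZE_FORMAT_HEX. A usage sketch (both forms appear later in this listing):

    assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());   // pointer argument
    assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());   // size argument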






 469 
 470 // Decide if large pages should be committed when the memory is reserved.
 471 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 472   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 473     size_t words = bytes / BytesPerWord;
 474     bool is_class = false; // We never reserve large pages for the class space.
 475     if (MetaspaceGC::can_expand(words, is_class) &&
 476         MetaspaceGC::allowed_expansion() >= words) {
 477       return true;
 478     }
 479   }
 480 
 481   return false;
 482 }
 483 
 484 // bytes is the size of the associated VirtualSpace.
 485 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
 486   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 487 
 488 #if INCLUDE_CDS
 489   // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
 490   // a configurable address, generally at the top of the Java heap so other
 491   // memory addresses don't conflict.
 492   if (DumpSharedSpaces) {
 493     bool large_pages = false; // No large pages when dumping the CDS archive.
 494     char* shared_base = align_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
 495 
 496     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
 497     if (_rs.is_reserved()) {
 498       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
 499     } else {
 500       // Get a mmap region anywhere if the SharedBaseAddress fails.
 501       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 502     }
 503     if (!_rs.is_reserved()) {
 504       vm_exit_during_initialization("Unable to allocate memory for shared space",
 505         err_msg(SIZE_FORMAT " bytes.", bytes));
 506     }
 507     MetaspaceShared::initialize_shared_rs(&_rs);
 508   } else
 509 #endif
 510   {
 511     bool large_pages = should_commit_large_pages_when_reserving(bytes);
 512 
 513     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 514   }
 515 
 516   if (_rs.is_reserved()) {
 517     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 518     assert(_rs.size() != 0, "Catch if we get a 0 size");
 519     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
 520     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
 521 
 522     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 523   }
 524 }
 525 
 526 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 527   Metachunk* chunk = first_chunk();
 528   Metachunk* invalid_chunk = (Metachunk*) top();
 529   while (chunk < invalid_chunk ) {
 530     assert(chunk->is_tagged_free(), "Should be tagged free");
 531     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 532     chunk_manager->remove_chunk(chunk);
 533     assert(chunk->next() == NULL &&
 534            chunk->prev() == NULL,
 535            "Was not removed from its list");
 536     chunk = (Metachunk*) next;
 537   }
 538 }
 539 
 540 #ifdef ASSERT


 840   void track_metaspace_memory_usage();
 841 
 842   // debugging support.
 843 
 844   void dump(outputStream* const out) const;
 845   void print_on(outputStream* st) const;
 846   void locked_print_chunks_in_use_on(outputStream* st) const;
 847 
 848   void verify();
 849   void verify_chunk_size(Metachunk* chunk);
 850 #ifdef ASSERT
 851   void verify_allocated_blocks_words();
 852 #endif
 853 
 854   // This adjusts the given size to be at least the minimum allocation size in
 855   // words for data in metaspace.  Essentially the minimum size is currently 3 words.
 856   size_t get_allocation_word_size(size_t word_size) {
 857     size_t byte_size = word_size * BytesPerWord;
 858 
 859     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
 860     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
 861 
 862     size_t raw_word_size = raw_bytes_size / BytesPerWord;
 863     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
 864 
 865     return raw_word_size;
 866   }
 867 };
 868 
 869 uint const SpaceManager::_small_chunk_limit = 4;
 870 
 871 const char* SpaceManager::_expand_lock_name =
 872   "SpaceManager chunk allocation lock";
 873 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
 874 Mutex* const SpaceManager::_expand_lock =
 875   new Mutex(SpaceManager::_expand_lock_rank,
 876             SpaceManager::_expand_lock_name,
 877             Mutex::_allow_vm_block_flag,
 878             Monitor::_safepoint_check_never);
 879 
 880 void VirtualSpaceNode::inc_container_count() {


1045 }
1046 
1047 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1048   assert_lock_strong(SpaceManager::expand_lock());
1049   Metachunk* result = take_from_committed(chunk_word_size);
1050   if (result != NULL) {
1051     inc_container_count();
1052   }
1053   return result;
1054 }
1055 
1056 bool VirtualSpaceNode::initialize() {
1057 
1058   if (!_rs.is_reserved()) {
1059     return false;
1060   }
1061 
1062   // These are necessary restrictions to make sure that the virtual space always
1063   // grows in steps of Metaspace::commit_alignment(). If both base and size are
1064   // aligned only the middle alignment of the VirtualSpace is used.
1065   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
1066   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
1067 
1068   // ReservedSpaces marked as special will have the entire memory
1069   // pre-committed. Setting a committed size will make sure that
1070   // committed_size and actual_committed_size agree.
1071   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1072 
1073   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1074                                             Metaspace::commit_alignment());
1075   if (result) {
1076     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1077         "Checking that the pre-committed memory was registered by the VirtualSpace");
1078 
1079     set_top((MetaWord*)virtual_space()->low());
1080     set_reserved(MemRegion((HeapWord*)_rs.base(),
1081                  (HeapWord*)(_rs.base() + _rs.size())));
1082 
1083     assert(reserved()->start() == (HeapWord*) _rs.base(),
1084            "Reserved start was not set properly " PTR_FORMAT
1085            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1086     assert(reserved()->word_size() == _rs.size() / BytesPerWord,


1300 }
1301 
1302 // Allocate another meta virtual space and add it to the list.
1303 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
1304   assert_lock_strong(SpaceManager::expand_lock());
1305 
1306   if (is_class()) {
1307     assert(false, "We currently don't support more than one VirtualSpace for"
1308                   " the compressed class space. The initialization of the"
1309                   " CCS uses another code path and should not hit this path.");
1310     return false;
1311   }
1312 
1313   if (vs_word_size == 0) {
1314     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
1315     return false;
1316   }
1317 
1318   // Reserve the space
1319   size_t vs_byte_size = vs_word_size * BytesPerWord;
1320   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
1321 
1322   // Allocate the meta virtual space and initialize it.
1323   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
1324   if (!new_entry->initialize()) {
1325     delete new_entry;
1326     return false;
1327   } else {
1328     assert(new_entry->reserved_words() == vs_word_size,
1329         "Reserved memory size differs from requested memory size");
1330     // ensure lock-free iteration sees fully initialized node
1331     OrderAccess::storestore();
1332     link_vs(new_entry);
1333     return true;
1334   }
1335 }
1336 
1337 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1338   if (virtual_space_list() == NULL) {
1339       set_virtual_space_list(new_entry);
1340   } else {


1355   }
1356 }
1357 
1358 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1359                                       size_t min_words,
1360                                       size_t preferred_words) {
1361   size_t before = node->committed_words();
1362 
1363   bool result = node->expand_by(min_words, preferred_words);
1364 
1365   size_t after = node->committed_words();
1366 
1367   // after and before can be the same if the memory was pre-committed.
1368   assert(after >= before, "Inconsistency");
1369   inc_committed_words(after - before);
1370 
1371   return result;
1372 }
1373 
1374 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
1375   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
1376   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
1377   assert(min_words <= preferred_words, "Invalid arguments");
1378 
1379   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
1380     return  false;
1381   }
1382 
1383   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
1384   if (allowed_expansion_words < min_words) {
1385     return false;
1386   }
1387 
1388   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
1389 
1390   // Commit more memory from the current virtual space.
1391   bool vs_expanded = expand_node_by(current_virtual_space(),
1392                                     min_words,
1393                                     max_expansion_words);
1394   if (vs_expanded) {
1395     return true;
1396   }
1397   retire_current_virtual_space();
1398 
1399   // Get another virtual space.
1400   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
1401   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
1402 
1403   if (create_new_virtual_space(grow_vs_words)) {
1404     if (current_virtual_space()->is_pre_committed()) {
1405       // The memory was pre-committed, so we are done here.
1406       assert(min_words <= current_virtual_space()->committed_words(),
1407           "The new VirtualSpace was pre-committed, so it"
1408           " should be large enough to fit the alloc request.");
1409       return true;
1410     }
1411 
1412     return expand_node_by(current_virtual_space(),
1413                           min_words,
1414                           max_expansion_words);
1415   }
1416 
1417   return false;
1418 }
1419 
1420 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
1421 
1422   // Allocate a chunk out of the current virtual space.
1423   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
1424 
1425   if (next != NULL) {
1426     return next;
1427   }
1428 
1429   // The expand amount is currently only determined by the requested sizes
1430   // and not how much committed memory is left in the current virtual space.
1431 
1432   size_t min_word_size       = align_up(chunk_word_size,              Metaspace::commit_alignment_words());
1433   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
1434   if (min_word_size >= preferred_word_size) {
1435     // Can happen when humongous chunks are allocated.
1436     preferred_word_size = min_word_size;
1437   }
1438 
1439   bool expanded = expand_by(min_word_size, preferred_word_size);
1440   if (expanded) {
1441     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
1442     assert(next != NULL, "The allocation was expected to succeed after the expansion");
1443   }
1444 
1445    return next;
1446 }
1447 
1448 void VirtualSpaceList::print_on(outputStream* st) const {
1449   VirtualSpaceListIterator iter(virtual_space_list());
1450   while (iter.repeat()) {
1451     VirtualSpaceNode* node = iter.get_next();
1452     node->print_on(st);
1453   }


1465 //
1466 // After the GC the compute_new_size() for MetaspaceGC is called to
1467 // resize the capacity of the metaspaces.  The current implementation
1468 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
1469 // to resize the Java heap by some GC's.  New flags can be implemented
1470 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
1471 // free space is desirable in the metaspace capacity to decide how much
1472 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
1473 // free space is desirable in the metaspace capacity before decreasing
1474 // the HWM.
1475 
1476 // Calculate the amount to increase the high water mark (HWM).
1477 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1478 // another expansion is not requested too soon.  If that is not
1479 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
1480 // If that is still not enough, expand by the size of the allocation
1481 // plus some.
1482 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
1483   size_t min_delta = MinMetaspaceExpansion;
1484   size_t max_delta = MaxMetaspaceExpansion;
1485   size_t delta = align_up(bytes, Metaspace::commit_alignment());
1486 
1487   if (delta <= min_delta) {
1488     delta = min_delta;
1489   } else if (delta <= max_delta) {
1490     // Don't want to hit the high water mark on the next
1491     // allocation so make the delta greater than just enough
1492     // for this allocation.
1493     delta = max_delta;
1494   } else {
1495     // This allocation is large but the next ones are probably not
1496     // so increase by the minimum.
1497     delta = delta + min_delta;
1498   }
1499 
1500   assert_is_aligned(delta, Metaspace::commit_alignment());
1501 
1502   return delta;
1503 }
1504 
1505 size_t MetaspaceGC::capacity_until_GC() {
1506   size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
1507   assert(value >= MetaspaceSize, "Not initialized properly?");
1508   return value;
1509 }
1510 
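     // Attempt to raise the HWM by v bytes with a single compare-and-swap.
     // Returns false if another thread updated _capacity_until_GC concurrently;
     // callers are then expected to re-check the (now larger) capacity and retry
     // if it is still insufficient.  Illustrative sketch only, not an actual
     // call site in this file (delta_bytes must be commit-aligned and
     // needs_expansion() is a hypothetical predicate):
     //
     //   size_t before, after;
     //   while (needs_expansion() &&
     //          !MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before)) {
     //     // Lost the race; another thread raised the HWM.  Re-evaluate.
     //   }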
1511 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
1512   assert_is_aligned(v, Metaspace::commit_alignment());
1513 
1514   size_t capacity_until_GC = (size_t) _capacity_until_GC;
1515   size_t new_value = capacity_until_GC + v;
1516 
1517   if (new_value < capacity_until_GC) {
1518     // The addition wrapped around, set new_value to aligned max value.
1519     new_value = align_down(max_uintx, Metaspace::commit_alignment());
1520   }
1521 
1522   intptr_t expected = (intptr_t) capacity_until_GC;
1523   intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);
1524 
1525   if (expected != actual) {
1526     return false;
1527   }
1528 
1529   if (new_cap_until_GC != NULL) {
1530     *new_cap_until_GC = new_value;
1531   }
1532   if (old_cap_until_GC != NULL) {
1533     *old_cap_until_GC = capacity_until_GC;
1534   }
1535   return true;
1536 }
1537 
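     // Atomically lower the HWM by v bytes and return the new value.  Unlike
     // inc_capacity_until_GC() there is no CAS retry; the atomic add always
     // takes effect.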
1538 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
1539   assert_is_aligned(v, Metaspace::commit_alignment());
1540 
1541   return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
1542 }
1543 
1544 void MetaspaceGC::initialize() {
1545   // Set the high-water mark to MaxMetaspaceSize during VM initialization since
1546   // we can't do a GC during initialization.
1547   _capacity_until_GC = MaxMetaspaceSize;
1548 }
1549 
1550 void MetaspaceGC::post_initialize() {
1551   // Reset the high-water mark once the VM initialization is done.
1552   _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
1553 }
1554 
1555 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
1556   // Check if the compressed class space is full.
1557   if (is_class && Metaspace::using_class_space()) {
1558     size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
1559     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {


1605   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1606 
1607   const double min_tmp = used_after_gc / maximum_used_percentage;
1608   size_t minimum_desired_capacity =
1609     (size_t)MIN2(min_tmp, double(max_uintx));
1610   // Don't shrink less than the initial generation size
1611   minimum_desired_capacity = MAX2(minimum_desired_capacity,
1612                                   MetaspaceSize);
1613 
1614   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
1615   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
1616                            minimum_free_percentage, maximum_used_percentage);
1617   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
1618 
1619 
1620   size_t shrink_bytes = 0;
1621   if (capacity_until_GC < minimum_desired_capacity) {
1622     // If the current high-water mark is below the minimum desired
1623     // capacity, raise the HWM.
1624     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1625     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
1626     // Don't expand unless it's significant
1627     if (expand_bytes >= MinMetaspaceExpansion) {
1628       size_t new_capacity_until_GC = 0;
1629       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1630       assert(succeeded, "Should always successfully increment HWM when at safepoint");
1631 
1632       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1633                                                new_capacity_until_GC,
1634                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
1635       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
1636                                minimum_desired_capacity / (double) K,
1637                                expand_bytes / (double) K,
1638                                MinMetaspaceExpansion / (double) K,
1639                                new_capacity_until_GC / (double) K);
1640     }
1641     return;
1642   }
1643 
1644   // No expansion, now see if we want to shrink
1645   // We would never want to shrink more than this


1658                                     MetaspaceSize);
1659     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
1660                              maximum_free_percentage, minimum_used_percentage);
1661     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
1662                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
1663 
1664     assert(minimum_desired_capacity <= maximum_desired_capacity,
1665            "sanity check");
1666 
1667     if (capacity_until_GC > maximum_desired_capacity) {
1668       // Capacity too large, compute shrinking size
1669       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1670       // We don't want to shrink all the way back to initSize if people call
1671       // System.gc(), because some programs do that between "phases" and then
1672       // we'd just have to grow the heap up again for the next phase.  So we
1673       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1674       // on the third call, and 100% by the fourth call.  But if we recompute
1675       // size without shrinking, it goes back to 0%.
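           // For example, with a 100MB excess over maximum_desired_capacity,
           // successive calls shrink by 0MB, 10MB, 40MB and finally the full
           // 100MB (each amount aligned down to the commit alignment).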
1676       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1677 
1678       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
1679 
1680       assert(shrink_bytes <= max_shrink_bytes,
1681              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1682              shrink_bytes, max_shrink_bytes);
1683       if (current_shrink_factor == 0) {
1684         _shrink_factor = 10;
1685       } else {
1686         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1687       }
1688       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
1689                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
1690       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
1691                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
1692     }
1693   }
1694 
1695   // Don't shrink unless it's significant
1696   if (shrink_bytes >= MinMetaspaceExpansion &&
1697       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1698     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);


2217 size_t SpaceManager::calc_chunk_size(size_t word_size) {
2218 
2219   // Decide between a small chunk and a medium chunk.  Up to
2220   // _small_chunk_limit small chunks can be allocated.
2221   // After that a medium chunk is preferred.
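       // For example, a loader that already has a medium chunk in use, or that
       // has already been handed _small_chunk_limit small chunks, gets medium
       // chunks from here on; roughly, only requests larger than a medium chunk
       // end up on the humongous path below.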
2222   size_t chunk_word_size;
2223   if (chunks_in_use(MediumIndex) == NULL &&
2224       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2225     chunk_word_size = (size_t) small_chunk_size();
2226     if (word_size + Metachunk::overhead() > small_chunk_size()) {
2227       chunk_word_size = medium_chunk_size();
2228     }
2229   } else {
2230     chunk_word_size = medium_chunk_size();
2231   }
2232 
2233   // Might still need a humongous chunk.  Enforce
2234   // humongous allocation sizes to be aligned up to
2235   // the smallest chunk size.
2236   size_t if_humongous_sized_chunk =
2237     align_up(word_size + Metachunk::overhead(),
2238              smallest_chunk_size());
2239   chunk_word_size =
2240     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2241 
2242   assert(!SpaceManager::is_humongous(word_size) ||
2243          chunk_word_size == if_humongous_sized_chunk,
2244          "Size calculation is wrong, word_size " SIZE_FORMAT
2245          " chunk_word_size " SIZE_FORMAT,
2246          word_size, chunk_word_size);
2247   Log(gc, metaspace, alloc) log;
2248   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
2249     log.debug("Metadata humongous allocation:");
2250     log.debug("  word_size " PTR_FORMAT, word_size);
2251     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
2252     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
2253   }
2254   return chunk_word_size;
2255 }
2256 
2257 void SpaceManager::track_metaspace_memory_usage() {


3076 
3077 #if INCLUDE_CDS
3078 // Return TRUE if the specified metaspace_base and cds_base are close enough
3079 // to work with compressed klass pointers.
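     // "Close enough" means that the span from the lower of the two bases to the
     // end of the higher region fits within UnscaledClassSpaceMax, so a single
     // compressed klass encoding range can cover both the CDS archive and the
     // class space.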
3080 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
3081   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
3082   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3083   address lower_base = MIN2((address)metaspace_base, cds_base);
3084   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
3085                                 (address)(metaspace_base + compressed_class_space_size()));
3086   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
3087 }
3088 #endif
3089 
3090 // Try to allocate the metaspace at the requested addr.
3091 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
3092   assert(using_class_space(), "called improperly");
3093   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3094   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
3095          "Metaspace size is too big");
3096   assert_is_aligned(requested_addr, _reserve_alignment);
3097   assert_is_aligned(cds_base, _reserve_alignment);
3098   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
3099 
3100   // Don't use large pages for the class space.
3101   bool large_pages = false;
3102 
3103 #if !(defined(AARCH64) || defined(AIX))
3104   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
3105                                              _reserve_alignment,
3106                                              large_pages,
3107                                              requested_addr);
3108 #else // AARCH64 || AIX
3109   ReservedSpace metaspace_rs;
3110 
3111   // Our compressed klass pointers may fit nicely into the lower 32
3112   // bits.
3113   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
3114     metaspace_rs = ReservedSpace(compressed_class_space_size(),
3115                                  _reserve_alignment,
3116                                  large_pages,
3117                                  requested_addr);
3118   }
3119 
3120   if (!metaspace_rs.is_reserved()) {
3121     // Aarch64: Try to align metaspace so that we can decode a compressed
3122     // klass with a single MOVK instruction.  We can do this iff the
3123     // compressed class base is a multiple of 4G.
3124     // Aix: Search for a place where we can find memory. If we need to load
3125     // the base, 4G alignment is helpful, too.
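         // (Roughly speaking: a 4G-aligned base has its low 32 bits zero, so on
         // AArch64 the base bits can be merged into the shifted narrow klass
         // value with a single MOVK of bits 32..47.)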
3126     size_t increment = AARCH64_ONLY(4*)G;
3127     for (char *a = align_up(requested_addr, increment);
3128          a < (char*)(1024*G);
3129          a += increment) {
3130       if (a == (char *)(32*G)) {
3131         // Go faster from here on. Zero-based is no longer possible.
3132         increment = 4*G;
3133       }
3134 
3135 #if INCLUDE_CDS
3136       if (UseSharedSpaces
3137           && !can_use_cds_with_metaspace_addr(a, cds_base)) {
3138         // We failed to find an aligned base that will reach.  Fall
3139         // back to using our requested addr.
3140         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3141                                      _reserve_alignment,
3142                                      large_pages,
3143                                      requested_addr);
3144         break;
3145       }
3146 #endif
3147 
3148       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3149                                    _reserve_alignment,
3150                                    large_pages,
3151                                    a);
3152       if (metaspace_rs.is_reserved())
3153         break;
3154     }
3155   }
3156 
3157 #endif // AARCH64 || AIX
3158 
3159   if (!metaspace_rs.is_reserved()) {
3160 #if INCLUDE_CDS
3161     if (UseSharedSpaces) {
3162       size_t increment = align_up(1*G, _reserve_alignment);
3163 
3164       // Keep trying to allocate the metaspace, increasing the requested_addr
3165       // by 1GB each time, until we reach an address that will no longer allow
3166       // use of CDS with compressed klass pointers.
3167       char *addr = requested_addr;
3168       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3169              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3170         addr = addr + increment;
3171         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3172                                      _reserve_alignment, large_pages, addr);
3173       }
3174     }
3175 #endif
3176     // If no allocation has succeeded so far, try to allocate the space anywhere.  If
3177     // that also fails, exit with an OOM error.  At this point we cannot try allocating the
3178     // metaspace as if UseCompressedClassPointers is off because too much
3179     // initialization has happened that depends on UseCompressedClassPointers.
3180     // So, UseCompressedClassPointers cannot be turned off at this point.
3181     if (!metaspace_rs.is_reserved()) {
3182       metaspace_rs = ReservedSpace(compressed_class_space_size(),


3246     // Using large pages when dumping the shared archive is currently not implemented.
3247     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3248   }
3249 
3250   size_t page_size = os::vm_page_size();
3251   if (UseLargePages && UseLargePagesInMetaspace) {
3252     page_size = os::large_page_size();
3253   }
3254 
3255   _commit_alignment  = page_size;
3256   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
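       // For example, on a platform with 4K pages and a 64K allocation
       // granularity, commits are 4K-aligned while reservations are 64K-aligned;
       // with -XX:+UseLargePagesInMetaspace both alignments grow to at least the
       // large page size.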
3257 
3258   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since that would lose
3259   // track of whether MaxMetaspaceSize was set on the command line or not.
3260   // This information is needed later to conform to the specification of the
3261   // java.lang.management.MemoryUsage API.
3262   //
3263   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3264   // globals.hpp to the aligned value, but this is not possible, since the
3265   // alignment depends on other flags being parsed.
3266   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3267 
3268   if (MetaspaceSize > MaxMetaspaceSize) {
3269     MetaspaceSize = MaxMetaspaceSize;
3270   }
3271 
3272   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
3273 
3274   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3275 
3276   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3277   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3278 
3279   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3280   set_compressed_class_space_size(CompressedClassSpaceSize);
3281 }
3282 
3283 void Metaspace::global_initialize() {
3284   MetaspaceGC::initialize();
3285 
3286   // Initialize the alignment for shared spaces.
3287   int max_alignment = os::vm_allocation_granularity();
3288   size_t cds_total = 0;
3289 
3290   MetaspaceShared::set_max_alignment(max_alignment);
3291 
3292   if (DumpSharedSpaces) {
3293 #if INCLUDE_CDS
3294     MetaspaceShared::estimate_regions_size();
3295 
3296     SharedReadOnlySize  = align_up(SharedReadOnlySize,  max_alignment);
3297     SharedReadWriteSize = align_up(SharedReadWriteSize, max_alignment);
3298     SharedMiscDataSize  = align_up(SharedMiscDataSize,  max_alignment);
3299     SharedMiscCodeSize  = align_up(SharedMiscCodeSize,  max_alignment);
3300 
3301     // Initialize with the sum of the shared space sizes.  The read-only
3302     // and read-write metaspace chunks will be allocated out of this and the
3303     // remainder is the misc code and data chunks.
3304     cds_total = FileMapInfo::shared_spaces_size();
3305     cds_total = align_up(cds_total, _reserve_alignment);
3306     _space_list = new VirtualSpaceList(cds_total/wordSize);
3307     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3308 
3309     if (!_space_list->initialization_succeeded()) {
3310       vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3311     }
3312 
3313 #ifdef _LP64
3314     if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3315       vm_exit_during_initialization("Unable to dump shared archive.",
3316           err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3317                   SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3318                   "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(),
3319                   cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3320     }
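         // (For instance, assuming the default 1G CompressedClassSpaceSize and a
         // 4G unscaled encoding limit, the dumped archive is limited to roughly
         // 3G.)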
3321 
3322     // Set the compressed klass pointer base so that decoding of these pointers works
3323     // properly when creating the shared archive.
3324     assert(UseCompressedOops && UseCompressedClassPointers,
3325       "UseCompressedOops and UseCompressedClassPointers must be set");


3332 #endif // INCLUDE_CDS
3333   } else {
3334 #if INCLUDE_CDS
3335     if (UseSharedSpaces) {
3336       // If using shared space, open the file that contains the shared space
3337       // and map in the memory before initializing the rest of metaspace (so
3338       // the addresses don't conflict)
3339       address cds_address = NULL;
3340       FileMapInfo* mapinfo = new FileMapInfo();
3341 
3342       // Open the shared archive file, read and validate the header. If
3343       // initialization fails, shared spaces [UseSharedSpaces] are
3344       // disabled and the file is closed.
3345       // The shared spaces are also mapped in now.
3346       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3347         cds_total = FileMapInfo::shared_spaces_size();
3348         cds_address = (address)mapinfo->header()->region_addr(0);
3349 #ifdef _LP64
3350         if (using_class_space()) {
3351           char* cds_end = (char*)(cds_address + cds_total);
3352           cds_end = align_up(cds_end, _reserve_alignment);
3353           // If UseCompressedClassPointers is set then allocate the metaspace area
3354           // above the heap and above the CDS area (if it exists).
3355           allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3356           // Map the shared string space after the compressed class pointers
3357           // have been set up, because it relies on that setup to work.
3358           mapinfo->map_string_regions();
3359         }
3360 #endif // _LP64
3361       } else {
3362         assert(!mapinfo->is_open() && !UseSharedSpaces,
3363                "archive file not closed or shared spaces not disabled.");
3364       }
3365     }
3366 #endif // INCLUDE_CDS
3367 
3368 #ifdef _LP64
3369     if (!UseSharedSpaces && using_class_space()) {
3370       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3371       allocate_metaspace_compressed_klass_ptrs(base, 0);
3372     }
3373 #endif // _LP64
3374 
3375     // Initialize these before initializing the VirtualSpaceList
3376     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3377     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3378     // Make the first class chunk bigger than a medium chunk so it's not put
3379     // on the medium chunk list.  The next chunk will be small and progress
3380     // from there.  This size was determined by running -version.
3381     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3382                                        (CompressedClassSpaceSize/BytesPerWord)*2);
3383     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3384     // Arbitrarily set the initial virtual space to a multiple
3385     // of the boot class loader size.
3386     size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3387     word_size = align_up(word_size, Metaspace::reserve_alignment_words());
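         // (For instance, assuming the 64-bit default of 4M for
         // InitialBootClassLoaderMetaspaceSize, the boot loader's first chunk is
         // 512K words and the initial virtual space is VIRTUALSPACEMULTIPLIER
         // times that, rounded up to the reserve alignment.)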
3388 
3389     // Initialize the list of virtual spaces.
3390     _space_list = new VirtualSpaceList(word_size);
3391     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3392 
3393     if (!_space_list->initialization_succeeded()) {
3394       vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3395     }
3396   }
3397 
3398   _tracer = new MetaspaceTracer();
3399 }
3400 
3401 void Metaspace::post_initialize() {
3402   MetaspaceGC::post_initialize();
3403 }
3404 
3405 void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) {
3406   Metachunk* chunk = get_initialization_chunk(type, mdtype);
3407   if (chunk != NULL) {


3449     // Allocate SpaceManager for classes.
3450     _class_vsm = new SpaceManager(ClassType, lock);
3451   }
3452 
3453   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3454 
3455   // Allocate chunk for metadata objects
3456   initialize_first_chunk(type, NonClassType);
3457 
3458   // Allocate chunk for class metadata objects
3459   if (using_class_space()) {
3460     initialize_first_chunk(type, ClassType);
3461   }
3462 
3463   _alloc_record_head = NULL;
3464   _alloc_record_tail = NULL;
3465 }
3466 
3467 size_t Metaspace::align_word_size_up(size_t word_size) {
3468   size_t byte_size = word_size * wordSize;
3469   return ReservedSpace::allocation_align_up(byte_size) / wordSize;
3470 }
3471 
3472 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3473   // DumpSharedSpaces doesn't use the class metadata area (yet)
3474   // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3475   if (is_class_space_allocation(mdtype)) {
3476     return  class_vsm()->allocate(word_size);
3477   } else {
3478     return  vsm()->allocate(word_size);
3479   }
3480 }
3481 
3482 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3483   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3484   assert(delta_bytes > 0, "Must be");
3485 
3486   size_t before = 0;
3487   size_t after = 0;
3488   MetaWord* res;
3489   bool incremented;


4120   static int get_random_position() {
4121     return os::random() % num_chunks;
4122   }
4123 
4124   // Asserts that ChunkManager counters match expectations.
4125   void assert_counters() {
4126     assert(_vsn.container_count() == num_chunks - _chunks_in_chunkmanager, "vsn counter mismatch.");
4127     assert(_cm.free_chunks_count() == _chunks_in_chunkmanager, "cm counter mismatch.");
4128     assert(_cm.free_chunks_total_words() == _words_in_chunkmanager, "cm counter mismatch.");
4129   }
4130 
4131   // Get a random chunk size. Equal chance to get spec/med/small chunk size or
4132   // a humongous chunk size. The latter itself is random in the range of [med+spec..4*med).
4133   size_t get_random_chunk_size() {
4134     const size_t sizes [] = { SpecializedChunk, SmallChunk, MediumChunk };
4135     const int rand = os::random() % 4;
4136     if (rand < 3) {
4137       return sizes[rand];
4138     } else {
4139       // Note: this affects the max. size of space (see _vsn initialization in ctor).
4140       return align_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk);
4141     }
4142   }
4143 
4144   // Starting at pool index <start>+1, find the next chunk tagged as either free or in use, depending
4145   // on <is_free>. Search wraps. Returns its position, or -1 if no matching chunk was found.
4146   int next_matching_chunk(int start, bool is_free) const {
4147     assert(start >= 0 && start < num_chunks, "invalid parameter");
4148     int pos = start;
4149     do {
4150       if (++pos == num_chunks) {
4151         pos = 0;
4152       }
4153       if (_pool[pos]->is_tagged_free() == is_free) {
4154         return pos;
4155       }
4156     } while (pos != start);
4157     return -1;
4158   }
4159 
4160   // A structure to keep information about a chunk list including which


4267     }
4268     // Before the chunks are returned, they should be tagged as in use.
4269     for (int i = 0; i < aChunkList.num; i ++) {
4270       assert(!aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4271     }
4272     _cm.return_chunk_list(aChunkList.index, aChunkList.head);
4273     _chunks_in_chunkmanager += aChunkList.num;
4274     _words_in_chunkmanager += aChunkList.size;
4275     // After all chunks are returned, check that they are now tagged free.
4276     for (int i = 0; i < aChunkList.num; i ++) {
4277       assert(aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4278     }
4279     assert_counters();
4280     _cm.locked_verify();
4281     return aChunkList.num;
4282   }
4283 
4284 public:
4285 
4286   ChunkManagerReturnTestImpl()
4287     : _vsn(align_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
4288     , _cm(SpecializedChunk, SmallChunk, MediumChunk)
4289     , _chunks_in_chunkmanager(0)
4290     , _words_in_chunkmanager(0)
4291   {
4292     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4293     // Allocate virtual space and allocate random chunks. Keep these chunks in the _pool. These chunks are
4294     // considered "in use" because they have not yet been added to any chunk manager.
4295     _vsn.initialize();
4296     _vsn.expand_by(_vsn.reserved_words(), _vsn.reserved_words());
4297     for (int i = 0; i < num_chunks; i ++) {
4298       const size_t size = get_random_chunk_size();
4299       _pool[i] = _vsn.get_chunk_vs(size);
4300       assert(_pool[i] != NULL, "allocation failed");
4301     }
4302     assert_counters();
4303     _cm.locked_verify();
4304   }
4305 
4306   // Test entry point.
4307   // Return some chunks to the chunk manager (return phase). Take some chunks out (take phase). Repeat.

