 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/filemap.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/printCLDMetaspaceInfoClosure.hpp"
#include "memory/metaspace/spaceManager.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/init.hpp"
#include "runtime/orderAccess.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/vmError.hpp"


using namespace metaspace;

MetaWord* last_allocated = 0;

// [... elided ...]

    Metaspace::MetadataType mdtype = (Metaspace::MetadataType)i;
    UsedChunksStatistics chunk_stat = total_stat.sm_stats(mdtype).totals();
    if (capacity_words(mdtype) != chunk_stat.cap() ||
        used_words(mdtype) != chunk_stat.used() ||
        overhead_words(mdtype) != chunk_stat.overhead()) {
      mismatch = true;
      tty->print_cr("MetaspaceUtils::verify_metrics: counter mismatch for mdtype=%u:", mdtype);
      tty->print_cr("Expected cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
                    capacity_words(mdtype), used_words(mdtype), overhead_words(mdtype));
      tty->print_cr("Got cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
                    chunk_stat.cap(), chunk_stat.used(), chunk_stat.overhead());
      tty->flush();
    }
  }
  assert(mismatch == false, "MetaspaceUtils::verify_metrics: counter mismatch.");
#endif
}

// Metaspace methods

size_t Metaspace::_first_chunk_word_size = 0;
size_t Metaspace::_first_class_chunk_word_size = 0;

size_t Metaspace::_commit_alignment = 0;
size_t Metaspace::_reserve_alignment = 0;

VirtualSpaceList* Metaspace::_space_list = NULL;
VirtualSpaceList* Metaspace::_class_space_list = NULL;

ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
ChunkManager* Metaspace::_chunk_manager_class = NULL;

bool Metaspace::_initialized = false;

#define VIRTUALSPACEMULTIPLIER 2

#ifdef _LP64
static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);

void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
  assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
  // Figure out the narrow_klass_base and the narrow_klass_shift.  The
  // narrow_klass_base is the lower of the metaspace base and the cds base
  // (if cds is enabled).  The narrow_klass_shift depends on the distance
  // between the lower base and higher address.
  address lower_base;
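  // The rest of this function is elided from this excerpt. As a hedged sketch
  // (not necessarily the exact elided body), the typical selection looks like
  // the following; `higher_address` and `cds_end` are placeholders for the
  // tops of the reserved ranges, not names from the original:
  //
  //   lower_base = MIN2(metaspace_base, cds_base);  // cds_base matters only with CDS
  //   if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
  //     CompressedKlassPointers::set_shift(0);      // everything reachable unscaled
  //   } else {
  //     CompressedKlassPointers::set_shift(LogKlassAlignmentInBytes);
  //   }
  //   CompressedKlassPointers::set_base(lower_base);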

// [... elided ...]

  st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
               p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());
  if (_class_space_list != NULL) {
    address base = (address)_class_space_list->current_virtual_space()->bottom();
    st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
              compressed_class_space_size(), p2i(base));
    if (requested_addr != 0) {
      st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
    }
    st->cr();
  }
}

// For UseCompressedClassPointers the class space is reserved above the top of
// the Java heap.  The argument passed in is at the base of the compressed space.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, esp with UseLargePages
  assert(rs.size() >= CompressedClassSpaceSize,
         SIZE_FORMAT " < " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
  assert(using_class_space(), "Must be using class space");
  _class_space_list = new VirtualSpaceList(rs);
  _chunk_manager_class = new ChunkManager(true/*is_class*/);

  if (!_class_space_list->initialization_succeeded()) {
    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
  }
}
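
// A hedged sketch of the call site (cf. allocate_metaspace_compressed_klass_ptrs,
// which is elided from this excerpt): the caller reserves
// compressed_class_space_size() bytes at a preferred address and passes the
// reservation in. Treat the ReservedSpace constructor arguments below as
// illustrative, not authoritative:
//
//   ReservedSpace metaspace_rs(compressed_class_space_size(), _reserve_alignment,
//                              false /* large_pages */, requested_addr);
//   if (metaspace_rs.is_reserved()) {
//     initialize_class_space(metaspace_rs);
//   }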

#endif

void Metaspace::ergo_initialize() {
  if (DumpSharedSpaces) {
    // Using large pages when dumping the shared archive is currently not implemented.
    FLAG_SET_ERGO(UseLargePagesInMetaspace, false);
  }

  size_t page_size = os::vm_page_size();
  if (UseLargePages && UseLargePagesInMetaspace) {
    page_size = os::large_page_size();
  }

  _commit_alignment  = page_size;
  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());

  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since doing so would
  // clobber the record of whether MaxMetaspaceSize was set on the command line.
  // That information is needed later to conform to the specification of the
  // java.lang.management.MemoryUsage API.
  //
  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
  // globals.hpp to the aligned value, but this is not possible, since the
  // alignment depends on other flags being parsed.
  MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);

  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);

  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");

  MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
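
  // Illustrative numbers (an assumption for the example, not from the original:
  // 4K pages and 64K allocation granularity, as on Windows x64): this gives
  // _commit_alignment = 4K and _reserve_alignment = 64K, so
  // -XX:MaxMetaspaceSize=100m stays at 100M (already 64K-aligned), while an
  // odd value such as 10001K is aligned down to 9984K.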

  // [... elided ...]

  size_t min_metaspace_sz =
      VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
  if (UseCompressedClassPointers) {
    if ((min_metaspace_sz + CompressedClassSpaceSize) > MaxMetaspaceSize) {
      if (min_metaspace_sz >= MaxMetaspaceSize) {
        vm_exit_during_initialization("MaxMetaspaceSize is too small.");
      } else {
        FLAG_SET_ERGO(CompressedClassSpaceSize,
                      MaxMetaspaceSize - min_metaspace_sz);
      }
    }
  } else if (min_metaspace_sz >= MaxMetaspaceSize) {
    FLAG_SET_ERGO(InitialBootClassLoaderMetaspaceSize,
                  min_metaspace_sz);
  }
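
  // Worked example (illustrative; assumes the usual defaults of
  // InitialBootClassLoaderMetaspaceSize = 4M and CompressedClassSpaceSize = 1G):
  // min_metaspace_sz = 2 * 4M = 8M.  With -XX:MaxMetaspaceSize=256m,
  // 8M + 1G > 256M, so CompressedClassSpaceSize is ergonomically lowered to
  // 256M - 8M = 248M.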

  set_compressed_class_space_size(CompressedClassSpaceSize);
}

void Metaspace::global_initialize() {
  MetaspaceGC::initialize();

#if INCLUDE_CDS
  if (DumpSharedSpaces) {
    MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
  } else if (UseSharedSpaces) {
    // If any of the archived space fails to map, UseSharedSpaces
    // is reset to false.  Fall through to the
    // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
    // metaspace.
    MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
  }

  if (DynamicDumpSharedSpaces && !UseSharedSpaces) {
    vm_exit_during_initialization("DynamicDumpSharedSpaces is unsupported when base CDS archive is not loaded", NULL);
  }

  if (!DumpSharedSpaces && !UseSharedSpaces)
#endif // INCLUDE_CDS
  {
#ifdef _LP64
    if (using_class_space()) {
      char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
      allocate_metaspace_compressed_klass_ptrs(base, 0);
    }
#endif // _LP64
  }

  // Initialize these before initializing the VirtualSpaceList
  _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
  _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
  // Make the first class chunk bigger than a medium chunk so it's not put
  // on the medium chunk list.  The next chunk will be small and progress
  // from there.  This size was determined empirically by running with -version.
  _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
                                      (CompressedClassSpaceSize/BytesPerWord)*2);
  _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
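  // Worked example (illustrative; assumes MediumChunk is 8K words and the
  // 64-bit default CompressedClassSpaceSize of 1G): MediumChunk * 6 = 48K
  // words, while (1G / BytesPerWord) * 2 = 256M words, so the MIN2 above
  // selects 48K words (384K) before alignment.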
  // Arbitrarily set the initial virtual space to a multiple
  // of the boot class loader size.
  size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
  word_size = align_up(word_size, Metaspace::reserve_alignment_words());

  // Initialize the list of virtual spaces.
  _space_list = new VirtualSpaceList(word_size);
  _chunk_manager_metadata = new ChunkManager(false/*is_class*/);

  if (!_space_list->initialization_succeeded()) {
    vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
  }

  _tracer = new MetaspaceTracer();

  _initialized = true;
}

void Metaspace::post_initialize() {
  MetaspaceGC::post_initialize();
}

void Metaspace::verify_global_initialization() {
  assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
  assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");

  if (using_class_space()) {
    assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
    assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
  }
}

// [... elided ...]

              Mutex::_no_safepoint_check_flag);
  purge(NonClassType);
  if (using_class_space()) {
    purge(ClassType);
  }
}

bool Metaspace::contains(const void* ptr) {
  if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
    return true;
  }
  return contains_non_shared(ptr);
}

bool Metaspace::contains_non_shared(const void* ptr) {
  if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
    return true;
  }

  return get_space_list(NonClassType)->contains(ptr);
}

// ClassLoaderMetaspace

ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type)
  : _space_type(type)
  , _lock(lock)
  , _vsm(NULL)
  , _class_vsm(NULL)
{
  initialize(lock, type);
}

ClassLoaderMetaspace::~ClassLoaderMetaspace() {
  Metaspace::assert_not_frozen();
  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_deaths));
  delete _vsm;
  if (Metaspace::using_class_space()) {
    delete _class_vsm;
  }
}

void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
  Metachunk* chunk = get_initialization_chunk(type, mdtype);
  if (chunk != NULL) {
    // Add to this manager's list of chunks in use and make it the current_chunk().
    get_space_manager(mdtype)->add_chunk(chunk, true);
  }
}

Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
  size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);

  // Get a chunk from the chunk freelist first; fall back to carving a new
  // chunk out of the virtual space list if the freelist cannot serve the request.
  Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);

  if (chunk == NULL) {
    chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
                                                             get_space_manager(mdtype)->medium_chunk_bunch());
  }

  return chunk;
}

void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
  Metaspace::verify_global_initialization();

  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_births));

  // Allocate SpaceManager for metadata objects.
  _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);

  if (Metaspace::using_class_space()) {
    // Allocate SpaceManager for classes.
    _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
  }

  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);

  // Allocate chunk for metadata objects
  initialize_first_chunk(type, Metaspace::NonClassType);

  // Allocate chunk for class metadata objects
  if (Metaspace::using_class_space()) {
    initialize_first_chunk(type, Metaspace::ClassType);
  }
}

MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
  Metaspace::assert_not_frozen();

  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs));

  // Don't use class_vsm() unless UseCompressedClassPointers is true.
  if (Metaspace::is_class_space_allocation(mdtype)) {
    return class_vsm()->allocate(word_size);
  } else {
    return vsm()->allocate(word_size);
  }
}

MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
  Metaspace::assert_not_frozen();
  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
  assert(delta_bytes > 0, "Must be");

  size_t before = 0;
  size_t after = 0;
  bool can_retry = true;
  MetaWord* res;
  bool incremented;

  // Each thread increments the HWM at most once.  Even if the thread fails to increment
  // the HWM, an allocation is still attempted.  This is because another thread must then
  // have incremented the HWM and therefore the allocation might still succeed.
  do {
    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before, &can_retry);
    res = allocate(word_size, mdtype);
  } while (!incremented && res == NULL && can_retry);
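
  // Example interleaving (illustrative, not from the original source): thread A
  // fails to raise the high-water mark because thread B raced ahead and raised
  // it first; A's subsequent allocate() may then succeed within B's newly
  // raised capacity, so A loops instead of failing immediately.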

  if (incremented) {
    Metaspace::tracer()->report_gc_threshold(before, after,
                                             MetaspaceGCThresholdUpdater::ExpandAndAllocate);
    log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
  }

  return res;
}

size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
  return (vsm()->used_words() +
          (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
}

size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
  return (vsm()->capacity_words() +
          (Metaspace::using_class_space() ? class_vsm()->capacity_words() : 0)) * BytesPerWord;
}

void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  Metaspace::assert_not_frozen();
  assert(!SafepointSynchronize::is_at_safepoint()
         || Thread::current()->is_VM_thread(), "should be the VM thread");

  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_external_deallocs));

  MutexLocker ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);

  if (is_class && Metaspace::using_class_space()) {
    class_vsm()->deallocate(ptr, word_size);
  } else {
    vsm()->deallocate(ptr, word_size);
  }
}

size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
  assert(Metaspace::using_class_space(), "Has to use class space");
  return class_vsm()->calc_chunk_size(word_size);
}

void ClassLoaderMetaspace::print_on(outputStream* out) const {
  // Print both class virtual space counts and metaspace.
  if (Verbose) {
    vsm()->print_on(out);
    if (Metaspace::using_class_space()) {
      class_vsm()->print_on(out);
    }
  }
}

void ClassLoaderMetaspace::verify() {
  vsm()->verify();
  if (Metaspace::using_class_space()) {
    class_vsm()->verify();
  }
}

void ClassLoaderMetaspace::add_to_statistics_locked(ClassLoaderMetaspaceStatistics* out) const {
  assert_lock_strong(lock());
  vsm()->add_to_statistics_locked(&out->nonclass_sm_stats());
  if (Metaspace::using_class_space()) {
    class_vsm()->add_to_statistics_locked(&out->class_sm_stats());
  }
}

void ClassLoaderMetaspace::add_to_statistics(ClassLoaderMetaspaceStatistics* out) const {
  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
  add_to_statistics_locked(out);
}

/////////////// Unit tests ///////////////

struct chunkmanager_statistics_t {
  int num_specialized_chunks;
  int num_small_chunks;
  int num_medium_chunks;
  int num_humongous_chunks;
};

extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
  ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
  ChunkManagerStatistics stat;
  chunk_manager->collect_statistics(&stat);
  out->num_specialized_chunks = (int)stat.chunk_stats(SpecializedIndex).num();
  out->num_small_chunks = (int)stat.chunk_stats(SmallIndex).num();
  out->num_medium_chunks = (int)stat.chunk_stats(MediumIndex).num();
  out->num_humongous_chunks = (int)stat.chunk_stats(HumongousIndex).num();
}
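
// A minimal caller sketch (hypothetical; the real callers live in the native
// gtest suite, which links against this symbol):
//
//   chunkmanager_statistics_t stat;
//   test_metaspace_retrieve_chunkmanager_statistics(Metaspace::NonClassType, &stat);
//   // stat.num_small_chunks etc. now reflect the chunks currently held in the
//   // non-class ChunkManager's free lists.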