src/share/vm/memory/metaspaceShared.cpp

*** 69,87 ****
  bool MetaspaceShared::_remapped_readwrite = false;
  address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
  size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
  size_t MetaspaceShared::_core_spaces_size = 0;
  
! // The CDS archive is divided into 6 regions:
  //     mc - misc code (the method entry trampolines)
  //     rw - read-write metadata
  //     ro - read-only metadata and read-only tables
  //     md - misc data (the c++ vtables)
! //     od - other data (original class files)
! //     st - shared strings
  //
! // Except for the st region, the other 5 regions are linearly allocated, starting from
  // SharedBaseAddress, in the order of mc->rw->ro->md->od. The size of these 5 regions
  // are page-aligned, and there's no gap between any consecutive regions.
  //
  // These 5 regions are populated in the following steps:
  // [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
--- 69,89 ----
  bool MetaspaceShared::_remapped_readwrite = false;
  address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
  size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
  size_t MetaspaceShared::_core_spaces_size = 0;
  
! // The CDS archive is divided into the following regions:
  //     mc - misc code (the method entry trampolines)
  //     rw - read-write metadata
  //     ro - read-only metadata and read-only tables
  //     md - misc data (the c++ vtables)
! //     od - optional data (original class files)
  //
! //     s0 - shared strings #0
! //     s1 - shared strings #1 (may be empty)
! //
! // Except for the s0/s1 regions, the other 5 regions are linearly allocated, starting from
  // SharedBaseAddress, in the order of mc->rw->ro->md->od. The size of these 5 regions
  // are page-aligned, and there's no gap between any consecutive regions.
  //
  // These 5 regions are populated in the following steps:
  // [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
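Reviewer note: to make the layout comment above concrete, here is a minimal standalone sketch of how the five core regions tile one contiguous range with page-aligned, gap-free boundaries. All addresses and sizes are invented, and align_up_v is my stand-in for HotSpot's align_up; this is an illustration, not the archive format.

    // Sketch only: mc->rw->ro->md->od tiling with made-up sizes.
    #include <cstdio>

    static unsigned long long align_up_v(unsigned long long v, unsigned long long a) {
      return (v + a - 1) & ~(a - 1);
    }

    int main() {
      const unsigned long long shared_base = 0x800000000ULL; // hypothetical SharedBaseAddress
      const unsigned long long page = 4096;
      const char* names[] = { "mc", "rw", "ro", "md", "od" };
      const unsigned long long sizes[] = { 10000, 500000, 400000, 20000, 30000 };
      unsigned long long top = shared_base;
      for (int i = 0; i < 5; i++) {
        unsigned long long base = top;
        top = align_up_v(base + sizes[i], page); // each region ends page-aligned, no gaps
        printf("%s: [0x%llx, 0x%llx)\n", names[i], base, top);
      }
      return 0;
    }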
*** 92,102 ****
  // [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
  //     are copied into the ro region as read-only tables.
  // [5] C++ vtables are copied into the md region.
  // [6] Original class files are copied into the od region.
  //
! // The st region is populated inside MetaspaceShared::dump_string_and_symbols. Its
  // layout is independent of the other 5 regions.
  
  class DumpRegion {
  private:
    const char* _name;
--- 94,104 ----
  // [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
  //     are copied into the ro region as read-only tables.
  // [5] C++ vtables are copied into the md region.
  // [6] Original class files are copied into the od region.
  //
! // The s0/s1 regions are populated inside MetaspaceShared::dump_string_and_symbols. Their
  // layout is independent of the other 5 regions.
  
  class DumpRegion {
  private:
    const char* _name;
*** 119,137 ****
  
  public:
    DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}
  
    char* allocate(size_t num_bytes, size_t alignment=BytesPerWord) {
!     char* p = (char*)align_ptr_up(_top, alignment);
!     char* newtop = p + align_size_up(num_bytes, alignment);
      expand_top_to(newtop);
      memset(p, 0, newtop - p);
      return p;
    }
  
    void append_intptr_t(intptr_t n) {
!     assert(is_ptr_aligned(_top, sizeof(intptr_t)), "bad alignment");
      intptr_t *p = (intptr_t*)_top;
      char* newtop = _top + sizeof(intptr_t);
      expand_top_to(newtop);
      *p = n;
    }
--- 121,139 ----
  
  public:
    DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}
  
    char* allocate(size_t num_bytes, size_t alignment=BytesPerWord) {
!     char* p = (char*)align_up(_top, alignment);
!     char* newtop = p + align_up(num_bytes, alignment);
      expand_top_to(newtop);
      memset(p, 0, newtop - p);
      return p;
    }
  
    void append_intptr_t(intptr_t n) {
!     assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
      intptr_t *p = (intptr_t*)_top;
      char* newtop = _top + sizeof(intptr_t);
      expand_top_to(newtop);
      *p = n;
    }
--- 121,139 ----
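DumpRegion::allocate() is a bump-pointer allocator; this hunk only renames the alignment helpers (align_ptr_up/align_size_up collapse into the unified align_up, is_ptr_aligned becomes is_aligned). A self-contained sketch of the same idiom, with my own helper names rather than HotSpot's:

    // Minimal bump-pointer region mirroring DumpRegion::allocate():
    // align the current top, bump it by the aligned size, zero the storage.
    #include <cassert>
    #include <cstdint>
    #include <cstring>

    struct BumpRegion {
      char* _top;
      char* _end;

      static uintptr_t align_up_v(uintptr_t v, size_t a) {
        return (v + a - 1) & ~(uintptr_t)(a - 1);
      }

      char* allocate(size_t num_bytes, size_t alignment = sizeof(void*)) {
        char* p      = (char*)align_up_v((uintptr_t)_top, alignment);
        char* newtop = p + align_up_v(num_bytes, alignment);
        assert(newtop <= _end && "region exhausted");
        _top = newtop;
        memset(p, 0, newtop - p);  // callers get zeroed storage
        return p;
      }
    };

    // Usage: char buf[4096]; BumpRegion r = { buf, buf + sizeof(buf) };
    //        void* obj = r.allocate(100);  // consumes 104 bytes on a 64-bit host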
*** 145,155 ****
    bool is_allocatable() const {
      return !is_packed() && _base != NULL;
    }
  
    double perc(size_t used, size_t total) const {
!     if (total == 0) {total = 1;}
      return used / double(total) * 100.0;
    }
  
    void print(size_t total_bytes) const {
      tty->print_cr("%s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
--- 147,159 ----
    bool is_allocatable() const {
      return !is_packed() && _base != NULL;
    }
  
    double perc(size_t used, size_t total) const {
!     if (total == 0) {
!       total = 1;
!     }
      return used / double(total) * 100.0;
    }
  
    void print(size_t total_bytes) const {
      tty->print_cr("%s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
*** 175,185 ****
      _end = e;
    }
  
    void pack(DumpRegion* next = NULL) {
      assert(!is_packed(), "sanity");
!     _end = (char*)align_ptr_up(_top, Metaspace::reserve_alignment());
      _is_packed = true;
      if (next != NULL) {
        next->_base = next->_top = this->_end;
        next->_end = MetaspaceShared::shared_rs()->end();
      }
--- 179,189 ----
      _end = e;
    }
  
    void pack(DumpRegion* next = NULL) {
      assert(!is_packed(), "sanity");
!     _end = (char*)align_up(_top, Metaspace::reserve_alignment());
      _is_packed = true;
      if (next != NULL) {
        next->_base = next->_top = this->_end;
        next->_end = MetaspaceShared::shared_rs()->end();
      }
*** 187,198 ****
  
    bool contains(char* p) {
      return base() <= p && p < top();
    }
  };
  
! DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md");
! DumpRegion _st_region("st"), _od_region("od");
  
  char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
    return _mc_region.allocate(num_bytes);
  }
--- 191,202 ----
  
    bool contains(char* p) {
      return base() <= p && p < top();
    }
  };
  
! DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
! DumpRegion _s0_region("s0"), _s1_region("s1");
  
  char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
    return _mc_region.allocate(num_bytes);
  }
*** 201,218 ****
  }
  
  void MetaspaceShared::initialize_shared_rs() {
    const size_t reserve_alignment = Metaspace::reserve_alignment();
    bool large_pages = false; // No large pages when dumping the CDS archive.
!   char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, reserve_alignment);
  
  #ifdef _LP64
    const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
!   const size_t cds_total = align_size_down(UnscaledClassSpaceMax, reserve_alignment);
  #else
    // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
!   size_t cds_total = align_size_down(256*M, reserve_alignment);
  #endif
  
    // First try to reserve the space at the specified SharedBaseAddress.
    _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
    if (_shared_rs.is_reserved()) {
--- 205,222 ----
  }
  
  void MetaspaceShared::initialize_shared_rs() {
    const size_t reserve_alignment = Metaspace::reserve_alignment();
    bool large_pages = false; // No large pages when dumping the CDS archive.
!   char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
  
  #ifdef _LP64
    const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
!   const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
  #else
    // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
!   size_t cds_total = align_down(256*M, reserve_alignment);
  #endif
  
    // First try to reserve the space at the specified SharedBaseAddress.
    _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
    if (_shared_rs.is_reserved()) {
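On 64-bit, UnscaledClassSpaceMax is uint64_t(max_juint) + 1 = 4GB, so cds_total is 4GB rounded down to the reserve alignment. A quick standalone check of that arithmetic; the 16MB alignment here is an assumed value for illustration, not necessarily what Metaspace::reserve_alignment() returns:

    // Sketch: the 64-bit cds_total computation, with an assumed alignment.
    #include <cstdint>
    #include <cstdio>

    static uint64_t align_down_v(uint64_t v, uint64_t a) { return v & ~(a - 1); }

    int main() {
      const uint64_t max_juint = 0xFFFFFFFFull;
      const uint64_t UnscaledClassSpaceMax = max_juint + 1;  // 4GB
      const uint64_t reserve_alignment = 16 * 1024 * 1024;   // assumed, power of two
      printf("cds_total = %llu\n",                           // prints 4294967296
             (unsigned long long)align_down_v(UnscaledClassSpaceMax, reserve_alignment));
      return 0;
    }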
*** 235,247 ****
    // then the RO parts.
    assert(UseCompressedOops && UseCompressedClassPointers,
           "UseCompressedOops and UseCompressedClassPointers must be set");
  
!   size_t max_archive_size = align_size_down(cds_total * 3 / 4, reserve_alignment);
    ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
!   CompressedClassSpaceSize = align_size_down(tmp_class_space.size(), reserve_alignment);
    _shared_rs = _shared_rs.first_part(max_archive_size);
  
    // Set up compress class pointers.
    Universe::set_narrow_klass_base((address)_shared_rs.base());
    if (UseAOT || cds_total > UnscaledClassSpaceMax) {
--- 239,251 ----
    // then the RO parts.
    assert(UseCompressedOops && UseCompressedClassPointers,
           "UseCompressedOops and UseCompressedClassPointers must be set");
  
!   size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment);
    ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
!   CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment);
    _shared_rs = _shared_rs.first_part(max_archive_size);
  
    // Set up compress class pointers.
    Universe::set_narrow_klass_base((address)_shared_rs.base());
    if (UseAOT || cds_total > UnscaledClassSpaceMax) {
*** 285,298 ****
    size_t commit = MAX2(min_bytes, preferred_bytes);
    assert(commit <= uncommitted, "sanity");
  
    bool result = _shared_vs.expand_by(commit, false);
!   assert(result, "Failed to commit memory");
  
!   log_info(cds)("Expanding shared spaces by %7d bytes [total %8d bytes ending at %p]",
!                 int(commit), int(_shared_vs.actual_committed_size()), _shared_vs.high());
  }
  
  // Read/write a data stream for restoring/preserving metadata pointers and
  // miscellaneous data from/to the shared archive file.
--- 289,305 ----
    size_t commit = MAX2(min_bytes, preferred_bytes);
    assert(commit <= uncommitted, "sanity");
  
    bool result = _shared_vs.expand_by(commit, false);
!   if (!result) {
!     vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
!                                           need_committed_size));
!   }
  
!   log_info(cds)("Expanding shared spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
!                 commit, _shared_vs.actual_committed_size(), _shared_vs.high());
  }
  
  // Read/write a data stream for restoring/preserving metadata pointers and
  // miscellaneous data from/to the shared archive file.
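The switch from assert(result, ...) to an explicit if (!result) check matters because asserts are compiled out of product builds, while a failed commit must stop the dump in every build. A sketch of the pattern outside HotSpot; fatal_exit is my stand-in for vm_exit_during_initialization, and expand_by is a pretend commit:

    // Fail-fast on commit failure even when assertions are disabled.
    #include <cstdio>
    #include <cstdlib>

    static void fatal_exit(const char* msg) {  // stand-in for vm_exit_during_initialization
      fprintf(stderr, "Error occurred during initialization of VM\n%s\n", msg);
      exit(1);
    }

    static bool expand_by(size_t bytes) { return bytes < (1u << 30); }  // pretend commit

    void ensure_committed(size_t need_committed_size) {
      if (!expand_by(need_committed_size)) {   // NOT an assert: survives product builds
        char buf[128];
        snprintf(buf, sizeof(buf), "Failed to expand shared space to %zu bytes",
                 need_committed_size);
        fatal_exit(buf);
      }
    }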
*** 495,505 ****
  template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;
  
  template <class T>
  intptr_t* CppVtableCloner<T>::allocate(const char* name) {
!   assert(is_ptr_aligned(_md_region.top(), sizeof(intptr_t)), "bad alignment");
  
    int n = get_vtable_length(name);
    _info = (CppVtableInfo*)_md_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
    _info->set_vtable_size(n);
  
    intptr_t* p = clone_vtable(name, _info);
--- 502,512 ----
  template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;
  
  template <class T>
  intptr_t* CppVtableCloner<T>::allocate(const char* name) {
!   assert(is_aligned(_md_region.top(), sizeof(intptr_t)), "bad alignment");
  
    int n = get_vtable_length(name);
    _info = (CppVtableInfo*)_md_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
    _info->set_vtable_size(n);
  
    intptr_t* p = clone_vtable(name, _info);
*** 733,742 ****
--- 740,753 ----
      int which = (read_only) ? RO : RW;
      _counts[which][type] ++;
      _bytes [which][type] += byte_size;
    }
  
+   void record_other_type(int byte_size, bool read_only) {
+     int which = (read_only) ? RO : RW;
+     _bytes [which][OtherType] += byte_size;
+   }
    void print_stats(int ro_all, int rw_all, int mc_all, int md_all);
  };
  
  void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all, int md_all) {
    // Calculate size of data that was not allocated by Metaspace::allocate()
*** 780,790 ****
    ResourceMark rm;
    LogMessage(cds) msg;
    stringStream info_stream;
  
!   info_stream.print_cr("Detailed metadata info (rw includes md and mc):");
    info_stream.print_cr("%s", hdr);
    info_stream.print_cr("%s", sep);
  
    for (int type = 0; type < int(_number_of_types); type ++) {
      const char *name = type_name((Type)type);
      int ro_count = _counts[RO][type];
--- 791,801 ----
    ResourceMark rm;
    LogMessage(cds) msg;
    stringStream info_stream;
  
!   info_stream.print_cr("Detailed metadata info (excluding od/st regions; rw stats include md/mc regions):");
    info_stream.print_cr("%s", hdr);
    info_stream.print_cr("%s", sep);
  
    for (int type = 0; type < int(_number_of_types); type ++) {
      const char *name = type_name((Type)type);
      int ro_count = _counts[RO][type];
*** 820,830 ****
    info_stream.print_cr(fmt_stats, "Total",
                         all_ro_count, all_ro_bytes, all_ro_perc,
                         all_rw_count, all_rw_bytes, all_rw_perc,
                         all_count, all_bytes, all_perc);
  
!   //assert(all_ro_bytes == ro_all, "everything should have been counted");
    assert(all_rw_bytes == rw_all, "everything should have been counted");
  
    msg.info("%s", info_stream.as_string());
  #undef fmt_stats
  }
--- 831,841 ----
    info_stream.print_cr(fmt_stats, "Total",
                         all_ro_count, all_ro_bytes, all_ro_perc,
                         all_rw_count, all_rw_bytes, all_rw_perc,
                         all_count, all_bytes, all_perc);
  
!   assert(all_ro_bytes == ro_all, "everything should have been counted");
    assert(all_rw_bytes == rw_all, "everything should have been counted");
  
    msg.info("%s", info_stream.as_string());
  #undef fmt_stats
  }
*** 835,844 ****
--- 846,856 ----
  private:
    GrowableArray<MemRegion> *_string_regions;
  
    void dump_string_and_symbols();
    char* dump_read_only_tables();
+   void print_region_stats();
  public:
  
    VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
    void doit();   // outline because gdb sucks
    static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only, bool allow_exec);
*** 888,925 ****
  }
  
  typedef ResourceHashtable<
        address, address,
        ArchiveCompactor::my_hash,   // solaris compiler doesn't like: primitive_hash<address>
        ArchiveCompactor::my_equals, // solaris compiler doesn't like: primitive_equals<address>
!       16384, ResourceObj::C_HEAP> MyTable;
! static MyTable* _new_loc_table;
  
  public:
    static void initialize() {
      _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats;
!     _new_loc_table = new(ResourceObj::C_HEAP, mtInternal)MyTable;
    }
  
    static DumpAllocStats* alloc_stats() {
      return _alloc_stats;
    }
  
    static void allocate(MetaspaceClosure::Ref* ref, bool read_only) {
      address obj = ref->obj();
      int bytes = ref->size() * BytesPerWord;
      char* p;
      size_t alignment = BytesPerWord;
      if (read_only) {
        p = _ro_region.allocate(bytes, alignment);
      } else {
        p = _rw_region.allocate(bytes, alignment);
      }
      memcpy(p, obj, bytes);
      bool isnew = _new_loc_table->put(obj, (address)p);
      assert(isnew, "must be");
      log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
!     _alloc_stats->record(ref->msotype(), bytes, read_only);
  
      if (ref->msotype() == MetaspaceObj::SymbolType) {
        uintx delta = MetaspaceShared::object_delta(p);
        if (delta > MAX_SHARED_DELTA) {
          // This is just a sanity check and should not appear in any real world usage. This
          // happens only if you allocate more than 2GB of Symbols and would require
--- 900,944 ----
  }
  
  typedef ResourceHashtable<
        address, address,
        ArchiveCompactor::my_hash,   // solaris compiler doesn't like: primitive_hash<address>
        ArchiveCompactor::my_equals, // solaris compiler doesn't like: primitive_equals<address>
!       16384, ResourceObj::C_HEAP> RelocationTable;
! static RelocationTable* _new_loc_table;
  
  public:
    static void initialize() {
      _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats;
!     _new_loc_table = new(ResourceObj::C_HEAP, mtInternal)RelocationTable;
    }
  
    static DumpAllocStats* alloc_stats() {
      return _alloc_stats;
    }
  
    static void allocate(MetaspaceClosure::Ref* ref, bool read_only) {
      address obj = ref->obj();
      int bytes = ref->size() * BytesPerWord;
      char* p;
      size_t alignment = BytesPerWord;
+     char* oldtop;
+     char* newtop;
+ 
      if (read_only) {
+       oldtop = _ro_region.top();
        p = _ro_region.allocate(bytes, alignment);
+       newtop = _ro_region.top();
      } else {
+       oldtop = _rw_region.top();
        p = _rw_region.allocate(bytes, alignment);
+       newtop = _rw_region.top();
      }
      memcpy(p, obj, bytes);
      bool isnew = _new_loc_table->put(obj, (address)p);
      assert(isnew, "must be");
      log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
!     _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
  
      if (ref->msotype() == MetaspaceObj::SymbolType) {
        uintx delta = MetaspaceShared::object_delta(p);
        if (delta > MAX_SHARED_DELTA) {
          // This is just a sanity check and should not appear in any real world usage. This
          // happens only if you allocate more than 2GB of Symbols and would require
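Two changes here: the table type gets the more descriptive name RelocationTable, and the stats now record newtop - oldtop (the region's actual growth, including alignment padding) instead of the raw object size. A sketch of the old-to-new bookkeeping, using std::unordered_map as a stand-in for HotSpot's ResourceHashtable and a plain reference for the region top:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <unordered_map>

    static std::unordered_map<void*, void*> new_loc_table;  // old address -> new address

    static char* align_up_p(char* p, size_t a) {
      return (char*)(((uintptr_t)p + a - 1) & ~(uintptr_t)(a - 1));
    }

    // Copies obj into a bump-pointer region and records the relocation.
    // Charging (top-after - top-before) to the stats, as the hunk above does,
    // attributes any alignment padding to the object's type as well.
    char* copy_and_record(char*& top, void* obj, size_t bytes, size_t alignment) {
      char* oldtop = top;                  // region top before the allocation
      char* p = align_up_p(top, alignment);
      top = p + bytes;                     // (top - oldtop) >= bytes when padded
      memcpy(p, obj, bytes);
      bool isnew = new_loc_table.insert({obj, p}).second;
      assert(isnew && "each object is copied exactly once");
      return p;
    }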
*** 1040,1055 ****
      // cleanup
      _ssc = NULL;
    }
  
    // We must relocate the System::_well_known_klasses only after we have copied the
!   // strings in during dump_string_and_symbols(): during the copy, we operate on old
    // String objects which assert that their klass is the old
    // SystemDictionary::String_klass().
    static void relocate_well_known_klasses() {
      {
!       tty->print_cr("Relocating _well_known_klasses[] ... ");
        ResourceMark rm;
        RefRelocator ext_reloc;
        SystemDictionary::well_known_klasses_do(&ext_reloc);
      }
      // NOTE: after this point, we shouldn't have any globals that can reach the old
--- 1059,1074 ----
      // cleanup
      _ssc = NULL;
    }
  
    // We must relocate the System::_well_known_klasses only after we have copied the
!   // strings in during dump_string_and_symbols(): during the string copy, we operate on old
    // String objects which assert that their klass is the old
    // SystemDictionary::String_klass().
    static void relocate_well_known_klasses() {
      {
!       tty->print_cr("Relocating SystemDictionary::_well_known_klasses[] ... ");
        ResourceMark rm;
        RefRelocator ext_reloc;
        SystemDictionary::well_known_klasses_do(&ext_reloc);
      }
      // NOTE: after this point, we shouldn't have any globals that can reach the old
*** 1088,1098 ****
    }
  };
  
  DumpAllocStats* ArchiveCompactor::_alloc_stats;
  SortedSymbolClosure* ArchiveCompactor::_ssc;
! ArchiveCompactor::MyTable* ArchiveCompactor::_new_loc_table;
  
  void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only,  bool allow_exec) {
    mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
  }
  
--- 1107,1117 ----
    }
  };
  
  DumpAllocStats* ArchiveCompactor::_alloc_stats;
  SortedSymbolClosure* ArchiveCompactor::_ssc;
! ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table;
  
  void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only,  bool allow_exec) {
    mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
  }
  
*** 1104,1122 ****
    NOT_PRODUCT(StringTable::verify());
    SymbolTable::write_to_archive();
  
    // The string space has maximum two regions. See FileMapInfo::write_string_regions() for details.
    _string_regions = new GrowableArray<MemRegion>(2);
!   size_t shared_string_bytes = 0;
!   StringTable::write_to_archive(_string_regions, &shared_string_bytes);
!   char* st_base = _string_regions->is_empty() ? NULL : (char*)_string_regions->first().start();
!   char* st_top = st_base + shared_string_bytes;
!   _st_region.init(st_base, st_top, st_top);
!   _st_region.pack();
  }
  
  char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
    // Reorder the system dictionary. Moving the symbols affects
    // how the hash table indices are calculated.
    SystemDictionary::reorder_dictionary_for_sharing();
  
    NOT_PRODUCT(SystemDictionary::verify();)
--- 1123,1137 ----
    NOT_PRODUCT(StringTable::verify());
    SymbolTable::write_to_archive();
  
    // The string space has maximum two regions. See FileMapInfo::write_string_regions() for details.
    _string_regions = new GrowableArray<MemRegion>(2);
!   StringTable::write_to_archive(_string_regions);
  }
  
  char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
+   char* oldtop = _ro_region.top();
    // Reorder the system dictionary. Moving the symbols affects
    // how the hash table indices are calculated.
    SystemDictionary::reorder_dictionary_for_sharing();
  
    NOT_PRODUCT(SystemDictionary::verify();)
*** 1130,1139 ****
--- 1145,1156 ----
  
    // Write the other data to the output array.
    WriteClosure wc(&_ro_region);
    MetaspaceShared::serialize(&wc);
  
+   char* newtop = _ro_region.top();
+   ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - oldtop), true);
    return buckets_top;
  }
  
  void VM_PopulateDumpSharedSpace::doit() {
    Thread* THREAD = VMThread::vm_thread();
*** 1207,1238 ****
    _od_region.pack();
  
    // The 5 core spaces are allocated consecutively mc->rw->ro->md->od, so there total size
    // is just the spaces between the two ends.
    size_t core_spaces_size = _od_region.end() - _mc_region.base();
!   assert(core_spaces_size == (size_t)align_size_up(core_spaces_size, Metaspace::reserve_alignment()),
           "should already be aligned");
  
-   // Print statistics of all the regions
-   const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
-                                 _mc_region.reserved() + _md_region.reserved() +
-                                 _st_region.reserved() + _od_region.reserved();
-   const size_t total_bytes = _ro_region.used() + _rw_region.used() +
-                              _mc_region.used() + _md_region.used() +
-                              _st_region.used() + _od_region.used();
-   const double total_u_perc = total_bytes / double(total_reserved) * 100.0;
- 
-   _mc_region.print(total_reserved);
-   _rw_region.print(total_reserved);
-   _ro_region.print(total_reserved);
-   _md_region.print(total_reserved);
-   _st_region.print(total_reserved);
-   _od_region.print(total_reserved);
- 
-   tty->print_cr("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
-                 total_bytes, total_reserved, total_u_perc);
- 
    // During patching, some virtual methods may be called, so at this point
    // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
    MetaspaceShared::patch_cpp_vtable_pointers();
  
    // The vtable clones contain addresses of the current process.
--- 1224,1236 ----
    _od_region.pack();
  
    // The 5 core spaces are allocated consecutively mc->rw->ro->md->od, so there total size
    // is just the spaces between the two ends.
    size_t core_spaces_size = _od_region.end() - _mc_region.base();
!   assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
           "should already be aligned");
  
    // During patching, some virtual methods may be called, so at this point
    // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
    MetaspaceShared::patch_cpp_vtable_pointers();
  
    // The vtable clones contain addresses of the current process.
*** 1247,1256 ****
--- 1245,1257 ----
    mapinfo->set_misc_data_patching_start(vtbl_list);
    mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers());
    mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size());
    mapinfo->set_core_spaces_size(core_spaces_size);
  
+   char* s0_start, *s0_top, *s0_end;
+   char* s1_start, *s1_top, *s1_end;
+ 
    for (int pass=1; pass<=2; pass++) {
      if (pass == 1) {
        // The first pass doesn't actually write the data to disk. All it
        // does is to update the fields in the mapinfo->_header.
      } else {
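The s0/s1 bounds are declared just before the existing two-pass loop: pass 1 only fills in header fields, pass 2 writes the regions for real. A generic sketch of that size-then-write idiom; the names and the offset scheme are illustrative, not the FileMapInfo API:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Region { const char* data; size_t size; };

    // Pass 1 computes where every region will land (header bookkeeping);
    // pass 2 seeks there and writes the bytes. Same shape as the doit() loop.
    void write_archive(FILE* f, const std::vector<Region>& regions) {
      std::vector<long> offsets(regions.size());
      for (int pass = 1; pass <= 2; pass++) {
        long pos = 0;  // a real archive would start past the header
        for (size_t i = 0; i < regions.size(); i++) {
          if (pass == 1) {
            offsets[i] = pos;                    // record only, no I/O yet
          } else {
            fseek(f, offsets[i], SEEK_SET);      // now write for real
            fwrite(regions[i].data, 1, regions[i].size, f);
          }
          pos += (long)regions[i].size;
        }
      }
    }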
*** 1267,1290 ****
      write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
      write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
      write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
      write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
      write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);
! 
!     mapinfo->write_string_regions(_string_regions);
    }
  
    mapinfo->close();
  
    // Restore the vtable in case we invoke any virtual methods.
    MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);
  
    if (log_is_enabled(Info, cds)) {
      ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
                                                   int(_mc_region.used()), int(_md_region.used()));
    }
  }
  
  // Update a Java object to point its Klass* to the new location after
  // shared archive has been compacted.
  void MetaspaceShared::relocate_klass_ptr(oop o) {
    assert(DumpSharedSpaces, "sanity");
    Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
--- 1268,1323 ----
      write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
      write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
      write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
      write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
      write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);
! 
!     mapinfo->write_string_regions(_string_regions,
!                                   &s0_start, &s0_top, &s0_end,
!                                   &s1_start, &s1_top, &s1_end);
    }
  
    mapinfo->close();
  
    // Restore the vtable in case we invoke any virtual methods.
    MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);
  
+   _s0_region.init(s0_start, s0_top, s0_end);
+   _s1_region.init(s1_start, s1_top, s1_end);
+   print_region_stats();
+ 
    if (log_is_enabled(Info, cds)) {
      ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
                                                   int(_mc_region.used()), int(_md_region.used()));
    }
  }
  
+ void VM_PopulateDumpSharedSpace::print_region_stats() {
+   // Print statistics of all the regions
+   const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
+                                 _mc_region.reserved() + _md_region.reserved() +
+                                 _od_region.reserved() +
+                                 _s0_region.reserved() + _s1_region.reserved();
+   const size_t total_bytes = _ro_region.used() + _rw_region.used() +
+                              _mc_region.used() + _md_region.used() +
+                              _od_region.used() +
+                              _s0_region.used() + _s1_region.used();
+   const double total_u_perc = total_bytes / double(total_reserved) * 100.0;
+ 
+   _mc_region.print(total_reserved);
+   _rw_region.print(total_reserved);
+   _ro_region.print(total_reserved);
+   _md_region.print(total_reserved);
+   _od_region.print(total_reserved);
+   _s0_region.print(total_reserved);
+   _s1_region.print(total_reserved);
+ 
+   tty->print_cr("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
+                 total_bytes, total_reserved, total_u_perc);
+ }
+ 
  // Update a Java object to point its Klass* to the new location after
  // shared archive has been compacted.
  void MetaspaceShared::relocate_klass_ptr(oop o) {
    assert(DumpSharedSpaces, "sanity");
    Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
*** 1725,1735 ****
    // or so.
    _mc_region.print_out_of_space_msg(name, needed_bytes);
    _rw_region.print_out_of_space_msg(name, needed_bytes);
    _ro_region.print_out_of_space_msg(name, needed_bytes);
    _md_region.print_out_of_space_msg(name, needed_bytes);
-   _st_region.print_out_of_space_msg(name, needed_bytes);
    _od_region.print_out_of_space_msg(name, needed_bytes);
  
    vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
                                  "Please reduce the number of shared classes.");
  }
--- 1758,1767 ----