--- old/src/share/vm/memory/metaspaceShared.cpp	2017-07-27 12:12:28.995901147 -0700
+++ new/src/share/vm/memory/metaspaceShared.cpp	2017-07-27 12:12:28.851895575 -0700
@@ -71,15 +71,17 @@
 size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
 size_t MetaspaceShared::_core_spaces_size = 0;
 
-// The CDS archive is divided into 6 regions:
+// The CDS archive is divided into the following regions:
 //     mc  - misc code (the method entry trampolines)
 //     rw  - read-write metadata
 //     ro  - read-only metadata and read-only tables
 //     md  - misc data (the c++ vtables)
-//     od  - other data (original class files)
-//     st  - shared strings
+//     od  - optional data (original class files)
 //
-// Except for the st region, the other 5 regions are linearly allocated, starting from
+//     s0  - shared strings #0
+//     s1  - shared strings #1 (may be empty)
+//
+// Except for the s0/s1 regions, the other 5 regions are linearly allocated, starting from
 // SharedBaseAddress, in the order of mc->rw->ro->md->od. The sizes of these 5 regions
 // are page-aligned, and there's no gap between any consecutive regions.
 //
@@ -94,7 +96,7 @@
 // [5] C++ vtables are copied into the md region.
 // [6] Original class files are copied into the od region.
 //
-// The st region is populated inside MetaspaceShared::dump_string_and_symbols. Its
+// The s0/s1 regions are populated inside MetaspaceShared::dump_string_and_symbols. Their
 // layout is independent of the other 5 regions.
 
 class DumpRegion {
@@ -121,15 +123,15 @@
   DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}
 
   char* allocate(size_t num_bytes, size_t alignment=BytesPerWord) {
-    char* p = (char*)align_ptr_up(_top, alignment);
-    char* newtop = p + align_size_up(num_bytes, alignment);
+    char* p = (char*)align_up(_top, alignment);
+    char* newtop = p + align_up(num_bytes, alignment);
     expand_top_to(newtop);
     memset(p, 0, newtop - p);
     return p;
   }
 
   void append_intptr_t(intptr_t n) {
-    assert(is_ptr_aligned(_top, sizeof(intptr_t)), "bad alignment");
+    assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
     intptr_t *p = (intptr_t*)_top;
     char* newtop = _top + sizeof(intptr_t);
     expand_top_to(newtop);
@@ -147,7 +149,9 @@
   }
 
   double perc(size_t used, size_t total) const {
-    if (total == 0) {total = 1;}
+    if (total == 0) {
+      total = 1;
+    }
     return used / double(total) * 100.0;
   }
@@ -177,7 +181,7 @@
 
   void pack(DumpRegion* next = NULL) {
     assert(!is_packed(), "sanity");
-    _end = (char*)align_ptr_up(_top, Metaspace::reserve_alignment());
+    _end = (char*)align_up(_top, Metaspace::reserve_alignment());
     _is_packed = true;
     if (next != NULL) {
       next->_base = next->_top = this->_end;
@@ -189,8 +193,8 @@
   }
 };
 
-DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md");
-DumpRegion _st_region("st"), _od_region("od");
+DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
+DumpRegion _s0_region("s0"), _s1_region("s1");
 
 char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
   return _mc_region.allocate(num_bytes);
 }
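
For orientation, here is a minimal sketch of how the five consecutive core regions end up adjacent, using only the DumpRegion::pack() chaining shown above and the mc->rw->ro->md->od order from the layout comment. This is editorial illustration, not part of the patch, and the exact call sites below are hypothetical:

    _mc_region.pack(&_rw_region);   // _rw_region._base = _rw_region._top = align_up(_mc_region._top, reserve alignment)
    _rw_region.pack(&_ro_region);
    _ro_region.pack(&_md_region);
    _md_region.pack(&_od_region);
    _od_region.pack();              // last core region; no successor to chain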
@@ -203,14 +207,14 @@
 void MetaspaceShared::initialize_shared_rs() {
   const size_t reserve_alignment = Metaspace::reserve_alignment();
   bool large_pages = false; // No large pages when dumping the CDS archive.
-  char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, reserve_alignment);
+  char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
 
 #ifdef _LP64
   const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
-  const size_t cds_total = align_size_down(UnscaledClassSpaceMax, reserve_alignment);
+  const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
 #else
   // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
-  size_t cds_total = align_size_down(256*M, reserve_alignment);
+  size_t cds_total = align_down(256*M, reserve_alignment);
 #endif
 
   // First try to reserve the space at the specified SharedBaseAddress.
@@ -237,9 +241,9 @@
     assert(UseCompressedOops && UseCompressedClassPointers,
       "UseCompressedOops and UseCompressedClassPointers must be set");
 
-    size_t max_archive_size = align_size_down(cds_total * 3 / 4, reserve_alignment);
+    size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment);
     ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
-    CompressedClassSpaceSize = align_size_down(tmp_class_space.size(), reserve_alignment);
+    CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment);
     _shared_rs = _shared_rs.first_part(max_archive_size);
 
     // Set up compressed class pointers.
@@ -287,10 +291,13 @@
   assert(commit <= uncommitted, "sanity");
 
   bool result = _shared_vs.expand_by(commit, false);
-  assert(result, "Failed to commit memory");
+  if (!result) {
+    vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
+                                          need_committed_size));
+  }
 
-  log_info(cds)("Expanding shared spaces by %7d bytes [total %8d bytes ending at %p]",
-                int(commit), int(_shared_vs.actual_committed_size()), _shared_vs.high());
+  log_info(cds)("Expanding shared spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
+                commit, _shared_vs.actual_committed_size(), _shared_vs.high());
 }
 
 // Read/write a data stream for restoring/preserving metadata pointers and
@@ -497,7 +504,7 @@
 
 template <class T>
 intptr_t* CppVtableCloner<T>::allocate(const char* name) {
-  assert(is_ptr_aligned(_md_region.top(), sizeof(intptr_t)), "bad alignment");
+  assert(is_aligned(_md_region.top(), sizeof(intptr_t)), "bad alignment");
   int n = get_vtable_length(name);
   _info = (CppVtableInfo*)_md_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
   _info->set_vtable_size(n);
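
Since most of the mechanical changes above are the align_size_up/align_ptr_up/align_size_down to align_up/align_down renaming, a self-contained sketch of the rounding these helpers perform may be useful. This is editorial illustration only; the my_* names are stand-ins, assuming power-of-two alignments as used throughout this file:

    #include <cassert>
    #include <cstddef>

    // Stand-ins for HotSpot's align_up/align_down on power-of-two alignments.
    static inline size_t my_align_up(size_t v, size_t a)   { return (v + a - 1) & ~(a - 1); }
    static inline size_t my_align_down(size_t v, size_t a) { return v & ~(a - 1); }

    int main() {
      assert(my_align_up(0x1001, 0x1000) == 0x2000);   // round up to the next 4K boundary
      assert(my_align_down(0x1FFF, 0x1000) == 0x1000); // round down to the enclosing 4K boundary
      return 0;
    }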
@@ -735,6 +742,10 @@
     _bytes [which][type] += byte_size;
   }
 
+  void record_other_type(int byte_size, bool read_only) {
+    int which = (read_only) ? RO : RW;
+    _bytes [which][OtherType] += byte_size;
+  }
   void print_stats(int ro_all, int rw_all, int mc_all, int md_all);
 };
@@ -782,7 +793,7 @@
   LogMessage(cds) msg;
   stringStream info_stream;
 
-  info_stream.print_cr("Detailed metadata info (rw includes md and mc):");
+  info_stream.print_cr("Detailed metadata info (excluding od/s0/s1 regions; rw stats include md/mc regions):");
   info_stream.print_cr("%s", hdr);
   info_stream.print_cr("%s", sep);
   for (int type = 0; type < int(_number_of_types); type ++) {
@@ -822,7 +833,7 @@
                      all_rw_count, all_rw_bytes, all_rw_perc,
                      all_count, all_bytes, all_perc);
 
-//assert(all_ro_bytes == ro_all, "everything should have been counted");
+  assert(all_ro_bytes == ro_all, "everything should have been counted");
   assert(all_rw_bytes == rw_all, "everything should have been counted");
 
   msg.info("%s", info_stream.as_string());
@@ -837,6 +848,7 @@
 
   void dump_string_and_symbols();
   char* dump_read_only_tables();
+  void print_region_stats();
 public:
 
   VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
@@ -890,13 +902,13 @@
       address, address,
      ArchiveCompactor::my_hash,   // solaris compiler doesn't like: primitive_hash<address>
      ArchiveCompactor::my_equals, // solaris compiler doesn't like: primitive_equals<address>
-      16384, ResourceObj::C_HEAP> MyTable;
-  static MyTable* _new_loc_table;
+      16384, ResourceObj::C_HEAP> RelocationTable;
+  static RelocationTable* _new_loc_table;
 public:
   static void initialize() {
     _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats;
-    _new_loc_table = new(ResourceObj::C_HEAP, mtInternal)MyTable;
+    _new_loc_table = new(ResourceObj::C_HEAP, mtInternal)RelocationTable;
   }
   static DumpAllocStats* alloc_stats() {
     return _alloc_stats;
   }
@@ -906,18 +918,25 @@
     address obj = ref->obj();
     int bytes = ref->size() * BytesPerWord;
     char* p;
-    size_t alignment = BytesPerWord;
+    size_t alignment = BytesPerWord;
+    char* oldtop;
+    char* newtop;
+
     if (read_only) {
+      oldtop = _ro_region.top();
       p = _ro_region.allocate(bytes, alignment);
+      newtop = _ro_region.top();
     } else {
+      oldtop = _rw_region.top();
       p = _rw_region.allocate(bytes, alignment);
+      newtop = _rw_region.top();
     }
     memcpy(p, obj, bytes);
     bool isnew = _new_loc_table->put(obj, (address)p);
     assert(isnew, "must be");
 
     log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
-    _alloc_stats->record(ref->msotype(), bytes, read_only);
+    _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
     if (ref->msotype() == MetaspaceObj::SymbolType) {
       uintx delta = MetaspaceShared::object_delta(p);
       if (delta > MAX_SHARED_DELTA) {
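
A side note on the hunk above: RelocationTable (the renamed MyTable) maps each original metadata object's address to the address of its copy in the ro/rw dump regions, recorded by the _new_loc_table->put() call. Later relocation passes (for example the RefRelocator used by relocate_well_known_klasses() further down) can then look up the new location. A condensed, illustrative sketch of that lookup; the helper name and the get() call are assumptions, not taken from this patch:

    static address my_new_loc(address obj) {
      address* p = _new_loc_table->get(obj);   // assumed ResourceHashtable-style lookup
      assert(p != NULL, "object must have been archived before it can be relocated");
      return *p;
    }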
@@ -1042,12 +1061,12 @@
   }
 
   // We must relocate the SystemDictionary::_well_known_klasses only after we have copied the
-  // strings in during dump_string_and_symbols(): during the copy, we operate on old
+  // strings in during dump_string_and_symbols(): during the string copy, we operate on old
   // String objects which assert that their klass is the old
   // SystemDictionary::String_klass().
   static void relocate_well_known_klasses() {
     {
-      tty->print_cr("Relocating _well_known_klasses[] ... ");
+      tty->print_cr("Relocating SystemDictionary::_well_known_klasses[] ... ");
       ResourceMark rm;
       RefRelocator ext_reloc;
       SystemDictionary::well_known_klasses_do(&ext_reloc);
@@ -1090,7 +1109,7 @@
 
 DumpAllocStats* ArchiveCompactor::_alloc_stats;
 SortedSymbolClosure* ArchiveCompactor::_ssc;
-ArchiveCompactor::MyTable* ArchiveCompactor::_new_loc_table;
+ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table;
 
 void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region,
                                               bool read_only, bool allow_exec) {
@@ -1106,15 +1125,11 @@
 
   // The string space has a maximum of two regions. See FileMapInfo::write_string_regions() for details.
   _string_regions = new GrowableArray<MemRegion>(2);
-  size_t shared_string_bytes = 0;
-  StringTable::write_to_archive(_string_regions, &shared_string_bytes);
-  char* st_base = _string_regions->is_empty() ? NULL : (char*)_string_regions->first().start();
-  char* st_top = st_base + shared_string_bytes;
-  _st_region.init(st_base, st_top, st_top);
-  _st_region.pack();
+  StringTable::write_to_archive(_string_regions);
 }
 
 char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
+  char* oldtop = _ro_region.top();
   // Reorder the system dictionary. Moving the symbols affects
   // how the hash table indices are calculated.
   SystemDictionary::reorder_dictionary_for_sharing();
@@ -1132,6 +1147,8 @@
   WriteClosure wc(&_ro_region);
   MetaspaceShared::serialize(&wc);
 
+  char* newtop = _ro_region.top();
+  ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - oldtop), true);
   return buckets_top;
 }
 
@@ -1209,28 +1226,9 @@
   // The 5 core spaces are allocated consecutively mc->rw->ro->md->od, so their total size
   // is just the space between the two ends.
   size_t core_spaces_size = _od_region.end() - _mc_region.base();
-  assert(core_spaces_size == (size_t)align_size_up(core_spaces_size, Metaspace::reserve_alignment()),
+  assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
          "should already be aligned");
 
-  // Print statistics of all the regions
-  const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
-                                _mc_region.reserved() + _md_region.reserved() +
-                                _st_region.reserved() + _od_region.reserved();
-  const size_t total_bytes = _ro_region.used() + _rw_region.used() +
-                             _mc_region.used() + _md_region.used() +
-                             _st_region.used() + _od_region.used();
-  const double total_u_perc = total_bytes / double(total_reserved) * 100.0;
-
-  _mc_region.print(total_reserved);
-  _rw_region.print(total_reserved);
-  _ro_region.print(total_reserved);
-  _md_region.print(total_reserved);
-  _st_region.print(total_reserved);
-  _od_region.print(total_reserved);
-
-  tty->print_cr("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
-                total_bytes, total_reserved, total_u_perc);
-
   // During patching, some virtual methods may be called, so at this point
   // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
   MetaspaceShared::patch_cpp_vtable_pointers();
@@ -1249,6 +1247,9 @@
   mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size());
   mapinfo->set_core_spaces_size(core_spaces_size);
 
+  char* s0_start, *s0_top, *s0_end;
+  char* s1_start, *s1_top, *s1_end;
+
   for (int pass=1; pass<=2; pass++) {
     if (pass == 1) {
       // The first pass doesn't actually write the data to disk. All it
@@ -1269,7 +1270,10 @@
     write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
     write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
     write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);
-    mapinfo->write_string_regions(_string_regions);
+
+    mapinfo->write_string_regions(_string_regions,
+                                  &s0_start, &s0_top, &s0_end,
+                                  &s1_start, &s1_top, &s1_end);
   }
 
   mapinfo->close();
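
The two-pass loop above is a standard compute-then-write archive pattern: as the in-line comment says, the first pass runs the same write_region()/write_string_regions() calls only so that the region sizes and offsets land in mapinfo->_header, and the second pass repeats them to actually emit the data. A self-contained, generic sketch of the idea follows; this is editorial illustration only, not HotSpot code, and all names in it are made up:

    #include <cstdio>
    #include <cstring>

    struct ToyHeader { long off[2]; long len[2]; };

    // Pass 1 (write == false) only records the layout; pass 2 (write == true) emits the bytes.
    static void put_region(FILE* f, ToyHeader* h, int i, const char* data, long* pos, bool write) {
      h->off[i] = *pos;
      h->len[i] = (long)strlen(data);
      if (write) fwrite(data, 1, (size_t)h->len[i], f);
      *pos += h->len[i];
    }

    int main() {
      ToyHeader h;
      FILE* f = fopen("toy.img", "wb");
      if (f == NULL) return 1;
      for (int pass = 1; pass <= 2; pass++) {
        long pos = (long)sizeof(ToyHeader);
        if (pass == 2) fwrite(&h, sizeof(ToyHeader), 1, f);  // header is complete after pass 1
        put_region(f, &h, 0, "read-write data", &pos, pass == 2);
        put_region(f, &h, 1, "read-only data",  &pos, pass == 2);
      }
      fclose(f);
      return 0;
    }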
@@ -1277,12 +1281,41 @@
   // Restore the vtable in case we invoke any virtual methods.
   MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);
 
+  _s0_region.init(s0_start, s0_top, s0_end);
+  _s1_region.init(s1_start, s1_top, s1_end);
+  print_region_stats();
+
   if (log_is_enabled(Info, cds)) {
     ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
                                                  int(_mc_region.used()), int(_md_region.used()));
   }
 }
 
+void VM_PopulateDumpSharedSpace::print_region_stats() {
+  // Print statistics of all the regions
+  const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
+                                _mc_region.reserved() + _md_region.reserved() +
+                                _od_region.reserved() +
+                                _s0_region.reserved() + _s1_region.reserved();
+  const size_t total_bytes = _ro_region.used() + _rw_region.used() +
+                             _mc_region.used() + _md_region.used() +
+                             _od_region.used() +
+                             _s0_region.used() + _s1_region.used();
+  const double total_u_perc = total_bytes / double(total_reserved) * 100.0;
+
+  _mc_region.print(total_reserved);
+  _rw_region.print(total_reserved);
+  _ro_region.print(total_reserved);
+  _md_region.print(total_reserved);
+  _od_region.print(total_reserved);
+  _s0_region.print(total_reserved);
+  _s1_region.print(total_reserved);
+
+  tty->print_cr("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
+                total_bytes, total_reserved, total_u_perc);
+}
+
+
 // Update a Java object to point its Klass* to the new location after
 // shared archive has been compacted.
 void MetaspaceShared::relocate_klass_ptr(oop o) {
@@ -1727,7 +1760,6 @@
     _rw_region.print_out_of_space_msg(name, needed_bytes);
     _ro_region.print_out_of_space_msg(name, needed_bytes);
     _md_region.print_out_of_space_msg(name, needed_bytes);
-    _st_region.print_out_of_space_msg(name, needed_bytes);
     _od_region.print_out_of_space_msg(name, needed_bytes);
 
     vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),