src/hotspot/share/memory/metaspaceShared.cpp
*** 205,215 ****
return base() <= p && p < top();
}
};
! DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;
char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
return _mc_region.allocate(num_bytes);
}
--- 205,215 ----
return base() <= p && p < top();
}
};
! DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md");
size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;
char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
return _mc_region.allocate(num_bytes);
}
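
The contains() check above (base() <= p && p < top()) and misc_code_space_alloc()
delegating to _mc_region.allocate() indicate that DumpRegion is a bump-pointer
arena. A minimal sketch of that contract, using simplified, hypothetical members
(the real DumpRegion also commits memory on demand, aligns allocations, and
supports pack(), none of which is shown here):

#include <stddef.h>

// Hypothetical, simplified stand-in for DumpRegion: allocation just bumps
// _top; contains() is the same half-open interval test as above.
class SimpleDumpRegion {
  const char* _name;
  char* _base;   // start of the region
  char* _top;    // first free byte; grows upward
  char* _end;    // exclusive upper bound

public:
  SimpleDumpRegion(const char* name, char* base, size_t size)
    : _name(name), _base(base), _top(base), _end(base + size) {}

  char* allocate(size_t num_bytes) {
    // The real implementation aligns num_bytes and expands or reports
    // out-of-space; this sketch just fails with NULL.
    if (num_bytes > (size_t)(_end - _top)) return NULL;
    char* p = _top;
    _top += num_bytes;
    return p;
  }

  bool contains(char* p) const { return _base <= p && p < _top; }
  char* base() const { return _base; }
  char* top()  const { return _top; }
  char* end()  const { return _end; }
};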
*** 596,622 ****
}
}
}
}
- static void relocate_cached_class_file() {
- for (int i = 0; i < _global_klass_objects->length(); i++) {
- Klass* k = _global_klass_objects->at(i);
- if (k->is_instance_klass()) {
- InstanceKlass* ik = InstanceKlass::cast(k);
- JvmtiCachedClassFileData* p = ik->get_archived_class_data();
- if (p != NULL) {
- int size = offset_of(JvmtiCachedClassFileData, data) + p->length;
- JvmtiCachedClassFileData* q = (JvmtiCachedClassFileData*)_od_region.allocate(size);
- q->length = p->length;
- memcpy(q->data, p->data, p->length);
- ik->set_archived_class_data(q);
- }
- }
- }
- }
-
// Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
// (In GCC this is the field <Type>::_vptr, i.e., first word in the object.)
//
// Addresses of the vtables and the methods may be different across JVM runs,
// if libjvm.so is dynamically loaded at a different base address.
--- 596,605 ----
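
The comment above explains why the archive needs cloned C++ vtables: the vptr
stored in each Metadata object points into libjvm.so, whose load address can
differ between the dump run and later runs. An illustrative program (not the
HotSpot CppVtableCloner code) showing that under the common Itanium C++ ABI the
vptr is the first word of a polymorphic object and varies with the load address:

#include <stdint.h>
#include <stdio.h>

struct Meta {                  // stand-in for a Metadata type such as Klass
  virtual ~Meta() {}
  virtual void print_on() {}   // any virtual method forces a vtable
};

int main() {
  Meta m;
  // The first word of a polymorphic object is its vptr in this ABI. Under
  // ASLR that address changes per run, so a vptr written into the archive
  // at dump time would be stale at run time and must be patched when the
  // archive is mapped.
  intptr_t vptr = *(intptr_t*)(void*)&m;
  printf("vptr = %p (varies across runs under ASLR)\n", (void*)vptr);
  return 0;
}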
*** 1436,1454 ****
char* read_only_tables_start = dump_read_only_tables();
_ro_region.pack(&_md_region);
char* vtbl_list = _md_region.top();
MetaspaceShared::allocate_cpp_vtable_clones();
! _md_region.pack(&_od_region);
!
! // Relocate the archived class file data into the od region
! relocate_cached_class_file();
! _od_region.pack();
! // The 5 core spaces are allocated consecutively as mc->rw->ro->md->od, so their total size
// is just the space between the two ends.
! size_t core_spaces_size = _od_region.end() - _mc_region.base();
assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
"should already be aligned");
// During patching, some virtual methods may be called, so at this point
// the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
--- 1419,1433 ----
char* read_only_tables_start = dump_read_only_tables();
_ro_region.pack(&_md_region);
char* vtbl_list = _md_region.top();
MetaspaceShared::allocate_cpp_vtable_clones();
! _md_region.pack();
! // The 4 core spaces are allocated consecutively as mc->rw->ro->md, so their total size
// is just the space between the two ends.
! size_t core_spaces_size = _md_region.end() - _mc_region.base();
assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
"should already be aligned");
// During patching, some virtual methods may be called, so at this point
// the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
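
Because each pack(&next) call starts the next region at the previous region's
aligned top, the four core regions end up back-to-back, and their combined size
telescopes to the single pointer difference _md_region.end() - _mc_region.base()
computed above. A toy demonstration of that invariant, with hypothetical sizes:

#include <assert.h>
#include <stddef.h>

int main() {
  char backing[4096];
  // Lay out four regions with no gaps, as mc->rw->ro->md are after packing.
  char* mc_base = backing;  char* mc_end = mc_base + 1024;
  char* rw_base = mc_end;   char* rw_end = rw_base + 1024;
  char* ro_base = rw_end;   char* ro_end = ro_base + 1024;
  char* md_base = ro_end;   char* md_end = md_base + 1024;
  // Each base equals the previous end, so the per-region sizes telescope.
  size_t core_spaces_size = (size_t)(md_end - mc_base);
  assert(core_spaces_size == 4 * 1024);
  return 0;
}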
*** 1486,1496 ****
// so it needs to be read/write.
write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
- write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);
_total_closed_archive_region_size = mapinfo->write_archive_heap_regions(
_closed_archive_heap_regions,
_closed_archive_heap_oopmaps,
MetaspaceShared::first_closed_archive_heap_region,
--- 1465,1474 ----
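
Each write_region() call above records the region's protection intent: mc stays
writable and executable (the comment above notes it needs to be read/write), rw
stays writable, and ro is mapped read-only. As an assumption for illustration
(the actual translation lives in FileMapInfo and the os layer, which this diff
does not show), the two flags could map to POSIX protection bits like this:

#include <sys/mman.h>

// Hypothetical mapping of the read_only/allow_exec flags to mmap/mprotect
// protection bits; illustrative only.
static int region_prot(bool read_only, bool allow_exec) {
  int prot = PROT_READ;
  if (!read_only) prot |= PROT_WRITE;  // regions patched at runtime
  if (allow_exec) prot |= PROT_EXEC;   // only mc carries executable code
  return prot;
}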
*** 1533,1557 ****
void VM_PopulateDumpSharedSpace::print_region_stats() {
// Print statistics of all the regions
const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
_mc_region.reserved() + _md_region.reserved() +
- _od_region.reserved() +
_total_closed_archive_region_size +
_total_open_archive_region_size;
const size_t total_bytes = _ro_region.used() + _rw_region.used() +
_mc_region.used() + _md_region.used() +
- _od_region.used() +
_total_closed_archive_region_size +
_total_open_archive_region_size;
const double total_u_perc = percent_of(total_bytes, total_reserved);
_mc_region.print(total_reserved);
_rw_region.print(total_reserved);
_ro_region.print(total_reserved);
_md_region.print(total_reserved);
- _od_region.print(total_reserved);
print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved);
print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);
tty->print_cr("total : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
total_bytes, total_reserved, total_u_perc);
--- 1511,1532 ----
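
print_region_stats() sums reserved and used bytes across the four remaining core
regions plus the closed and open archive heap regions, then reports utilization
through percent_of(). HotSpot defines that helper in its utilities headers; a
sketch of the intended arithmetic:

// Sketch of the percent_of() computation used above; guards against a zero
// total so an empty archive prints 0.0% rather than dividing by zero.
static double percent_of(size_t part, size_t total) {
  return total == 0 ? 0.0 : (double)part * 100.0 / (double)total;
}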
*** 1929,1974 ****
char* ro_base = NULL; char* ro_top;
char* rw_base = NULL; char* rw_top;
char* mc_base = NULL; char* mc_top;
char* md_base = NULL; char* md_top;
- char* od_base = NULL; char* od_top;
// Map each shared region
if ((mc_base = mapinfo->map_region(mc, &mc_top)) != NULL &&
(rw_base = mapinfo->map_region(rw, &rw_top)) != NULL &&
(ro_base = mapinfo->map_region(ro, &ro_top)) != NULL &&
(md_base = mapinfo->map_region(md, &md_top)) != NULL &&
- (od_base = mapinfo->map_region(od, &od_top)) != NULL &&
(image_alignment == (size_t)os::vm_allocation_granularity()) &&
mapinfo->validate_shared_path_table()) {
// Success -- set up MetaspaceObj::_shared_metaspace_{base,top} for
// fast checking in MetaspaceShared::is_in_shared_metaspace() and
// MetaspaceObj::is_shared().
//
! // We require that mc->rw->ro->md->od be laid out consecutively, with no
// gaps between them. That way, we can ensure that the OS won't be able to
// allocate any new memory spaces inside _shared_metaspace_{base,top}, which
// would mess up the simple comparison in MetaspaceShared::is_in_shared_metaspace().
! assert(mc_base < ro_base && mc_base < rw_base && mc_base < md_base && mc_base < od_base, "must be");
! assert(od_top > ro_top && od_top > rw_top && od_top > md_top && od_top > mc_top, "must be");
assert(mc_top == rw_base, "must be");
assert(rw_top == ro_base, "must be");
assert(ro_top == md_base, "must be");
- assert(md_top == od_base, "must be");
_core_spaces_size = mapinfo->core_spaces_size();
! MetaspaceObj::set_shared_metaspace_range((void*)mc_base, (void*)od_top);
return true;
} else {
// If there was a failure in mapping any of the spaces, unmap the ones
// that succeeded
if (ro_base != NULL) mapinfo->unmap_region(ro);
if (rw_base != NULL) mapinfo->unmap_region(rw);
if (mc_base != NULL) mapinfo->unmap_region(mc);
if (md_base != NULL) mapinfo->unmap_region(md);
- if (od_base != NULL) mapinfo->unmap_region(od);
#ifndef _WINDOWS
// Release the entire mapped region
shared_rs.release();
#endif
// If -Xshare:on is specified, print out the error message and exit VM,
--- 1904,1945 ----
char* ro_base = NULL; char* ro_top;
char* rw_base = NULL; char* rw_top;
char* mc_base = NULL; char* mc_top;
char* md_base = NULL; char* md_top;
// Map each shared region
if ((mc_base = mapinfo->map_region(mc, &mc_top)) != NULL &&
(rw_base = mapinfo->map_region(rw, &rw_top)) != NULL &&
(ro_base = mapinfo->map_region(ro, &ro_top)) != NULL &&
(md_base = mapinfo->map_region(md, &md_top)) != NULL &&
(image_alignment == (size_t)os::vm_allocation_granularity()) &&
mapinfo->validate_shared_path_table()) {
// Success -- set up MetaspaceObj::_shared_metaspace_{base,top} for
// fast checking in MetaspaceShared::is_in_shared_metaspace() and
// MetaspaceObj::is_shared().
//
! // We require that mc->rw->ro->md be laid out consecutively, with no
// gaps between them. That way, we can ensure that the OS won't be able to
// allocate any new memory spaces inside _shared_metaspace_{base,top}, which
// would mess up the simple comparison in MetaspaceShared::is_in_shared_metaspace().
! assert(mc_base < ro_base && mc_base < rw_base && mc_base < md_base, "must be");
! assert(md_top > ro_top && md_top > rw_top && md_top > mc_top, "must be");
assert(mc_top == rw_base, "must be");
assert(rw_top == ro_base, "must be");
assert(ro_top == md_base, "must be");
_core_spaces_size = mapinfo->core_spaces_size();
! MetaspaceObj::set_shared_metaspace_range((void*)mc_base, (void*)md_top);
return true;
} else {
// If there was a failure in mapping any of the spaces, unmap the ones
// that succeeded
if (ro_base != NULL) mapinfo->unmap_region(ro);
if (rw_base != NULL) mapinfo->unmap_region(rw);
if (mc_base != NULL) mapinfo->unmap_region(mc);
if (md_base != NULL) mapinfo->unmap_region(md);
#ifndef _WINDOWS
// Release the entire mapped region
shared_rs.release();
#endif
// If -Xshare:on is specified, print out the error message and exit VM,
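
The mapping path above records one contiguous [mc_base, md_top) range precisely
so that MetaspaceShared::is_in_shared_metaspace() and MetaspaceObj::is_shared()
can be a two-comparison bounds check. A sketch of that idea (illustrative, not
the exact HotSpot code):

#include <stddef.h>

static void* _shared_metaspace_base = NULL;
static void* _shared_metaspace_top  = NULL;

static void set_shared_metaspace_range(void* base, void* top) {
  _shared_metaspace_base = base;   // mc_base after a successful mapping
  _shared_metaspace_top  = top;    // md_top after a successful mapping
}

static bool is_in_shared_metaspace(const void* p) {
  // Correct only because mc->rw->ro->md are mapped with no gaps; a hole
  // between regions could be handed out by the OS for unrelated memory
  // and make this simple comparison give false positives.
  return p >= (const void*)_shared_metaspace_base &&
         p <  (const void*)_shared_metaspace_top;
}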
*** 2047,2056 ****
// or so.
_mc_region.print_out_of_space_msg(name, needed_bytes);
_rw_region.print_out_of_space_msg(name, needed_bytes);
_ro_region.print_out_of_space_msg(name, needed_bytes);
_md_region.print_out_of_space_msg(name, needed_bytes);
- _od_region.print_out_of_space_msg(name, needed_bytes);
vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
"Please reduce the number of shared classes.");
}
--- 2018,2026 ----