< prev index next >

src/share/vm/memory/metaspaceShared.cpp

Print this page




       // Set base/top/end directly. Used for regions whose bounds are
       // computed elsewhere (e.g. the archived heap regions, whose extents
       // are returned by FileMapInfo when they are written).
 185   void init(char* b, char* t, char* e) {
 186     _base = b;
 187     _top = t;
 188     _end = e;
 189   }
 190 
       // Seal this region: round _top up to the metaspace reserve alignment
       // to become the new _end. If 'next' is given, start it immediately
       // after this region, initially spanning the rest of the reservation.
 191   void pack(DumpRegion* next = NULL) {
 192     assert(!is_packed(), "sanity");
 193     _end = (char*)align_up(_top, Metaspace::reserve_alignment());
 194     _is_packed = true;
 195     if (next != NULL) {
 196       next->_base = next->_top = this->_end;
 197       next->_end = MetaspaceShared::shared_rs()->end();
 198     }
 199   }
       // True iff p points into the used part of this region: [base, top).
 200   bool contains(char* p) {
 201     return base() <= p && p < top();
 202   }
 203 };
 204 

 205 DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
 206 DumpRegion _s0_region("s0"), _s1_region("s1"), _oa0_region("oa0"), _oa1_region("oa1");
 207 
      // Allocate num_bytes from the miscellaneous-code ("mc") dump region.
 208 char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
 209   return _mc_region.allocate(num_bytes);
 210 }
 211 
      // Allocate num_bytes from the read-only ("ro") dump region.
 212 char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
 213   return _ro_region.allocate(num_bytes);
 214 }
 215 
 216 void MetaspaceShared::initialize_shared_rs() {
 217   const size_t reserve_alignment = Metaspace::reserve_alignment();
 218   bool large_pages = false; // No large pages when dumping the CDS archive.
 219   char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
 220 
 221 #ifdef _LP64
 222   // On 64-bit VM, the heap and class space layout will be the same as if
 223   // you're running in -Xshare:on mode:
 224   //
 225   //                         +-- SharedBaseAddress (default = 0x800000000)
 226   //                         v


 853                        all_count, all_bytes, all_perc);
 854 
 855   assert(all_ro_bytes == ro_all, "everything should have been counted");
 856   assert(all_rw_bytes == rw_all, "everything should have been counted");
 857 
 858   msg.info("%s", info_stream.as_string());
 859 #undef fmt_stats
 860 }
 861 
 862 // Populate the shared space.
 863 
      // VM operation that populates the shared archive at dump time: copies
      // metadata into the dump regions, archives java heap objects, and
      // writes out the archive file.
 864 class VM_PopulateDumpSharedSpace: public VM_Operation {
 865 private:
      // Closed-archive heap regions holding archived String objects.
 866   GrowableArray<MemRegion> *_string_regions;
      // Open-archive heap regions holding other archived java heap objects.
 867   GrowableArray<MemRegion> *_open_archive_heap_regions;
 868 
      // Expands to an empty body unless INCLUDE_CDS_JAVA_HEAP is set.
 869   void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
 870   void dump_symbols();
 871   char* dump_read_only_tables();
 872   void print_region_stats();


 873 public:
 874 
 875   VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
 876   void doit();   // outline because gdb sucks
      // Write one dump region into the archive file via mapinfo.
 877   static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only,  bool allow_exec);
 878 }; // class VM_PopulateDumpSharedSpace
 879 
 880 class SortedSymbolClosure: public SymbolClosure {
 881   GrowableArray<Symbol*> _symbols;
 882   virtual void do_symbol(Symbol** sym) {
 883     assert((*sym)->is_permanent(), "archived symbols must be permanent");
 884     _symbols.append(*sym);
 885   }
 886   static int compare_symbols_by_address(Symbol** a, Symbol** b) {
 887     if (a[0] < b[0]) {
 888       return -1;
 889     } else if (a[0] == b[0]) {
 890       return 0;
 891     } else {
 892       return 1;


1250          "should already be aligned");
1251 
1252   // During patching, some virtual methods may be called, so at this point
1253   // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
1254   MetaspaceShared::patch_cpp_vtable_pointers();
1255 
1256   // The vtable clones contain addresses of the current process.
1257   // We don't want to write these addresses into the archive.
1258   MetaspaceShared::zero_cpp_vtable_clones_for_writing();
1259 
1260   // Create and write the archive file that maps the shared spaces.
1261 
1262   FileMapInfo* mapinfo = new FileMapInfo();
1263   mapinfo->populate_header(os::vm_allocation_granularity());
1264   mapinfo->set_read_only_tables_start(read_only_tables_start);
1265   mapinfo->set_misc_data_patching_start(vtbl_list);
1266   mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers());
1267   mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size());
1268   mapinfo->set_core_spaces_size(core_spaces_size);
1269 
1270   char* s0_start, *s0_top;
1271   char* s1_start, *s1_top;
1272   char* oa0_start, *oa0_top;
1273   char* oa1_start, *oa1_top;
1274 
1275   for (int pass=1; pass<=2; pass++) {
1276     if (pass == 1) {
1277       // The first pass doesn't actually write the data to disk. All it
1278       // does is to update the fields in the mapinfo->_header.
1279     } else {
1280       // After the first pass, the contents of mapinfo->_header are finalized,
1281       // so we can compute the header's CRC, and write the contents of the header
1282       // and the regions into disk.
1283       mapinfo->open_for_write();
1284       mapinfo->set_header_crc(mapinfo->compute_header_crc());
1285     }
1286     mapinfo->write_header();
1287 
1288     // NOTE: md contains the trampoline code for method entries, which are patched at run time,
1289     // so it needs to be read/write.
1290     write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
1291     write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
1292     write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
1293     write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
1294     write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);
1295 
1296     mapinfo->write_archive_heap_regions(_string_regions,

1297                                         MetaspaceShared::first_string,
1298                                         MetaspaceShared::max_strings,
1299                                         &s0_start, &s0_top,
1300                                         &s1_start, &s1_top);
1301     mapinfo->write_archive_heap_regions(_open_archive_heap_regions,
1302                                         MetaspaceShared::first_open_archive_heap_region,
1303                                         MetaspaceShared::max_open_archive_heap_region,
1304                                         &oa0_start, &oa0_top,
1305                                         &oa1_start, &oa1_top);
1306   }
1307 
1308   mapinfo->close();
1309 
1310   // Restore the vtable in case we invoke any virtual methods.
1311   MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);
1312 
1313   _s0_region.init(s0_start, s0_top, s0_top);
1314   _s1_region.init(s1_start, s1_top, s1_top);
1315   _oa0_region.init(oa0_start, oa0_top, oa0_top);
1316   _oa1_region.init(oa1_start, oa1_top, oa1_top);
1317   print_region_stats();
1318 
1319   if (log_is_enabled(Info, cds)) {
1320     ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
1321                                                  int(_mc_region.used()), int(_md_region.used()));
1322   }
1323 }
1324 
1325 void VM_PopulateDumpSharedSpace::print_region_stats() {
1326   // Print statistics of all the regions
       // Totals cover the five metadata regions plus the two string ("s")
       // and two open-archive ("oa") heap regions.
1327   const size_t total_reserved = _ro_region.reserved()  + _rw_region.reserved() +
1328                                 _mc_region.reserved()  + _md_region.reserved() +
1329                                 _od_region.reserved()  +
1330                                 _s0_region.reserved()  + _s1_region.reserved() +
1331                                 _oa0_region.reserved() + _oa1_region.reserved();
1332   const size_t total_bytes = _ro_region.used()  + _rw_region.used() +
1333                              _mc_region.used()  + _md_region.used() +
1334                              _od_region.used()  +
1335                              _s0_region.used()  + _s1_region.used() +
1336                              _oa0_region.used() + _oa1_region.used();
       // Overall utilization of the reservation, in percent.
1337   const double total_u_perc = total_bytes / double(total_reserved) * 100.0;
1338 
       // One line per region, each showing its share of total_reserved.
1339   _mc_region.print(total_reserved);
1340   _rw_region.print(total_reserved);
1341   _ro_region.print(total_reserved);
1342   _md_region.print(total_reserved);
1343   _od_region.print(total_reserved);
1344   _s0_region.print(total_reserved);
1345   _s1_region.print(total_reserved);
1346   _oa0_region.print(total_reserved);
1347   _oa1_region.print(total_reserved);
1348 
1349   tty->print_cr("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
1350                  total_bytes, total_reserved, total_u_perc);
1351 }
1352 












1353 
1354 // Update a Java object to point its Klass* to the new location after
1355 // shared archive has been compacted.
1356 void MetaspaceShared::relocate_klass_ptr(oop o) {
1357   assert(DumpSharedSpaces, "sanity");
       // ArchiveCompactor records where each Klass was copied during dump.
1358   Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
1359   o->set_klass(k);
1360 }
1361 
1362 class LinkSharedClassesClosure : public KlassClosure {
1363   Thread* THREAD;
1364   bool    _made_progress;
1365  public:
1366   LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {}
1367 
1368   void reset()               { _made_progress = false; }
1369   bool made_progress() const { return _made_progress; }
1370 
1371   void do_klass(Klass* k) {
1372     if (k->is_instance_klass()) {


1567       BytecodeVerificationLocal = BytecodeVerificationRemote;
1568     }
1569     ik->link_class(THREAD);
1570     if (HAS_PENDING_EXCEPTION) {
1571       ResourceMark rm;
1572       tty->print_cr("Preload Warning: Verification failed for %s",
1573                     ik->external_name());
1574       CLEAR_PENDING_EXCEPTION;
1575       ik->set_in_error_state();
1576       _has_error_classes = true;
1577     }
1578     BytecodeVerificationLocal = saved;
1579     return true;
1580   } else {
1581     return false;
1582   }
1583 }
1584 
1585 #if INCLUDE_CDS_JAVA_HEAP
1586 void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
       // Heap-object archiving requires G1 plus compressed oops/class
       // pointers; if unsupported, log (at Info level) and skip dumping.
1587   if (!MetaspaceShared::allow_archive_heap_object()) {
1588     if (log_is_enabled(Info, cds)) {
1589       log_info(cds)(
1590         "Archived java heap is not supported as UseG1GC, "
1591         "UseCompressedOops and UseCompressedClassPointers are required."
1592         "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
1593         BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
1594         BOOL_TO_STR(UseCompressedClassPointers));
1595     }
1596     return;
1597   }
1598 


1599   // Cache for recording where the archived objects are copied to
1600   MetaspaceShared::create_archive_object_cache();
1601 
       // Interned strings are written into the closed archive region(s).
1602   tty->print_cr("Dumping String objects to closed archive heap region ...");
1603   NOT_PRODUCT(StringTable::verify());
1604   // The string space has maximum two regions. See FileMapInfo::write_string_regions() for details.
1605   _string_regions = new GrowableArray<MemRegion>(2);
1606   StringTable::write_to_archive(_string_regions);
1607 
       // Remaining archivable objects go into the open archive region(s).
1608   tty->print_cr("Dumping objects to open archive heap region ...");
1609   _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
1610   MetaspaceShared::dump_open_archive_heap_objects(_open_archive_heap_regions);
1611 }
1612 
      // Dump objects into the G1 open archive heap region; the resulting
      // MemRegions are appended to 'open_archive'.
1613 void MetaspaceShared::dump_open_archive_heap_objects(
1614                                     GrowableArray<MemRegion> * open_archive) {
1615   assert(UseG1GC, "Only support G1 GC");
1616   assert(UseCompressedOops && UseCompressedClassPointers,
1617          "Only support UseCompressedOops and UseCompressedClassPointers enabled");
1618 
1619   Thread* THREAD = Thread::current();
      // Begin a G1 archive allocation range; 'true' selects the open archive.
1620   G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);
1621 
      // Archive objects reachable from resolved constant pool entries.
1622   MetaspaceShared::archive_resolved_constants(THREAD);
1623 
      // Close the range, recording the allocated regions into open_archive.
1624   G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
1625                                                    os::vm_allocation_granularity());
1626 }
1627 
1628 MetaspaceShared::ArchivedObjectCache* MetaspaceShared::_archive_object_cache = NULL;
1629 oop MetaspaceShared::archive_heap_object(oop obj, Thread* THREAD) {
1630   assert(DumpSharedSpaces, "dump-time only");
1631 
1632   NoSafepointVerifier nsv;
1633 
1634   ArchivedObjectCache* cache = MetaspaceShared::archive_object_cache();
1635   oop* p = cache->get(obj);
1636   if (p != NULL) {
1637     // already archived
1638     return *p;
1639   }
1640 
1641   int len = obj->size();
1642   if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
1643     return NULL;
1644   }
1645 
1646   int hash = obj->identity_hash();
1647   oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
1648   if (archived_oop != NULL) {
1649     Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)archived_oop, len);
1650     relocate_klass_ptr(archived_oop);
1651     cache->put(obj, archived_oop);
1652   }




       // Set base/top/end directly. Used for regions whose bounds are
       // computed elsewhere — TODO confirm callers in this version.
 185   void init(char* b, char* t, char* e) {
 186     _base = b;
 187     _top = t;
 188     _end = e;
 189   }
 190 
       // Seal this region: round _top up to the metaspace reserve alignment
       // to become the new _end. If 'next' is given, start it immediately
       // after this region, initially spanning the rest of the reservation.
 191   void pack(DumpRegion* next = NULL) {
 192     assert(!is_packed(), "sanity");
 193     _end = (char*)align_up(_top, Metaspace::reserve_alignment());
 194     _is_packed = true;
 195     if (next != NULL) {
 196       next->_base = next->_top = this->_end;
 197       next->_end = MetaspaceShared::shared_rs()->end();
 198     }
 199   }
       // True iff p points into the used part of this region: [base, top).
 200   bool contains(char* p) {
 201     return base() <= p && p < top();
 202   }
 203 };
 204 
 205 
 206 DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
 207 size_t _total_string_region_size = 0, _total_open_archive_region_size = 0;
 208 
      // Allocate num_bytes from the miscellaneous-code ("mc") dump region.
 209 char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
 210   return _mc_region.allocate(num_bytes);
 211 }
 212 
      // Allocate num_bytes from the read-only ("ro") dump region.
 213 char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
 214   return _ro_region.allocate(num_bytes);
 215 }
 216 
 217 void MetaspaceShared::initialize_shared_rs() {
 218   const size_t reserve_alignment = Metaspace::reserve_alignment();
 219   bool large_pages = false; // No large pages when dumping the CDS archive.
 220   char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
 221 
 222 #ifdef _LP64
 223   // On 64-bit VM, the heap and class space layout will be the same as if
 224   // you're running in -Xshare:on mode:
 225   //
 226   //                         +-- SharedBaseAddress (default = 0x800000000)
 227   //                         v


 854                        all_count, all_bytes, all_perc);
 855 
 856   assert(all_ro_bytes == ro_all, "everything should have been counted");
 857   assert(all_rw_bytes == rw_all, "everything should have been counted");
 858 
 859   msg.info("%s", info_stream.as_string());
 860 #undef fmt_stats
 861 }
 862 
 863 // Populate the shared space.
 864 
      // VM operation that populates the shared archive at dump time: copies
      // metadata into the dump regions, archives java heap objects, and
      // writes out the archive file.
 865 class VM_PopulateDumpSharedSpace: public VM_Operation {
 866 private:
      // Closed-archive heap regions holding archived String objects.
 867   GrowableArray<MemRegion> *_string_regions;
      // Open-archive heap regions holding other archived java heap objects.
 868   GrowableArray<MemRegion> *_open_archive_heap_regions;
 869 
      // Expands to an empty body unless INCLUDE_CDS_JAVA_HEAP is set.
 870   void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
 871   void dump_symbols();
 872   char* dump_read_only_tables();
 873   void print_region_stats();
      // Print per-region stats for a set of archived heap MemRegions.
 874   void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
 875                                const char *name, const size_t total_size);
 876 public:
 877 
 878   VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
 879   void doit();   // outline because gdb sucks
      // Write one dump region into the archive file via mapinfo.
 880   static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only,  bool allow_exec);
 881 }; // class VM_PopulateDumpSharedSpace
 882 
 883 class SortedSymbolClosure: public SymbolClosure {
 884   GrowableArray<Symbol*> _symbols;
 885   virtual void do_symbol(Symbol** sym) {
 886     assert((*sym)->is_permanent(), "archived symbols must be permanent");
 887     _symbols.append(*sym);
 888   }
 889   static int compare_symbols_by_address(Symbol** a, Symbol** b) {
 890     if (a[0] < b[0]) {
 891       return -1;
 892     } else if (a[0] == b[0]) {
 893       return 0;
 894     } else {
 895       return 1;


1253          "should already be aligned");
1254 
1255   // During patching, some virtual methods may be called, so at this point
1256   // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
1257   MetaspaceShared::patch_cpp_vtable_pointers();
1258 
1259   // The vtable clones contain addresses of the current process.
1260   // We don't want to write these addresses into the archive.
1261   MetaspaceShared::zero_cpp_vtable_clones_for_writing();
1262 
1263   // Create and write the archive file that maps the shared spaces.
1264 
1265   FileMapInfo* mapinfo = new FileMapInfo();
1266   mapinfo->populate_header(os::vm_allocation_granularity());
1267   mapinfo->set_read_only_tables_start(read_only_tables_start);
1268   mapinfo->set_misc_data_patching_start(vtbl_list);
1269   mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers());
1270   mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size());
1271   mapinfo->set_core_spaces_size(core_spaces_size);
1272 





1273   for (int pass=1; pass<=2; pass++) {
1274     if (pass == 1) {
1275       // The first pass doesn't actually write the data to disk. All it
1276       // does is to update the fields in the mapinfo->_header.
1277     } else {
1278       // After the first pass, the contents of mapinfo->_header are finalized,
1279       // so we can compute the header's CRC, and write the contents of the header
1280       // and the regions into disk.
1281       mapinfo->open_for_write();
1282       mapinfo->set_header_crc(mapinfo->compute_header_crc());
1283     }
1284     mapinfo->write_header();
1285 
1286     // NOTE: md contains the trampoline code for method entries, which are patched at run time,
1287     // so it needs to be read/write.
1288     write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
1289     write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
1290     write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
1291     write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
1292     write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);
1293 
1294     _total_string_region_size = mapinfo->write_archive_heap_regions(
1295                                         _string_regions,
1296                                         MetaspaceShared::first_string,
1297                                         MetaspaceShared::max_strings);
1298     _total_open_archive_region_size = mapinfo->write_archive_heap_regions(
1299                                         _open_archive_heap_regions,

1300                                         MetaspaceShared::first_open_archive_heap_region,
1301                                         MetaspaceShared::max_open_archive_heap_region);


1302   }
1303 
1304   mapinfo->close();
1305 
1306   // Restore the vtable in case we invoke any virtual methods.
1307   MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);
1308 




1309   print_region_stats();
1310 
1311   if (log_is_enabled(Info, cds)) {
1312     ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
1313                                                  int(_mc_region.used()), int(_md_region.used()));
1314   }
1315 }
1316 
1317 void VM_PopulateDumpSharedSpace::print_region_stats() {
1318   // Print statistics of all the regions
       // Totals cover the five metadata regions plus the archived heap
       // regions (tracked as byte totals rather than DumpRegion objects).
1319   const size_t total_reserved = _ro_region.reserved()  + _rw_region.reserved() +
1320                                 _mc_region.reserved()  + _md_region.reserved() +
1321                                 _od_region.reserved()  +
1322                                 _total_string_region_size +
1323                                 _total_open_archive_region_size;
1324   const size_t total_bytes = _ro_region.used()  + _rw_region.used() +
1325                              _mc_region.used()  + _md_region.used() +
1326                              _od_region.used()  +
1327                              _total_string_region_size +
1328                              _total_open_archive_region_size;
       // Overall utilization of the reservation, in percent.
1329   const double total_u_perc = total_bytes / double(total_reserved) * 100.0;
1330 
       // One line per region, each showing its share of total_reserved.
1331   _mc_region.print(total_reserved);
1332   _rw_region.print(total_reserved);
1333   _ro_region.print(total_reserved);
1334   _md_region.print(total_reserved);
1335   _od_region.print(total_reserved);
1336   print_heap_region_stats(_string_regions, "st", total_reserved);
1337   print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);


1338 
1339   tty->print_cr("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
1340                  total_bytes, total_reserved, total_u_perc);
1341 }
1342 
1343 void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
1344                                                          const char *name, const size_t total_size) {
1345   int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
1346   for (int i = 0; i < arr_len; i++) {
1347       char* start = (char*)heap_mem->at(i).start();
1348       size_t size = heap_mem->at(i).byte_size();
1349       char* top = start + size;
1350       tty->print_cr("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100%% used] at " INTPTR_FORMAT,
1351                     name, i, size, size/double(total_size)*100.0, size, p2i(start));
1352 
1353   }
1354 }
1355 
1356 // Update a Java object to point its Klass* to the new location after
1357 // shared archive has been compacted.
1358 void MetaspaceShared::relocate_klass_ptr(oop o) {
1359   assert(DumpSharedSpaces, "sanity");
       // ArchiveCompactor records where each Klass was copied during dump.
1360   Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
1361   o->set_klass(k);
1362 }
1363 
1364 class LinkSharedClassesClosure : public KlassClosure {
1365   Thread* THREAD;
1366   bool    _made_progress;
1367  public:
1368   LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {}
1369 
1370   void reset()               { _made_progress = false; }
1371   bool made_progress() const { return _made_progress; }
1372 
1373   void do_klass(Klass* k) {
1374     if (k->is_instance_klass()) {


1569       BytecodeVerificationLocal = BytecodeVerificationRemote;
1570     }
1571     ik->link_class(THREAD);
1572     if (HAS_PENDING_EXCEPTION) {
1573       ResourceMark rm;
1574       tty->print_cr("Preload Warning: Verification failed for %s",
1575                     ik->external_name());
1576       CLEAR_PENDING_EXCEPTION;
1577       ik->set_in_error_state();
1578       _has_error_classes = true;
1579     }
1580     BytecodeVerificationLocal = saved;
1581     return true;
1582   } else {
1583     return false;
1584   }
1585 }
1586 
1587 #if INCLUDE_CDS_JAVA_HEAP
1588 void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
       // Heap-object archiving requires G1 plus compressed oops/class
       // pointers; if unsupported, log (at Info level) and skip dumping.
1589   if (!MetaspaceShared::is_heap_object_archiving_allowed()) {
1590     if (log_is_enabled(Info, cds)) {
1591       log_info(cds)(
1592         "Archived java heap is not supported as UseG1GC, "
1593         "UseCompressedOops and UseCompressedClassPointers are required."
1594         "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
1595         BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
1596         BOOL_TO_STR(UseCompressedClassPointers));
1597     }
1598     return;
1599   }
1600 
       // No safepoint may occur while the archived objects are being copied.
1601   NoSafepointVerifier nsv;
1602 
1603   // Cache for recording where the archived objects are copied to
1604   MetaspaceShared::create_archive_object_cache();
1605 
       // Interned strings are written into the closed archive region(s).
1606   tty->print_cr("Dumping String objects to closed archive heap region ...");
1607   NOT_PRODUCT(StringTable::verify());
1608   // The string space has maximum two regions. See FileMapInfo::write_archive_heap_regions() for details.
1609   _string_regions = new GrowableArray<MemRegion>(2);
1610   StringTable::write_to_archive(_string_regions);
1611 
       // Remaining archivable objects go into the open archive region(s).
1612   tty->print_cr("Dumping objects to open archive heap region ...");
1613   _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
1614   MetaspaceShared::dump_open_archive_heap_objects(_open_archive_heap_regions);
1615 }
1616 
      // Dump objects into the G1 open archive heap region; the resulting
      // MemRegions are appended to 'open_archive'.
1617 void MetaspaceShared::dump_open_archive_heap_objects(
1618                                     GrowableArray<MemRegion> * open_archive) {
1619   assert(UseG1GC, "Only support G1 GC");
1620   assert(UseCompressedOops && UseCompressedClassPointers,
1621          "Only support UseCompressedOops and UseCompressedClassPointers enabled");
1622 
1623   Thread* THREAD = Thread::current();
      // Begin a G1 archive allocation range; 'true' selects the open archive.
1624   G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);
1625 
      // Archive objects reachable from resolved constant pool entries.
1626   MetaspaceShared::archive_resolved_constants(THREAD);
1627 
      // Close the range, recording the allocated regions into open_archive.
1628   G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
1629                                                    os::vm_allocation_granularity());
1630 }
1631 
1632 MetaspaceShared::ArchivedObjectCache* MetaspaceShared::_archive_object_cache = NULL;
1633 oop MetaspaceShared::archive_heap_object(oop obj, Thread* THREAD) {
1634   assert(DumpSharedSpaces, "dump-time only");


1635 
1636   ArchivedObjectCache* cache = MetaspaceShared::archive_object_cache();
1637   oop* p = cache->get(obj);
1638   if (p != NULL) {
1639     // already archived
1640     return *p;
1641   }
1642 
1643   int len = obj->size();
1644   if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
1645     return NULL;
1646   }
1647 
1648   int hash = obj->identity_hash();
1649   oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
1650   if (archived_oop != NULL) {
1651     Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)archived_oop, len);
1652     relocate_klass_ptr(archived_oop);
1653     cache->put(obj, archived_oop);
1654   }


< prev index next >