
src/share/vm/memory/metaspaceShared.cpp

  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classListParser.hpp"
  27 #include "classfile/classLoaderExt.hpp"
  28 #include "classfile/dictionary.hpp"
  29 #include "classfile/loaderConstraints.hpp"
  30 #include "classfile/placeholders.hpp"
  31 #include "classfile/sharedClassUtil.hpp"
  32 #include "classfile/symbolTable.hpp"
  33 #include "classfile/stringTable.hpp"
  34 #include "classfile/systemDictionary.hpp"
  35 #include "classfile/systemDictionaryShared.hpp"
  36 #include "code/codeCache.hpp"





  37 #include "gc/shared/gcLocker.hpp"
  38 #include "interpreter/bytecodeStream.hpp"
  39 #include "interpreter/bytecodes.hpp"
  40 #include "logging/log.hpp"
  41 #include "logging/logMessage.hpp"
  42 #include "memory/filemap.hpp"
  43 #include "memory/metaspace.hpp"
  44 #include "memory/metaspaceShared.hpp"
  45 #include "memory/resourceArea.hpp"
  46 #include "oops/instanceClassLoaderKlass.hpp"
  47 #include "oops/instanceMirrorKlass.hpp"
  48 #include "oops/instanceRefKlass.hpp"
  49 #include "oops/objArrayKlass.hpp"
  50 #include "oops/objArrayOop.hpp"
  51 #include "oops/oop.inline.hpp"
  52 #include "oops/typeArrayKlass.hpp"
  53 #include "prims/jvm.h"
  54 #include "prims/jvmtiRedefineClasses.hpp"
  55 #include "runtime/timerTrace.hpp"
  56 #include "runtime/os.hpp"
  57 #include "runtime/signature.hpp"
  58 #include "runtime/vmThread.hpp"
  59 #include "runtime/vm_operations.hpp"
  60 #include "utilities/align.hpp"
  61 #include "utilities/defaultStream.hpp"
  62 #include "utilities/hashtable.inline.hpp"
  63 #include "memory/metaspaceClosure.hpp"
  64 
  65 ReservedSpace MetaspaceShared::_shared_rs;
  66 VirtualSpace MetaspaceShared::_shared_vs;
  67 MetaspaceSharedStats MetaspaceShared::_stats;
  68 bool MetaspaceShared::_has_error_classes;
  69 bool MetaspaceShared::_archive_loading_failed = false;
  70 bool MetaspaceShared::_remapped_readwrite = false;

  71 address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
  72 size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
  73 size_t MetaspaceShared::_core_spaces_size = 0;
  74 
  75 // The CDS archive is divided into the following regions:
  76 //     mc - misc code (the method entry trampolines)
  77 //     rw - read-write metadata
  78 //     ro - read-only metadata and read-only tables
  79 //     md - misc data (the c++ vtables)
  80 //     od - optional data (original class files)
  81 //
  82 //     s0 - shared strings #0
  83 //     s1 - shared strings #1 (may be empty)


  84 //
  85 // Except for the s0/s1 regions, the other 5 regions are linearly allocated, starting from
  86 // SharedBaseAddress, in the order of mc->rw->ro->md->od. The sizes of these 5 regions
  87 // are page-aligned, and there's no gap between any consecutive regions.
  88 //
  89 // These 5 regions are populated in the following steps:
  90 // [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
  91 //     temporarily allocated outside of the shared regions. Only the method entry
  92 //     trampolines are written into the mc region.
  93 // [2] ArchiveCompactor copies RW metadata into the rw region.
  94 // [3] ArchiveCompactor copies RO metadata into the ro region.
  95 // [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
  96 //     are copied into the ro region as read-only tables.
  97 // [5] C++ vtables are copied into the md region.
  98 // [6] Original class files are copied into the od region.
  99 //
 100 // The s0/s1 regions are populated inside VM_PopulateDumpSharedSpace::dump_string_and_symbols(). Their
 101 // layout is independent of the other 5 regions.
 102 
 103 class DumpRegion {
 104 private:
 105   const char* _name;
 106   char* _base;
 107   char* _top;
 108   char* _end;
 109   bool _is_packed;
 110 
 111   char* expand_top_to(char* newtop) {
 112     assert(is_allocatable(), "must be initialized and not packed");
 113     assert(newtop >= _top, "must not grow backwards");
 114     if (newtop > _end) {
 115       MetaspaceShared::report_out_of_space(_name, newtop - _top);
 116       ShouldNotReachHere();
 117     }
 118     MetaspaceShared::commit_shared_space_to(newtop);
 119     _top = newtop;
 120     return _top;
 121   }


 177   void init(char* b, char* t, char* e) {
 178     _base = b;
 179     _top = t;
 180     _end = e;
 181   }
 182 
 183   void pack(DumpRegion* next = NULL) {
 184     assert(!is_packed(), "sanity");
 185     _end = (char*)align_up(_top, Metaspace::reserve_alignment());
 186     _is_packed = true;
 187     if (next != NULL) {
 188       next->_base = next->_top = this->_end;
 189       next->_end = MetaspaceShared::shared_rs()->end();
 190     }
 191   }
 192   bool contains(char* p) {
 193     return base() <= p && p < top();
 194   }
 195 };
 196 

 197 DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
 198 DumpRegion _s0_region("s0"), _s1_region("s1");
 199 
 200 char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
 201   return _mc_region.allocate(num_bytes);
 202 }
 203 
 204 char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
 205   return _ro_region.allocate(num_bytes);
 206 }
 207 
 208 void MetaspaceShared::initialize_shared_rs() {
 209   const size_t reserve_alignment = Metaspace::reserve_alignment();
 210   bool large_pages = false; // No large pages when dumping the CDS archive.
 211   char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
 212 
 213 #ifdef _LP64
 214   // On 64-bit VM, the heap and class space layout will be the same as if
 215   // you're running in -Xshare:on mode:
 216   //
 217   //                         +-- SharedBaseAddress (default = 0x800000000)
 218   //                         v


 839   double all_perc    = 100.0 * double(all_bytes)    / double(ro_all + rw_all);
 840 
 841   info_stream.print_cr("%s", sep);
 842   info_stream.print_cr(fmt_stats, "Total",
 843                        all_ro_count, all_ro_bytes, all_ro_perc,
 844                        all_rw_count, all_rw_bytes, all_rw_perc,
 845                        all_count, all_bytes, all_perc);
 846 
 847   assert(all_ro_bytes == ro_all, "everything should have been counted");
 848   assert(all_rw_bytes == rw_all, "everything should have been counted");
 849 
 850   msg.info("%s", info_stream.as_string());
 851 #undef fmt_stats
 852 }
 853 
 854 // Populate the shared space.
 855 
 856 class VM_PopulateDumpSharedSpace: public VM_Operation {
 857 private:
 858   GrowableArray<MemRegion> *_string_regions;

 859 
 860   void dump_string_and_symbols();

 861   char* dump_read_only_tables();
 862   void print_region_stats();


 863 public:
 864 
 865   VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
 866   void doit();   // outline because gdb sucks
 867   static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only,  bool allow_exec);
 868 }; // class VM_PopulateDumpSharedSpace
 869 
 870 class SortedSymbolClosure: public SymbolClosure {
 871   GrowableArray<Symbol*> _symbols;
 872   virtual void do_symbol(Symbol** sym) {
 873     assert((*sym)->is_permanent(), "archived symbols must be permanent");
 874     _symbols.append(*sym);
 875   }
 876   static int compare_symbols_by_address(Symbol** a, Symbol** b) {
 877     if (a[0] < b[0]) {
 878       return -1;
 879     } else if (a[0] == b[0]) {
 880       return 0;
 881     } else {
 882       return 1;


1055       ResourceMark rm;
1056       RefRelocator ext_reloc;
1057       iterate_roots(&ext_reloc);
1058     }
1059 
1060 #ifdef ASSERT
1061     {
1062       tty->print_cr("Verifying external roots ... ");
1063       ResourceMark rm;
1064       IsRefInArchiveChecker checker;
1065       iterate_roots(&checker);
1066     }
1067 #endif
1068 
1069 
1070     // cleanup
1071     _ssc = NULL;
1072   }
1073 
1074   // We must relocate SystemDictionary::_well_known_klasses[] only after we have copied
1075   // in the strings during dump_string_and_symbols(): during the string copy, we operate on old
1076   // String objects which assert that their klass is the old
1077   // SystemDictionary::String_klass().
1078   static void relocate_well_known_klasses() {
1079     {
1080       tty->print_cr("Relocating SystemDictionary::_well_known_klasses[] ... ");
1081       ResourceMark rm;
1082       RefRelocator ext_reloc;
1083       SystemDictionary::well_known_klasses_do(&ext_reloc);
1084     }
1085     // NOTE: after this point, we shouldn't have any globals that can reach the old
1086     // objects.
1087 
1088     // We cannot use any of the objects in the heap anymore (except for the objects
1089     // in the CDS shared string regions) because their headers no longer point to
1090     // valid Klasses.
1091   }
1092 
1093   static void iterate_roots(MetaspaceClosure* it) {
1094     GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols();
1095     for (int i=0; i<symbols->length(); i++) {
1096       it->push(symbols->adr_at(i));
1097     }


1110   }
1111 
1112   static Klass* get_relocated_klass(Klass* orig_klass) {
1113     address* pp = _new_loc_table->get((address)orig_klass);
1114     assert(pp != NULL, "must be");
1115     Klass* klass = (Klass*)(*pp);
1116     assert(klass->is_klass(), "must be");
1117     return klass;
1118   }
1119 };
1120 
1121 DumpAllocStats* ArchiveCompactor::_alloc_stats;
1122 SortedSymbolClosure* ArchiveCompactor::_ssc;
1123 ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table;
1124 
1125 void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx,
1126                                               DumpRegion* dump_region, bool read_only,  bool allow_exec) {
1127   mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
1128 }
1129 
1130 void VM_PopulateDumpSharedSpace::dump_string_and_symbols() {
1131   tty->print_cr("Dumping string and symbol tables ...");
1132 
1133   NOT_PRODUCT(SymbolTable::verify());
1134   NOT_PRODUCT(StringTable::verify());
1135   SymbolTable::write_to_archive();
1136 
1137   // The string space has at most two regions. See FileMapInfo::write_string_regions() for details.
1138   _string_regions = new GrowableArray<MemRegion>(2);
1139   StringTable::write_to_archive(_string_regions);
1140 }
1141 
1142 char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
1143   char* oldtop = _ro_region.top();
1144   // Reorder the system dictionary. Moving the symbols affects
1145   // how the hash table indices are calculated.
1146   SystemDictionary::reorder_dictionary_for_sharing();
1147   NOT_PRODUCT(SystemDictionary::verify();)
1148 
1149   size_t buckets_bytes = SystemDictionary::count_bytes_for_buckets();
1150   char* buckets_top = _ro_region.allocate(buckets_bytes, sizeof(intptr_t));
1151   SystemDictionary::copy_buckets(buckets_top, _ro_region.top());
1152 
1153   size_t table_bytes = SystemDictionary::count_bytes_for_table();
1154   char* table_top = _ro_region.allocate(table_bytes, sizeof(intptr_t));
1155   SystemDictionary::copy_table(table_top, _ro_region.top());
1156 
1157   // Write the other data to the output array.
1158   WriteClosure wc(&_ro_region);
1159   MetaspaceShared::serialize(&wc);


1189 
1190   tty->print_cr("Number of classes %d", _global_klass_objects->length());
1191   {
1192     int num_type_array = 0, num_obj_array = 0, num_inst = 0;
1193     for (int i = 0; i < _global_klass_objects->length(); i++) {
1194       Klass* k = _global_klass_objects->at(i);
1195       if (k->is_instance_klass()) {
1196         num_inst ++;
1197       } else if (k->is_objArray_klass()) {
1198         num_obj_array ++;
1199       } else {
1200         assert(k->is_typeArray_klass(), "sanity");
1201         num_type_array ++;
1202       }
1203     }
1204     tty->print_cr("    instance classes   = %5d", num_inst);
1205     tty->print_cr("    obj array classes  = %5d", num_obj_array);
1206     tty->print_cr("    type array classes = %5d", num_type_array);
1207   }
1208 
1209 
1210   // Ensure the ConstMethods won't be modified at run-time
1211   tty->print("Updating ConstMethods ... ");
1212   rewrite_nofast_bytecodes_and_calculate_fingerprints();
1213   tty->print_cr("done. ");
1214 
1215   // Remove all references outside the metadata
1216   tty->print("Removing unshareable information ... ");
1217   remove_unshareable_in_classes();
1218   tty->print_cr("done. ");
1219 
1220   ArchiveCompactor::initialize();
1221   ArchiveCompactor::copy_and_compact();
1222 
1223   dump_string_and_symbols();






1224   ArchiveCompactor::relocate_well_known_klasses();
1225 
1226   char* read_only_tables_start = dump_read_only_tables();
1227   _ro_region.pack(&_md_region);
1228 
1229   char* vtbl_list = _md_region.top();
1230   MetaspaceShared::allocate_cpp_vtable_clones();
1231   _md_region.pack(&_od_region);
1232 
1233   // Relocate the archived class file data into the od region
1234   relocate_cached_class_file();
1235   _od_region.pack();
1236 
1237   // The 5 core spaces are allocated consecutively mc->rw->ro->md->od, so their total size
1238   // is just the space between the two ends.
1239   size_t core_spaces_size = _od_region.end() - _mc_region.base();
1240   assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
1241          "should already be aligned");
1242 
1243   // During patching, some virtual methods may be called, so at this point
1244   // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
1245   MetaspaceShared::patch_cpp_vtable_pointers();
1246 
1247   // The vtable clones contain addresses of the current process.
1248   // We don't want to write these addresses into the archive.
1249   MetaspaceShared::zero_cpp_vtable_clones_for_writing();
1250 
1251   // Create and write the archive file that maps the shared spaces.
1252 
1253   FileMapInfo* mapinfo = new FileMapInfo();
1254   mapinfo->populate_header(os::vm_allocation_granularity());
1255   mapinfo->set_read_only_tables_start(read_only_tables_start);
1256   mapinfo->set_misc_data_patching_start(vtbl_list);
1257   mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers());
1258   mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size());
1259   mapinfo->set_core_spaces_size(core_spaces_size);
1260 
1261   char* s0_start, *s0_top, *s0_end;
1262   char* s1_start, *s1_top, *s1_end;
1263 
1264   for (int pass=1; pass<=2; pass++) {
1265     if (pass == 1) {
1266       // The first pass doesn't actually write the data to disk. All it
1267       // does is update the fields in the mapinfo->_header.
1268     } else {
1269       // After the first pass, the contents of mapinfo->_header are finalized,
1270       // so we can compute the header's CRC, and write the contents of the header
1271       // and the regions to disk.
1272       mapinfo->open_for_write();
1273       mapinfo->set_header_crc(mapinfo->compute_header_crc());
1274     }
1275     mapinfo->write_header();
1276 
1277     // NOTE: mc contains the trampoline code for method entries, which are patched at run time,
1278     // so it needs to be read/write.
1279     write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
1280     write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
1281     write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
1282     write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
1283     write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);
1284 
1285     mapinfo->write_string_regions(_string_regions,
1286                                   &s0_start, &s0_top, &s0_end,
1287                                   &s1_start, &s1_top, &s1_end);





1288   }
1289 
1290   mapinfo->close();
1291 
1292   // Restore the vtable in case we invoke any virtual methods.
1293   MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);
1294 
1295   _s0_region.init(s0_start, s0_top, s0_end);
1296   _s1_region.init(s1_start, s1_top, s1_end);
1297   print_region_stats();
1298 
1299   if (log_is_enabled(Info, cds)) {
1300     ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
1301                                                  int(_mc_region.used()), int(_md_region.used()));
1302   }
1303 }
1304 
1305 void VM_PopulateDumpSharedSpace::print_region_stats() {
1306   // Print statistics of all the regions
1307   const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
1308                                 _mc_region.reserved() + _md_region.reserved() +
1309                                 _od_region.reserved() +
1310                                 _s0_region.reserved() + _s1_region.reserved();

1311   const size_t total_bytes = _ro_region.used() + _rw_region.used() +
1312                              _mc_region.used() + _md_region.used() +
1313                              _od_region.used() +
1314                              _s0_region.used() + _s1_region.used();

1315   const double total_u_perc = total_bytes / double(total_reserved) * 100.0;
1316 
1317   _mc_region.print(total_reserved);
1318   _rw_region.print(total_reserved);
1319   _ro_region.print(total_reserved);
1320   _md_region.print(total_reserved);
1321   _od_region.print(total_reserved);
1322   _s0_region.print(total_reserved);
1323   _s1_region.print(total_reserved);
1324 
1325   tty->print_cr("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
1326                  total_bytes, total_reserved, total_u_perc);
1327 }
1328 












1329 
1330 // Update a Java object to point its Klass* to the new location after
1331 // shared archive has been compacted.
1332 void MetaspaceShared::relocate_klass_ptr(oop o) {
1333   assert(DumpSharedSpaces, "sanity");
1334   Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
1335   o->set_klass(k);
1336 }
1337 
1338 class LinkSharedClassesClosure : public KlassClosure {
1339   Thread* THREAD;
1340   bool    _made_progress;
1341  public:
1342   LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {}
1343 
1344   void reset()               { _made_progress = false; }
1345   bool made_progress() const { return _made_progress; }
1346 
1347   void do_klass(Klass* k) {
1348     if (k->is_instance_klass()) {
1349       InstanceKlass* ik = InstanceKlass::cast(k);
1350       // Link the class to cause the bytecodes to be rewritten and the
1351       // cpcache to be created. Class verification is done according
1352       // to -Xverify setting.
1353       _made_progress |= MetaspaceShared::try_link_class(ik, THREAD);
1354       guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");


1355     }
1356   }
1357 };
1358 
1359 class CheckSharedClassesClosure : public KlassClosure {
1360   bool    _made_progress;
1361  public:
1362   CheckSharedClassesClosure() : _made_progress(false) {}
1363 
1364   void reset()               { _made_progress = false; }
1365   bool made_progress() const { return _made_progress; }
1366   void do_klass(Klass* k) {
1367     if (k->is_instance_klass() && InstanceKlass::cast(k)->check_sharing_error_state()) {
1368       _made_progress = true;
1369     }
1370   }
1371 };
1372 
1373 void MetaspaceShared::check_shared_class_loader_type(Klass* k) {
1374   if (k->is_instance_klass()) {


1539       // classes also being verified. The extra overhead is acceptable during
1540       // dumping.
1541       BytecodeVerificationLocal = BytecodeVerificationRemote;
1542     }
1543     ik->link_class(THREAD);
1544     if (HAS_PENDING_EXCEPTION) {
1545       ResourceMark rm;
1546       tty->print_cr("Preload Warning: Verification failed for %s",
1547                     ik->external_name());
1548       CLEAR_PENDING_EXCEPTION;
1549       ik->set_in_error_state();
1550       _has_error_classes = true;
1551     }
1552     BytecodeVerificationLocal = saved;
1553     return true;
1554   } else {
1555     return false;
1556   }
1557 }
1558 
























































































1559 // Closure for serializing initialization data in from a data area
1560 // (ptr_array) read from the shared file.
1561 
1562 class ReadClosure : public SerializeClosure {
1563 private:
1564   intptr_t** _ptr_array;
1565 
1566   inline intptr_t nextPtr() {
1567     return *(*_ptr_array)++;
1568   }
1569 
1570 public:
1571   ReadClosure(intptr_t** ptr_array) { _ptr_array = ptr_array; }
1572 
1573   void do_ptr(void** p) {
1574     assert(*p == NULL, "initializing previously initialized pointer.");
1575     intptr_t obj = nextPtr();
1576     assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
1577            "hit tag while initializing ptrs.");
1578     *p = (void*)obj;


1724 
1725   // Initialize the run-time symbol table.
1726   SymbolTable::create_table();
1727 
1728   // Close the mapinfo file
1729   mapinfo->close();
1730 
1731   if (PrintSharedArchiveAndExit) {
1732     if (PrintSharedDictionary) {
1733       tty->print_cr("\nShared classes:\n");
1734       SystemDictionary::print_shared(tty);
1735     }
1736     if (_archive_loading_failed) {
1737       tty->print_cr("archive is invalid");
1738       vm_exit(1);
1739     } else {
1740       tty->print_cr("archive is valid");
1741       vm_exit(0);
1742     }
1743   }
1744 }
1745 
1746 void MetaspaceShared::fixup_shared_string_regions() {
1747   FileMapInfo *mapinfo = FileMapInfo::current_info();
1748   mapinfo->fixup_string_regions();
1749 }
1750 
1751 // JVM/TI RedefineClasses() support:
1752 bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
1753   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1754 
1755   if (UseSharedSpaces) {
1756     // remap the shared readonly space to shared readwrite, private
1757     FileMapInfo* mapinfo = FileMapInfo::current_info();
1758     if (!mapinfo->remap_shared_readonly_as_readwrite()) {
1759       return false;
1760     }
1761     _remapped_readwrite = true;
1762   }
1763   return true;
1764 }
1765 
1766 void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
1767   // This is highly unlikely to happen on 64-bits because we have reserved a 4GB space.
1768   // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes


  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classListParser.hpp"
  27 #include "classfile/classLoaderExt.hpp"
  28 #include "classfile/dictionary.hpp"
  29 #include "classfile/loaderConstraints.hpp"
  30 #include "classfile/placeholders.hpp"
  31 #include "classfile/sharedClassUtil.hpp"
  32 #include "classfile/symbolTable.hpp"
  33 #include "classfile/stringTable.hpp"
  34 #include "classfile/systemDictionary.hpp"
  35 #include "classfile/systemDictionaryShared.hpp"
  36 #include "code/codeCache.hpp"
  37 #if INCLUDE_ALL_GCS
  38 #include "gc/g1/g1Allocator.inline.hpp"
  39 #include "gc/g1/g1CollectedHeap.hpp"
  40 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
  41 #endif
  42 #include "gc/shared/gcLocker.hpp"
  43 #include "interpreter/bytecodeStream.hpp"
  44 #include "interpreter/bytecodes.hpp"
  45 #include "logging/log.hpp"
  46 #include "logging/logMessage.hpp"
  47 #include "memory/filemap.hpp"
  48 #include "memory/metaspace.hpp"
  49 #include "memory/metaspaceShared.hpp"
  50 #include "memory/resourceArea.hpp"
  51 #include "oops/instanceClassLoaderKlass.hpp"
  52 #include "oops/instanceMirrorKlass.hpp"
  53 #include "oops/instanceRefKlass.hpp"
  54 #include "oops/objArrayKlass.hpp"
  55 #include "oops/objArrayOop.hpp"
  56 #include "oops/oop.inline.hpp"
  57 #include "oops/typeArrayKlass.hpp"
  58 #include "prims/jvm.h"
  59 #include "prims/jvmtiRedefineClasses.hpp"
  60 #include "runtime/timerTrace.hpp"
  61 #include "runtime/os.hpp"
  62 #include "runtime/signature.hpp"
  63 #include "runtime/vmThread.hpp"
  64 #include "runtime/vm_operations.hpp"
  65 #include "utilities/align.hpp"
  66 #include "utilities/defaultStream.hpp"
  67 #include "utilities/hashtable.inline.hpp"
  68 #include "memory/metaspaceClosure.hpp"
  69 
  70 ReservedSpace MetaspaceShared::_shared_rs;
  71 VirtualSpace MetaspaceShared::_shared_vs;
  72 MetaspaceSharedStats MetaspaceShared::_stats;
  73 bool MetaspaceShared::_has_error_classes;
  74 bool MetaspaceShared::_archive_loading_failed = false;
  75 bool MetaspaceShared::_remapped_readwrite = false;
  76 bool MetaspaceShared::_open_archive_heap_region_mapped = false;
  77 address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
  78 size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
  79 size_t MetaspaceShared::_core_spaces_size = 0;
  80 
  81 // The CDS archive is divided into the following regions:
  82 //     mc  - misc code (the method entry trampolines)
  83 //     rw  - read-write metadata
  84 //     ro  - read-only metadata and read-only tables
  85 //     md  - misc data (the c++ vtables)
  86 //     od  - optional data (original class files)
  87 //
  88 //     s0  - shared strings(closed archive heap space) #0
  89 //     s1  - shared strings(closed archive heap space) #1 (may be empty)
  90 //     oa0 - open archive heap space #0
  91 //     oa1 - open archive heap space #1 (may be empty)
  92 //
  93 // The mc, rw, ro, md and od regions are linearly allocated, starting from
  94 // SharedBaseAddress, in the order of mc->rw->ro->md->od. The sizes of these 5 regions
  95 // are page-aligned, and there's no gap between any consecutive regions.
  96 //
  97 // These 5 regions are populated in the following steps:
  98 // [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
  99 //     temporarily allocated outside of the shared regions. Only the method entry
 100 //     trampolines are written into the mc region.
 101 // [2] ArchiveCompactor copies RW metadata into the rw region.
 102 // [3] ArchiveCompactor copies RO metadata into the ro region.
 103 // [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
 104 //     are copied into the ro region as read-only tables.
 105 // [5] C++ vtables are copied into the md region.
 106 // [6] Original class files are copied into the od region.
 107 //
 108 // The s0/s1 and oa0/oa1 regions are populated inside VM_PopulateDumpSharedSpace::dump_java_heap_objects().
 109 // Their layout is independent of the other 5 regions.
 110 
 111 class DumpRegion {
 112 private:
 113   const char* _name;
 114   char* _base;
 115   char* _top;
 116   char* _end;
 117   bool _is_packed;
 118 
 119   char* expand_top_to(char* newtop) {
 120     assert(is_allocatable(), "must be initialized and not packed");
 121     assert(newtop >= _top, "must not grow backwards");
 122     if (newtop > _end) {
 123       MetaspaceShared::report_out_of_space(_name, newtop - _top);
 124       ShouldNotReachHere();
 125     }
 126     MetaspaceShared::commit_shared_space_to(newtop);
 127     _top = newtop;
 128     return _top;
 129   }


 185   void init(char* b, char* t, char* e) {
 186     _base = b;
 187     _top = t;
 188     _end = e;
 189   }
 190 
 191   void pack(DumpRegion* next = NULL) {
 192     assert(!is_packed(), "sanity");
 193     _end = (char*)align_up(_top, Metaspace::reserve_alignment());
 194     _is_packed = true;
 195     if (next != NULL) {
 196       next->_base = next->_top = this->_end;
 197       next->_end = MetaspaceShared::shared_rs()->end();
 198     }
 199   }
 200   bool contains(char* p) {
 201     return base() <= p && p < top();
 202   }
 203 };
 204 
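// ---------------------------------------------------------------------------
// [Editorial sketch -- illustrative only, not part of this change]
// A reduced, standalone model of the bump-pointer regions and the pack()
// chaining used by DumpRegion above. ToyRegion and toy_align_up are
// hypothetical names; the real mechanism also commits memory through
// MetaspaceShared::commit_shared_space_to(). The point of the sketch is why
// packed regions are contiguous: packing one region aligns its end and makes
// that address the base of the next region.

#include <cassert>
#include <cstddef>
#include <cstdint>

static char* toy_align_up(char* p, size_t alignment) {
  uintptr_t v = (uintptr_t)p;
  return (char*)((v + alignment - 1) & ~(uintptr_t)(alignment - 1));
}

struct ToyRegion {
  char* _base;
  char* _top;
  char* _end;

  char* allocate(size_t num_bytes) {   // simple bump allocation
    char* old_top = _top;
    _top += num_bytes;
    assert(_top <= _end);
    return old_top;
  }

  // Close this region at an aligned top; the next region starts exactly there,
  // so consecutive regions never have a gap between them.
  void pack(ToyRegion* next, size_t alignment, char* reserved_end) {
    _end = toy_align_up(_top, alignment);
    if (next != NULL) {
      next->_base = next->_top = _end;
      next->_end  = reserved_end;
    }
  }
};
// ---------------------------------------------------------------------------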
 205 
 206 DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
 207 size_t _total_string_region_size = 0, _total_open_archive_region_size = 0;
 208 
 209 char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
 210   return _mc_region.allocate(num_bytes);
 211 }
 212 
 213 char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
 214   return _ro_region.allocate(num_bytes);
 215 }
 216 
 217 void MetaspaceShared::initialize_shared_rs() {
 218   const size_t reserve_alignment = Metaspace::reserve_alignment();
 219   bool large_pages = false; // No large pages when dumping the CDS archive.
 220   char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
 221 
 222 #ifdef _LP64
 223   // On 64-bit VM, the heap and class space layout will be the same as if
 224   // you're running in -Xshare:on mode:
 225   //
 226   //                         +-- SharedBaseAddress (default = 0x800000000)
 227   //                         v


 848   double all_perc    = 100.0 * double(all_bytes)    / double(ro_all + rw_all);
 849 
 850   info_stream.print_cr("%s", sep);
 851   info_stream.print_cr(fmt_stats, "Total",
 852                        all_ro_count, all_ro_bytes, all_ro_perc,
 853                        all_rw_count, all_rw_bytes, all_rw_perc,
 854                        all_count, all_bytes, all_perc);
 855 
 856   assert(all_ro_bytes == ro_all, "everything should have been counted");
 857   assert(all_rw_bytes == rw_all, "everything should have been counted");
 858 
 859   msg.info("%s", info_stream.as_string());
 860 #undef fmt_stats
 861 }
 862 
 863 // Populate the shared space.
 864 
 865 class VM_PopulateDumpSharedSpace: public VM_Operation {
 866 private:
 867   GrowableArray<MemRegion> *_string_regions;
 868   GrowableArray<MemRegion> *_open_archive_heap_regions;
 869 
 870   void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
 871   void dump_symbols();
 872   char* dump_read_only_tables();
 873   void print_region_stats();
 874   void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
 875                                const char *name, const size_t total_size);
 876 public:
 877 
 878   VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
 879   void doit();   // outline because gdb sucks
 880   static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only,  bool allow_exec);
 881 }; // class VM_PopulateDumpSharedSpace
 882 
 883 class SortedSymbolClosure: public SymbolClosure {
 884   GrowableArray<Symbol*> _symbols;
 885   virtual void do_symbol(Symbol** sym) {
 886     assert((*sym)->is_permanent(), "archived symbols must be permanent");
 887     _symbols.append(*sym);
 888   }
 889   static int compare_symbols_by_address(Symbol** a, Symbol** b) {
 890     if (a[0] < b[0]) {
 891       return -1;
 892     } else if (a[0] == b[0]) {
 893       return 0;
 894     } else {
 895       return 1;


1068       ResourceMark rm;
1069       RefRelocator ext_reloc;
1070       iterate_roots(&ext_reloc);
1071     }
1072 
1073 #ifdef ASSERT
1074     {
1075       tty->print_cr("Verifying external roots ... ");
1076       ResourceMark rm;
1077       IsRefInArchiveChecker checker;
1078       iterate_roots(&checker);
1079     }
1080 #endif
1081 
1082 
1083     // cleanup
1084     _ssc = NULL;
1085   }
1086 
1087   // We must relocate SystemDictionary::_well_known_klasses[] only after we have copied
1088   // in the Java objects during dump_java_heap_objects(): during the object copy, we operate on
1089   // old objects which assert that their klass is the original klass.

1090   static void relocate_well_known_klasses() {
1091     {
1092       tty->print_cr("Relocating SystemDictionary::_well_known_klasses[] ... ");
1093       ResourceMark rm;
1094       RefRelocator ext_reloc;
1095       SystemDictionary::well_known_klasses_do(&ext_reloc);
1096     }
1097     // NOTE: after this point, we shouldn't have any globals that can reach the old
1098     // objects.
1099 
1100     // We cannot use any of the objects in the heap anymore (except for the objects
1101     // in the CDS shared string regions) because their headers no longer point to
1102     // valid Klasses.
1103   }
1104 
1105   static void iterate_roots(MetaspaceClosure* it) {
1106     GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols();
1107     for (int i=0; i<symbols->length(); i++) {
1108       it->push(symbols->adr_at(i));
1109     }


1122   }
1123 
1124   static Klass* get_relocated_klass(Klass* orig_klass) {
1125     address* pp = _new_loc_table->get((address)orig_klass);
1126     assert(pp != NULL, "must be");
1127     Klass* klass = (Klass*)(*pp);
1128     assert(klass->is_klass(), "must be");
1129     return klass;
1130   }
1131 };
1132 
1133 DumpAllocStats* ArchiveCompactor::_alloc_stats;
1134 SortedSymbolClosure* ArchiveCompactor::_ssc;
1135 ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table;
1136 
1137 void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx,
1138                                               DumpRegion* dump_region, bool read_only,  bool allow_exec) {
1139   mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
1140 }
1141 
1142 void VM_PopulateDumpSharedSpace::dump_symbols() {
1143   tty->print_cr("Dumping symbol table ...");
1144 
1145   NOT_PRODUCT(SymbolTable::verify());

1146   SymbolTable::write_to_archive();




1147 }
1148 
1149 char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
1150   char* oldtop = _ro_region.top();
1151   // Reorder the system dictionary. Moving the symbols affects
1152   // how the hash table indices are calculated.
1153   SystemDictionary::reorder_dictionary_for_sharing();
1154   NOT_PRODUCT(SystemDictionary::verify();)
1155 
1156   size_t buckets_bytes = SystemDictionary::count_bytes_for_buckets();
1157   char* buckets_top = _ro_region.allocate(buckets_bytes, sizeof(intptr_t));
1158   SystemDictionary::copy_buckets(buckets_top, _ro_region.top());
1159 
1160   size_t table_bytes = SystemDictionary::count_bytes_for_table();
1161   char* table_top = _ro_region.allocate(table_bytes, sizeof(intptr_t));
1162   SystemDictionary::copy_table(table_top, _ro_region.top());
1163 
1164   // Write the other data to the output array.
1165   WriteClosure wc(&_ro_region);
1166   MetaspaceShared::serialize(&wc);


1196 
1197   tty->print_cr("Number of classes %d", _global_klass_objects->length());
1198   {
1199     int num_type_array = 0, num_obj_array = 0, num_inst = 0;
1200     for (int i = 0; i < _global_klass_objects->length(); i++) {
1201       Klass* k = _global_klass_objects->at(i);
1202       if (k->is_instance_klass()) {
1203         num_inst ++;
1204       } else if (k->is_objArray_klass()) {
1205         num_obj_array ++;
1206       } else {
1207         assert(k->is_typeArray_klass(), "sanity");
1208         num_type_array ++;
1209       }
1210     }
1211     tty->print_cr("    instance classes   = %5d", num_inst);
1212     tty->print_cr("    obj array classes  = %5d", num_obj_array);
1213     tty->print_cr("    type array classes = %5d", num_type_array);
1214   }
1215 

1216   // Ensure the ConstMethods won't be modified at run-time
1217   tty->print("Updating ConstMethods ... ");
1218   rewrite_nofast_bytecodes_and_calculate_fingerprints();
1219   tty->print_cr("done. ");
1220 
1221   // Remove all references outside the metadata
1222   tty->print("Removing unshareable information ... ");
1223   remove_unshareable_in_classes();
1224   tty->print_cr("done. ");
1225 
1226   ArchiveCompactor::initialize();
1227   ArchiveCompactor::copy_and_compact();
1228 
1229   dump_symbols();
1230 
1231   // Dump supported java heap objects
1232   _string_regions = NULL;
1233   _open_archive_heap_regions = NULL;
1234   dump_java_heap_objects();
1235 
1236   ArchiveCompactor::relocate_well_known_klasses();
1237 
1238   char* read_only_tables_start = dump_read_only_tables();
1239   _ro_region.pack(&_md_region);
1240 
1241   char* vtbl_list = _md_region.top();
1242   MetaspaceShared::allocate_cpp_vtable_clones();
1243   _md_region.pack(&_od_region);
1244 
1245   // Relocate the archived class file data into the od region
1246   relocate_cached_class_file();
1247   _od_region.pack();
1248 
1249   // The 5 core spaces are allocated consecutively mc->rw->ro->md->od, so their total size
1250   // is just the space between the two ends.
1251   size_t core_spaces_size = _od_region.end() - _mc_region.base();
1252   assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
1253          "should already be aligned");
1254 
1255   // During patching, some virtual methods may be called, so at this point
1256   // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
1257   MetaspaceShared::patch_cpp_vtable_pointers();
1258 
1259   // The vtable clones contain addresses of the current process.
1260   // We don't want to write these addresses into the archive.
1261   MetaspaceShared::zero_cpp_vtable_clones_for_writing();
1262 
1263   // Create and write the archive file that maps the shared spaces.
1264 
1265   FileMapInfo* mapinfo = new FileMapInfo();
1266   mapinfo->populate_header(os::vm_allocation_granularity());
1267   mapinfo->set_read_only_tables_start(read_only_tables_start);
1268   mapinfo->set_misc_data_patching_start(vtbl_list);
1269   mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers());
1270   mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size());
1271   mapinfo->set_core_spaces_size(core_spaces_size);
1272 



1273   for (int pass=1; pass<=2; pass++) {
1274     if (pass == 1) {
1275       // The first pass doesn't actually write the data to disk. All it
1276       // does is update the fields in the mapinfo->_header.
1277     } else {
1278       // After the first pass, the contents of mapinfo->_header are finalized,
1279       // so we can compute the header's CRC, and write the contents of the header
1280       // and the regions to disk.
1281       mapinfo->open_for_write();
1282       mapinfo->set_header_crc(mapinfo->compute_header_crc());
1283     }
1284     mapinfo->write_header();
1285 
1286     // NOTE: mc contains the trampoline code for method entries, which are patched at run time,
1287     // so it needs to be read/write.
1288     write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
1289     write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
1290     write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
1291     write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
1292     write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);
1293 
1294     _total_string_region_size = mapinfo->write_archive_heap_regions(
1295                                         _string_regions,
1296                                         MetaspaceShared::first_string,
1297                                         MetaspaceShared::max_strings);
1298     _total_open_archive_region_size = mapinfo->write_archive_heap_regions(
1299                                         _open_archive_heap_regions,
1300                                         MetaspaceShared::first_open_archive_heap_region,
1301                                         MetaspaceShared::max_open_archive_heap_region);
1302   }
1303 
1304   mapinfo->close();
1305 
1306   // Restore the vtable in case we invoke any virtual methods.
1307   MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);
1308 


1309   print_region_stats();
1310 
1311   if (log_is_enabled(Info, cds)) {
1312     ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
1313                                                  int(_mc_region.used()), int(_md_region.used()));
1314   }
1315 }
1316 
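// ---------------------------------------------------------------------------
// [Editorial sketch -- illustrative only, not part of this change]
// The two-pass loop in VM_PopulateDumpSharedSpace::doit() above, reduced to a
// standalone example: pass 1 only fills in the header fields (region sizes),
// pass 2 computes a checksum over the now-finalized header and does the real
// writes. All names below (ToyHeader, toy_checksum, toy_write_archive) are
// hypothetical; the real code uses FileMapInfo and its own header CRC.

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

struct ToyHeader {
  uint32_t _crc;             // checksum of the finalized header fields
  uint64_t _region_size[2];  // filled in during pass 1
};

static uint32_t toy_checksum(const uint8_t* p, size_t len) {
  uint32_t sum = 0;          // simple stand-in for a real CRC32
  for (size_t i = 0; i < len; i++) {
    sum = sum * 31 + p[i];
  }
  return sum;
}

// Assumes num_regions <= 2 to match the toy header above.
static void toy_write_archive(const char* path, const std::vector<uint8_t>* regions, int num_regions) {
  ToyHeader header;
  memset(&header, 0, sizeof(header));
  FILE* out = NULL;
  for (int pass = 1; pass <= 2; pass++) {
    if (pass == 2) {
      // The sizes recorded during pass 1 are final, so the checksum can be
      // computed and the header written before the region payloads.
      header._crc = toy_checksum((const uint8_t*)header._region_size, sizeof(header._region_size));
      out = fopen(path, "wb");
      if (out == NULL) return;
      fwrite(&header, sizeof(header), 1, out);
    }
    for (int i = 0; i < num_regions; i++) {
      header._region_size[i] = regions[i].size();               // pass 1: record sizes only
      if (pass == 2) {
        fwrite(regions[i].data(), 1, regions[i].size(), out);   // pass 2: real write
      }
    }
  }
  if (out != NULL) fclose(out);
}
// ---------------------------------------------------------------------------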
1317 void VM_PopulateDumpSharedSpace::print_region_stats() {
1318   // Print statistics of all the regions
1319   const size_t total_reserved = _ro_region.reserved()  + _rw_region.reserved() +
1320                                 _mc_region.reserved()  + _md_region.reserved() +
1321                                 _od_region.reserved()  +
1322                                 _total_string_region_size +
1323                                 _total_open_archive_region_size;
1324   const size_t total_bytes = _ro_region.used()  + _rw_region.used() +
1325                              _mc_region.used()  + _md_region.used() +
1326                              _od_region.used()  +
1327                              _total_string_region_size +
1328                              _total_open_archive_region_size;
1329   const double total_u_perc = total_bytes / double(total_reserved) * 100.0;
1330 
1331   _mc_region.print(total_reserved);
1332   _rw_region.print(total_reserved);
1333   _ro_region.print(total_reserved);
1334   _md_region.print(total_reserved);
1335   _od_region.print(total_reserved);
1336   print_heap_region_stats(_string_regions, "st", total_reserved);
1337   print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);
1338 
1339   tty->print_cr("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
1340                  total_bytes, total_reserved, total_u_perc);
1341 }
1342 
1343 void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
1344                                                          const char *name, const size_t total_size) {
1345   int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
1346   for (int i = 0; i < arr_len; i++) {
1347       char* start = (char*)heap_mem->at(i).start();
1348       size_t size = heap_mem->at(i).byte_size();
1349       char* top = start + size;
1350       tty->print_cr("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100%% used] at " INTPTR_FORMAT,
1351                     name, i, size, size/double(total_size)*100.0, size, p2i(start));
1352 
1353   }
1354 }
1355 
1356 // Update a Java object to point its Klass* to the new location after
1357 // shared archive has been compacted.
1358 void MetaspaceShared::relocate_klass_ptr(oop o) {
1359   assert(DumpSharedSpaces, "sanity");
1360   Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
1361   o->set_klass(k);
1362 }
1363 
1364 class LinkSharedClassesClosure : public KlassClosure {
1365   Thread* THREAD;
1366   bool    _made_progress;
1367  public:
1368   LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {}
1369 
1370   void reset()               { _made_progress = false; }
1371   bool made_progress() const { return _made_progress; }
1372 
1373   void do_klass(Klass* k) {
1374     if (k->is_instance_klass()) {
1375       InstanceKlass* ik = InstanceKlass::cast(k);
1376       // Link the class to cause the bytecodes to be rewritten and the
1377       // cpcache to be created. Class verification is done according
1378       // to -Xverify setting.
1379       _made_progress |= MetaspaceShared::try_link_class(ik, THREAD);
1380       guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
1381 
1382       ik->constants()->resolve_class_constants(THREAD);
1383     }
1384   }
1385 };
1386 
1387 class CheckSharedClassesClosure : public KlassClosure {
1388   bool    _made_progress;
1389  public:
1390   CheckSharedClassesClosure() : _made_progress(false) {}
1391 
1392   void reset()               { _made_progress = false; }
1393   bool made_progress() const { return _made_progress; }
1394   void do_klass(Klass* k) {
1395     if (k->is_instance_klass() && InstanceKlass::cast(k)->check_sharing_error_state()) {
1396       _made_progress = true;
1397     }
1398   }
1399 };
1400 
1401 void MetaspaceShared::check_shared_class_loader_type(Klass* k) {
1402   if (k->is_instance_klass()) {


1567       // classes also being verified. The extra overhead is acceptable during
1568       // dumping.
1569       BytecodeVerificationLocal = BytecodeVerificationRemote;
1570     }
1571     ik->link_class(THREAD);
1572     if (HAS_PENDING_EXCEPTION) {
1573       ResourceMark rm;
1574       tty->print_cr("Preload Warning: Verification failed for %s",
1575                     ik->external_name());
1576       CLEAR_PENDING_EXCEPTION;
1577       ik->set_in_error_state();
1578       _has_error_classes = true;
1579     }
1580     BytecodeVerificationLocal = saved;
1581     return true;
1582   } else {
1583     return false;
1584   }
1585 }
1586 
1587 #if INCLUDE_CDS_JAVA_HEAP
1588 void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
1589   if (!MetaspaceShared::is_heap_object_archiving_allowed()) {
1590     if (log_is_enabled(Info, cds)) {
1591       log_info(cds)(
1592         "Archived java heap is not supported as UseG1GC, "
1593         "UseCompressedOops and UseCompressedClassPointers are required."
1594         "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
1595         BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
1596         BOOL_TO_STR(UseCompressedClassPointers));
1597     }
1598     return;
1599   }
1600 
1601   NoSafepointVerifier nsv;
1602 
1603   // Cache for recording where the archived objects are copied to
1604   MetaspaceShared::create_archive_object_cache();
1605 
1606   tty->print_cr("Dumping String objects to closed archive heap region ...");
1607   NOT_PRODUCT(StringTable::verify());
1608   // The string space has at most two regions. See FileMapInfo::write_archive_heap_regions() for details.
1609   _string_regions = new GrowableArray<MemRegion>(2);
1610   StringTable::write_to_archive(_string_regions);
1611 
1612   tty->print_cr("Dumping objects to open archive heap region ...");
1613   _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
1614   MetaspaceShared::dump_open_archive_heap_objects(_open_archive_heap_regions);
1615 }
1616 
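// ---------------------------------------------------------------------------
// [Editorial sketch -- illustrative only, not part of this change]
// dump_java_heap_objects() above bails out unless heap object archiving is
// possible. Judging from the log message, the check amounts to requiring G1
// together with compressed oops and compressed class pointers. The helper
// below is a hypothetical stand-in for
// MetaspaceShared::is_heap_object_archiving_allowed(), written against plain
// bools so it stands alone.

static bool toy_heap_object_archiving_allowed(bool use_g1_gc,
                                              bool use_compressed_oops,
                                              bool use_compressed_class_pointers) {
  // All three must hold, mirroring the requirements listed in the log message.
  return use_g1_gc && use_compressed_oops && use_compressed_class_pointers;
}
// ---------------------------------------------------------------------------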
1617 void MetaspaceShared::dump_open_archive_heap_objects(
1618                                     GrowableArray<MemRegion> * open_archive) {
1619   assert(UseG1GC, "Only support G1 GC");
1620   assert(UseCompressedOops && UseCompressedClassPointers,
1621          "Only support UseCompressedOops and UseCompressedClassPointers enabled");
1622 
1623   Thread* THREAD = Thread::current();
1624   G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);
1625 
1626   MetaspaceShared::archive_resolved_constants(THREAD);
1627 
1628   G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
1629                                                    os::vm_allocation_granularity());
1630 }
1631 
1632 MetaspaceShared::ArchivedObjectCache* MetaspaceShared::_archive_object_cache = NULL;
1633 oop MetaspaceShared::archive_heap_object(oop obj, Thread* THREAD) {
1634   assert(DumpSharedSpaces, "dump-time only");
1635 
1636   ArchivedObjectCache* cache = MetaspaceShared::archive_object_cache();
1637   oop* p = cache->get(obj);
1638   if (p != NULL) {
1639     // already archived
1640     return *p;
1641   }
1642 
1643   int len = obj->size();
1644   if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
1645     return NULL;
1646   }
1647 
1648   int hash = obj->identity_hash();
1649   oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
1650   if (archived_oop != NULL) {
1651     Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)archived_oop, len);
1652     relocate_klass_ptr(archived_oop);
1653     cache->put(obj, archived_oop);
1654   }
1655   return archived_oop;
1656 }
1657 
1658 void MetaspaceShared::archive_resolved_constants(Thread* THREAD) {
1659   int i;
1660   for (i = 0; i < _global_klass_objects->length(); i++) {
1661     Klass* k = _global_klass_objects->at(i);
1662     if (k->is_instance_klass()) {
1663       InstanceKlass* ik = InstanceKlass::cast(k);
1664       ik->constants()->archive_resolved_references(THREAD);
1665     }
1666   }
1667 }
1668 
1669 void MetaspaceShared::fixup_mapped_heap_regions() {
1670   FileMapInfo *mapinfo = FileMapInfo::current_info();
1671   mapinfo->fixup_mapped_heap_regions();
1672 }
1673 #endif // INCLUDE_CDS_JAVA_HEAP
1674 
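// ---------------------------------------------------------------------------
// [Editorial sketch -- illustrative only, not part of this change]
// ReadClosure below is the read-side counterpart of the WriteClosure used in
// dump_read_only_tables(): MetaspaceShared::serialize() visits the same
// pointers in the same order at dump time and at run time, so a write followed
// by a read reproduces them exactly. A reduced standalone model of that
// symmetry (ToyTape, toy_serialize and toy_round_trip are hypothetical names):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct ToyTape {
  std::vector<intptr_t> _slots;   // the "data area" written at dump time
  size_t                _pos;     // read cursor used at run time
  bool                  _writing;

  void do_ptr(void** p) {
    if (_writing) {
      _slots.push_back((intptr_t)*p);   // dump time: record the pointer value
    } else {
      *p = (void*)_slots[_pos++];       // run time: re-install it, in order
    }
  }
};

// The walk must visit the same pointers in the same order on both sides;
// that fixed ordering is the whole contract.
static void toy_serialize(ToyTape* tape, void** a, void** b) {
  tape->do_ptr(a);
  tape->do_ptr(b);
}

static void toy_round_trip() {
  int x = 0, y = 0;
  void* src_a = &x;
  void* src_b = &y;

  ToyTape tape;
  tape._pos = 0;
  tape._writing = true;
  toy_serialize(&tape, &src_a, &src_b);   // "dump time"

  void* dst_a = NULL;
  void* dst_b = NULL;
  tape._writing = false;
  toy_serialize(&tape, &dst_a, &dst_b);   // "run time"
  assert(dst_a == src_a && dst_b == src_b);
}
// ---------------------------------------------------------------------------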
1675 // Closure for serializing initialization data in from a data area
1676 // (ptr_array) read from the shared file.
1677 
1678 class ReadClosure : public SerializeClosure {
1679 private:
1680   intptr_t** _ptr_array;
1681 
1682   inline intptr_t nextPtr() {
1683     return *(*_ptr_array)++;
1684   }
1685 
1686 public:
1687   ReadClosure(intptr_t** ptr_array) { _ptr_array = ptr_array; }
1688 
1689   void do_ptr(void** p) {
1690     assert(*p == NULL, "initializing previously initialized pointer.");
1691     intptr_t obj = nextPtr();
1692     assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
1693            "hit tag while initializing ptrs.");
1694     *p = (void*)obj;


1840 
1841   // Initialize the run-time symbol table.
1842   SymbolTable::create_table();
1843 
1844   // Close the mapinfo file
1845   mapinfo->close();
1846 
1847   if (PrintSharedArchiveAndExit) {
1848     if (PrintSharedDictionary) {
1849       tty->print_cr("\nShared classes:\n");
1850       SystemDictionary::print_shared(tty);
1851     }
1852     if (_archive_loading_failed) {
1853       tty->print_cr("archive is invalid");
1854       vm_exit(1);
1855     } else {
1856       tty->print_cr("archive is valid");
1857       vm_exit(0);
1858     }
1859   }





1860 }
1861 
1862 // JVM/TI RedefineClasses() support:
1863 bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
1864   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1865 
1866   if (UseSharedSpaces) {
1867     // remap the shared readonly space to shared readwrite, private
1868     FileMapInfo* mapinfo = FileMapInfo::current_info();
1869     if (!mapinfo->remap_shared_readonly_as_readwrite()) {
1870       return false;
1871     }
1872     _remapped_readwrite = true;
1873   }
1874   return true;
1875 }
1876 
1877 void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
1878   // This is highly unlikely to happen on 64-bits because we have reserved a 4GB space.
1879   // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes