
src/share/vm/memory/metaspaceShared.cpp

  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classListParser.hpp"
  27 #include "classfile/classLoaderExt.hpp"
  28 #include "classfile/dictionary.hpp"
  29 #include "classfile/loaderConstraints.hpp"
  30 #include "classfile/placeholders.hpp"
  31 #include "classfile/sharedClassUtil.hpp"
  32 #include "classfile/symbolTable.hpp"
  33 #include "classfile/stringTable.hpp"
  34 #include "classfile/systemDictionary.hpp"
  35 #include "classfile/systemDictionaryShared.hpp"
  36 #include "code/codeCache.hpp"
  37 #include "gc/shared/gcLocker.hpp"
  38 #include "interpreter/bytecodeStream.hpp"
  39 #include "interpreter/bytecodes.hpp"
  40 #include "logging/log.hpp"
  41 #include "logging/logMessage.hpp"
  42 #include "memory/filemap.hpp"
  43 #include "memory/metaspace.hpp"
  44 #include "memory/metaspaceShared.hpp"
  45 #include "memory/resourceArea.hpp"
  46 #include "oops/instanceClassLoaderKlass.hpp"
  47 #include "oops/instanceMirrorKlass.hpp"
  48 #include "oops/instanceRefKlass.hpp"
  49 #include "oops/objArrayKlass.hpp"
  50 #include "oops/objArrayOop.hpp"
  51 #include "oops/oop.inline.hpp"
  52 #include "oops/typeArrayKlass.hpp"
  53 #include "prims/jvm.h"
  54 #include "prims/jvmtiRedefineClasses.hpp"
  55 #include "runtime/timerTrace.hpp"
  56 #include "runtime/os.hpp"
  57 #include "runtime/signature.hpp"
  58 #include "runtime/vmThread.hpp"
  59 #include "runtime/vm_operations.hpp"
  60 #include "utilities/align.hpp"
  61 #include "utilities/defaultStream.hpp"
  62 #include "utilities/hashtable.inline.hpp"
  63 #include "memory/metaspaceClosure.hpp"
  64 
  65 ReservedSpace MetaspaceShared::_shared_rs;
  66 VirtualSpace MetaspaceShared::_shared_vs;
  67 MetaspaceSharedStats MetaspaceShared::_stats;
  68 bool MetaspaceShared::_has_error_classes;
  69 bool MetaspaceShared::_archive_loading_failed = false;
  70 bool MetaspaceShared::_remapped_readwrite = false;

  71 address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
  72 size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
  73 size_t MetaspaceShared::_core_spaces_size = 0;
  74 
  75 // The CDS archive is divided into the following regions:
  76 //     mc - misc code (the method entry trampolines)
  77 //     rw - read-write metadata
  78 //     ro - read-only metadata and read-only tables
  79 //     md - misc data (the c++ vtables)
  80 //     od - optional data (original class files)
  81 //
  82 //     s0 - shared strings #0
  83 //     s1 - shared strings #1 (may be empty)


  84 //
  85 // Except for the s0/s1 regions, the other 5 regions are linearly allocated, starting from
  86 // SharedBaseAddress, in the order of mc->rw->ro->md->od. The sizes of these 5 regions
  87 // are page-aligned, and there's no gap between any consecutive regions.
  88 //
  89 // These 5 regions are populated in the following steps:
  90 // [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
  91 //     temporarily allocated outside of the shared regions. Only the method entry
  92 //     trampolines are written into the mc region.
  93 // [2] ArchiveCompactor copies RW metadata into the rw region.
  94 // [3] ArchiveCompactor copies RO metadata into the ro region.
  95 // [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
  96 //     are copied into the ro region as read-only tables.
  97 // [5] C++ vtables are copied into the md region.
  98 // [6] Original class files are copied into the od region.
  99 //
 100 // The s0/s1 regions are populated inside MetaspaceShared::dump_string_and_symbols. Their
 101 // layout is independent of the other 5 regions.
 102 
 103 class DumpRegion {
 104 private:
 105   const char* _name;
 106   char* _base;
 107   char* _top;
 108   char* _end;
 109   bool _is_packed;
 110 
 111   char* expand_top_to(char* newtop) {
 112     assert(is_allocatable(), "must be initialized and not packed");
 113     assert(newtop >= _top, "must not grow backwards");
 114     if (newtop > _end) {
 115       MetaspaceShared::report_out_of_space(_name, newtop - _top);
 116       ShouldNotReachHere();
 117     }
 118     MetaspaceShared::commit_shared_space_to(newtop);
 119     _top = newtop;
 120     return _top;
 121   }


 178     _base = b;
 179     _top = t;
 180     _end = e;
 181   }
 182 
 183   void pack(DumpRegion* next = NULL) {
 184     assert(!is_packed(), "sanity");
 185     _end = (char*)align_up(_top, Metaspace::reserve_alignment());
 186     _is_packed = true;
 187     if (next != NULL) {
 188       next->_base = next->_top = this->_end;
 189       next->_end = MetaspaceShared::shared_rs()->end();
 190     }
 191   }
 192   bool contains(char* p) {
 193     return base() <= p && p < top();
 194   }
 195 };
 196 
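The pack() chaining shown above is what keeps the core regions contiguous: packing a region rounds its end up to the reserve alignment and hands the next region a base/top at exactly that boundary. A minimal standalone sketch of the same idea (ToyRegion and the fixed 4 KB alignment are illustrative stand-ins, not the HotSpot DumpRegion/Metaspace API):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Toy stand-in for DumpRegion: [base, end) is reserved, [base, top) is used.
struct ToyRegion {
  char* base; char* top; char* end;

  static char* align_up(char* p, size_t alignment) {
    uintptr_t v = (uintptr_t)p;
    return (char*)((v + alignment - 1) & ~(alignment - 1));
  }

  // Packing shrinks the region to what was actually used (rounded up to the
  // alignment) and starts the next region at exactly that boundary, so there
  // is no gap between consecutive regions.
  void pack(ToyRegion* next, char* reserved_end, size_t alignment) {
    end = align_up(top, alignment);
    if (next != nullptr) {
      next->base = next->top = end;
      next->end  = reserved_end;
    }
  }
};

int main() {
  const size_t kAlign = 4096;         // stands in for Metaspace::reserve_alignment()
  static char reserved[16 * 4096];
  char* lo = ToyRegion::align_up(reserved, kAlign);
  char* hi = lo + 8 * 4096;

  ToyRegion mc{lo, lo + 100, hi};     // pretend 100 bytes of trampolines were written
  ToyRegion rw{};
  mc.pack(&rw, hi, kAlign);
  rw.top += 5000;                     // pretend 5000 bytes of RW metadata were copied
  rw.pack(nullptr, hi, kAlign);

  assert(rw.base == mc.end);                         // no gap between consecutive regions
  assert((size_t)(rw.end - mc.base) % kAlign == 0);  // total core size stays aligned
  return 0;
}

Because the regions chain this way, the dump code can later compute the total core size as simply _od_region.end() - _mc_region.base().
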
 197 DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
 198 DumpRegion _s0_region("s0"), _s1_region("s1");
 199 
 200 char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
 201   return _mc_region.allocate(num_bytes);
 202 }
 203 
 204 char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
 205   return _ro_region.allocate(num_bytes);
 206 }
 207 
 208 void MetaspaceShared::initialize_shared_rs() {
 209   const size_t reserve_alignment = Metaspace::reserve_alignment();
 210   bool large_pages = false; // No large pages when dumping the CDS archive.
 211   char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
 212 
 213 #ifdef _LP64
 214   // On 64-bit VM, the heap and class space layout will be the same as if
 215   // you're running in -Xshare:on mode:
 216   //
 217   //                         +-- SharedBaseAddress (default = 0x800000000)
 218   //                         v


 839   double all_perc    = 100.0 * double(all_bytes)    / double(ro_all + rw_all);
 840 
 841   info_stream.print_cr("%s", sep);
 842   info_stream.print_cr(fmt_stats, "Total",
 843                        all_ro_count, all_ro_bytes, all_ro_perc,
 844                        all_rw_count, all_rw_bytes, all_rw_perc,
 845                        all_count, all_bytes, all_perc);
 846 
 847   assert(all_ro_bytes == ro_all, "everything should have been counted");
 848   assert(all_rw_bytes == rw_all, "everything should have been counted");
 849 
 850   msg.info("%s", info_stream.as_string());
 851 #undef fmt_stats
 852 }
 853 
 854 // Populate the shared space.
 855 
 856 class VM_PopulateDumpSharedSpace: public VM_Operation {
 857 private:
 858   GrowableArray<MemRegion> *_string_regions;

 859 
 860   void dump_string_and_symbols();

 861   char* dump_read_only_tables();
 862   void print_region_stats();
 863 public:
 864 
 865   VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
 866   void doit();   // outline because gdb sucks
 867   static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only,  bool allow_exec);
 868 }; // class VM_PopulateDumpSharedSpace
 869 
 870 class SortedSymbolClosure: public SymbolClosure {
 871   GrowableArray<Symbol*> _symbols;
 872   virtual void do_symbol(Symbol** sym) {
 873     assert((*sym)->is_permanent(), "archived symbols must be permanent");
 874     _symbols.append(*sym);
 875   }
 876   static int compare_symbols_by_address(Symbol** a, Symbol** b) {
 877     if (a[0] < b[0]) {
 878       return -1;
 879     } else if (a[0] == b[0]) {
 880       return 0;


1055       ResourceMark rm;
1056       RefRelocator ext_reloc;
1057       iterate_roots(&ext_reloc);
1058     }
1059 
1060 #ifdef ASSERT
1061     {
1062       tty->print_cr("Verifying external roots ... ");
1063       ResourceMark rm;
1064       IsRefInArchiveChecker checker;
1065       iterate_roots(&checker);
1066     }
1067 #endif
1068 
1069 
1070     // cleanup
1071     _ssc = NULL;
1072   }
1073 
1074   // We must relocate SystemDictionary::_well_known_klasses only after we have copied the
1075   // strings in during dump_string_and_symbols(): during the string copy, we operate on old
1076   // String objects which assert that their klass is the old
1077   // SystemDictionary::String_klass().
1078   static void relocate_well_known_klasses() {
1079     {
1080       tty->print_cr("Relocating SystemDictionary::_well_known_klasses[] ... ");
1081       ResourceMark rm;
1082       RefRelocator ext_reloc;
1083       SystemDictionary::well_known_klasses_do(&ext_reloc);
1084     }
1085     // NOTE: after this point, we shouldn't have any globals that can reach the old
1086     // objects.
1087 
1088     // We cannot use any of the objects in the heap anymore (except for the objects
1089     // in the CDS shared string regions) because their headers no longer point to
1090     // valid Klasses.
1091   }
1092 
1093   static void iterate_roots(MetaspaceClosure* it) {
1094     GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols();
1095     for (int i=0; i<symbols->length(); i++) {
1096       it->push(symbols->adr_at(i));
1097     }


1110   }
1111 
1112   static Klass* get_relocated_klass(Klass* orig_klass) {
1113     address* pp = _new_loc_table->get((address)orig_klass);
1114     assert(pp != NULL, "must be");
1115     Klass* klass = (Klass*)(*pp);
1116     assert(klass->is_klass(), "must be");
1117     return klass;
1118   }
1119 };
1120 
1121 DumpAllocStats* ArchiveCompactor::_alloc_stats;
1122 SortedSymbolClosure* ArchiveCompactor::_ssc;
1123 ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table;
1124 
1125 void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx,
1126                                               DumpRegion* dump_region, bool read_only,  bool allow_exec) {
1127   mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
1128 }
1129 
1130 void VM_PopulateDumpSharedSpace::dump_string_and_symbols() {
1131   tty->print_cr("Dumping string and symbol tables ...");
1132 
1133   NOT_PRODUCT(SymbolTable::verify());
1134   NOT_PRODUCT(StringTable::verify());
1135   SymbolTable::write_to_archive();
1136 
1137   // The string space has at most two regions. See FileMapInfo::write_string_regions() for details.
1138   _string_regions = new GrowableArray<MemRegion>(2);
1139   StringTable::write_to_archive(_string_regions);
1140 }
1141 
1142 char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
1143   char* oldtop = _ro_region.top();
1144   // Reorder the system dictionary. Moving the symbols affects
1145   // how the hash table indices are calculated.
1146   SystemDictionary::reorder_dictionary_for_sharing();
1147   NOT_PRODUCT(SystemDictionary::verify();)
1148 
1149   size_t buckets_bytes = SystemDictionary::count_bytes_for_buckets();
1150   char* buckets_top = _ro_region.allocate(buckets_bytes, sizeof(intptr_t));
1151   SystemDictionary::copy_buckets(buckets_top, _ro_region.top());
1152 
1153   size_t table_bytes = SystemDictionary::count_bytes_for_table();
1154   char* table_top = _ro_region.allocate(table_bytes, sizeof(intptr_t));
1155   SystemDictionary::copy_table(table_top, _ro_region.top());
1156 
1157   // Write the other data to the output array.
1158   WriteClosure wc(&_ro_region);
1159   MetaspaceShared::serialize(&wc);


1189 
1190   tty->print_cr("Number of classes %d", _global_klass_objects->length());
1191   {
1192     int num_type_array = 0, num_obj_array = 0, num_inst = 0;
1193     for (int i = 0; i < _global_klass_objects->length(); i++) {
1194       Klass* k = _global_klass_objects->at(i);
1195       if (k->is_instance_klass()) {
1196         num_inst ++;
1197       } else if (k->is_objArray_klass()) {
1198         num_obj_array ++;
1199       } else {
1200         assert(k->is_typeArray_klass(), "sanity");
1201         num_type_array ++;
1202       }
1203     }
1204     tty->print_cr("    instance classes   = %5d", num_inst);
1205     tty->print_cr("    obj array classes  = %5d", num_obj_array);
1206     tty->print_cr("    type array classes = %5d", num_type_array);
1207   }
1208 
1209 
1210   // Ensure the ConstMethods won't be modified at run-time
1211   tty->print("Updating ConstMethods ... ");
1212   rewrite_nofast_bytecodes_and_calculate_fingerprints();
1213   tty->print_cr("done. ");
1214 
1215   // Remove all references outside the metadata
1216   tty->print("Removing unshareable information ... ");
1217   remove_unshareable_in_classes();
1218   tty->print_cr("done. ");
1219 
1220   ArchiveCompactor::initialize();
1221   ArchiveCompactor::copy_and_compact();
1222 
1223   dump_string_and_symbols();
1224   ArchiveCompactor::relocate_well_known_klasses();
1225 
1226   char* read_only_tables_start = dump_read_only_tables();
1227   _ro_region.pack(&_md_region);
1228 
1229   char* vtbl_list = _md_region.top();
1230   MetaspaceShared::allocate_cpp_vtable_clones();
1231   _md_region.pack(&_od_region);
1232 
1233   // Relocate the archived class file data into the od region
1234   relocate_cached_class_file();
1235   _od_region.pack();
1236 
1237   // The 5 core spaces are allocated consecutively mc->rw->ro->md->od, so their total size
1238   // is just the space between the two ends.
1239   size_t core_spaces_size = _od_region.end() - _mc_region.base();
1240   assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
1241          "should already be aligned");
1242 
1243   // During patching, some virtual methods may be called, so at this point
1244   // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
1245   MetaspaceShared::patch_cpp_vtable_pointers();
1246 
1247   // The vtable clones contain addresses of the current process.
1248   // We don't want to write these addresses into the archive.
1249   MetaspaceShared::zero_cpp_vtable_clones_for_writing();
1250 
1251   // Create and write the archive file that maps the shared spaces.
1252 
1253   FileMapInfo* mapinfo = new FileMapInfo();
1254   mapinfo->populate_header(os::vm_allocation_granularity());
1255   mapinfo->set_read_only_tables_start(read_only_tables_start);
1256   mapinfo->set_misc_data_patching_start(vtbl_list);
1257   mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers());
1258   mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size());
1259   mapinfo->set_core_spaces_size(core_spaces_size);
1260 
1261   char* s0_start, *s0_top, *s0_end;
1262   char* s1_start, *s1_top, *s1_end;


1263 
1264   for (int pass=1; pass<=2; pass++) {
1265     if (pass == 1) {
1266       // The first pass doesn't actually write the data to disk. All it
1267       // does is to update the fields in the mapinfo->_header.
1268     } else {
1269       // After the first pass, the contents of mapinfo->_header are finalized,
1270       // so we can compute the header's CRC, and write the contents of the header
1271       // and the regions to disk.
1272       mapinfo->open_for_write();
1273       mapinfo->set_header_crc(mapinfo->compute_header_crc());
1274     }
1275     mapinfo->write_header();
1276 
1277     // NOTE: mc contains the trampoline code for method entries, which are patched at run time,
1278     // so it needs to be read/write.
1279     write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
1280     write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
1281     write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
1282     write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
1283     write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);
1284 
1285     mapinfo->write_string_regions(_string_regions,
1286                                   &s0_start, &s0_top, &s0_end,
1287                                   &s1_start, &s1_top, &s1_end);
1288   }
1289 
1290   mapinfo->close();
1291 
1292   // Restore the vtable in case we invoke any virtual methods.
1293   MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);
1294 
1295   _s0_region.init(s0_start, s0_top, s0_end);
1296   _s1_region.init(s1_start, s1_top, s1_end);


1297   print_region_stats();
1298 
1299   if (log_is_enabled(Info, cds)) {
1300     ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
1301                                                  int(_mc_region.used()), int(_md_region.used()));
1302   }
1303 }
1304 
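The two passes in the loop above exist because the header's CRC can only be computed after every header field is final, and the fields are only final after each region has been sized once: pass 1 merely fills in mapinfo->_header, pass 2 writes the finalized header and the real bytes. A self-contained sketch of that pattern (ToyHeader, toy_crc and the file name are hypothetical illustrations, not the FileMapInfo API):

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

struct ToyHeader { uint32_t crc; uint64_t region_size[2]; };

// Stand-in for a real checksum, only here so the sketch is runnable.
static uint32_t toy_crc(const void* p, size_t n) {
  uint32_t h = 2166136261u;
  for (size_t i = 0; i < n; i++) { h = (h ^ ((const uint8_t*)p)[i]) * 16777619u; }
  return h;
}

int main() {
  std::vector<char> region0(100, 'a'), region1(200, 'b');
  ToyHeader header;
  std::memset(&header, 0, sizeof(header));
  std::FILE* out = nullptr;

  for (int pass = 1; pass <= 2; pass++) {
    if (pass == 2) {
      // The header fields were filled in by pass 1, so the checksum is stable now.
      header.crc = toy_crc(header.region_size, sizeof(header.region_size));
      out = std::fopen("toy.archive", "wb");
      if (out == nullptr) return 1;
      std::fwrite(&header, sizeof(header), 1, out);
    }
    // "write_region": pass 1 only records the sizes, pass 2 writes the bytes.
    header.region_size[0] = region0.size();
    header.region_size[1] = region1.size();
    if (pass == 2) {
      std::fwrite(region0.data(), 1, region0.size(), out);
      std::fwrite(region1.data(), 1, region1.size(), out);
    }
  }
  return std::fclose(out) == 0 ? 0 : 1;
}
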
1305 void VM_PopulateDumpSharedSpace::print_region_stats() {
1306   // Print statistics of all the regions
1307   const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
1308                                 _mc_region.reserved() + _md_region.reserved() +
1309                                 _od_region.reserved() +
1310                                 _s0_region.reserved() + _s1_region.reserved();

1311   const size_t total_bytes = _ro_region.used() + _rw_region.used() +
1312                              _mc_region.used() + _md_region.used() +
1313                              _od_region.used() +
1314                              _s0_region.used() + _s1_region.used();

1315   const double total_u_perc = total_bytes / double(total_reserved) * 100.0;
1316 
1317   _mc_region.print(total_reserved);
1318   _rw_region.print(total_reserved);
1319   _ro_region.print(total_reserved);
1320   _md_region.print(total_reserved);
1321   _od_region.print(total_reserved);
1322   _s0_region.print(total_reserved);
1323   _s1_region.print(total_reserved);


1324 
1325   tty->print_cr("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
1326                  total_bytes, total_reserved, total_u_perc);
1327 }
1328 
1329 
1330 // Update a Java object to point its Klass* to the new location after
1331 // shared archive has been compacted.
1332 void MetaspaceShared::relocate_klass_ptr(oop o) {
1333   assert(DumpSharedSpaces, "sanity");
1334   Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
1335   o->set_klass(k);
1336 }
1337 
1338 class LinkSharedClassesClosure : public KlassClosure {
1339   Thread* THREAD;
1340   bool    _made_progress;
1341  public:
1342   LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {}
1343 
1344   void reset()               { _made_progress = false; }
1345   bool made_progress() const { return _made_progress; }
1346 
1347   void do_klass(Klass* k) {
1348     if (k->is_instance_klass()) {
1349       InstanceKlass* ik = InstanceKlass::cast(k);
1350       // Link the class to cause the bytecodes to be rewritten and the
1351       // cpcache to be created. Class verification is done according
1352       // to -Xverify setting.
1353       _made_progress |= MetaspaceShared::try_link_class(ik, THREAD);
1354       guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");


1355     }
1356   }
1357 };
1358 
1359 class CheckSharedClassesClosure : public KlassClosure {
1360   bool    _made_progress;
1361  public:
1362   CheckSharedClassesClosure() : _made_progress(false) {}
1363 
1364   void reset()               { _made_progress = false; }
1365   bool made_progress() const { return _made_progress; }
1366   void do_klass(Klass* k) {
1367     if (k->is_instance_klass() && InstanceKlass::cast(k)->check_sharing_error_state()) {
1368       _made_progress = true;
1369     }
1370   }
1371 };
1372 
1373 void MetaspaceShared::check_shared_class_loader_type(Klass* k) {
1374   if (k->is_instance_klass()) {


1539       // classes also being verified. The extra overhead is acceptable during
1540       // dumping.
1541       BytecodeVerificationLocal = BytecodeVerificationRemote;
1542     }
1543     ik->link_class(THREAD);
1544     if (HAS_PENDING_EXCEPTION) {
1545       ResourceMark rm;
1546       tty->print_cr("Preload Warning: Verification failed for %s",
1547                     ik->external_name());
1548       CLEAR_PENDING_EXCEPTION;
1549       ik->set_in_error_state();
1550       _has_error_classes = true;
1551     }
1552     BytecodeVerificationLocal = saved;
1553     return true;
1554   } else {
1555     return false;
1556   }
1557 }
1558 
1559 // Closure for serializing initialization data in from a data area
1560 // (ptr_array) read from the shared file.
1561 
1562 class ReadClosure : public SerializeClosure {
1563 private:
1564   intptr_t** _ptr_array;
1565 
1566   inline intptr_t nextPtr() {
1567     return *(*_ptr_array)++;
1568   }
1569 
1570 public:
1571   ReadClosure(intptr_t** ptr_array) { _ptr_array = ptr_array; }
1572 
1573   void do_ptr(void** p) {
1574     assert(*p == NULL, "initializing previously initialized pointer.");
1575     intptr_t obj = nextPtr();
1576     assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
1577            "hit tag while initializing ptrs.");
1578     *p = (void*)obj;
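
ReadClosure above is the restore-time mirror of the WriteClosure used in dump_read_only_tables(): both are driven by the same serialize() routine (the dump side is visible above, passing a WriteClosure to MetaspaceShared::serialize), so pointers come back out of the flat intptr_t array in exactly the order they went in. A standalone sketch of that write-then-read round trip with toy closures (the real SerializeClosure also handles tags and regions, which the sketch omits):

#include <cassert>
#include <cstdint>
#include <vector>

// Toy write side: appends each pointer as one intptr_t slot.
struct ToyWriteClosure {
  std::vector<intptr_t>* out;
  void do_ptr(void** p) { out->push_back((intptr_t)*p); }
};

// Toy read side: mirrors ReadClosure, consuming slots in the same order.
struct ToyReadClosure {
  const intptr_t* cur;
  void do_ptr(void** p) { *p = (void*)*cur++; }
};

// The same "serialize" routine runs at dump time and at restore time; only
// the closure differs, so write order and read order match by construction.
template <typename Closure>
void serialize(Closure* soc, void** a, void** b) {
  soc->do_ptr(a);
  soc->do_ptr(b);
}

int main() {
  int x = 1, y = 2;
  void* dumped_a = &x;
  void* dumped_b = &y;

  std::vector<intptr_t> archive;
  ToyWriteClosure wc{&archive};
  serialize(&wc, &dumped_a, &dumped_b);       // dump time

  void* restored_a = nullptr;
  void* restored_b = nullptr;
  ToyReadClosure rc{archive.data()};
  serialize(&rc, &restored_a, &restored_b);   // restore time

  assert(restored_a == &x && restored_b == &y);
  return 0;
}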


1724 
1725   // Initialize the run-time symbol table.
1726   SymbolTable::create_table();
1727 
1728   // Close the mapinfo file
1729   mapinfo->close();
1730 
1731   if (PrintSharedArchiveAndExit) {
1732     if (PrintSharedDictionary) {
1733       tty->print_cr("\nShared classes:\n");
1734       SystemDictionary::print_shared(tty);
1735     }
1736     if (_archive_loading_failed) {
1737       tty->print_cr("archive is invalid");
1738       vm_exit(1);
1739     } else {
1740       tty->print_cr("archive is valid");
1741       vm_exit(0);
1742     }
1743   }
1744 }
1745 
1746 void MetaspaceShared::fixup_shared_string_regions() {
1747   FileMapInfo *mapinfo = FileMapInfo::current_info();
1748   mapinfo->fixup_string_regions();
1749 }
1750 
1751 // JVM/TI RedefineClasses() support:
1752 bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
1753   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1754 
1755   if (UseSharedSpaces) {
1756     // remap the shared readonly space to shared readwrite, private
1757     FileMapInfo* mapinfo = FileMapInfo::current_info();
1758     if (!mapinfo->remap_shared_readonly_as_readwrite()) {
1759       return false;
1760     }
1761     _remapped_readwrite = true;
1762   }
1763   return true;
1764 }
1765 
1766 void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
1767   // This is highly unlikely to happen on 64-bit platforms because we have reserved a 4GB space.
1768   // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes


  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classListParser.hpp"
  27 #include "classfile/classLoaderExt.hpp"
  28 #include "classfile/dictionary.hpp"
  29 #include "classfile/loaderConstraints.hpp"
  30 #include "classfile/placeholders.hpp"
  31 #include "classfile/sharedClassUtil.hpp"
  32 #include "classfile/symbolTable.hpp"
  33 #include "classfile/stringTable.hpp"
  34 #include "classfile/systemDictionary.hpp"
  35 #include "classfile/systemDictionaryShared.hpp"
  36 #include "code/codeCache.hpp"
  37 #if INCLUDE_ALL_GCS
  38 #include "gc/g1/g1Allocator.inline.hpp"
  39 #include "gc/g1/g1CollectedHeap.hpp"
  40 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
  41 #endif
  42 #include "gc/shared/gcLocker.hpp"
  43 #include "interpreter/bytecodeStream.hpp"
  44 #include "interpreter/bytecodes.hpp"
  45 #include "logging/log.hpp"
  46 #include "logging/logMessage.hpp"
  47 #include "memory/filemap.hpp"
  48 #include "memory/metaspace.hpp"
  49 #include "memory/metaspaceShared.hpp"
  50 #include "memory/resourceArea.hpp"
  51 #include "oops/instanceClassLoaderKlass.hpp"
  52 #include "oops/instanceMirrorKlass.hpp"
  53 #include "oops/instanceRefKlass.hpp"
  54 #include "oops/objArrayKlass.hpp"
  55 #include "oops/objArrayOop.hpp"
  56 #include "oops/oop.inline.hpp"
  57 #include "oops/typeArrayKlass.hpp"
  58 #include "prims/jvm.h"
  59 #include "prims/jvmtiRedefineClasses.hpp"
  60 #include "runtime/timerTrace.hpp"
  61 #include "runtime/os.hpp"
  62 #include "runtime/signature.hpp"
  63 #include "runtime/vmThread.hpp"
  64 #include "runtime/vm_operations.hpp"
  65 #include "utilities/align.hpp"
  66 #include "utilities/defaultStream.hpp"
  67 #include "utilities/hashtable.inline.hpp"
  68 #include "memory/metaspaceClosure.hpp"
  69 
  70 ReservedSpace MetaspaceShared::_shared_rs;
  71 VirtualSpace MetaspaceShared::_shared_vs;
  72 MetaspaceSharedStats MetaspaceShared::_stats;
  73 bool MetaspaceShared::_has_error_classes;
  74 bool MetaspaceShared::_archive_loading_failed = false;
  75 bool MetaspaceShared::_remapped_readwrite = false;
  76 bool MetaspaceShared::_open_archive_heap_region_mapped = false;
  77 address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
  78 size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
  79 size_t MetaspaceShared::_core_spaces_size = 0;
  80 
  81 // The CDS archive is divided into the following regions:
  82 //     mc  - misc code (the method entry trampolines)
  83 //     rw  - read-write metadata
  84 //     ro  - read-only metadata and read-only tables
  85 //     md  - misc data (the c++ vtables)
  86 //     od  - optional data (original class files)
  87 //
  88 //     s0  - shared strings (closed archive heap space) #0
  89 //     s1  - shared strings (closed archive heap space) #1 (may be empty)
  90 //     oa0 - open archive heap space #0
  91 //     oa1 - open archive heap space #1 (may be empty)
  92 //
  93 // The mc, rw, ro, md and od regions are linearly allocated, starting from
  94 // SharedBaseAddress, in the order of mc->rw->ro->md->od. The sizes of these 5 regions
  95 // are page-aligned, and there's no gap between any consecutive regions.
  96 //
  97 // These 5 regions are populated in the following steps:
  98 // [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
  99 //     temporarily allocated outside of the shared regions. Only the method entry
 100 //     trampolines are written into the mc region.
 101 // [2] ArchiveCompactor copies RW metadata into the rw region.
 102 // [3] ArchiveCompactor copies RO metadata into the ro region.
 103 // [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
 104 //     are copied into the ro region as read-only tables.
 105 // [5] C++ vtables are copied into the md region.
 106 // [6] Original class files are copied into the od region.
 107 //
 108 // The s0/s1 and oa0/oa1 regions are populated inside MetaspaceShared::dump_java_heap_objects.
 109 // Their layout is independent of the other 5 regions.
 110 
 111 class DumpRegion {
 112 private:
 113   const char* _name;
 114   char* _base;
 115   char* _top;
 116   char* _end;
 117   bool _is_packed;
 118 
 119   char* expand_top_to(char* newtop) {
 120     assert(is_allocatable(), "must be initialized and not packed");
 121     assert(newtop >= _top, "must not grow backwards");
 122     if (newtop > _end) {
 123       MetaspaceShared::report_out_of_space(_name, newtop - _top);
 124       ShouldNotReachHere();
 125     }
 126     MetaspaceShared::commit_shared_space_to(newtop);
 127     _top = newtop;
 128     return _top;
 129   }


 186     _base = b;
 187     _top = t;
 188     _end = e;
 189   }
 190 
 191   void pack(DumpRegion* next = NULL) {
 192     assert(!is_packed(), "sanity");
 193     _end = (char*)align_up(_top, Metaspace::reserve_alignment());
 194     _is_packed = true;
 195     if (next != NULL) {
 196       next->_base = next->_top = this->_end;
 197       next->_end = MetaspaceShared::shared_rs()->end();
 198     }
 199   }
 200   bool contains(char* p) {
 201     return base() <= p && p < top();
 202   }
 203 };
 204 
 205 DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
 206 DumpRegion _s0_region("s0"), _s1_region("s1"), _oa0_region("oa0"), _oa1_region("oa1");
 207 
 208 char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
 209   return _mc_region.allocate(num_bytes);
 210 }
 211 
 212 char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
 213   return _ro_region.allocate(num_bytes);
 214 }
 215 
 216 void MetaspaceShared::initialize_shared_rs() {
 217   const size_t reserve_alignment = Metaspace::reserve_alignment();
 218   bool large_pages = false; // No large pages when dumping the CDS archive.
 219   char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
 220 
 221 #ifdef _LP64
 222   // On 64-bit VM, the heap and class space layout will be the same as if
 223   // you're running in -Xshare:on mode:
 224   //
 225   //                         +-- SharedBaseAddress (default = 0x800000000)
 226   //                         v


 847   double all_perc    = 100.0 * double(all_bytes)    / double(ro_all + rw_all);
 848 
 849   info_stream.print_cr("%s", sep);
 850   info_stream.print_cr(fmt_stats, "Total",
 851                        all_ro_count, all_ro_bytes, all_ro_perc,
 852                        all_rw_count, all_rw_bytes, all_rw_perc,
 853                        all_count, all_bytes, all_perc);
 854 
 855   assert(all_ro_bytes == ro_all, "everything should have been counted");
 856   assert(all_rw_bytes == rw_all, "everything should have been counted");
 857 
 858   msg.info("%s", info_stream.as_string());
 859 #undef fmt_stats
 860 }
 861 
 862 // Populate the shared space.
 863 
 864 class VM_PopulateDumpSharedSpace: public VM_Operation {
 865 private:
 866   GrowableArray<MemRegion> *_string_regions;
 867   GrowableArray<MemRegion> *_open_archive_heap_regions;
 868 
 869   void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
 870   void dump_symbols();
 871   char* dump_read_only_tables();
 872   void print_region_stats();
 873 public:
 874 
 875   VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
 876   void doit();   // outline because gdb sucks
 877   static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only,  bool allow_exec);
 878 }; // class VM_PopulateDumpSharedSpace
 879 
 880 class SortedSymbolClosure: public SymbolClosure {
 881   GrowableArray<Symbol*> _symbols;
 882   virtual void do_symbol(Symbol** sym) {
 883     assert((*sym)->is_permanent(), "archived symbols must be permanent");
 884     _symbols.append(*sym);
 885   }
 886   static int compare_symbols_by_address(Symbol** a, Symbol** b) {
 887     if (a[0] < b[0]) {
 888       return -1;
 889     } else if (a[0] == b[0]) {
 890       return 0;


1065       ResourceMark rm;
1066       RefRelocator ext_reloc;
1067       iterate_roots(&ext_reloc);
1068     }
1069 
1070 #ifdef ASSERT
1071     {
1072       tty->print_cr("Verifying external roots ... ");
1073       ResourceMark rm;
1074       IsRefInArchiveChecker checker;
1075       iterate_roots(&checker);
1076     }
1077 #endif
1078 
1079 
1080     // cleanup
1081     _ssc = NULL;
1082   }
1083 
1084   // We must relocate SystemDictionary::_well_known_klasses only after we have copied the
1085   // java objects in during dump_java_heap_objects(): during the object copy, we operate on
1086   // old objects which assert that their klass is the original klass. 

1087   static void relocate_well_known_klasses() {
1088     {
1089       tty->print_cr("Relocating SystemDictionary::_well_known_klasses[] ... ");
1090       ResourceMark rm;
1091       RefRelocator ext_reloc;
1092       SystemDictionary::well_known_klasses_do(&ext_reloc);
1093     }
1094     // NOTE: after this point, we shouldn't have any globals that can reach the old
1095     // objects.
1096 
1097     // We cannot use any of the objects in the heap anymore (except for the objects
1098     // in the CDS shared string regions) because their headers no longer point to
1099     // valid Klasses.
1100   }
1101 
1102   static void iterate_roots(MetaspaceClosure* it) {
1103     GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols();
1104     for (int i=0; i<symbols->length(); i++) {
1105       it->push(symbols->adr_at(i));
1106     }


1119   }
1120 
1121   static Klass* get_relocated_klass(Klass* orig_klass) {
1122     address* pp = _new_loc_table->get((address)orig_klass);
1123     assert(pp != NULL, "must be");
1124     Klass* klass = (Klass*)(*pp);
1125     assert(klass->is_klass(), "must be");
1126     return klass;
1127   }
1128 };
1129 
1130 DumpAllocStats* ArchiveCompactor::_alloc_stats;
1131 SortedSymbolClosure* ArchiveCompactor::_ssc;
1132 ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table;
1133 
1134 void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx,
1135                                               DumpRegion* dump_region, bool read_only,  bool allow_exec) {
1136   mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
1137 }
1138 
1139 void VM_PopulateDumpSharedSpace::dump_symbols() {
1140   tty->print_cr("Dumping symbol table ...");
1141 
1142   NOT_PRODUCT(SymbolTable::verify());

1143   SymbolTable::write_to_archive();
1144 }
1145 
1146 char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
1147   char* oldtop = _ro_region.top();
1148   // Reorder the system dictionary. Moving the symbols affects
1149   // how the hash table indices are calculated.
1150   SystemDictionary::reorder_dictionary_for_sharing();
1151   NOT_PRODUCT(SystemDictionary::verify();)
1152 
1153   size_t buckets_bytes = SystemDictionary::count_bytes_for_buckets();
1154   char* buckets_top = _ro_region.allocate(buckets_bytes, sizeof(intptr_t));
1155   SystemDictionary::copy_buckets(buckets_top, _ro_region.top());
1156 
1157   size_t table_bytes = SystemDictionary::count_bytes_for_table();
1158   char* table_top = _ro_region.allocate(table_bytes, sizeof(intptr_t));
1159   SystemDictionary::copy_table(table_top, _ro_region.top());
1160 
1161   // Write the other data to the output array.
1162   WriteClosure wc(&_ro_region);
1163   MetaspaceShared::serialize(&wc);


1193 
1194   tty->print_cr("Number of classes %d", _global_klass_objects->length());
1195   {
1196     int num_type_array = 0, num_obj_array = 0, num_inst = 0;
1197     for (int i = 0; i < _global_klass_objects->length(); i++) {
1198       Klass* k = _global_klass_objects->at(i);
1199       if (k->is_instance_klass()) {
1200         num_inst ++;
1201       } else if (k->is_objArray_klass()) {
1202         num_obj_array ++;
1203       } else {
1204         assert(k->is_typeArray_klass(), "sanity");
1205         num_type_array ++;
1206       }
1207     }
1208     tty->print_cr("    instance classes   = %5d", num_inst);
1209     tty->print_cr("    obj array classes  = %5d", num_obj_array);
1210     tty->print_cr("    type array classes = %5d", num_type_array);
1211   }
1212 

1213   // Ensure the ConstMethods won't be modified at run-time
1214   tty->print("Updating ConstMethods ... ");
1215   rewrite_nofast_bytecodes_and_calculate_fingerprints();
1216   tty->print_cr("done. ");
1217 
1218   // Remove all references outside the metadata
1219   tty->print("Removing unshareable information ... ");
1220   remove_unshareable_in_classes();
1221   tty->print_cr("done. ");
1222 
1223   ArchiveCompactor::initialize();
1224   ArchiveCompactor::copy_and_compact();
1225 
1226   dump_symbols();
1227 
1228   // Dump supported java heap objects
1229   _string_regions = NULL;
1230   _open_archive_heap_regions = NULL;
1231   dump_java_heap_objects();
1232 
1233   ArchiveCompactor::relocate_well_known_klasses();
1234 
1235   char* read_only_tables_start = dump_read_only_tables();
1236   _ro_region.pack(&_md_region);
1237 
1238   char* vtbl_list = _md_region.top();
1239   MetaspaceShared::allocate_cpp_vtable_clones();
1240   _md_region.pack(&_od_region);
1241 
1242   // Relocate the archived class file data into the od region
1243   relocate_cached_class_file();
1244   _od_region.pack();
1245 
1246   // The 5 core spaces are allocated consecutively mc->rw->ro->md->od, so their total size
1247   // is just the space between the two ends.
1248   size_t core_spaces_size = _od_region.end() - _mc_region.base();
1249   assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
1250          "should already be aligned");
1251 
1252   // During patching, some virtual methods may be called, so at this point
1253   // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
1254   MetaspaceShared::patch_cpp_vtable_pointers();
1255 
1256   // The vtable clones contain addresses of the current process.
1257   // We don't want to write these addresses into the archive.
1258   MetaspaceShared::zero_cpp_vtable_clones_for_writing();
1259 
1260   // Create and write the archive file that maps the shared spaces.
1261 
1262   FileMapInfo* mapinfo = new FileMapInfo();
1263   mapinfo->populate_header(os::vm_allocation_granularity());
1264   mapinfo->set_read_only_tables_start(read_only_tables_start);
1265   mapinfo->set_misc_data_patching_start(vtbl_list);
1266   mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers());
1267   mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size());
1268   mapinfo->set_core_spaces_size(core_spaces_size);
1269 
1270   char* s0_start, *s0_top;
1271   char* s1_start, *s1_top;
1272   char* oa0_start, *oa0_top;
1273   char* oa1_start, *oa1_top;
1274 
1275   for (int pass=1; pass<=2; pass++) {
1276     if (pass == 1) {
1277       // The first pass doesn't actually write the data to disk. All it
1278       // does is to update the fields in the mapinfo->_header.
1279     } else {
1280       // After the first pass, the contents of mapinfo->_header are finalized,
1281       // so we can compute the header's CRC, and write the contents of the header
1282       // and the regions to disk.
1283       mapinfo->open_for_write();
1284       mapinfo->set_header_crc(mapinfo->compute_header_crc());
1285     }
1286     mapinfo->write_header();
1287 
1288     // NOTE: md contains the trampoline code for method entries, which are patched at run time,
1289     // so it needs to be read/write.
1290     write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
1291     write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
1292     write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
1293     write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
1294     write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);
1295 
1296     mapinfo->write_archive_heap_regions(_string_regions,
1297                                         MetaspaceShared::first_string,
1298                                         MetaspaceShared::max_strings,
1299                                         &s0_start, &s0_top,
1300                                         &s1_start, &s1_top);
1301     mapinfo->write_archive_heap_regions(_open_archive_heap_regions,
1302                                         MetaspaceShared::first_open_archive_heap_region,
1303                                         MetaspaceShared::max_open_archive_heap_region,
1304                                         &oa0_start, &oa0_top,
1305                                         &oa1_start, &oa1_top);
1306   }
1307 
1308   mapinfo->close();
1309 
1310   // Restore the vtable in case we invoke any virtual methods.
1311   MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);
1312 
1313   _s0_region.init(s0_start, s0_top, s0_top);
1314   _s1_region.init(s1_start, s1_top, s1_top);
1315   _oa0_region.init(oa0_start, oa0_top, oa0_top);
1316   _oa1_region.init(oa1_start, oa1_top, oa1_top);
1317   print_region_stats();
1318 
1319   if (log_is_enabled(Info, cds)) {
1320     ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
1321                                                  int(_mc_region.used()), int(_md_region.used()));
1322   }
1323 }
1324 
1325 void VM_PopulateDumpSharedSpace::print_region_stats() {
1326   // Print statistics of all the regions
1327   const size_t total_reserved = _ro_region.reserved()  + _rw_region.reserved() +
1328                                 _mc_region.reserved()  + _md_region.reserved() +
1329                                 _od_region.reserved()  +
1330                                 _s0_region.reserved()  + _s1_region.reserved() +
1331                                 _oa0_region.reserved() + _oa1_region.reserved();
1332   const size_t total_bytes = _ro_region.used()  + _rw_region.used() +
1333                              _mc_region.used()  + _md_region.used() +
1334                              _od_region.used()  +
1335                              _s0_region.used()  + _s1_region.used() +
1336                              _oa0_region.used() + _oa1_region.used();
1337   const double total_u_perc = total_bytes / double(total_reserved) * 100.0;
1338 
1339   _mc_region.print(total_reserved);
1340   _rw_region.print(total_reserved);
1341   _ro_region.print(total_reserved);
1342   _md_region.print(total_reserved);
1343   _od_region.print(total_reserved);
1344   _s0_region.print(total_reserved);
1345   _s1_region.print(total_reserved);
1346   _oa0_region.print(total_reserved);
1347   _oa1_region.print(total_reserved);
1348 
1349   tty->print_cr("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
1350                  total_bytes, total_reserved, total_u_perc);
1351 }
1352 
1353 
1354 // Update a Java object to point its Klass* to the new location after
1355 // shared archive has been compacted.
1356 void MetaspaceShared::relocate_klass_ptr(oop o) {
1357   assert(DumpSharedSpaces, "sanity");
1358   Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
1359   o->set_klass(k);
1360 }
1361 
1362 class LinkSharedClassesClosure : public KlassClosure {
1363   Thread* THREAD;
1364   bool    _made_progress;
1365  public:
1366   LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {}
1367 
1368   void reset()               { _made_progress = false; }
1369   bool made_progress() const { return _made_progress; }
1370 
1371   void do_klass(Klass* k) {
1372     if (k->is_instance_klass()) {
1373       InstanceKlass* ik = InstanceKlass::cast(k);
1374       // Link the class to cause the bytecodes to be rewritten and the
1375       // cpcache to be created. Class verification is done according
1376       // to -Xverify setting.
1377       _made_progress |= MetaspaceShared::try_link_class(ik, THREAD);
1378       guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
1379 
1380       ik->constants()->resolve_class_constants(THREAD);
1381     }
1382   }
1383 };
1384 
1385 class CheckSharedClassesClosure : public KlassClosure {
1386   bool    _made_progress;
1387  public:
1388   CheckSharedClassesClosure() : _made_progress(false) {}
1389 
1390   void reset()               { _made_progress = false; }
1391   bool made_progress() const { return _made_progress; }
1392   void do_klass(Klass* k) {
1393     if (k->is_instance_klass() && InstanceKlass::cast(k)->check_sharing_error_state()) {
1394       _made_progress = true;
1395     }
1396   }
1397 };
1398 
1399 void MetaspaceShared::check_shared_class_loader_type(Klass* k) {
1400   if (k->is_instance_klass()) {


1565       // classes also being verified. The extra overhead is acceptable during
1566       // dumping.
1567       BytecodeVerificationLocal = BytecodeVerificationRemote;
1568     }
1569     ik->link_class(THREAD);
1570     if (HAS_PENDING_EXCEPTION) {
1571       ResourceMark rm;
1572       tty->print_cr("Preload Warning: Verification failed for %s",
1573                     ik->external_name());
1574       CLEAR_PENDING_EXCEPTION;
1575       ik->set_in_error_state();
1576       _has_error_classes = true;
1577     }
1578     BytecodeVerificationLocal = saved;
1579     return true;
1580   } else {
1581     return false;
1582   }
1583 }
1584 
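try_link_class() above saves BytecodeVerificationLocal, overrides it for the duration of link_class(), and restores it on the way out. A common alternative shape for that save/override/restore pattern is an RAII guard; the helper below is a generic, hypothetical sketch, not an existing HotSpot utility, and the two globals are stand-ins for the real VM flags:

#include <cassert>

// Hypothetical RAII guard: overrides a flag for one scope and restores the
// previous value on exit, even if the scoped code returns early.
template <typename T>
class ScopedFlagOverride {
  T* _flag;
  T  _saved;
 public:
  ScopedFlagOverride(T* flag, T value) : _flag(flag), _saved(*flag) { *flag = value; }
  ~ScopedFlagOverride() { *_flag = _saved; }
};

bool BytecodeVerificationLocal  = false;   // stand-ins for the real globals
bool BytecodeVerificationRemote = true;

void link_one_class() {
  // ... class linking would happen here, seeing the overridden flag ...
  assert(BytecodeVerificationLocal == BytecodeVerificationRemote);
}

int main() {
  {
    ScopedFlagOverride<bool> guard(&BytecodeVerificationLocal, BytecodeVerificationRemote);
    link_one_class();
  }                                        // flag restored here
  assert(BytecodeVerificationLocal == false);
  return 0;
}
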
1585 #if INCLUDE_CDS_JAVA_HEAP
1586 void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
1587   if (!MetaspaceShared::allow_archive_heap_object()) {
1588     if (log_is_enabled(Info, cds)) {
1589       log_info(cds)(
1590         "Archived java heap is not supported as UseG1GC, "
1591         "UseCompressedOops and UseCompressedClassPointers are required. "
1592         "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
1593         BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
1594         BOOL_TO_STR(UseCompressedClassPointers));
1595     }
1596     return;
1597   }
1598 
1599   // Cache for recording where the archived objects are copied to
1600   MetaspaceShared::create_archive_object_cache();
1601 
1602   tty->print_cr("Dumping String objects to closed archive heap region ...");
1603   NOT_PRODUCT(StringTable::verify());
1604   // The string space has at most two regions. See FileMapInfo::write_string_regions() for details.
1605   _string_regions = new GrowableArray<MemRegion>(2);
1606   StringTable::write_to_archive(_string_regions);
1607 
1608   tty->print_cr("Dumping objects to open archive heap region ...");
1609   _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
1610   MetaspaceShared::dump_open_archive_heap_objects(_open_archive_heap_regions);
1611 }
1612 
1613 void MetaspaceShared::dump_open_archive_heap_objects(
1614                                     GrowableArray<MemRegion> * open_archive) {
1615   assert(UseG1GC, "Only support G1 GC");
1616   assert(UseCompressedOops && UseCompressedClassPointers,
1617          "Only support UseCompressedOops and UseCompressedClassPointers enabled");
1618 
1619   Thread* THREAD = Thread::current();
1620   G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);
1621 
1622   MetaspaceShared::archive_resolved_constants(THREAD);
1623 
1624   G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
1625                                                    os::vm_allocation_granularity());
1626 }
1627 
1628 MetaspaceShared::ArchivedObjectCache* MetaspaceShared::_archive_object_cache = NULL;
1629 oop MetaspaceShared::archive_heap_object(oop obj, Thread* THREAD) {
1630   assert(DumpSharedSpaces, "dump-time only");
1631 
1632   NoSafepointVerifier nsv;
1633 
1634   ArchivedObjectCache* cache = MetaspaceShared::archive_object_cache();
1635   oop* p = cache->get(obj);
1636   if (p != NULL) {
1637     // already archived 
1638     return *p;
1639   }
1640 
1641   int len = obj->size();
1642   if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
1643     return NULL;
1644   }
1645 
1646   int hash = obj->identity_hash();
1647   oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
1648   if (archived_oop != NULL) {
1649     Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)archived_oop, len);
1650     relocate_klass_ptr(archived_oop);
1651     cache->put(obj, archived_oop);
1652   }
1653   return archived_oop;
1654 }
1655 
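archive_heap_object() above archives each heap object at most once: the ArchivedObjectCache maps the original oop to its archived copy, so a second request returns the copy made the first time (the identity_hash() call just before the copy presumably forces the hash to be computed and kept with the object before it is duplicated; the sketch below models only the cache-and-copy part). Toy types throughout, not HotSpot oops or the G1 archive allocator:

#include <cassert>
#include <cstring>
#include <unordered_map>

// Toy model of the dump-time object cache: original object -> archived copy.
struct ToyObj { int payload; };

class ToyArchiver {
  std::unordered_map<const ToyObj*, ToyObj*> _cache;   // ArchivedObjectCache stand-in
  ToyObj _archive_space[64];                            // pretend archive region
  int    _used = 0;
 public:
  ToyObj* archive(const ToyObj* obj) {
    auto it = _cache.find(obj);
    if (it != _cache.end()) {
      return it->second;                // already archived: return the same copy
    }
    if (_used == 64) {
      return nullptr;                   // region full, caller must cope with NULL
    }
    ToyObj* copy = &_archive_space[_used++];
    std::memcpy(copy, obj, sizeof(ToyObj));             // bitwise copy of the object
    _cache.emplace(obj, copy);                          // remember where it went
    return copy;
  }
};

int main() {
  ToyArchiver archiver;
  ToyObj o{42};
  ToyObj* first  = archiver.archive(&o);
  ToyObj* second = archiver.archive(&o);
  assert(first == second && first->payload == 42);      // copied once, shared after
  return 0;
}
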
1656 void MetaspaceShared::archive_resolved_constants(Thread* THREAD) {
1657   int i;
1658   for (i = 0; i < _global_klass_objects->length(); i++) {
1659     Klass* k = _global_klass_objects->at(i);
1660     if (k->is_instance_klass()) {
1661       InstanceKlass* ik = InstanceKlass::cast(k);
1662       ik->constants()->archive_resolved_references(THREAD);
1663     } 
1664   }
1665 }
1666 
1667 void MetaspaceShared::fixup_mapped_heap_regions() {
1668   FileMapInfo *mapinfo = FileMapInfo::current_info();
1669   mapinfo->fixup_mapped_heap_regions();
1670 }
1671 #endif // INCLUDE_CDS_JAVA_HEAP
1672 
1673 // Closure for serializing initialization data in from a data area
1674 // (ptr_array) read from the shared file.
1675 
1676 class ReadClosure : public SerializeClosure {
1677 private:
1678   intptr_t** _ptr_array;
1679 
1680   inline intptr_t nextPtr() {
1681     return *(*_ptr_array)++;
1682   }
1683 
1684 public:
1685   ReadClosure(intptr_t** ptr_array) { _ptr_array = ptr_array; }
1686 
1687   void do_ptr(void** p) {
1688     assert(*p == NULL, "initializing previously initialized pointer.");
1689     intptr_t obj = nextPtr();
1690     assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
1691            "hit tag while initializing ptrs.");
1692     *p = (void*)obj;


1838 
1839   // Initialize the run-time symbol table.
1840   SymbolTable::create_table();
1841 
1842   // Close the mapinfo file
1843   mapinfo->close();
1844 
1845   if (PrintSharedArchiveAndExit) {
1846     if (PrintSharedDictionary) {
1847       tty->print_cr("\nShared classes:\n");
1848       SystemDictionary::print_shared(tty);
1849     }
1850     if (_archive_loading_failed) {
1851       tty->print_cr("archive is invalid");
1852       vm_exit(1);
1853     } else {
1854       tty->print_cr("archive is valid");
1855       vm_exit(0);
1856     }
1857   }
1858 }
1859 
1860 // JVM/TI RedefineClasses() support:
1861 bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
1862   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1863 
1864   if (UseSharedSpaces) {
1865     // remap the shared readonly space to shared readwrite, private
1866     FileMapInfo* mapinfo = FileMapInfo::current_info();
1867     if (!mapinfo->remap_shared_readonly_as_readwrite()) {
1868       return false;
1869     }
1870     _remapped_readwrite = true;
1871   }
1872   return true;
1873 }
1874 
1875 void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
1876   // This is highly unlikely to happen on 64-bit platforms because we have reserved a 4GB space.
1877   // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes