1 /*
   2  * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderData.hpp"
  27 #include "classfile/classLoaderDataShared.hpp"
  28 #include "classfile/javaClasses.inline.hpp"
  29 #include "classfile/moduleEntry.hpp"
  30 #include "classfile/stringTable.hpp"
  31 #include "classfile/symbolTable.hpp"
  32 #include "classfile/systemDictionary.hpp"
  33 #include "classfile/systemDictionaryShared.hpp"
  34 #include "classfile/vmSymbols.hpp"
  35 #include "gc/shared/gcLocker.hpp"
  36 #include "logging/log.hpp"
  37 #include "logging/logMessage.hpp"
  38 #include "logging/logStream.hpp"
  39 #include "memory/archiveBuilder.hpp"
  40 #include "memory/archiveUtils.hpp"
  41 #include "memory/filemap.hpp"
  42 #include "memory/heapShared.inline.hpp"
  43 #include "memory/iterator.inline.hpp"
  44 #include "memory/metadataFactory.hpp"
  45 #include "memory/metaspaceClosure.hpp"
  46 #include "memory/metaspaceShared.hpp"
  47 #include "memory/resourceArea.hpp"
  48 #include "memory/universe.hpp"
  49 #include "oops/compressedOops.inline.hpp"
  50 #include "oops/fieldStreams.inline.hpp"
  51 #include "oops/oop.inline.hpp"
  52 #include "runtime/fieldDescriptor.inline.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/safepointVerifiers.hpp"
  55 #include "utilities/bitMap.inline.hpp"
  56 #if INCLUDE_G1GC
  57 #include "gc/g1/g1CollectedHeap.hpp"
  58 #endif
  59 
  60 #if INCLUDE_CDS_JAVA_HEAP
  61 
  62 bool HeapShared::_closed_archive_heap_region_mapped = false;
  63 bool HeapShared::_open_archive_heap_region_mapped = false;
  64 bool HeapShared::_archive_heap_region_fixed = false;
  65 address   HeapShared::_narrow_oop_base;
  66 int       HeapShared::_narrow_oop_shift;
  67 DumpedInternedStrings *HeapShared::_dumped_interned_strings = NULL;
  68 
  69 //
  70 // If you add new entries to the following tables, you should know what you're doing!
  71 //
  72 
  73 // Entry fields for shareable subgraphs archived in the closed archive heap
  74 // region. Warning: Objects in the subgraphs should not have reference fields
  75 // assigned at runtime.
  76 static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
  77   {"java/lang/Integer$IntegerCache",              "archivedCache"},
  78   {"java/lang/Long$LongCache",                    "archivedCache"},
  79   {"java/lang/Byte$ByteCache",                    "archivedCache"},
  80   {"java/lang/Short$ShortCache",                  "archivedCache"},
  81   {"java/lang/Character$CharacterCache",          "archivedCache"},
  82   {"java/util/jar/Attributes$Name",               "KNOWN_NAMES"},
  83   {"sun/util/locale/BaseLocale",                  "constantBaseLocales"},
  84 };
  85 // Entry fields for subgraphs archived in the open archive heap region.
  86 static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
  87   {"jdk/internal/module/ArchivedModuleGraph",     "archivedModuleGraph"},
  88   {"java/util/ImmutableCollections",              "archivedObjects"},
  89   {"java/lang/module/Configuration",              "EMPTY_CONFIGURATION"},
  90   {"jdk/internal/math/FDBigInteger",              "archivedCaches"},
  91 };
  92 
  93 // Entry fields for subgraphs archived in the open archive heap region (full module graph).
  94 static ArchivableStaticFieldInfo fmg_open_archive_subgraph_entry_fields[] = {
  95   {"jdk/internal/loader/ArchivedClassLoaders",    "archivedClassLoaders"},
  96   {"jdk/internal/module/ArchivedBootLayer",       "archivedBootLayer"},
  97   {"java/lang/Module$ArchivedData",               "archivedData"},
  98 };
  99 
 100 const static int num_closed_archive_subgraph_entry_fields =
 101   sizeof(closed_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
 102 const static int num_open_archive_subgraph_entry_fields =
 103   sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
 104 const static int num_fmg_open_archive_subgraph_entry_fields =
 105   sizeof(fmg_open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
 106 
 107 ////////////////////////////////////////////////////////////////
 108 //
 109 // Java heap object archiving support
 110 //
 111 ////////////////////////////////////////////////////////////////
 112 void HeapShared::fixup_mapped_heap_regions() {
 113   FileMapInfo *mapinfo = FileMapInfo::current_info();
 114   mapinfo->fixup_mapped_heap_regions();
 115   set_archive_heap_region_fixed();
 116   SystemDictionaryShared::update_archived_mirror_native_pointers();
 117 }
 118 
 119 unsigned HeapShared::oop_hash(oop const& p) {
 120   assert(!p->mark().has_bias_pattern(),
 121          "this object should never have been locked");  // so identity_hash won't safepoint
 122   unsigned hash = (unsigned)p->identity_hash();
 123   return hash;
 124 }
 125 
 126 static void reset_states(oop obj, TRAPS) {
 127   Handle h_obj(THREAD, obj);
 128   InstanceKlass* klass = InstanceKlass::cast(obj->klass());
 129   TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
 130   Symbol* method_sig = vmSymbols::void_method_signature();
 131 
 132   while (klass != NULL) {
 133     Method* method = klass->find_method(method_name, method_sig);
 134     if (method != NULL) {
 135       assert(method->is_private(), "must be");
 136       if (log_is_enabled(Debug, cds)) {
 137         ResourceMark rm(THREAD);
 138         log_debug(cds)("  calling %s", method->name_and_sig_as_C_string());
 139       }
 140       JavaValue result(T_VOID);
 141       JavaCalls::call_special(&result, h_obj, klass,
 142                               method_name, method_sig, CHECK);
 143     }
 144     klass = klass->java_super();
 145   }
 146 }
 147 
 148 void HeapShared::reset_archived_object_states(TRAPS) {
 149   assert(DumpSharedSpaces, "dump-time only");
 150   log_debug(cds)("Resetting platform loader");
 151   reset_states(SystemDictionary::java_platform_loader(), THREAD);
 152   log_debug(cds)("Resetting system loader");
 153   reset_states(SystemDictionary::java_system_loader(), THREAD);
 154 }
 155 
 156 HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
 157 oop HeapShared::find_archived_heap_object(oop obj) {
 158   assert(DumpSharedSpaces, "dump-time only");
 159   ArchivedObjectCache* cache = archived_object_cache();
 160   oop* p = cache->get(obj);
 161   if (p != NULL) {
 162     return *p;
 163   } else {
 164     return NULL;
 165   }
 166 }
 167 
 168 oop HeapShared::archive_heap_object(oop obj, Thread* THREAD) {
 169   assert(DumpSharedSpaces, "dump-time only");
 170 
 171   oop ao = find_archived_heap_object(obj);
 172   if (ao != NULL) {
 173     // already archived
 174     return ao;
 175   }
 176 
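       // Object size in heap words; used both to size the archive-region allocation and
       // for the word copy below.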
 177   int len = obj->size();
 178   if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
 179     log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
 180                          p2i(obj), (size_t)obj->size());
 181     return NULL;
 182   }
 183 
 184   oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
 185   if (archived_oop != NULL) {
 186     Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
 187     MetaspaceShared::relocate_klass_ptr(archived_oop);
 188     // Reinitialize markword to remove age/marking/locking/etc.
 189     //
 190     // We need to retain the identity_hash, because it may have been used by some hashtables
 191     // in the shared heap. This also has the side effect of pre-initializing the
 192     // identity_hash for all shared objects, so they are less likely to be written
 193     // into at run time, increasing the potential for memory sharing.
 194     int hash_original = obj->identity_hash();
 195     archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original));
 196     assert(archived_oop->mark().is_unlocked(), "sanity");
 197 
 198     DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
 199     assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);
 200 
 201     ArchivedObjectCache* cache = archived_object_cache();
 202     cache->put(obj, archived_oop);
 203     log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT,
 204                          p2i(obj), p2i(archived_oop));
 205   } else {
 206     log_error(cds, heap)(
 207       "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
 208       p2i(obj));
 209     vm_exit(1);
 210   }
 211   return archived_oop;
 212 }
 213 
 214 oop HeapShared::materialize_archived_object(narrowOop v) {
 215   assert(archive_heap_region_fixed(),
 216          "must be called after archive heap regions are fixed");
 217   if (!CompressedOops::is_null(v)) {
 218     oop obj = HeapShared::decode_from_archive(v);
 219     return G1CollectedHeap::heap()->materialize_archived_object(obj);
 220   }
 221   return NULL;
 222 }
 223 
 224 void HeapShared::archive_klass_objects(Thread* THREAD) {
 225   GrowableArray<Klass*>* klasses = MetaspaceShared::collected_klasses();
 226   assert(klasses != NULL, "sanity");
 227   for (int i = 0; i < klasses->length(); i++) {
 228     Klass* k = klasses->at(i);
 229 
 230     // archive mirror object
 231     java_lang_Class::archive_mirror(k, CHECK);
 232 
 233     // archive the resolved_references array
 234     if (k->is_instance_klass()) {
 235       InstanceKlass* ik = InstanceKlass::cast(k);
 236       ik->constants()->archive_resolved_references(THREAD);
 237     }
 238   }
 239 }
 240 
 241 void HeapShared::run_full_gc_in_vm_thread() {
 242   if (is_heap_object_archiving_allowed()) {
 243     // Avoid fragmentation while archiving heap objects.
 244     // We do this inside a safepoint, so that no further allocation can happen after GC
 245     // has finished.
 246     if (GCLocker::is_active()) {
 247       // Just checking for safety ...
 248       // This should not happen during -Xshare:dump. If you see this, the Java core library
 249       // has probably been modified such that JNI code is executed in some cleanup threads after
 250       // class loading has finished.
 251       log_warning(cds)("GC locker is held, unable to start extra compacting GC. This may produce suboptimal results.");
 252     } else {
 253       log_info(cds)("Run GC ...");
 254       Universe::heap()->collect_as_vm_thread(GCCause::_archive_time_gc);
 255       log_info(cds)("Run GC done");
 256     }
 257   }
 258 }
 259 
 260 void HeapShared::archive_java_heap_objects(GrowableArray<MemRegion> *closed,
 261                                            GrowableArray<MemRegion> *open) {
 262   if (!is_heap_object_archiving_allowed()) {
 263     log_info(cds)(
 264       "Archived java heap is not supported because UseG1GC, "
 265       "UseCompressedOops and UseCompressedClassPointers are required. "
 266       "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
 267       BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
 268       BOOL_TO_STR(UseCompressedClassPointers));
 269     return;
 270   }
 271 
 272   G1HeapVerifier::verify_ready_for_archiving();
 273 
 274   {
 275     NoSafepointVerifier nsv;
 276 
 277     // Cache for recording where the archived objects are copied to
 278     create_archived_object_cache();
 279 
 280     log_info(cds)("Dumping objects to closed archive heap region ...");
 281     copy_closed_archive_heap_objects(closed);
 282 
 283     log_info(cds)("Dumping objects to open archive heap region ...");
 284     copy_open_archive_heap_objects(open);
 285 
 286     if (MetaspaceShared::use_full_module_graph()) {
 287       ClassLoaderDataShared::init_archived_oops();
 288     }
 289 
 290     destroy_archived_object_cache();
 291   }
 292 
 293   G1HeapVerifier::verify_archive_regions();
 294 }
 295 
 296 void HeapShared::copy_closed_archive_heap_objects(
 297                                     GrowableArray<MemRegion> * closed_archive) {
 298   assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");
 299 
 300   Thread* THREAD = Thread::current();
 301   G1CollectedHeap::heap()->begin_archive_alloc_range();
 302 
 303   // Archive interned string objects
 304   StringTable::write_to_archive(_dumped_interned_strings);
 305 
 306   archive_object_subgraphs(closed_archive_subgraph_entry_fields,
 307                            num_closed_archive_subgraph_entry_fields,
 308                            true /* is_closed_archive */,
 309                            false /* is_full_module_graph */,
 310                            THREAD);
 311 
 312   G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive,
 313                                                    os::vm_allocation_granularity());
 314 }
 315 
 316 void HeapShared::copy_open_archive_heap_objects(
 317                                     GrowableArray<MemRegion> * open_archive) {
 318   assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");
 319 
 320   Thread* THREAD = Thread::current();
 321   G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);
 322 
 323   java_lang_Class::archive_basic_type_mirrors(THREAD);
 324 
 325   archive_klass_objects(THREAD);
 326 
 327   archive_object_subgraphs(open_archive_subgraph_entry_fields,
 328                            num_open_archive_subgraph_entry_fields,
 329                            false /* is_closed_archive */,
 330                            false /* is_full_module_graph */,
 331                            THREAD);
 332   if (MetaspaceShared::use_full_module_graph()) {
 333     archive_object_subgraphs(fmg_open_archive_subgraph_entry_fields,
 334                              num_fmg_open_archive_subgraph_entry_fields,
 335                              false /* is_closed_archive */,
 336                              true /* is_full_module_graph */,
 337                              THREAD);
 338   }
 339 
 340   G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
 341                                                    os::vm_allocation_granularity());
 342 }
 343 
 344 void HeapShared::init_narrow_oop_decoding(address base, int shift) {
 345   _narrow_oop_base = base;
 346   _narrow_oop_shift = shift;
 347 }
 348 
 349 //
 350 // Subgraph archiving support
 351 //
 352 HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
 353 HeapShared::RunTimeKlassSubGraphInfoTable   HeapShared::_run_time_subgraph_info_table;
 354 
 355 // Get the subgraph_info for Klass k. A new subgraph_info is created if
 356 // there is no existing one for k. The subgraph_info records the relocated
 357 // Klass* of the original k.
 358 KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
 359   assert(DumpSharedSpaces, "dump time only");
 360   bool created;
 361   Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
 362   KlassSubGraphInfo* info =
 363     _dump_time_subgraph_info_table->put_if_absent(relocated_k, KlassSubGraphInfo(relocated_k, is_full_module_graph),
 364                                                   &created);
 365   assert(created, "must not initialize twice");
 366   return info;
 367 }
 368 
 369 KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
 370   assert(DumpSharedSpaces, "dump time only");
 371   Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
 372   KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
 373   assert(info != NULL, "must have been initialized");
 374   return info;
 375 }
 376 
 377 // Add an entry field to the current KlassSubGraphInfo.
 378 void KlassSubGraphInfo::add_subgraph_entry_field(
 379       int static_field_offset, oop v, bool is_closed_archive) {
 380   assert(DumpSharedSpaces, "dump time only");
 381   if (_subgraph_entry_fields == NULL) {
 382     _subgraph_entry_fields =
 383       new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, mtClass);
 384   }
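       // Each entry field is recorded as three consecutive juints:
       //   {static_field_offset, narrow oop of the archived value (0 for a null field), is_closed_archive flag}.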
 385   _subgraph_entry_fields->append((juint)static_field_offset);
 386   _subgraph_entry_fields->append(CompressedOops::narrow_oop_value(v));
 387   _subgraph_entry_fields->append(is_closed_archive ? 1 : 0);
 388 }
 389 
 390 // Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
 391 // Only objects of boot classes can be included in a sub-graph.
 392 void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k, Klass *relocated_k) {
 393   assert(DumpSharedSpaces, "dump time only");
 394   assert(relocated_k == MetaspaceShared::get_relocated_klass(orig_k),
 395          "must be the relocated Klass in the shared space");
 396 
 397   if (_subgraph_object_klasses == NULL) {
 398     _subgraph_object_klasses =
 399       new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, mtClass);
 400   }
 401 
 402   assert(ArchiveBuilder::singleton()->is_in_buffer_space(relocated_k), "must be a shared class");
 403 
 404   if (_k == relocated_k) {
 405     // Don't add the Klass containing the sub-graph to its own klass
 406     // initialization list.
 407     return;
 408   }
 409 
 410   if (relocated_k->is_instance_klass()) {
 411     assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(),
 412           "must be boot class");
 413     // SystemDictionary::xxx_klass() are not updated, need to check
 414     // the original Klass*
 415     if (orig_k == SystemDictionary::String_klass() ||
 416         orig_k == SystemDictionary::Object_klass()) {
 417       // Initialized early during VM initialization. No need to be added
 418       // to the sub-graph object class list.
 419       return;
 420     }
 421   } else if (relocated_k->is_objArray_klass()) {
 422     Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass();
 423     if (abk->is_instance_klass()) {
 424       assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
 425             "must be boot class");
 426     }
 427     if (relocated_k == Universe::objectArrayKlassObj()) {
 428       // Initialized early during Universe::genesis. No need to be added
 429       // to the list.
 430       return;
 431     }
 432   } else {
 433     assert(relocated_k->is_typeArray_klass(), "must be");
 434     // Primitive type arrays are created early during Universe::genesis.
 435     return;
 436   }
 437 
 438   if (log_is_enabled(Debug, cds, heap)) {
 439     if (!_subgraph_object_klasses->contains(relocated_k)) {
 440       ResourceMark rm;
 441       log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
 442     }
 443   }
 444 
 445   _subgraph_object_klasses->append_if_missing(relocated_k);
 446 }
 447 
 448 // Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
 449 void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
 450   _k = info->klass();
 451   _entry_field_records = NULL;
 452   _subgraph_object_klasses = NULL;
 453   _is_full_module_graph = info->is_full_module_graph();
 454 
 455   // populate the entry fields
 456   GrowableArray<juint>* entry_fields = info->subgraph_entry_fields();
 457   if (entry_fields != NULL) {
 458     int num_entry_fields = entry_fields->length();
 459     assert(num_entry_fields % 3 == 0, "sanity");
 460     _entry_field_records =
 461       MetaspaceShared::new_ro_array<juint>(num_entry_fields);
 462     for (int i = 0 ; i < num_entry_fields; i++) {
 463       _entry_field_records->at_put(i, entry_fields->at(i));
 464     }
 465   }
 466 
 467   // the Klasses of the objects in the sub-graphs
 468   GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
 469   if (subgraph_object_klasses != NULL) {
 470     int num_subgraphs_klasses = subgraph_object_klasses->length();
 471     _subgraph_object_klasses =
 472       MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses);
 473     for (int i = 0; i < num_subgraphs_klasses; i++) {
 474       Klass* subgraph_k = subgraph_object_klasses->at(i);
 475       if (log_is_enabled(Info, cds, heap)) {
 476         ResourceMark rm;
 477         log_info(cds, heap)(
 478           "Archived object klass %s (%2d) => %s",
 479           _k->external_name(), i, subgraph_k->external_name());
 480       }
 481       _subgraph_object_klasses->at_put(i, subgraph_k);
 482       ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
 483     }
 484   }
 485 
 486   ArchivePtrMarker::mark_pointer(&_k);
 487   ArchivePtrMarker::mark_pointer(&_entry_field_records);
 488   ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
 489 }
 490 
 491 struct CopyKlassSubGraphInfoToArchive : StackObj {
 492   CompactHashtableWriter* _writer;
 493   CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}
 494 
 495   bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
 496     if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
 497       ArchivedKlassSubGraphInfoRecord* record =
 498         (ArchivedKlassSubGraphInfoRecord*)MetaspaceShared::read_only_space_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
 499       record->init(&info);
 500 
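            // Index the record by the klass's shared-dictionary hash; the stored value is
            // the record's u4 offset from the shared base.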
 501       unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(klass);
 502       u4 delta = MetaspaceShared::object_delta_u4(record);
 503       _writer->add(hash, delta);
 504     }
 505     return true; // keep on iterating
 506   }
 507 };
 508 
 509 // Build the records of archived subgraph infos, which include:
 510 // - Entry points to all subgraphs from the containing class mirror. The entry
 511 //   points are static fields in the mirror. For each entry point, the field
 512 //   offset, value and is_closed_archive flag are recorded in the sub-graph
 513 //   info. The value is stored back to the corresponding field at runtime.
 514 // - A list of klasses that need to be loaded/initialized before the archived
 515 //   java object sub-graphs can be accessed at runtime.
 516 void HeapShared::write_subgraph_info_table() {
 517   // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
 518   DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
 519   CompactHashtableStats stats;
 520 
 521   _run_time_subgraph_info_table.reset();
 522 
 523   CompactHashtableWriter writer(d_table->_count, &stats);
 524   CopyKlassSubGraphInfoToArchive copy(&writer);
 525   d_table->iterate(&copy);
 526 
 527   writer.dump(&_run_time_subgraph_info_table, "subgraphs");
 528 }
 529 
 530 void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) {
 531   _run_time_subgraph_info_table.serialize_header(soc);
 532 }
 533 
 534 void HeapShared::initialize_from_archived_subgraph(Klass* k, TRAPS) {
 535   if (!open_archive_heap_region_mapped()) {
 536     return; // nothing to do
 537   }
 538   assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
 539 
 540   unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(k);
 541   const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
 542 
 543   // Initialize from archived data. Currently this is done only
 544   // during VM initialization. No lock is needed.
 545   if (record != NULL) {
 546     if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) {
 547       return;
 548     }
 549 
 550     int i;
 551     // Load/link/initialize the klasses of the objects in the subgraph.
 552     // NULL class loader is used.
 553     Array<Klass*>* klasses = record->subgraph_object_klasses();
 554     if (klasses != NULL) {
 555       for (i = 0; i < klasses->length(); i++) {
 556         Klass* obj_k = klasses->at(i);
 557         Klass* resolved_k = SystemDictionary::resolve_or_null(
 558                                               (obj_k)->name(), THREAD);
 559         if (resolved_k != obj_k) {
 560           assert(!SystemDictionary::is_well_known_klass(resolved_k),
 561                  "shared well-known classes must not be replaced by JVMTI ClassFileLoadHook");
 562           ResourceMark rm(THREAD);
 563           log_info(cds, heap)("Failed to load subgraph because %s was not loaded from archive",
 564                               resolved_k->external_name());
 565           return;
 566         }
 567         if ((obj_k)->is_instance_klass()) {
 568           InstanceKlass* ik = InstanceKlass::cast(obj_k);
 569           ik->initialize(THREAD);
 570         } else if ((obj_k)->is_objArray_klass()) {
 571           ObjArrayKlass* oak = ObjArrayKlass::cast(obj_k);
 572           oak->initialize(THREAD);
 573         }
 574       }
 575     }
 576 
 577     if (HAS_PENDING_EXCEPTION) {
 578       CLEAR_PENDING_EXCEPTION;
 579       // None of the field values will be set if there was an exception.
 580       // The java code will not see any of the archived objects in the
 581       // subgraphs referenced from k in this case.
 582       return;
 583     }
 584 
 585     // Load the subgraph entry fields from the record and store them back to
 586     // the corresponding fields within the mirror.
 587     oop m = k->java_mirror();
 588     Array<juint>* entry_field_records = record->entry_field_records();
 589     if (entry_field_records != NULL) {
 590       int efr_len = entry_field_records->length();
 591       assert(efr_len % 3 == 0, "sanity");
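            // Entries are {field_offset, archived narrowOop, is_closed_archive} triples,
            // written at dump time by KlassSubGraphInfo::add_subgraph_entry_field().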
 592       for (i = 0; i < efr_len;) {
 593         int field_offset = entry_field_records->at(i);
 594         narrowOop nv = CompressedOops::narrow_oop_cast(entry_field_records->at(i+1));
 595         int is_closed_archive = entry_field_records->at(i+2);
 596         oop v;
 597         if (is_closed_archive == 0) {
 598           // It's an archived object in the open archive heap regions, not shared.
 599           // The object referenced by the field becomes 'known' by GC from this
 600           // point. All objects in the subgraph reachable from the object are
 601           // also 'known' by GC.
 602           v = materialize_archived_object(nv);
 603         } else {
 604           // Shared object in the closed archive heap regions. Decode directly.
 605           assert(!CompressedOops::is_null(nv), "closed-archive object must not be null");
 606           v = HeapShared::decode_from_archive(nv);
 607         }
 608         m->obj_field_put(field_offset, v);
 609         i += 3;
 610 
 611         log_debug(cds, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
 612       }
 613 
 614       // Done. Java code can see the archived sub-graphs referenced from k's
 615       // mirror after this point.
 616       if (log_is_enabled(Info, cds, heap)) {
 617         ResourceMark rm;
 618         log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT,
 619                             k->external_name(), p2i(k));
 620       }
 621     }
 622   }
 623 }
 624 
 625 class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
 626   int _level;
 627   bool _is_closed_archive;
 628   bool _record_klasses_only;
 629   KlassSubGraphInfo* _subgraph_info;
 630   oop _orig_referencing_obj;
 631   oop _archived_referencing_obj;
 632   Thread* _thread;
 633  public:
 634   WalkOopAndArchiveClosure(int level,
 635                            bool is_closed_archive,
 636                            bool record_klasses_only,
 637                            KlassSubGraphInfo* subgraph_info,
 638                            oop orig, oop archived, TRAPS) :
 639     _level(level), _is_closed_archive(is_closed_archive),
 640     _record_klasses_only(record_klasses_only),
 641     _subgraph_info(subgraph_info),
 642     _orig_referencing_obj(orig), _archived_referencing_obj(archived),
 643     _thread(THREAD) {}
 644   void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
 645   void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
 646 
 647  protected:
 648   template <class T> void do_oop_work(T *p) {
 649     oop obj = RawAccess<>::oop_load(p);
 650     if (!CompressedOops::is_null(obj)) {
 651       assert(!HeapShared::is_archived_object(obj),
 652              "original objects must not point to archived objects");
 653 
 654       size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
 655       T* new_p = (T*)(cast_from_oop<address>(_archived_referencing_obj) + field_delta);
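            // new_p is the same field, but located inside the archived copy of the
            // referencing object.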
 656       Thread* THREAD = _thread;
 657 
 658       if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
 659         ResourceMark rm;
 660         log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size %d %s", _level,
 661                              _orig_referencing_obj->klass()->external_name(), field_delta,
 662                              p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
 663         LogTarget(Trace, cds, heap) log;
 664         LogStream out(log);
 665         obj->print_on(&out);
 666       }
 667 
 668       oop archived = HeapShared::archive_reachable_objects_from(
 669           _level + 1, _subgraph_info, obj, _is_closed_archive, THREAD);
 670       assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1");
 671       assert(HeapShared::is_archived_object(archived), "must be");
 672 
 673       if (!_record_klasses_only) {
 674         // Update the reference in the archived copy of the referencing object.
 675         log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
 676                              _level, p2i(new_p), p2i(obj), p2i(archived));
 677         RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
 678       }
 679     }
 680   }
 681 };
 682 
 683 void HeapShared::check_closed_archive_heap_region_object(InstanceKlass* k,
 684                                                          Thread* THREAD) {
 685   // Check fields in the object
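       // Objects in the closed archive regions should not have reference fields that are
       // assigned at run time (see the warning above closed_archive_subgraph_entry_fields),
       // so flag any non-final reference field for manual review.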
 686   for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
 687     if (!fs.access_flags().is_static()) {
 688       BasicType ft = fs.field_descriptor().field_type();
 689       if (!fs.access_flags().is_final() && is_reference_type(ft)) {
 690         ResourceMark rm(THREAD);
 691         log_warning(cds, heap)(
 692           "Please check reference field in %s instance in closed archive heap region: %s %s",
 693           k->external_name(), (fs.name())->as_C_string(),
 694           (fs.signature())->as_C_string());
 695       }
 696     }
 697   }
 698 }
 699 
 700 void HeapShared::check_module_oop(oop orig_module_obj) {
 701   assert(DumpSharedSpaces, "must be");
 702   assert(java_lang_Module::is_instance(orig_module_obj), "must be");
 703   ModuleEntry* orig_module_ent = java_lang_Module::module_entry_raw(orig_module_obj);
 704   if (orig_module_ent == NULL) {
 705     // These special Module objects are created in Java code. They are not
 706     // defined via Modules::define_module(), so they don't have a ModuleEntry:
 707     //     java.lang.Module::ALL_UNNAMED_MODULE
 708     //     java.lang.Module::EVERYONE_MODULE
 709     //     jdk.internal.loader.ClassLoaders$BootClassLoader::unnamedModule
 710     assert(java_lang_Module::name(orig_module_obj) == NULL, "must be unnamed");
 711     log_info(cds, heap)("Module oop with No ModuleEntry* @[" PTR_FORMAT "]", p2i(orig_module_obj));
 712   } else {
 713     ClassLoaderData* loader_data = orig_module_ent->loader_data();
 714     assert(loader_data->is_builtin_class_loader_data(), "must be");
 715   }
 716 }
 717 
 718 
 719 // (1) If orig_obj has not been archived yet, archive it.
 720 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
 721 //     trace all objects that are reachable from it, and make sure these objects are archived.
 722 // (3) Record the klasses of orig_obj and of all reachable objects.
 723 oop HeapShared::archive_reachable_objects_from(int level,
 724                                                KlassSubGraphInfo* subgraph_info,
 725                                                oop orig_obj,
 726                                                bool is_closed_archive,
 727                                                TRAPS) {
 728   assert(orig_obj != NULL, "must be");
 729   assert(!is_archived_object(orig_obj), "sanity");
 730 
 731   if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
 732     // This object has injected fields that cannot be supported easily, so we disallow them for now.
 733     // If you get an error here, you probably made a change in the JDK library that has added
 734     // these objects that are referenced (directly or indirectly) by static fields.
 735     ResourceMark rm;
 736     log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name());
 737     vm_exit(1);
 738   }
 739 
 740   // java.lang.Class instances cannot be included in an archived object sub-graph. We only support
 741   // them as Klass::_archived_mirror because they need to be specially restored at run time.
 742   //
 743   // If you get an error here, you probably made a change in the JDK library that has added a Class
 744   // object that is referenced (directly or indirectly) by static fields.
 745   if (java_lang_Class::is_instance(orig_obj)) {
 746     log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
 747     vm_exit(1);
 748   }
 749 
 750   oop archived_obj = find_archived_heap_object(orig_obj);
 751   if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
 752     // To save time, don't walk strings that are already archived. They just contain
 753     // pointers to a type array, whose klass doesn't need to be recorded.
 754     return archived_obj;
 755   }
 756 
 757   if (has_been_seen_during_subgraph_recording(orig_obj)) {
 758     // orig_obj has already been archived and traced. Nothing more to do.
 759     return archived_obj;
 760   } else {
 761     set_has_been_seen_during_subgraph_recording(orig_obj);
 762   }
 763 
 764   bool record_klasses_only = (archived_obj != NULL);
 765   if (archived_obj == NULL) {
 766     ++_num_new_archived_objs;
 767     archived_obj = archive_heap_object(orig_obj, THREAD);
 768     if (archived_obj == NULL) {
 769       // Skip archiving the sub-graph referenced from the current entry field.
 770       ResourceMark rm;
 771       log_error(cds, heap)(
 772         "Cannot archive the sub-graph referenced from %s object ("
 773         PTR_FORMAT ") size %d, skipped.",
 774         orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
 775       if (level == 1) {
 776         // Don't archive a subgraph root that's too big. For archived static fields, that's OK,
 777         // as the Java code will take care of initializing this field dynamically.
 778         return NULL;
 779       } else {
 780         // We don't know how to handle an object that has been archived, but some of its reachable
 781         // objects cannot be archived. Bail out for now. We might need to fix this in the future if
 782         // we have a real use case.
 783         vm_exit(1);
 784       }
 785     }
 786 
 787     if (java_lang_Module::is_instance(orig_obj)) {
 788       check_module_oop(orig_obj);
 789       java_lang_Module::set_module_entry(archived_obj, NULL);
 790       java_lang_Module::set_loader(archived_obj, NULL);
 791     } else if (java_lang_ClassLoader::is_instance(orig_obj)) {
 792       // class_data will be restored explicitly at run time.
 793       guarantee(orig_obj == SystemDictionary::java_platform_loader() ||
 794                 orig_obj == SystemDictionary::java_system_loader() ||
 795                 java_lang_ClassLoader::loader_data_raw(orig_obj) == NULL, "must be");
 796       java_lang_ClassLoader::release_set_loader_data(archived_obj, NULL);
 797     }
 798   }
 799 
 800   assert(archived_obj != NULL, "must be");
 801   Klass *orig_k = orig_obj->klass();
 802   Klass *relocated_k = archived_obj->klass();
 803   subgraph_info->add_subgraph_object_klass(orig_k, relocated_k);
 804 
 805   WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
 806                                   subgraph_info, orig_obj, archived_obj, THREAD);
 807   orig_obj->oop_iterate(&walker);
 808   if (is_closed_archive && orig_k->is_instance_klass()) {
 809     check_closed_archive_heap_region_object(InstanceKlass::cast(orig_k), THREAD);
 810   }
 811   return archived_obj;
 812 }
 813 
 814 //
 815 // Start from the given static field in a java mirror and archive the
 816 // complete sub-graph of java heap objects that are reached directly
 817 // or indirectly from the starting object by following references.
 818 // Sub-graph archiving restrictions (current):
 819 //
 820 // - All classes of objects in the archived sub-graph (including the
 821 //   entry class) must be boot classes only.
 822 // - No java.lang.Class instance (java mirror) can be included inside
 823 //   an archived sub-graph. Mirror can only be the sub-graph entry object.
 824 //
 825 // The Java heap object sub-graph archiving process (see
 826 // WalkOopAndArchiveClosure):
 827 //
 828 // 1) Java object sub-graph archiving starts from a given static field
 829 // within a Class instance (java mirror). If the static field is a
 830 // reference field and points to a non-null java object, proceed to
 831 // the next step.
 832 //
 833 // 2) Archives the referenced java object. If an archived copy of the
 834 // current object already exists, updates the pointer in the archived
 835 // copy of the referencing object to point to the current archived object.
 836 // Otherwise, proceed to the next step.
 837 //
 838 // 3) Follows all references within the current java object and recursively
 839 // archives the sub-graph of objects starting from each reference.
 840 //
 841 // 4) Updates the pointer in the archived copy of referencing object to
 842 // point to the current archived object.
 843 //
 844 // 5) The Klass of the current java object is added to the list of Klasses
 845 // for loading and initializing before any object in the archived graph can
 846 // be accessed at runtime.
 847 //
 848 void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
 849                                                              const char* klass_name,
 850                                                              int field_offset,
 851                                                              const char* field_name,
 852                                                              bool is_closed_archive,
 853                                                              TRAPS) {
 854   assert(DumpSharedSpaces, "dump time only");
 855   assert(k->is_shared_boot_class(), "must be boot class");
 856 
 857   oop m = k->java_mirror();
 858 
 859   KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
 860   oop f = m->obj_field(field_offset);
 861 
 862   log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));
 863 
 864   if (!CompressedOops::is_null(f)) {
 865     if (log_is_enabled(Trace, cds, heap)) {
 866       LogTarget(Trace, cds, heap) log;
 867       LogStream out(log);
 868       f->print_on(&out);
 869     }
 870 
 871     oop af = archive_reachable_objects_from(1, subgraph_info, f,
 872                                             is_closed_archive, CHECK);
 873 
 874     if (af == NULL) {
 875       log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
 876                            klass_name, field_name);
 877     } else {
 878       // Note: the field value is not preserved in the archived mirror.
 879       // Record the field as a new subGraph entry point. The recorded
 880       // information is restored from the archive at runtime.
 881       subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
 882       log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
 883     }
 884   } else {
 885     // The field contains null; we still need to record the entry point
 886     // so it can be restored at runtime.
 887     subgraph_info->add_subgraph_entry_field(field_offset, NULL, false);
 888   }
 889 }
 890 
 891 #ifndef PRODUCT
 892 class VerifySharedOopClosure: public BasicOopIterateClosure {
 893  private:
 894   bool _is_archived;
 895 
 896  public:
 897   VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}
 898 
 899   void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
 900   void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }
 901 
 902  protected:
 903   template <class T> void do_oop_work(T *p) {
 904     oop obj = RawAccess<>::oop_load(p);
 905     if (!CompressedOops::is_null(obj)) {
 906       HeapShared::verify_reachable_objects_from(obj, _is_archived);
 907     }
 908   }
 909 };
 910 
 911 void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
 912   assert(DumpSharedSpaces, "dump time only");
 913   assert(k->is_shared_boot_class(), "must be boot class");
 914 
 915   oop m = k->java_mirror();
 916   oop f = m->obj_field(field_offset);
 917   if (!CompressedOops::is_null(f)) {
 918     verify_subgraph_from(f);
 919   }
 920 }
 921 
 922 void HeapShared::verify_subgraph_from(oop orig_obj) {
 923   oop archived_obj = find_archived_heap_object(orig_obj);
 924   if (archived_obj == NULL) {
 925   // It's OK for the root of a subgraph not to be archived. See comments in
 926     // archive_reachable_objects_from().
 927     return;
 928   }
 929 
 930   // Verify that all objects reachable from orig_obj are archived.
 931   init_seen_objects_table();
 932   verify_reachable_objects_from(orig_obj, false);
 933   delete_seen_objects_table();
 934 
 935   // Note: we could also verify that all objects reachable from the archived
 936   // copy of orig_obj can only point to archived objects, with:
 937   //      init_seen_objects_table();
 938   //      verify_reachable_objects_from(archived_obj, true);
 939   //      delete_seen_objects_table();
 940   // but that's already done in G1HeapVerifier::verify_archive_regions so we
 941   // won't do it here.
 942 }
 943 
 944 void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
 945   _num_total_verifications ++;
 946   if (!has_been_seen_during_subgraph_recording(obj)) {
 947     set_has_been_seen_during_subgraph_recording(obj);
 948 
 949     if (is_archived) {
 950       assert(is_archived_object(obj), "must be");
 951       assert(find_archived_heap_object(obj) == NULL, "must be");
 952     } else {
 953       assert(!is_archived_object(obj), "must be");
 954       assert(find_archived_heap_object(obj) != NULL, "must be");
 955     }
 956 
 957     VerifySharedOopClosure walker(is_archived);
 958     obj->oop_iterate(&walker);
 959   }
 960 }
 961 #endif
 962 
 963 HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
 964 int HeapShared::_num_new_walked_objs;
 965 int HeapShared::_num_new_archived_objs;
 966 int HeapShared::_num_old_recorded_klasses;
 967 
 968 int HeapShared::_num_total_subgraph_recordings = 0;
 969 int HeapShared::_num_total_walked_objs = 0;
 970 int HeapShared::_num_total_archived_objs = 0;
 971 int HeapShared::_num_total_recorded_klasses = 0;
 972 int HeapShared::_num_total_verifications = 0;
 973 
 974 bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
 975   return _seen_objects_table->get(obj) != NULL;
 976 }
 977 
 978 void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
 979   assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
 980   _seen_objects_table->put(obj, true);
 981   ++ _num_new_walked_objs;
 982 }
 983 
 984 void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name, bool is_full_module_graph) {
 985   log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
 986   init_subgraph_info(k, is_full_module_graph);
 987   init_seen_objects_table();
 988   _num_new_walked_objs = 0;
 989   _num_new_archived_objs = 0;
 990   _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
 991 }
 992 
 993 void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
 994   int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
 995     _num_old_recorded_klasses;
 996   log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
 997                       "walked %d objs, archived %d new objs, recorded %d classes",
 998                       class_name, _num_new_walked_objs, _num_new_archived_objs,
 999                       num_new_recorded_klasses);
1000 
1001   delete_seen_objects_table();
1002 
1003   _num_total_subgraph_recordings ++;
1004   _num_total_walked_objs      += _num_new_walked_objs;
1005   _num_total_archived_objs    += _num_new_archived_objs;
1006   _num_total_recorded_klasses +=  num_new_recorded_klasses;
1007 }
1008 
1009 class ArchivableStaticFieldFinder: public FieldClosure {
1010   InstanceKlass* _ik;
1011   Symbol* _field_name;
1012   bool _found;
1013   int _offset;
1014 public:
1015   ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
1016     _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}
1017 
1018   virtual void do_field(fieldDescriptor* fd) {
1019     if (fd->name() == _field_name) {
1020       assert(!_found, "fields cannot be overloaded");
1021       assert(is_reference_type(fd->field_type()), "can archive only fields that are references");
1022       _found = true;
1023       _offset = fd->offset();
1024     }
1025   }
1026   bool found()     { return _found;  }
1027   int offset()     { return _offset; }
1028 };
1029 
1030 void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
1031                                             int num, Thread* THREAD) {
1032   for (int i = 0; i < num; i++) {
1033     ArchivableStaticFieldInfo* info = &fields[i];
1034     TempNewSymbol klass_name =  SymbolTable::new_symbol(info->klass_name);
1035     TempNewSymbol field_name =  SymbolTable::new_symbol(info->field_name);
1036 
1037     Klass* k = SystemDictionary::resolve_or_null(klass_name, THREAD);
1038     assert(k != NULL && !HAS_PENDING_EXCEPTION, "class must exist");
1039     InstanceKlass* ik = InstanceKlass::cast(k);
1040     assert(ik->is_shared_boot_class(),
1041            "Only support boot classes");
1042     ik->initialize(THREAD);
1043     guarantee(!HAS_PENDING_EXCEPTION, "exception in initialize");
1044 
1045     ArchivableStaticFieldFinder finder(ik, field_name);
1046     ik->do_local_static_fields(&finder);
1047     assert(finder.found(), "field must exist");
1048 
1049     info->klass = ik;
1050     info->offset = finder.offset();
1051   }
1052 }
1053 
1054 void HeapShared::init_subgraph_entry_fields(Thread* THREAD) {
1055   _dump_time_subgraph_info_table = new (ResourceObj::C_HEAP, mtClass)DumpTimeKlassSubGraphInfoTable();
1056 
1057   init_subgraph_entry_fields(closed_archive_subgraph_entry_fields,
1058                              num_closed_archive_subgraph_entry_fields,
1059                              THREAD);
1060   init_subgraph_entry_fields(open_archive_subgraph_entry_fields,
1061                              num_open_archive_subgraph_entry_fields,
1062                              THREAD);
1063   if (MetaspaceShared::use_full_module_graph()) {
1064     init_subgraph_entry_fields(fmg_open_archive_subgraph_entry_fields,
1065                                num_fmg_open_archive_subgraph_entry_fields,
1066                                THREAD);
1067   }
1068 }
1069 
1070 void HeapShared::init_for_dumping(Thread* THREAD) {
1071   _dumped_interned_strings = new (ResourceObj::C_HEAP, mtClass)DumpedInternedStrings();
1072   init_subgraph_entry_fields(THREAD);
1073 }
1074 
1075 void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
1076                                           int num, bool is_closed_archive,
1077                                           bool is_full_module_graph,
1078                                           Thread* THREAD) {
1079   _num_total_subgraph_recordings = 0;
1080   _num_total_walked_objs = 0;
1081   _num_total_archived_objs = 0;
1082   _num_total_recorded_klasses = 0;
1083   _num_total_verifications = 0;
1084 
1085   // For each class X that has one or more archived fields:
1086   // [1] Dump the subgraph of each archived field
1087   // [2] Create a list of all the classes of the objects that can be reached
1088   //     by any of these static fields.
1089   //     At runtime, these classes are initialized before X's archived fields
1090   //     are restored by HeapShared::initialize_from_archived_subgraph().
1091   int i;
1092   for (i = 0; i < num; ) {
1093     ArchivableStaticFieldInfo* info = &fields[i];
1094     const char* klass_name = info->klass_name;
1095     start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
1096 
1097     // If you have specified consecutive fields of the same klass in
1098     // fields[], these will be archived in the same
1099     // {start_recording_subgraph ... done_recording_subgraph} pass to
1100     // save time.
1101     for (; i < num; i++) {
1102       ArchivableStaticFieldInfo* f = &fields[i];
1103       if (f->klass_name != klass_name) {
1104         break;
1105       }
1106 
1107       archive_reachable_objects_from_static_field(f->klass, f->klass_name,
1108                                                   f->offset, f->field_name,
1109                                                   is_closed_archive, CHECK);
1110     }
1111     done_recording_subgraph(info->klass, klass_name);
1112   }
1113 
1114   log_info(cds, heap)("Archived subgraph records in %s archive heap region = %d",
1115                       is_closed_archive ? "closed" : "open",
1116                       _num_total_subgraph_recordings);
1117   log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
1118   log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
1119   log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);
1120 
1121 #ifndef PRODUCT
1122   for (int i = 0; i < num; i++) {
1123     ArchivableStaticFieldInfo* f = &fields[i];
1124     verify_subgraph_from_static_field(f->klass, f->offset);
1125   }
1126   log_info(cds, heap)("  Verified %d references", _num_total_verifications);
1127 #endif
1128 }
1129 
1130 // Not all the strings in the global StringTable are dumped into the archive, because
1131 // some of those strings may be only referenced by classes that are excluded from
1132 // the archive. We need to explicitly mark the strings that are:
1133 //   [1] used by classes that WILL be archived;
1134 //   [2] included in the SharedArchiveConfigFile.
1135 void HeapShared::add_to_dumped_interned_strings(oop string) {
1136   assert_at_safepoint(); // DumpedInternedStrings uses raw oops
1137   bool created;
1138   _dumped_interned_strings->put_if_absent(string, true, &created);
1139 }
1140 
1141 // At dump-time, find the locations of all the non-null oop pointers in an archived heap
1142 // region. This way we can quickly relocate all the pointers without using
1143 // BasicOopIterateClosure at runtime.
1144 class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
1145   narrowOop* _start;
1146   BitMap *_oopmap;
1147   int _num_total_oops;
1148   int _num_null_oops;
1149  public:
1150   FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap)
1151     : _start(start), _oopmap(oopmap), _num_total_oops(0),  _num_null_oops(0) {}
1152 
1153   virtual bool should_verify_oops(void) {
1154     return false;
1155   }
1156   virtual void do_oop(narrowOop* p) {
1157     _num_total_oops ++;
1158     narrowOop v = *p;
1159     if (!CompressedOops::is_null(v)) {
1160       size_t idx = p - _start;
1161       _oopmap->set_bit(idx);
1162     } else {
1163       _num_null_oops ++;
1164     }
1165   }
1166   virtual void do_oop(oop *p) {
1167     ShouldNotReachHere();
1168   }
1169   int num_total_oops() const { return _num_total_oops; }
1170   int num_null_oops()  const { return _num_null_oops; }
1171 };
1172 
1173 ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
1174   assert(UseCompressedOops, "must be");
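       // The bitmap has one bit per narrowOop-sized slot in the region; a set bit marks a
       // slot that holds a non-null (dump-time encoded) oop.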
1175   size_t num_bits = region.byte_size() / sizeof(narrowOop);
1176   ResourceBitMap oopmap(num_bits);
1177 
1178   HeapWord* p   = region.start();
1179   HeapWord* end = region.end();
1180   FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap);
1181 
1182   int num_objs = 0;
1183   while (p < end) {
1184     oop o = (oop)p;
1185     o->oop_iterate(&finder);
1186     p += o->size();
1187     ++ num_objs;
1188   }
1189 
1190   log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d",
1191                       num_objs, finder.num_total_oops(), finder.num_null_oops());
1192   return oopmap;
1193 }
1194 
1195 // Patch all the embedded oop pointers inside an archived heap region,
1196 // to be consistent with the runtime oop encoding.
1197 class PatchEmbeddedPointers: public BitMapClosure {
1198   narrowOop* _start;
1199 
1200  public:
1201   PatchEmbeddedPointers(narrowOop* start) : _start(start) {}
1202 
1203   bool do_bit(size_t offset) {
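         // The narrowOop at this slot still carries the dump-time encoding; decode it with
         // the archive's narrow-oop base/shift and store it back using the runtime encoding.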
1204     narrowOop* p = _start + offset;
1205     narrowOop v = *p;
1206     assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
1207     oop o = HeapShared::decode_from_archive(v);
1208     RawAccess<IS_NOT_NULL>::oop_store(p, o);
1209     return true;
1210   }
1211 };
1212 
1213 void HeapShared::patch_archived_heap_embedded_pointers(MemRegion region, address oopmap,
1214                                                        size_t oopmap_size_in_bits) {
1215   BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);
1216 
1217 #ifndef PRODUCT
1218   ResourceMark rm;
1219   ResourceBitMap checkBm = calculate_oopmap(region);
1220   assert(bm.is_same(checkBm), "sanity");
1221 #endif
1222 
1223   PatchEmbeddedPointers patcher((narrowOop*)region.start());
1224   bm.iterate(&patcher);
1225 }
1226 
1227 #endif // INCLUDE_CDS_JAVA_HEAP