/*
 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/gcLocker.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "memory/archiveUtils.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/bitMap.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

bool HeapShared::_closed_archive_heap_region_mapped = false;
bool HeapShared::_open_archive_heap_region_mapped = false;
bool HeapShared::_archive_heap_region_fixed = false;

address   HeapShared::_narrow_oop_base;
int       HeapShared::_narrow_oop_shift;

//
// If you add new entries to the following tables, you should know what you're doing!
//

// Entry fields for shareable subgraphs archived in the closed archive heap
// region. Warning: Objects in the subgraphs should not have reference fields
// assigned at runtime.
static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",              0, "archivedCache"},
  {"java/lang/Long$LongCache",                    0, "archivedCache"},
  {"java/lang/Byte$ByteCache",                    0, "archivedCache"},
  {"java/lang/Short$ShortCache",                  0, "archivedCache"},
  {"java/lang/Character$CharacterCache",          0, "archivedCache"},
  {"java/util/jar/Attributes$Name",               0, "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",                  0, "constantBaseLocales"},
};
// Entry fields for subgraphs archived in the open archive heap region.
static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders",                    1, "archivedClassLoaders"},
  {"jdk/internal/module/ArchivedBootLayer",                       1, "archivedBootLayer"},
  {"jdk/internal/module/ArchivedModuleGraph",                     0, "archivedModuleGraph"},
  {"java/util/ImmutableCollections",                              0, "archivedObjects"},
  {"java/lang/Module$ArchivedData",                               1, "archivedData"},
  {"java/lang/module/Configuration",                              0, "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",                              0, "archivedCaches"},
};

const static int num_closed_archive_subgraph_entry_fields =
  sizeof(closed_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
const static int num_open_archive_subgraph_entry_fields =
  sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);

////////////////////////////////////////////////////////////////
//
// Java heap object archiving support
//
////////////////////////////////////////////////////////////////
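// Runtime fix-up of the archive heap regions after they have been mapped from
// the archive file, including the native pointers cached in archived mirrors.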
void HeapShared::fixup_mapped_heap_regions() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();
  mapinfo->fixup_mapped_heap_regions();
  set_archive_heap_region_fixed();
  SystemDictionaryShared::update_archived_mirror_native_pointers();
}

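// Hash function for the dump-time object tables (e.g., ArchivedObjectCache).
// Uses the identity hash, which is stable because archived objects are never locked.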
unsigned HeapShared::oop_hash(oop const& p) {
  assert(!p->mark().has_bias_pattern(),
         "this object should never have been locked");  // so identity_hash won't safepoint
  unsigned hash = (unsigned)p->identity_hash();
  return hash;
}

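// Invoke the private no-arg method resetArchivedStates(), if present, on obj's
// class and each of its superclasses, so the object carries no stale runtime
// state when it is archived.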
static void reset_states(oop obj, TRAPS) {
  Handle h_obj(THREAD, obj);
  InstanceKlass* klass = InstanceKlass::cast(obj->klass());
  TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
  Symbol* method_sig = vmSymbols::void_method_signature();

  while (klass != NULL) {
    Method* method = klass->find_method(method_name, method_sig);
    if (method != NULL) {
      assert(method->is_private(), "must be");
      if (log_is_enabled(Debug, cds)) {
        ResourceMark rm(THREAD);
        log_debug(cds)("  calling %s", method->name_and_sig_as_C_string());
      }
      JavaValue result(T_VOID);
      JavaCalls::call_special(&result, h_obj, klass,
                              method_name, method_sig, CHECK);
    }
    klass = klass->java_super();
  }
}

void HeapShared::reset_archived_object_states(TRAPS) {
  assert(DumpSharedSpaces, "dump-time only");
  log_debug(cds)("Resetting platform loader");
  reset_states(SystemDictionary::java_platform_loader(), THREAD);
  log_debug(cds)("Resetting system loader");
  reset_states(SystemDictionary::java_system_loader(), THREAD);
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
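// Returns the archived copy of obj, or NULL if obj has not been archived yet.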
oop HeapShared::find_archived_heap_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");
  ArchivedObjectCache* cache = archived_object_cache();
  oop* p = cache->get(obj);
  if (p != NULL) {
    return *p;
  } else {
    return NULL;
  }
}

oop HeapShared::archive_heap_object(oop obj, Thread* THREAD) {
  assert(DumpSharedSpaces, "dump-time only");

  oop ao = find_archived_heap_object(obj);
  if (ao != NULL) {
    // already archived
    return ao;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), (size_t)obj->size());
    return NULL;
  }

  oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
  if (archived_oop != NULL) {
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
    MetaspaceShared::relocate_klass_ptr(archived_oop);
    // Reinitialize markword to remove age/marking/locking/etc.
    //
    // We need to retain the identity_hash, because it may have been used by some hashtables
    // in the shared heap. This also has the side effect of pre-initializing the
    // identity_hash for all shared objects, so they are less likely to be written
    // into during run time, increasing the potential for memory sharing.
    int hash_original = obj->identity_hash();
    archived_oop->set_mark_raw(markWord::prototype().copy_set_hash(hash_original));
    assert(archived_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
    assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);

    ArchivedObjectCache* cache = archived_object_cache();
    cache->put(obj, archived_oop);
    log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT,
                         p2i(obj), p2i(archived_oop));
  } else {
    log_error(cds, heap)(
      "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
      p2i(obj));
    vm_exit(1);
  }
  return archived_oop;
}

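// Decode a narrowOop entry-field value from the archive and have G1 materialize
// the referenced open-archive object, making it known to the GC.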
oop HeapShared::materialize_archived_object(narrowOop v) {
  assert(archive_heap_region_fixed(),
         "must be called after archive heap regions are fixed");
  if (!CompressedOops::is_null(v)) {
    oop obj = HeapShared::decode_from_archive(v);
    return G1CollectedHeap::heap()->materialize_archived_object(obj);
  }
  return NULL;
}

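// Archive the java mirror and, for instance classes, the resolved_references
// array of every class collected for the archive.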
void HeapShared::archive_klass_objects(Thread* THREAD) {
  GrowableArray<Klass*>* klasses = MetaspaceShared::collected_klasses();
  assert(klasses != NULL, "sanity");
  for (int i = 0; i < klasses->length(); i++) {
    Klass* k = klasses->at(i);

    // archive mirror object
    java_lang_Class::archive_mirror(k, CHECK);

    // archive the resolved_references array
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->archive_resolved_references(THREAD);
    }
  }
}

void HeapShared::run_full_gc_in_vm_thread() {
  if (is_heap_object_archiving_allowed()) {
    // Avoid fragmentation while archiving heap objects.
    // We do this inside a safepoint, so that no further allocation can happen after GC
    // has finished.
    if (GCLocker::is_active()) {
      // Just checking for safety ...
      // This should not happen during -Xshare:dump. If you see this, probably the Java core lib
      // has been modified such that JNI code is executed in some cleanup threads after
      // we have finished class loading.
      log_warning(cds)("GC locker is held, unable to start extra compacting GC. This may produce suboptimal results.");
    } else {
      log_info(cds)("Run GC ...");
      Universe::heap()->collect_as_vm_thread(GCCause::_archive_time_gc);
      log_info(cds)("Run GC done");
    }
  }
}

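// Dump-time entry point: copies all archivable objects into the closed and
// open archive heap regions, recording the copies in the ArchivedObjectCache.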
void HeapShared::archive_java_heap_objects(GrowableArray<MemRegion> *closed,
                                           GrowableArray<MemRegion> *open) {
  if (!is_heap_object_archiving_allowed()) {
    log_info(cds)(
      "Archived java heap is not supported as UseG1GC, "
      "UseCompressedOops and UseCompressedClassPointers are required. "
      "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
      BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
      BOOL_TO_STR(UseCompressedClassPointers));
    return;
  }

  G1HeapVerifier::verify_ready_for_archiving();

  {
    NoSafepointVerifier nsv;

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    log_info(cds)("Dumping objects to closed archive heap region ...");
    NOT_PRODUCT(StringTable::verify());
    copy_closed_archive_heap_objects(closed);

    log_info(cds)("Dumping objects to open archive heap region ...");
    copy_open_archive_heap_objects(open);

    ClassLoaderDataShared::init_archived_oops();

    destroy_archived_object_cache();
  }

  G1HeapVerifier::verify_archive_regions();
}

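// Copy the interned strings and the closed-archive subgraphs into the
// closed archive heap region.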
void HeapShared::copy_closed_archive_heap_objects(
                                    GrowableArray<MemRegion> * closed_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");

  Thread* THREAD = Thread::current();
  G1CollectedHeap::heap()->begin_archive_alloc_range();

  // Archive interned string objects
  StringTable::write_to_archive();

  archive_object_subgraphs(closed_archive_subgraph_entry_fields,
                           num_closed_archive_subgraph_entry_fields,
                           true /* is_closed_archive */, THREAD);

  G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive,
                                                   os::vm_allocation_granularity());
}

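// Copy the basic type mirrors, the class mirrors and resolved references, and
// the open-archive subgraphs into the open archive heap region.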
void HeapShared::copy_open_archive_heap_objects(
                                    GrowableArray<MemRegion> * open_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");

  Thread* THREAD = Thread::current();
  G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);

  java_lang_Class::archive_basic_type_mirrors(THREAD);

  archive_klass_objects(THREAD);

  archive_object_subgraphs(open_archive_subgraph_entry_fields,
                           num_open_archive_subgraph_entry_fields,
                           false /* is_closed_archive */,
                           THREAD);

  G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
                                                   os::vm_allocation_granularity());
}

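// Record the narrow-oop encoding (base/shift) that was in effect when the
// archive was dumped, so archived narrowOops can be decoded at runtime.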
void HeapShared::init_narrow_oop_decoding(address base, int shift) {
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}

//
// Subgraph archiving support
//
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
HeapShared::RunTimeKlassSubGraphInfoTable   HeapShared::_run_time_subgraph_info_table;

// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the relocated
// Klass* of the original k.
KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(DumpSharedSpaces, "dump time only");
  Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
  if (info == NULL) {
    _dump_time_subgraph_info_table->put(relocated_k, KlassSubGraphInfo(relocated_k));
    info = _dump_time_subgraph_info_table->get(relocated_k);
    ++ _dump_time_subgraph_info_table->_count;
  }
  return info;
}

// Add an entry field to the current KlassSubGraphInfo.
void KlassSubGraphInfo::add_subgraph_entry_field(
      int static_field_offset, oop v, bool is_closed_archive) {
  assert(DumpSharedSpaces, "dump time only");
  if (_subgraph_entry_fields == NULL) {
    _subgraph_entry_fields =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, mtClass);
  }
  _subgraph_entry_fields->append((juint)static_field_offset);
  _subgraph_entry_fields->append(CompressedOops::encode(v));
  _subgraph_entry_fields->append(is_closed_archive ? 1 : 0);
}

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in the sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k, Klass *relocated_k) {
  assert(DumpSharedSpaces, "dump time only");
  assert(relocated_k == MetaspaceShared::get_relocated_klass(orig_k),
         "must be the relocated Klass in the shared space");

  if (_subgraph_object_klasses == NULL) {
    _subgraph_object_klasses =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, mtClass);
  }

  assert(relocated_k->is_shared(), "must be a shared class");

  if (_k == relocated_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (relocated_k->is_instance_klass()) {
    assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(),
          "must be boot class");
    // SystemDictionary::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == SystemDictionary::String_klass() ||
        orig_k == SystemDictionary::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
  } else if (relocated_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
            "must be boot class");
    }
    if (relocated_k == Universe::objectArrayKlassObj()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(relocated_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(relocated_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(relocated_k);
}

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = info->klass();
  _entry_field_records = NULL;
  _subgraph_object_klasses = NULL;

  // populate the entry fields
  GrowableArray<juint>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != NULL) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 3 == 0, "sanity");
    _entry_field_records =
      MetaspaceShared::new_ro_array<juint>(num_entry_fields);
    for (int i = 0 ; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
  }

  // the Klasses of the objects in the sub-graphs
  GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
  if (subgraph_object_klasses != NULL) {
    int num_subgraphs_klasses = subgraph_object_klasses->length();
    _subgraph_object_klasses =
      MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses);
    for (int i = 0; i < num_subgraphs_klasses; i++) {
      Klass* subgraph_k = subgraph_object_klasses->at(i);
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)(
          "Archived object klass %s (%2d) => %s",
          _k->external_name(), i, subgraph_k->external_name());
      }
      _subgraph_object_klasses->at_put(i, subgraph_k);
      ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
    }
  }

  ArchivePtrMarker::mark_pointer(&_k);
  ArchivePtrMarker::mark_pointer(&_entry_field_records);
  ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
}

struct CopyKlassSubGraphInfoToArchive : StackObj {
  CompactHashtableWriter* _writer;
  CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}

  bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
    if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
      ArchivedKlassSubGraphInfoRecord* record =
        (ArchivedKlassSubGraphInfoRecord*)MetaspaceShared::read_only_space_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
      record->init(&info);

      unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(klass);
      u4 delta = MetaspaceShared::object_delta_u4(record);
      _writer->add(hash, delta);
    }
    return true; // keep on iterating
  }
};

// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class mirror. The entry
//   points are static fields in the mirror. For each entry point, the field
//   offset, value and is_closed_archive flag are recorded in the sub-graph
//   info. The value is stored back to the corresponding field at runtime.
// - A list of klasses that need to be loaded/initialized before the archived
//   java object sub-graph can be accessed at runtime.
void HeapShared::write_subgraph_info_table() {
  // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
  DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
  CompactHashtableStats stats;

  _run_time_subgraph_info_table.reset();

  CompactHashtableWriter writer(d_table->_count, &stats);
  CopyKlassSubGraphInfoToArchive copy(&writer);
  d_table->iterate(&copy);

  writer.dump(&_run_time_subgraph_info_table, "subgraphs");
}

void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) {
  _run_time_subgraph_info_table.serialize_header(soc);
}

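// Runtime entry point: if k has an archived subgraph record, load/initialize
// the klasses of the archived objects and then store the archived entry-field
// values back into k's mirror.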
void HeapShared::initialize_from_archived_subgraph(Klass* k) {
  if (!open_archive_heap_region_mapped()) {
    return; // nothing to do
  }
  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  if (!MetaspaceShared::use_full_module_graph()) {
    for (int i = 0; i < num_open_archive_subgraph_entry_fields; i++) {
      const ArchivableStaticFieldInfo* info = &open_archive_subgraph_entry_fields[i];
      if (info->full_module_graph_only && k->name()->equals(info->klass_name)) {
        return;
      }
    }
  }

  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

  // Initialize from archived data. Currently this is done only
  // during VM initialization time. No lock is needed.
  if (record != NULL) {
    Thread* THREAD = Thread::current();

    int i;
    // Load/link/initialize the klasses of the objects in the subgraph.
    // NULL class loader is used.
    Array<Klass*>* klasses = record->subgraph_object_klasses();
    if (klasses != NULL) {
      for (i = 0; i < klasses->length(); i++) {
        Klass* obj_k = klasses->at(i);
        Klass* resolved_k = SystemDictionary::resolve_or_null(
                                              (obj_k)->name(), THREAD);
        if (resolved_k != obj_k) {
          assert(!SystemDictionary::is_well_known_klass(resolved_k),
                 "shared well-known classes must not be replaced by JVMTI ClassFileLoadHook");
          ResourceMark rm(THREAD);
          log_info(cds, heap)("Failed to load subgraph because %s was not loaded from archive",
                              resolved_k->external_name());
          return;
        }
        if ((obj_k)->is_instance_klass()) {
          InstanceKlass* ik = InstanceKlass::cast(obj_k);
          ik->initialize(THREAD);
        } else if ((obj_k)->is_objArray_klass()) {
          ObjArrayKlass* oak = ObjArrayKlass::cast(obj_k);
          oak->initialize(THREAD);
        }
      }
    }

    if (HAS_PENDING_EXCEPTION) {
      CLEAR_PENDING_EXCEPTION;
      // None of the field values will be set if there was an exception.
      // The java code will not see any of the archived objects in the
      // subgraphs referenced from k in this case.
      return;
    }

    // Load the subgraph entry fields from the record and store them back to
    // the corresponding fields within the mirror.
    oop m = k->java_mirror();
    Array<juint>* entry_field_records = record->entry_field_records();
    if (entry_field_records != NULL) {
      int efr_len = entry_field_records->length();
      assert(efr_len % 3 == 0, "sanity");
      for (i = 0; i < efr_len;) {
        int field_offset = entry_field_records->at(i);
        narrowOop nv = entry_field_records->at(i+1);
        int is_closed_archive = entry_field_records->at(i+2);
        oop v;
        if (is_closed_archive == 0) {
          // It's an archived object in the open archive heap regions, not shared.
          // The object referenced by the field becomes 'known' by GC from this
          // point. All objects in the subgraph reachable from the object are
          // also 'known' by GC.
          v = materialize_archived_object(nv);
        } else {
          // Shared object in the closed archive heap regions. Decode directly.
          assert(!CompressedOops::is_null(nv), "shared object is null");
          v = HeapShared::decode_from_archive(nv);
        }
        m->obj_field_put(field_offset, v);
        i += 3;

        log_debug(cds, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
      }

      // Done. Java code can see the archived sub-graphs referenced from k's
      // mirror after this point.
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT,
                            k->external_name(), p2i(k));
      }
    }
  }
}

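// Closure used by archive_reachable_objects_from(): for each non-null reference
// in the original object, archive the referenced object (recursively) and
// update the corresponding field in the archived copy.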
class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
  int _level;
  bool _is_closed_archive;
  bool _record_klasses_only;
  KlassSubGraphInfo* _subgraph_info;
  oop _orig_referencing_obj;
  oop _archived_referencing_obj;
  Thread* _thread;
 public:
  WalkOopAndArchiveClosure(int level,
                           bool is_closed_archive,
                           bool record_klasses_only,
                           KlassSubGraphInfo* subgraph_info,
                           oop orig, oop archived, TRAPS) :
    _level(level), _is_closed_archive(is_closed_archive),
    _record_klasses_only(record_klasses_only),
    _subgraph_info(subgraph_info),
    _orig_referencing_obj(orig), _archived_referencing_obj(archived),
    _thread(THREAD) {}
  void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
  void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      assert(!HeapShared::is_archived_object(obj),
             "original objects must not point to archived objects");

      size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
      T* new_p = (T*)(cast_from_oop<address>(_archived_referencing_obj) + field_delta);
      Thread* THREAD = _thread;

      if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
        ResourceMark rm;
        log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size %d %s", _level,
                             _orig_referencing_obj->klass()->external_name(), field_delta,
                             p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
        LogTarget(Trace, cds, heap) log;
        LogStream out(log);
        obj->print_on(&out);
      }

      oop archived = HeapShared::archive_reachable_objects_from(
          _level + 1, _subgraph_info, obj, _is_closed_archive, THREAD);
      assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1");
      assert(HeapShared::is_archived_object(archived), "must be");

      if (!_record_klasses_only) {
        // Update the reference in the archived copy of the referencing object.
        log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
                             _level, p2i(new_p), p2i(obj), p2i(archived));
        RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
      }
    }
  }
};

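// Objects in the closed archive region must not have reference fields that can
// change at runtime, so warn about any non-final instance reference field.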
void HeapShared::check_closed_archive_heap_region_object(InstanceKlass* k,
                                                         Thread* THREAD) {
  // Check fields in the object
  for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
    if (!fs.access_flags().is_static()) {
      BasicType ft = fs.field_descriptor().field_type();
      if (!fs.access_flags().is_final() && is_reference_type(ft)) {
        ResourceMark rm(THREAD);
        log_warning(cds, heap)(
          "Please check reference field in %s instance in closed archive heap region: %s %s",
          k->external_name(), (fs.name())->as_C_string(),
          (fs.signature())->as_C_string());
      }
    }
  }
}

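// Sanity-check a java.lang.Module object before archiving: it must either be
// one of the special unnamed modules (which have no ModuleEntry) or be defined
// by a builtin class loader.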
void HeapShared::check_module_oop(oop orig_module_obj) {
  assert(DumpSharedSpaces, "must be");
  assert(java_lang_Module::is_instance(orig_module_obj), "must be");
  ModuleEntry* orig_module_ent = java_lang_Module::module_entry_raw(orig_module_obj);
  if (orig_module_ent == NULL) {
    // These special Module objects are created in Java code. They are not
    // defined via Modules::define_module(), so they don't have a ModuleEntry:
    //     java.lang.Module::ALL_UNNAMED_MODULE
    //     java.lang.Module::EVERYONE_MODULE
    //     jdk.internal.loader.ClassLoaders$BootClassLoader::unnamedModule
    assert(java_lang_Module::name(orig_module_obj) == NULL, "must be unnamed");
    log_info(cds, heap)("Module oop with no ModuleEntry* @[" PTR_FORMAT "]", p2i(orig_module_obj));
  } else {
    ClassLoaderData* loader_data = orig_module_ent->loader_data();
    assert(loader_data->is_builtin_class_loader_data(), "must be");
  }
}

// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of orig_obj and all reachable objects.
oop HeapShared::archive_reachable_objects_from(int level,
                                               KlassSubGraphInfo* subgraph_info,
                                               oop orig_obj,
                                               bool is_closed_archive,
                                               TRAPS) {
  assert(orig_obj != NULL, "must be");
  assert(!is_archived_object(orig_obj), "sanity");

  if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
    // This object has injected fields that cannot be supported easily, so we disallow them for now.
    // If you get an error here, you probably made a change in the JDK library that has added
    // these objects that are referenced (directly or indirectly) by static fields.
    ResourceMark rm;
    log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name());
    vm_exit(1);
  }

  // java.lang.Class instances cannot be included in an archived object sub-graph. We only support
  // them as Klass::_archived_mirror because they need to be specially restored at run time.
  //
  // If you get an error here, you probably made a change in the JDK library that has added a Class
  // object that is referenced (directly or indirectly) by static fields.
  if (java_lang_Class::is_instance(orig_obj)) {
    log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
    vm_exit(1);
  }

  oop archived_obj = find_archived_heap_object(orig_obj);
  if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
    // To save time, don't walk strings that are already archived. They just contain
    // pointers to a type array, whose klass doesn't need to be recorded.
    return archived_obj;
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return archived_obj;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool record_klasses_only = (archived_obj != NULL);
  if (archived_obj == NULL) {
    ++_num_new_archived_objs;
    archived_obj = archive_heap_object(orig_obj, THREAD);
    if (archived_obj == NULL) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size %d, skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return NULL;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        vm_exit(1);
      }
    }

    if (java_lang_Module::is_instance(orig_obj)) {
      check_module_oop(orig_obj);
      java_lang_Module::set_module_entry(archived_obj, NULL);
      java_lang_Module::set_loader(archived_obj, NULL);
    } else if (java_lang_ClassLoader::is_instance(orig_obj)) {
      // class_data will be restored explicitly at run time.
      guarantee(orig_obj == SystemDictionary::java_platform_loader() ||
                orig_obj == SystemDictionary::java_system_loader() ||
                java_lang_ClassLoader::loader_data_raw(orig_obj) == NULL, "must be");
      java_lang_ClassLoader::release_set_loader_data(archived_obj, NULL);
    }
  }

  assert(archived_obj != NULL, "must be");
  Klass *orig_k = orig_obj->klass();
  Klass *relocated_k = archived_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k, relocated_k);

  WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
                                  subgraph_info, orig_obj, archived_obj, THREAD);
  orig_obj->oop_iterate(&walker);
  if (is_closed_archive && orig_k->is_instance_klass()) {
    check_closed_archive_heap_region_object(InstanceKlass::cast(orig_k), THREAD);
  }
  return archived_obj;
}

//
// Start from the given static field in a java mirror and archive the
// complete sub-graph of java heap objects that are reached directly
// or indirectly from the starting object by following references.
// Sub-graph archiving restrictions (current):
//
// - All classes of objects in the archived sub-graph (including the
//   entry class) must be boot class only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. Mirror can only be the sub-graph entry object.
//
// The Java heap object sub-graph archiving process (see
// WalkOopAndArchiveClosure):
//
// 1) Java object sub-graph archiving starts from a given static field
// within a Class instance (java mirror). If the static field is a
// reference field and points to a non-null java object, proceed to
// the next step.
//
// 2) Archives the referenced java object. If an archived copy of the
// current object already exists, updates the pointer in the archived
// copy of the referencing object to point to the current archived object.
// Otherwise, proceed to the next step.
//
// 3) Follows all references within the current java object and recursively
// archive the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of referencing object to
// point to the current archived object.
//
// 5) The Klass of the current java object is added to the list of Klasses
// for loading and initializing before any object in the archived graph can
// be accessed at runtime.
//
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
                                                             const char* klass_name,
                                                             int field_offset,
                                                             const char* field_name,
                                                             bool is_closed_archive,
                                                             TRAPS) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();

  KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
  oop f = m->obj_field(field_offset);

  log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));

  if (!CompressedOops::is_null(f)) {
    if (log_is_enabled(Trace, cds, heap)) {
      LogTarget(Trace, cds, heap) log;
      LogStream out(log);
      f->print_on(&out);
    }

    oop af = archive_reachable_objects_from(1, subgraph_info, f,
                                            is_closed_archive, CHECK);

    if (af == NULL) {
      log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
                           klass_name, field_name);
    } else {
      // Note: the field value is not preserved in the archived mirror.
      // Record the field as a new subGraph entry point. The recorded
      // information is restored from the archive at runtime.
      subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
      log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
    }
  } else {
    // The field contains null, we still need to record the entry point,
    // so it can be restored at runtime.
    subgraph_info->add_subgraph_entry_field(field_offset, NULL, false);
  }
}

#ifndef PRODUCT
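// Closure for verify_reachable_objects_from(): recursively checks the
// archived-ness invariants of every object reachable from a subgraph root.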
class VerifySharedOopClosure: public BasicOopIterateClosure {
 private:
  bool _is_archived;

 public:
  VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}

  void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
  void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      HeapShared::verify_reachable_objects_from(obj, _is_archived);
    }
  }
};

void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
  oop f = m->obj_field(field_offset);
  if (!CompressedOops::is_null(f)) {
    verify_subgraph_from(f);
  }
}

void HeapShared::verify_subgraph_from(oop orig_obj) {
  oop archived_obj = find_archived_heap_object(orig_obj);
  if (archived_obj == NULL) {
    // It's OK for the root of a subgraph not to be archived. See comments in
    // archive_reachable_objects_from().
    return;
  }

  // Verify that all objects reachable from orig_obj are archived.
  init_seen_objects_table();
  verify_reachable_objects_from(orig_obj, false);
  delete_seen_objects_table();

  // Note: we could also verify that all objects reachable from the archived
  // copy of orig_obj can only point to archived objects, with:
  //      init_seen_objects_table();
  //      verify_reachable_objects_from(archived_obj, true);
  //      delete_seen_objects_table();
  // but that's already done in G1HeapVerifier::verify_archive_regions so we
  // won't do it here.
}

void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
  _num_total_verifications ++;
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);

    if (is_archived) {
      assert(is_archived_object(obj), "must be");
      assert(find_archived_heap_object(obj) == NULL, "must be");
    } else {
      assert(!is_archived_object(obj), "must be");
      assert(find_archived_heap_object(obj) != NULL, "must be");
    }

    VerifySharedOopClosure walker(is_archived);
    obj->oop_iterate(&walker);
  }
}
#endif

HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;

bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
  return _seen_objects_table->get(obj) != NULL;
}

void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
  assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
  _seen_objects_table->put(obj, true);
  ++ _num_new_walked_objs;
}

void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name) {
  log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
  init_seen_objects_table();
  _num_new_walked_objs = 0;
  _num_new_archived_objs = 0;
  _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
}

void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
  int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
    _num_old_recorded_klasses;
  log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
                      "walked %d objs, archived %d new objs, recorded %d classes",
                      class_name, _num_new_walked_objs, _num_new_archived_objs,
                      num_new_recorded_klasses);

  delete_seen_objects_table();

  _num_total_subgraph_recordings ++;
  _num_total_walked_objs      += _num_new_walked_objs;
  _num_total_archived_objs    += _num_new_archived_objs;
  _num_total_recorded_klasses += num_new_recorded_klasses;
}

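// FieldClosure that locates the offset of a named static reference field in a
// class; used to resolve the entries of the subgraph entry field tables.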
class ArchivableStaticFieldFinder: public FieldClosure {
  InstanceKlass* _ik;
  Symbol* _field_name;
  bool _found;
  int _offset;
public:
  ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
    _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}

  virtual void do_field(fieldDescriptor* fd) {
    if (fd->name() == _field_name) {
      assert(!_found, "fields cannot be overloaded");
      assert(is_reference_type(fd->field_type()), "can archive only fields that are references");
      _found = true;
      _offset = fd->offset();
    }
  }
  bool found()     { return _found;  }
  int offset()     { return _offset; }
};

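// Resolve and initialize the classes named in fields[], and record the klass
// and the offset of each named static field.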
void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
                                            int num, Thread* THREAD) {
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name);

    Klass* k = SystemDictionary::resolve_or_null(klass_name, THREAD);
    assert(k != NULL && !HAS_PENDING_EXCEPTION, "class must exist");
    InstanceKlass* ik = InstanceKlass::cast(k);
    assert(ik->is_shared_boot_class(), "Only support boot classes");
    ik->initialize(THREAD);
    guarantee(!HAS_PENDING_EXCEPTION, "exception in initialize");

    ArchivableStaticFieldFinder finder(ik, field_name);
    ik->do_local_static_fields(&finder);
    assert(finder.found(), "field must exist");

    info->klass = ik;
    info->offset = finder.offset();
  }
}

void HeapShared::init_subgraph_entry_fields(Thread* THREAD) {
  _dump_time_subgraph_info_table = new (ResourceObj::C_HEAP, mtClass) DumpTimeKlassSubGraphInfoTable();

  init_subgraph_entry_fields(closed_archive_subgraph_entry_fields,
                             num_closed_archive_subgraph_entry_fields,
                             THREAD);
  init_subgraph_entry_fields(open_archive_subgraph_entry_fields,
                             num_open_archive_subgraph_entry_fields,
                             THREAD);
}

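// Archive the subgraphs reachable from the static fields described in fields[],
// grouping consecutive fields of the same class into one recording pass.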
void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          int num, bool is_closed_archive,
                                          Thread* THREAD) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  int i;
  for (i = 0; i < num; ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name);

    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; i < num; i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }

      if (!info->full_module_graph_only || MetaspaceShared::use_full_module_graph()) {
        archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                    f->offset, f->field_name,
                                                    is_closed_archive, CHECK);
      }
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records in %s archive heap region = %d",
                      is_closed_archive ? "closed" : "open",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("  Verified %d references", _num_total_verifications);
#endif
}

// At dump-time, find the location of all the non-null oop pointers in an archived heap
// region. This way we can quickly relocate all the pointers without using
// BasicOopIterateClosure at runtime.
class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
  narrowOop* _start;
  BitMap *_oopmap;
  int _num_total_oops;
  int _num_null_oops;
 public:
  FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap)
    : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}

  virtual bool should_verify_oops(void) {
    return false;
  }
  virtual void do_oop(narrowOop* p) {
    _num_total_oops ++;
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      size_t idx = p - _start;
      _oopmap->set_bit(idx);
    } else {
      _num_null_oops ++;
    }
  }
  virtual void do_oop(oop *p) {
    ShouldNotReachHere();
  }
  int num_total_oops() const { return _num_total_oops; }
  int num_null_oops()  const { return _num_null_oops; }
};

ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
  assert(UseCompressedOops, "must be");
  size_t num_bits = region.byte_size() / sizeof(narrowOop);
  ResourceBitMap oopmap(num_bits);

  HeapWord* p   = region.start();
  HeapWord* end = region.end();
  FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap);

  int num_objs = 0;
  while (p < end) {
    oop o = (oop)p;
    o->oop_iterate(&finder);
    p += o->size();
    ++ num_objs;
  }

  log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d",
                      num_objs, finder.num_total_oops(), finder.num_null_oops());
  return oopmap;
}

// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

 public:
  PatchEmbeddedPointers(narrowOop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = HeapShared::decode_from_archive(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;
  }
};

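// Runtime: patch the embedded pointers in a mapped heap region using the
// oopmap (a bitmap of non-null narrowOop locations) stored in the archive.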
void HeapShared::patch_archived_heap_embedded_pointers(MemRegion region, address oopmap,
                                                       size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);

#ifndef PRODUCT
  ResourceMark rm;
  ResourceBitMap checkBm = calculate_oopmap(region);
  assert(bm.is_same(checkBm), "sanity");
#endif

  PatchEmbeddedPointers patcher((narrowOop*)region.start());
  bm.iterate(&patcher);
}

#endif // INCLUDE_CDS_JAVA_HEAP