/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "memory/archiveUtils.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/bitMap.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

bool HeapShared::_closed_archive_heap_region_mapped = false;
bool HeapShared::_open_archive_heap_region_mapped = false;
bool HeapShared::_archive_heap_region_fixed = false;

address   HeapShared::_narrow_oop_base;
int       HeapShared::_narrow_oop_shift;

//
// If you add new entries to the following tables, you should know what you're doing!
//

// Entry fields for shareable subgraphs archived in the closed archive heap
// region. Warning: Objects in the subgraphs should not have reference fields
// assigned at runtime.
static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",           "archivedCache"},
  {"java/lang/Long$LongCache",                 "archivedCache"},
  {"java/lang/Byte$ByteCache",                 "archivedCache"},
  {"java/lang/Short$ShortCache",               "archivedCache"},
  {"java/lang/Character$CharacterCache",       "archivedCache"},
  {"java/util/jar/Attributes$Name",            "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",               "constantBaseLocales"},
};
// Entry fields for subgraphs archived in the open archive heap region.
static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedModuleGraph"},
  {"java/util/ImmutableCollections$ListN",     "EMPTY_LIST"},
  {"java/util/ImmutableCollections$MapN",      "EMPTY_MAP"},
  {"java/util/ImmutableCollections$SetN",      "EMPTY_SET"},
  {"java/lang/module/Configuration",           "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",           "archivedCaches"},
};

const static int num_closed_archive_subgraph_entry_fields =
  sizeof(closed_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
const static int num_open_archive_subgraph_entry_fields =
  sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);

////////////////////////////////////////////////////////////////
//
// Java heap object archiving support
//
////////////////////////////////////////////////////////////////
void HeapShared::fixup_mapped_heap_regions() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();
  mapinfo->fixup_mapped_heap_regions();
  set_archive_heap_region_fixed();
}

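// Hash oops by their identity hash. The objects passed in are never locked
// (see the assert below), so computing identity_hash() here cannot inflate
// a monitor or trigger a safepoint.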
unsigned HeapShared::oop_hash(oop const& p) {
  assert(!p->mark().has_bias_pattern(),
         "this object should never have been locked");  // so identity_hash won't safepoint
  unsigned hash = (unsigned)p->identity_hash();
  return hash;
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
oop HeapShared::find_archived_heap_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");
  ArchivedObjectCache* cache = archived_object_cache();
  oop* p = cache->get(obj);
  if (p != NULL) {
    return *p;
  } else {
    return NULL;
  }
}

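// Copy obj into the archive heap region at dump time and remember the
// mapping in the ArchivedObjectCache. Returns the archived copy, or NULL if
// the object is too large for G1's archive allocator. Running out of space
// in the archive region is fatal and exits the VM.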
oop HeapShared::archive_heap_object(oop obj, Thread* THREAD) {
  assert(DumpSharedSpaces, "dump-time only");

  oop ao = find_archived_heap_object(obj);
  if (ao != NULL) {
    // already archived
    return ao;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), (size_t)obj->size());
    return NULL;
  }

  // Pre-compute object identity hash at CDS dump time.
  obj->identity_hash();

  oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
  if (archived_oop != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)archived_oop, len);
    MetaspaceShared::relocate_klass_ptr(archived_oop);
    ArchivedObjectCache* cache = archived_object_cache();
    cache->put(obj, archived_oop);
    log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT,
                         p2i(obj), p2i(archived_oop));
  } else {
    log_error(cds, heap)(
      "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
      p2i(obj));
    vm_exit(1);
  }
  return archived_oop;
}

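// Decode an archived object reference and ask G1 to materialize the object
// for runtime use. Only valid after the mapped heap regions have been fixed
// up (see the assert below).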
oop HeapShared::materialize_archived_object(narrowOop v) {
  assert(archive_heap_region_fixed(),
         "must be called after archive heap regions are fixed");
  if (!CompressedOops::is_null(v)) {
    oop obj = HeapShared::decode_from_archive(v);
    return G1CollectedHeap::heap()->materialize_archived_object(obj);
  }
  return NULL;
}

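// Archive the java mirror of every collected klass and, for instance
// klasses, the resolved_references array of its constant pool.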
void HeapShared::archive_klass_objects(Thread* THREAD) {
  GrowableArray<Klass*>* klasses = MetaspaceShared::collected_klasses();
  assert(klasses != NULL, "sanity");
  for (int i = 0; i < klasses->length(); i++) {
    Klass* k = klasses->at(i);

    // archive mirror object
    java_lang_Class::archive_mirror(k, CHECK);

    // archive the resolved_references array
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->archive_resolved_references(THREAD);
    }
  }
}

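// Top-level dump-time entry point: copy all archivable java heap objects
// into the closed and open archive heap regions. The copying runs inside a
// NoSafepointVerifier scope, since the ArchivedObjectCache keys on raw oops
// and a GC moving them mid-dump would invalidate the cache.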
void HeapShared::archive_java_heap_objects(GrowableArray<MemRegion> *closed,
                                           GrowableArray<MemRegion> *open) {
  if (!is_heap_object_archiving_allowed()) {
    if (log_is_enabled(Info, cds)) {
      log_info(cds)(
        "Archived java heap is not supported as UseG1GC, "
        "UseCompressedOops and UseCompressedClassPointers are required. "
        "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
        BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
        BOOL_TO_STR(UseCompressedClassPointers));
    }
    return;
  }

  G1HeapVerifier::verify_ready_for_archiving();

  {
    NoSafepointVerifier nsv;

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    tty->print_cr("Dumping objects to closed archive heap region ...");
    NOT_PRODUCT(StringTable::verify());
    copy_closed_archive_heap_objects(closed);

    tty->print_cr("Dumping objects to open archive heap region ...");
    copy_open_archive_heap_objects(open);

    destroy_archived_object_cache();
  }

  G1HeapVerifier::verify_archive_regions();
}

void HeapShared::copy_closed_archive_heap_objects(
                                    GrowableArray<MemRegion> * closed_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");

  Thread* THREAD = Thread::current();
  G1CollectedHeap::heap()->begin_archive_alloc_range();

  // Archive interned string objects
  StringTable::write_to_archive();

  archive_object_subgraphs(closed_archive_subgraph_entry_fields,
                           num_closed_archive_subgraph_entry_fields,
                           true /* is_closed_archive */, THREAD);

  G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive,
                                                   os::vm_allocation_granularity());
}

void HeapShared::copy_open_archive_heap_objects(
                                    GrowableArray<MemRegion> * open_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");

  Thread* THREAD = Thread::current();
  G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);

  java_lang_Class::archive_basic_type_mirrors(THREAD);

  archive_klass_objects(THREAD);

  archive_object_subgraphs(open_archive_subgraph_entry_fields,
                           num_open_archive_subgraph_entry_fields,
                           false /* is_closed_archive */,
                           THREAD);

  G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
                                                   os::vm_allocation_granularity());
}

void HeapShared::init_narrow_oop_decoding(address base, int shift) {
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}

//
// Subgraph archiving support
//
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
HeapShared::RunTimeKlassSubGraphInfoTable   HeapShared::_run_time_subgraph_info_table;

// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the relocated
// Klass* of the original k.
KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(DumpSharedSpaces, "dump time only");
  Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
  if (info == NULL) {
    _dump_time_subgraph_info_table->put(relocated_k, KlassSubGraphInfo(relocated_k));
    info = _dump_time_subgraph_info_table->get(relocated_k);
    ++ _dump_time_subgraph_info_table->_count;
  }
  return info;
}

// Add an entry field to the current KlassSubGraphInfo.
void KlassSubGraphInfo::add_subgraph_entry_field(
      int static_field_offset, oop v, bool is_closed_archive) {
  assert(DumpSharedSpaces, "dump time only");
  if (_subgraph_entry_fields == NULL) {
    _subgraph_entry_fields =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, true);
  }
  _subgraph_entry_fields->append((juint)static_field_offset);
  _subgraph_entry_fields->append(CompressedOops::encode(v));
  _subgraph_entry_fields->append(is_closed_archive ? 1 : 0);
}

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in a sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k, Klass *relocated_k) {
  assert(DumpSharedSpaces, "dump time only");
  assert(relocated_k == MetaspaceShared::get_relocated_klass(orig_k),
         "must be the relocated Klass in the shared space");

  if (_subgraph_object_klasses == NULL) {
    _subgraph_object_klasses =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, true);
  }

  assert(relocated_k->is_shared(), "must be a shared class");

  if (_k == relocated_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (relocated_k->is_instance_klass()) {
    assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(),
          "must be boot class");
    // SystemDictionary::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == SystemDictionary::String_klass() ||
        orig_k == SystemDictionary::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
  } else if (relocated_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
            "must be boot class");
    }
    if (relocated_k == Universe::objectArrayKlassObj()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(relocated_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(relocated_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(relocated_k);
}

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = info->klass();
  _entry_field_records = NULL;
  _subgraph_object_klasses = NULL;

  // populate the entry fields
  GrowableArray<juint>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != NULL) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 3 == 0, "sanity");
    _entry_field_records =
      MetaspaceShared::new_ro_array<juint>(num_entry_fields);
    for (int i = 0 ; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
  }

  // the Klasses of the objects in the sub-graphs
  GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
  if (subgraph_object_klasses != NULL) {
    int num_subgraphs_klasses = subgraph_object_klasses->length();
    _subgraph_object_klasses =
      MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses);
    for (int i = 0; i < num_subgraphs_klasses; i++) {
      Klass* subgraph_k = subgraph_object_klasses->at(i);
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)(
          "Archived object klass %s (%2d) => %s",
          _k->external_name(), i, subgraph_k->external_name());
      }
      _subgraph_object_klasses->at_put(i, subgraph_k);
      ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
    }
  }

  ArchivePtrMarker::mark_pointer(&_k);
  ArchivePtrMarker::mark_pointer(&_entry_field_records);
  ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
}

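// Copies each dump-time KlassSubGraphInfo into an
// ArchivedKlassSubGraphInfoRecord allocated from the read-only region, and
// registers the record with the CompactHashtableWriter, keyed by the klass.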
struct CopyKlassSubGraphInfoToArchive : StackObj {
  CompactHashtableWriter* _writer;
  CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}

  bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
    if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
      ArchivedKlassSubGraphInfoRecord* record =
        (ArchivedKlassSubGraphInfoRecord*)MetaspaceShared::read_only_space_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
      record->init(&info);

      unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(klass);
      u4 delta = MetaspaceShared::object_delta_u4(record);
      _writer->add(hash, delta);
    }
    return true; // keep on iterating
  }
};

// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class mirror. The entry
//   points are static fields in the mirror. For each entry point, the field
//   offset, value and is_closed_archive flag are recorded in the sub-graph
//   info. The value is stored back to the corresponding field at runtime.
// - A list of klasses that need to be loaded/initialized before the archived
//   java object sub-graphs can be accessed at runtime.
void HeapShared::write_subgraph_info_table() {
  // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
  DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
  CompactHashtableStats stats;

  _run_time_subgraph_info_table.reset();

  CompactHashtableWriter writer(d_table->_count, &stats);
  CopyKlassSubGraphInfoToArchive copy(&writer);
  d_table->iterate(&copy);

  writer.dump(&_run_time_subgraph_info_table, "subgraphs");
}

void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) {
  _run_time_subgraph_info_table.serialize_header(soc);
}

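// Runtime counterpart of write_subgraph_info_table(): look up the archived
// subgraph record for k, load/initialize the klasses of all objects in the
// subgraphs, then store the archived entry-field values back into k's mirror.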
void HeapShared::initialize_from_archived_subgraph(Klass* k) {
  if (!open_archive_heap_region_mapped() || !MetaspaceObj::is_shared(k)) {
    return; // nothing to do
  }
  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

  // Initialize from archived data. Currently this is done only
  // during VM initialization time. No lock is needed.
  if (record != NULL) {
    Thread* THREAD = Thread::current();

    int i;
    // Load/link/initialize the klasses of the objects in the subgraph.
    // NULL class loader is used.
    Array<Klass*>* klasses = record->subgraph_object_klasses();
    if (klasses != NULL) {
      for (i = 0; i < klasses->length(); i++) {
        Klass* obj_k = klasses->at(i);
        Klass* resolved_k = SystemDictionary::resolve_or_null(
                                              obj_k->name(), THREAD);
        if (resolved_k != obj_k) {
          assert(!SystemDictionary::is_well_known_klass(resolved_k),
                 "shared well-known classes must not be replaced by JVMTI ClassFileLoadHook");
          ResourceMark rm(THREAD);
          log_info(cds, heap)("Failed to load subgraph because %s was not loaded from archive",
                              resolved_k->external_name());
          return;
        }
        if (obj_k->is_instance_klass()) {
          InstanceKlass* ik = InstanceKlass::cast(obj_k);
          ik->initialize(THREAD);
        } else if (obj_k->is_objArray_klass()) {
          ObjArrayKlass* oak = ObjArrayKlass::cast(obj_k);
          oak->initialize(THREAD);
        }
      }
    }

    if (HAS_PENDING_EXCEPTION) {
      CLEAR_PENDING_EXCEPTION;
      // None of the field values will be set if there was an exception.
      // The java code will not see any of the archived objects in the
      // subgraphs referenced from k in this case.
      return;
    }

    // Load the subgraph entry fields from the record and store them back to
    // the corresponding fields within the mirror.
    oop m = k->java_mirror();
    Array<juint>* entry_field_records = record->entry_field_records();
    if (entry_field_records != NULL) {
      int efr_len = entry_field_records->length();
      assert(efr_len % 3 == 0, "sanity");
      for (i = 0; i < efr_len;) {
        int field_offset = entry_field_records->at(i);
        narrowOop nv = entry_field_records->at(i+1);
        int is_closed_archive = entry_field_records->at(i+2);
        oop v;
        if (is_closed_archive == 0) {
          // It's an archived object in the open archive heap regions, not shared.
          // The object referenced by the field becomes 'known' by GC from this
          // point. All objects in the subgraph reachable from the object are
          // also 'known' by GC.
          v = materialize_archived_object(nv);
        } else {
          // Shared object in the closed archive heap regions. Decode directly.
          assert(!CompressedOops::is_null(nv), "shared object is null");
          v = HeapShared::decode_from_archive(nv);
        }
        m->obj_field_put(field_offset, v);
        i += 3;

        log_debug(cds, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
      }

      // Done. Java code can see the archived sub-graphs referenced from k's
      // mirror after this point.
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT,
                            k->external_name(), p2i(k));
      }
    }
  }
}

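// Closure applied to each reference field of an object that is being
// archived. It recursively archives the referenced object (see
// archive_reachable_objects_from) and updates the corresponding field in
// the archived copy of the referencing object.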
class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
  int _level;
  bool _is_closed_archive;
  bool _record_klasses_only;
  KlassSubGraphInfo* _subgraph_info;
  oop _orig_referencing_obj;
  oop _archived_referencing_obj;
  Thread* _thread;
 public:
  WalkOopAndArchiveClosure(int level,
                           bool is_closed_archive,
                           bool record_klasses_only,
                           KlassSubGraphInfo* subgraph_info,
                           oop orig, oop archived, TRAPS) :
    _level(level), _is_closed_archive(is_closed_archive),
    _record_klasses_only(record_klasses_only),
    _subgraph_info(subgraph_info),
    _orig_referencing_obj(orig), _archived_referencing_obj(archived),
    _thread(THREAD) {}
  void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
  void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      assert(!HeapShared::is_archived_object(obj),
             "original objects must not point to archived objects");

      size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
      T* new_p = (T*)(address(_archived_referencing_obj) + field_delta);
      Thread* THREAD = _thread;

      if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
        ResourceMark rm;
        log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size %d %s", _level,
                             _orig_referencing_obj->klass()->external_name(), field_delta,
                             p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
        LogTarget(Trace, cds, heap) log;
        LogStream out(log);
        obj->print_on(&out);
      }

      oop archived = HeapShared::archive_reachable_objects_from(
          _level + 1, _subgraph_info, obj, _is_closed_archive, THREAD);
      assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1");
      assert(HeapShared::is_archived_object(archived), "must be");

      if (!_record_klasses_only) {
        // Update the reference in the archived copy of the referencing object.
        log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
                             _level, p2i(new_p), p2i(obj), p2i(archived));
        RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
      }
    }
  }
};

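// Objects in the closed archive heap region should not have reference
// fields assigned at runtime (see the warning above the entry field
// tables), so emit a warning for every non-final reference field found.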
void HeapShared::check_closed_archive_heap_region_object(InstanceKlass* k,
                                                         Thread* THREAD) {
  // Check fields in the object
  for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
    if (!fs.access_flags().is_static()) {
      BasicType ft = fs.field_descriptor().field_type();
      if (!fs.access_flags().is_final() && is_reference_type(ft)) {
        ResourceMark rm(THREAD);
        log_warning(cds, heap)(
          "Please check reference field in %s instance in closed archive heap region: %s %s",
          k->external_name(), (fs.name())->as_C_string(),
          (fs.signature())->as_C_string());
      }
    }
  }
}

// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of orig_obj and all reachable objects.
oop HeapShared::archive_reachable_objects_from(int level,
                                               KlassSubGraphInfo* subgraph_info,
                                               oop orig_obj,
                                               bool is_closed_archive,
                                               TRAPS) {
  assert(orig_obj != NULL, "must be");
  assert(!is_archived_object(orig_obj), "sanity");
  DEBUG_ONLY({
      Klass* klass = orig_obj->klass();
      assert(klass != SystemDictionary::Module_klass() &&
             klass != SystemDictionary::ResolvedMethodName_klass() &&
             klass != SystemDictionary::MemberName_klass() &&
             klass != SystemDictionary::Context_klass() &&
             klass != SystemDictionary::ClassLoader_klass(), "we can only relocate metaspace object pointers inside java_lang_Class instances");
    });
  // java.lang.Class instances cannot be included in an archived
  // object sub-graph.
  if (java_lang_Class::is_instance(orig_obj)) {
    log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
    vm_exit(1);
  }

  oop archived_obj = find_archived_heap_object(orig_obj);
  if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
    // To save time, don't walk strings that are already archived. They just contain
    // pointers to a type array, whose klass doesn't need to be recorded.
    return archived_obj;
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return archived_obj;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool record_klasses_only = (archived_obj != NULL);
  if (archived_obj == NULL) {
    ++_num_new_archived_objs;
    archived_obj = archive_heap_object(orig_obj, THREAD);
    if (archived_obj == NULL) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size %d, skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return NULL;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        vm_exit(1);
      }
    }
  }

  assert(archived_obj != NULL, "must be");
  Klass *orig_k = orig_obj->klass();
  Klass *relocated_k = archived_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k, relocated_k);

  WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
                                  subgraph_info, orig_obj, archived_obj, THREAD);
  orig_obj->oop_iterate(&walker);
  if (is_closed_archive && orig_k->is_instance_klass()) {
    check_closed_archive_heap_region_object(InstanceKlass::cast(orig_k), THREAD);
  }
  return archived_obj;
}

//
// Start from the given static field in a java mirror and archive the
// complete sub-graph of java heap objects that are reached directly
// or indirectly from the starting object by following references.
// Sub-graph archiving restrictions (current):
//
// - All classes of objects in the archived sub-graph (including the
//   entry class) must be boot class only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. Mirror can only be the sub-graph entry object.
//
// The Java heap object sub-graph archiving process (see
// WalkOopAndArchiveClosure):
//
// 1) Java object sub-graph archiving starts from a given static field
// within a Class instance (java mirror). If the static field is a
// reference field and points to a non-null java object, proceed to
// the next step.
//
// 2) Archives the referenced java object. If an archived copy of the
// current object already exists, updates the pointer in the archived
// copy of the referencing object to point to the current archived object.
// Otherwise, proceed to the next step.
//
// 3) Follows all references within the current java object and recursively
// archives the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of the referencing object to
// point to the current archived object.
//
// 5) The Klass of the current java object is added to the list of Klasses
// for loading and initializing before any object in the archived graph can
// be accessed at runtime.
//
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
                                                             const char* klass_name,
                                                             int field_offset,
                                                             const char* field_name,
                                                             bool is_closed_archive,
                                                             TRAPS) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();

  KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
  oop f = m->obj_field(field_offset);

  log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));

  if (!CompressedOops::is_null(f)) {
    if (log_is_enabled(Trace, cds, heap)) {
      LogTarget(Trace, cds, heap) log;
      LogStream out(log);
      f->print_on(&out);
    }

    oop af = archive_reachable_objects_from(1, subgraph_info, f,
                                            is_closed_archive, CHECK);

    if (af == NULL) {
      log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
                           klass_name, field_name);
    } else {
      // Note: the field value is not preserved in the archived mirror.
      // Record the field as a new subGraph entry point. The recorded
      // information is restored from the archive at runtime.
      subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
      log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
    }
  } else {
    // The field contains null, we still need to record the entry point,
    // so it can be restored at runtime.
    subgraph_info->add_subgraph_entry_field(field_offset, NULL, false);
  }
}

#ifndef PRODUCT
class VerifySharedOopClosure: public BasicOopIterateClosure {
 private:
  bool _is_archived;

 public:
  VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}

  void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
  void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      HeapShared::verify_reachable_objects_from(obj, _is_archived);
    }
  }
};

void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
  oop f = m->obj_field(field_offset);
  if (!CompressedOops::is_null(f)) {
    verify_subgraph_from(f);
  }
}

void HeapShared::verify_subgraph_from(oop orig_obj) {
  oop archived_obj = find_archived_heap_object(orig_obj);
  if (archived_obj == NULL) {
    // It's OK for the root of a subgraph to be not archived. See comments in
    // archive_reachable_objects_from().
    return;
  }

  // Verify that all objects reachable from orig_obj are archived.
  init_seen_objects_table();
  verify_reachable_objects_from(orig_obj, false);
  delete_seen_objects_table();

  // Note: we could also verify that all objects reachable from the archived
  // copy of orig_obj can only point to archived objects, with:
  //      init_seen_objects_table();
  //      verify_reachable_objects_from(archived_obj, true);
  //      delete_seen_objects_table();
  // but that's already done in G1HeapVerifier::verify_archive_regions so we
  // won't do it here.
}

void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
  _num_total_verifications ++;
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);

    if (is_archived) {
      assert(is_archived_object(obj), "must be");
      assert(find_archived_heap_object(obj) == NULL, "must be");
    } else {
      assert(!is_archived_object(obj), "must be");
      assert(find_archived_heap_object(obj) != NULL, "must be");
    }

    VerifySharedOopClosure walker(is_archived);
    obj->oop_iterate(&walker);
  }
}
#endif

HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;

bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
  return _seen_objects_table->get(obj) != NULL;
}

void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
  assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
  _seen_objects_table->put(obj, true);
  ++ _num_new_walked_objs;
}

void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name) {
  log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
  init_seen_objects_table();
  _num_new_walked_objs = 0;
  _num_new_archived_objs = 0;
  _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
}

void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
  int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
    _num_old_recorded_klasses;
  log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
                      "walked %d objs, archived %d new objs, recorded %d classes",
                      class_name, _num_new_walked_objs, _num_new_archived_objs,
                      num_new_recorded_klasses);

  delete_seen_objects_table();

  _num_total_subgraph_recordings ++;
  _num_total_walked_objs      += _num_new_walked_objs;
  _num_total_archived_objs    += _num_new_archived_objs;
  _num_total_recorded_klasses +=  num_new_recorded_klasses;
}

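// FieldClosure that records the offset of a static reference field with
// the given name.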
class ArchivableStaticFieldFinder: public FieldClosure {
  InstanceKlass* _ik;
  Symbol* _field_name;
  bool _found;
  int _offset;
public:
  ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
    _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}

  virtual void do_field(fieldDescriptor* fd) {
    if (fd->name() == _field_name) {
      assert(!_found, "fields cannot be overloaded");
      assert(is_reference_type(fd->field_type()), "can archive only fields that are references");
      _found = true;
      _offset = fd->offset();
    }
  }
  bool found()     { return _found;  }
  int offset()     { return _offset; }
};

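// Resolve and initialize the holder class of each entry field listed in
// fields[], then locate the named static field and cache its InstanceKlass*
// and offset for the later archiving passes.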
void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
                                            int num, Thread* THREAD) {
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name);

    Klass* k = SystemDictionary::resolve_or_null(klass_name, THREAD);
    assert(k != NULL && !HAS_PENDING_EXCEPTION, "class must exist");
    InstanceKlass* ik = InstanceKlass::cast(k);
    assert(ik->is_shared_boot_class(),
           "Only support boot classes");
    ik->initialize(THREAD);
    guarantee(!HAS_PENDING_EXCEPTION, "exception in initialize");

    ArchivableStaticFieldFinder finder(ik, field_name);
    ik->do_local_static_fields(&finder);
    assert(finder.found(), "field must exist");

    info->klass = ik;
    info->offset = finder.offset();
  }
}

void HeapShared::init_subgraph_entry_fields(Thread* THREAD) {
  _dump_time_subgraph_info_table = new (ResourceObj::C_HEAP, mtClass)DumpTimeKlassSubGraphInfoTable();

  init_subgraph_entry_fields(closed_archive_subgraph_entry_fields,
                             num_closed_archive_subgraph_entry_fields,
                             THREAD);
  init_subgraph_entry_fields(open_archive_subgraph_entry_fields,
                             num_open_archive_subgraph_entry_fields,
                             THREAD);
}

void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          int num, bool is_closed_archive,
                                          Thread* THREAD) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  int i;
  for (i = 0; i < num; ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name);

    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; i < num; i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }
      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name,
                                                  is_closed_archive, CHECK);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records in %s archive heap region = %d",
                      is_closed_archive ? "closed" : "open",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("  Verified %d references", _num_total_verifications);
#endif
}

// At dump-time, find the location of all the non-null oop pointers in an archived heap
// region. This way we can quickly relocate all the pointers without using
// BasicOopIterateClosure at runtime.
class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
  narrowOop* _start;
  BitMap *_oopmap;
  int _num_total_oops;
  int _num_null_oops;
 public:
  FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap)
    : _start(start), _oopmap(oopmap), _num_total_oops(0),  _num_null_oops(0) {}

  virtual bool should_verify_oops(void) {
    return false;
  }
  virtual void do_oop(narrowOop* p) {
    _num_total_oops ++;
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      size_t idx = p - _start;
      _oopmap->set_bit(idx);
    } else {
      _num_null_oops ++;
    }
  }
  virtual void do_oop(oop *p) {
    ShouldNotReachHere();
  }
  int num_total_oops() const { return _num_total_oops; }
  int num_null_oops()  const { return _num_null_oops; }
};

ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
  assert(UseCompressedOops, "must be");
  size_t num_bits = region.byte_size() / sizeof(narrowOop);
  ResourceBitMap oopmap(num_bits);

  HeapWord* p   = region.start();
  HeapWord* end = region.end();
  FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap);

  int num_objs = 0;
  while (p < end) {
    oop o = (oop)p;
    o->oop_iterate(&finder);
    p += o->size();
    ++ num_objs;
  }

  log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d",
                      num_objs, finder.num_total_oops(), finder.num_null_oops());
  return oopmap;
}

// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

 public:
  PatchEmbeddedPointers(narrowOop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = HeapShared::decode_from_archive(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;
  }
};

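// Patch the embedded pointers of a mapped archive heap region using the
// oopmap computed at dump time. In non-product builds, recompute the oopmap
// and verify it against the one stored in the archive.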
void HeapShared::patch_archived_heap_embedded_pointers(MemRegion region, address oopmap,
                                                       size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);

#ifndef PRODUCT
  ResourceMark rm;
  ResourceBitMap checkBm = calculate_oopmap(region);
  assert(bm.is_same(checkBm), "sanity");
#endif

  PatchEmbeddedPointers patcher((narrowOop*)region.start());
  bm.iterate(&patcher);
}

#endif // INCLUDE_CDS_JAVA_HEAP