/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/bitMap.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

bool HeapShared::_closed_archive_heap_region_mapped = false;
bool HeapShared::_open_archive_heap_region_mapped = false;
bool HeapShared::_archive_heap_region_fixed = false;

address   HeapShared::_narrow_oop_base;
int       HeapShared::_narrow_oop_shift;

//
// If you add new entries to the following tables, you should know what you're doing!
//

// Entry fields for shareable subgraphs archived in the closed archive heap
// region. Warning: Objects in the subgraphs should not have reference fields
// assigned at runtime.
static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",           "archivedCache"},
  {"java/lang/Long$LongCache",                 "archivedCache"},
  {"java/lang/Byte$ByteCache",                 "archivedCache"},
  {"java/lang/Short$ShortCache",               "archivedCache"},
  {"java/lang/Character$CharacterCache",       "archivedCache"},
  {"java/util/jar/Attributes$Name",            "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",               "constantBaseLocales"},
};
// Entry fields for subgraphs archived in the open archive heap region.
static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedModuleGraph"},
  {"java/util/ImmutableCollections$ListN",     "EMPTY_LIST"},
  {"java/util/ImmutableCollections$MapN",      "EMPTY_MAP"},
  {"java/util/ImmutableCollections$SetN",      "EMPTY_SET"},
  {"java/lang/module/Configuration",           "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",           "archivedCaches"},
};

const static int num_closed_archive_subgraph_entry_fields =
  sizeof(closed_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
const static int num_open_archive_subgraph_entry_fields =
  sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);

////////////////////////////////////////////////////////////////
//
// Java heap object archiving support
//
////////////////////////////////////////////////////////////////
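// Runtime entry point: called after the archive heap regions have been mapped
// in, so the file map can complete any deferred fix-up work on them. Archived
// oops may only be materialized after this (see materialize_archived_object).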
void HeapShared::fixup_mapped_heap_regions() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();
  mapinfo->fixup_mapped_heap_regions();
  set_archive_heap_region_fixed();
}

unsigned HeapShared::oop_hash(oop const& p) {
  assert(!p->mark()->has_bias_pattern(),
         "this object should never have been locked");  // so identity_hash won't safepoint
  unsigned hash = (unsigned)p->identity_hash();
  return hash;
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
oop HeapShared::find_archived_heap_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");
  ArchivedObjectCache* cache = archived_object_cache();
  oop* p = cache->get(obj);
  if (p != NULL) {
    return *p;
  } else {
    return NULL;
  }
}

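// Dump-time: copy obj into the G1 archive space and return the archived copy.
// The identity hash is computed first so the archived copy carries a stable
// hash in its mark word, and the klass pointer is relocated to the shared
// metaspace copy of the klass. Returns NULL only when the object is too large
// for archive allocation; failure to allocate archive space is fatal.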
oop HeapShared::archive_heap_object(oop obj, Thread* THREAD) {
  assert(DumpSharedSpaces, "dump-time only");

  oop ao = find_archived_heap_object(obj);
  if (ao != NULL) {
    // already archived
    return ao;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), (size_t)obj->size());
    return NULL;
  }

  // Pre-compute object identity hash at CDS dump time.
  obj->identity_hash();

  oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
  if (archived_oop != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)archived_oop, len);
    MetaspaceShared::relocate_klass_ptr(archived_oop);
    ArchivedObjectCache* cache = archived_object_cache();
    cache->put(obj, archived_oop);
    log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT,
                         p2i(obj), p2i(archived_oop));
  } else {
    log_error(cds, heap)(
      "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
      p2i(obj));
    vm_exit(1);
  }
  return archived_oop;
}

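// Runtime: decode a narrowOop that was stored in the archive into a real heap
// oop. Decoding uses the archive's own narrow-oop base/shift (see
// init_narrow_oop_decoding), which may differ from the runtime encoding, and
// then lets G1 materialize the archived object.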
oop HeapShared::materialize_archived_object(narrowOop v) {
  assert(archive_heap_region_fixed(),
         "must be called after archive heap regions are fixed");
  if (!CompressedOops::is_null(v)) {
    oop obj = HeapShared::decode_from_archive(v);
    return G1CollectedHeap::heap()->materialize_archived_object(obj);
  }
  return NULL;
}

void HeapShared::archive_klass_objects(Thread* THREAD) {
  GrowableArray<Klass*>* klasses = MetaspaceShared::collected_klasses();
  assert(klasses != NULL, "sanity");
  for (int i = 0; i < klasses->length(); i++) {
    Klass* k = klasses->at(i);

    // archive the mirror object
    java_lang_Class::archive_mirror(k, CHECK);

    // archive the resolved_references array
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->archive_resolved_references(THREAD);
    }
  }
}

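// Top-level dump-time entry point. Interned strings and the closed subgraph
// entry fields are copied into the closed archive regions; basic type
// mirrors, class mirrors/resolved references, and the open subgraph entry
// fields go into the open archive regions. Runs under a NoSafepointVerifier
// so no GC can move objects while the ArchivedObjectCache is in use.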
void HeapShared::archive_java_heap_objects(GrowableArray<MemRegion> *closed,
                                           GrowableArray<MemRegion> *open) {
  if (!is_heap_object_archiving_allowed()) {
    if (log_is_enabled(Info, cds)) {
      log_info(cds)(
        "Archived java heap is not supported, as UseG1GC, "
        "UseCompressedOops and UseCompressedClassPointers are required. "
        "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
        BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
        BOOL_TO_STR(UseCompressedClassPointers));
    }
    return;
  }

  G1HeapVerifier::verify_ready_for_archiving();

  {
    NoSafepointVerifier nsv;

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    tty->print_cr("Dumping objects to closed archive heap region ...");
    NOT_PRODUCT(StringTable::verify());
    copy_closed_archive_heap_objects(closed);

    tty->print_cr("Dumping objects to open archive heap region ...");
    copy_open_archive_heap_objects(open);

    destroy_archived_object_cache();
  }

  G1HeapVerifier::verify_archive_regions();
}

void HeapShared::copy_closed_archive_heap_objects(
                                    GrowableArray<MemRegion> * closed_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");

  Thread* THREAD = Thread::current();
  G1CollectedHeap::heap()->begin_archive_alloc_range();

  // Archive interned string objects
  StringTable::write_to_archive();

  archive_object_subgraphs(closed_archive_subgraph_entry_fields,
                           num_closed_archive_subgraph_entry_fields,
                           true /* is_closed_archive */, THREAD);

  G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive,
                                                   os::vm_allocation_granularity());
}

void HeapShared::copy_open_archive_heap_objects(
                                    GrowableArray<MemRegion> * open_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");

  Thread* THREAD = Thread::current();
  G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);

  java_lang_Class::archive_basic_type_mirrors(THREAD);

  archive_klass_objects(THREAD);

  archive_object_subgraphs(open_archive_subgraph_entry_fields,
                           num_open_archive_subgraph_entry_fields,
                           false /* is_closed_archive */,
                           THREAD);

  G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
                                                   os::vm_allocation_granularity());
}

void HeapShared::init_narrow_oop_decoding(address base, int shift) {
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}

//
// Subgraph archiving support
//
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
HeapShared::RunTimeKlassSubGraphInfoTable   HeapShared::_run_time_subgraph_info_table;

// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the relocated
// Klass* of the original k.
KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(DumpSharedSpaces, "dump time only");
  Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
  if (info == NULL) {
    _dump_time_subgraph_info_table->put(relocated_k, KlassSubGraphInfo(relocated_k));
    info = _dump_time_subgraph_info_table->get(relocated_k);
    ++ _dump_time_subgraph_info_table->_count;
  }
  return info;
}

// Add an entry field to the current KlassSubGraphInfo.
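// Each field is recorded as a triplet of juints:
//   { static_field_offset, CompressedOops::encode(value), is_closed_archive }
// which is why record lengths elsewhere are asserted to be multiples of 3.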
void KlassSubGraphInfo::add_subgraph_entry_field(
      int static_field_offset, oop v, bool is_closed_archive) {
  assert(DumpSharedSpaces, "dump time only");
  if (_subgraph_entry_fields == NULL) {
    _subgraph_entry_fields =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, true);
  }
  _subgraph_entry_fields->append((juint)static_field_offset);
  _subgraph_entry_fields->append(CompressedOops::encode(v));
  _subgraph_entry_fields->append(is_closed_archive ? 1 : 0);
}

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in the sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k, Klass *relocated_k) {
  assert(DumpSharedSpaces, "dump time only");
  assert(relocated_k == MetaspaceShared::get_relocated_klass(orig_k),
         "must be the relocated Klass in the shared space");

  if (_subgraph_object_klasses == NULL) {
    _subgraph_object_klasses =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, true);
  }

  assert(relocated_k->is_shared(), "must be a shared class");

  if (_k == relocated_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (relocated_k->is_instance_klass()) {
    assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(),
          "must be boot class");
    // SystemDictionary::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == SystemDictionary::String_klass() ||
        orig_k == SystemDictionary::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
  } else if (relocated_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
            "must be boot class");
    }
    if (relocated_k == Universe::objectArrayKlassObj()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(relocated_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(relocated_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(relocated_k);
}

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = info->klass();
  _entry_field_records = NULL;
  _subgraph_object_klasses = NULL;

  // populate the entry fields
  GrowableArray<juint>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != NULL) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 3 == 0, "sanity");
    _entry_field_records =
      MetaspaceShared::new_ro_array<juint>(num_entry_fields);
    for (int i = 0 ; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
  }

  // the Klasses of the objects in the sub-graphs
  GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
  if (subgraph_object_klasses != NULL) {
    int num_subgraphs_klasses = subgraph_object_klasses->length();
    _subgraph_object_klasses =
      MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses);
    for (int i = 0; i < num_subgraphs_klasses; i++) {
      Klass* subgraph_k = subgraph_object_klasses->at(i);
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)(
          "Archived object klass %s (%2d) => %s",
          _k->external_name(), i, subgraph_k->external_name());
      }
      _subgraph_object_klasses->at_put(i, subgraph_k);
    }
  }
}

struct CopyKlassSubGraphInfoToArchive : StackObj {
  CompactHashtableWriter* _writer;
  CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}

  bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
    if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
      ArchivedKlassSubGraphInfoRecord* record =
        (ArchivedKlassSubGraphInfoRecord*)MetaspaceShared::read_only_space_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
      record->init(&info);

      unsigned int hash = primitive_hash<Klass*>(klass);
      u4 delta = MetaspaceShared::object_delta_u4(record);
      _writer->add(hash, delta);
    }
    return true; // keep on iterating
  }
};

// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class mirror. The entry
//   points are static fields in the mirror. For each entry point, the field
//   offset, value and is_closed_archive flag are recorded in the sub-graph
//   info. The value is stored back to the corresponding field at runtime.
// - A list of klasses that need to be loaded/initialized before the archived
//   java object sub-graphs can be accessed at runtime.
void HeapShared::write_subgraph_info_table() {
  // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
  DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
  CompactHashtableStats stats;

  _run_time_subgraph_info_table.reset();

  CompactHashtableWriter writer(d_table->_count, &stats);
  CopyKlassSubGraphInfoToArchive copy(&writer);
  d_table->iterate(&copy);

  writer.dump(&_run_time_subgraph_info_table, "subgraphs");
}

void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) {
  _run_time_subgraph_info_table.serialize_header(soc);
}

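// Runtime entry point, reached during the initialization of a class that has
// archived subgraphs (for example, java/lang/Integer$IntegerCache restoring
// its "archivedCache" field; see the entry-field tables above). The steps are:
// [1] look up the ArchivedKlassSubGraphInfoRecord for k,
// [2] load/initialize every klass recorded for the subgraphs,
// [3] decode or materialize each entry field value and store it back into the
//     corresponding static field of k's mirror.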
void HeapShared::initialize_from_archived_subgraph(Klass* k) {
  if (!open_archive_heap_region_mapped()) {
    return; // nothing to do
  }
  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  unsigned int hash = primitive_hash<Klass*>(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

  // Initialize from archived data. Currently this is done only
  // during VM initialization. No lock is needed.
  if (record != NULL) {
    Thread* THREAD = Thread::current();

    int i;
    // Load/link/initialize the klasses of the objects in the subgraph.
    // NULL class loader is used.
    Array<Klass*>* klasses = record->subgraph_object_klasses();
    if (klasses != NULL) {
      for (i = 0; i < klasses->length(); i++) {
        Klass* obj_k = klasses->at(i);
        Klass* resolved_k = SystemDictionary::resolve_or_null(
                                              (obj_k)->name(), THREAD);
        if (resolved_k != obj_k) {
          assert(!SystemDictionary::is_well_known_klass(resolved_k),
                 "shared well-known classes must not be replaced by JVMTI ClassFileLoadHook");
          ResourceMark rm(THREAD);
          log_info(cds, heap)("Failed to load subgraph because %s was not loaded from archive",
                              resolved_k->external_name());
          return;
        }
        if ((obj_k)->is_instance_klass()) {
          InstanceKlass* ik = InstanceKlass::cast(obj_k);
          ik->initialize(THREAD);
        } else if ((obj_k)->is_objArray_klass()) {
          ObjArrayKlass* oak = ObjArrayKlass::cast(obj_k);
          oak->initialize(THREAD);
        }
      }
    }

    if (HAS_PENDING_EXCEPTION) {
      CLEAR_PENDING_EXCEPTION;
      // None of the field values will be set if there was an exception.
      // The java code will not see any of the archived objects in the
      // subgraphs referenced from k in this case.
      return;
    }

    // Load the subgraph entry fields from the record and store them back to
    // the corresponding fields within the mirror.
    oop m = k->java_mirror();
    Array<juint>* entry_field_records = record->entry_field_records();
    if (entry_field_records != NULL) {
      int efr_len = entry_field_records->length();
      assert(efr_len % 3 == 0, "sanity");
      for (i = 0; i < efr_len;) {
        int field_offset = entry_field_records->at(i);
        narrowOop nv = entry_field_records->at(i+1);
        int is_closed_archive = entry_field_records->at(i+2);
        oop v;
        if (is_closed_archive == 0) {
          // It's an archived object in the open archive heap regions, not shared.
          // The object referenced by the field becomes 'known' by GC from this
          // point. All objects in the subgraph reachable from the object are
          // also 'known' by GC.
          v = materialize_archived_object(nv);
        } else {
          // Shared object in the closed archive heap regions. Decode directly.
          assert(!CompressedOops::is_null(nv), "shared object is null");
          v = HeapShared::decode_from_archive(nv);
        }
        m->obj_field_put(field_offset, v);
        i += 3;

        log_debug(cds, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
      }

      // Done. Java code can see the archived sub-graphs referenced from k's
      // mirror after this point.
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT,
                            k->external_name(), p2i(k));
      }
    }
  }
}

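// Closure used by archive_reachable_objects_from(): for every non-null
// reference field of the original object, recursively archive the referent,
// then patch the field at the same offset (field_delta) in the archived copy
// to point at the archived referent. If the referencing object itself was
// already archived, only the klasses are recorded (_record_klasses_only) and
// no stores are performed.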
class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
  int _level;
  bool _is_closed_archive;
  bool _record_klasses_only;
  KlassSubGraphInfo* _subgraph_info;
  oop _orig_referencing_obj;
  oop _archived_referencing_obj;
  Thread* _thread;
 public:
  WalkOopAndArchiveClosure(int level,
                           bool is_closed_archive,
                           bool record_klasses_only,
                           KlassSubGraphInfo* subgraph_info,
                           oop orig, oop archived, TRAPS) :
    _level(level), _is_closed_archive(is_closed_archive),
    _record_klasses_only(record_klasses_only),
    _subgraph_info(subgraph_info),
    _orig_referencing_obj(orig), _archived_referencing_obj(archived),
    _thread(THREAD) {}
  void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
  void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      assert(!HeapShared::is_archived_object(obj),
             "original objects must not point to archived objects");

      size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
      T* new_p = (T*)(address(_archived_referencing_obj) + field_delta);
      Thread* THREAD = _thread;

      if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
        ResourceMark rm;
        log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size %d %s", _level,
                             _orig_referencing_obj->klass()->external_name(), field_delta,
                             p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
        LogTarget(Trace, cds, heap) log;
        LogStream out(log);
        obj->print_on(&out);
      }

      oop archived = HeapShared::archive_reachable_objects_from(
          _level + 1, _subgraph_info, obj, _is_closed_archive, THREAD);
      assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1");
      assert(HeapShared::is_archived_object(archived), "must be");

      if (!_record_klasses_only) {
        // Update the reference in the archived copy of the referencing object.
        log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
                             _level, p2i(new_p), p2i(obj), p2i(archived));
        RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
      }
    }
  }
};

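// Objects in the closed archive regions are shared and never written at
// runtime, so a non-final reference field is a potential hazard (see the
// warning above the closed_archive_subgraph_entry_fields table). Warn about
// each such field found in an instance of k.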
void HeapShared::check_closed_archive_heap_region_object(InstanceKlass* k,
                                                         Thread* THREAD) {
  // Check fields in the object
  for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
    if (!fs.access_flags().is_static()) {
      BasicType ft = fs.field_descriptor().field_type();
      if (!fs.access_flags().is_final() && (ft == T_ARRAY || ft == T_OBJECT)) {
        ResourceMark rm(THREAD);
        log_warning(cds, heap)(
          "Please check reference field in %s instance in closed archive heap region: %s %s",
          k->external_name(), (fs.name())->as_C_string(),
          (fs.signature())->as_C_string());
      }
    }
  }
}

// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of orig_obj and all reachable objects.
oop HeapShared::archive_reachable_objects_from(int level,
                                               KlassSubGraphInfo* subgraph_info,
                                               oop orig_obj,
                                               bool is_closed_archive,
                                               TRAPS) {
  assert(orig_obj != NULL, "must be");
  assert(!is_archived_object(orig_obj), "sanity");

  // java.lang.Class instances cannot be included in an archived
  // object sub-graph.
  if (java_lang_Class::is_instance(orig_obj)) {
    log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
    vm_exit(1);
  }

  oop archived_obj = find_archived_heap_object(orig_obj);
  if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
    // To save time, don't walk strings that are already archived. They just contain
    // pointers to a type array, whose klass doesn't need to be recorded.
    return archived_obj;
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return archived_obj;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool record_klasses_only = (archived_obj != NULL);
  if (archived_obj == NULL) {
    ++_num_new_archived_objs;
    archived_obj = archive_heap_object(orig_obj, THREAD);
    if (archived_obj == NULL) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size %d, skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return NULL;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        vm_exit(1);
      }
    }
  }

  assert(archived_obj != NULL, "must be");
  Klass *orig_k = orig_obj->klass();
  Klass *relocated_k = archived_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k, relocated_k);

  WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
                                  subgraph_info, orig_obj, archived_obj, THREAD);
  orig_obj->oop_iterate(&walker);
  if (is_closed_archive && orig_k->is_instance_klass()) {
    check_closed_archive_heap_region_object(InstanceKlass::cast(orig_k), THREAD);
  }
  return archived_obj;
}

//
// Start from the given static field in a java mirror and archive the
// complete sub-graph of java heap objects that are reached directly
// or indirectly from the starting object by following references.
// Sub-graph archiving restrictions (current):
//
// - All classes of objects in the archived sub-graph (including the
//   entry class) must be boot classes only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. A mirror can only be the sub-graph entry object.
//
// The Java heap object sub-graph archiving process (see
// WalkOopAndArchiveClosure):
//
// 1) Java object sub-graph archiving starts from a given static field
// within a Class instance (java mirror). If the static field is a
// reference field and points to a non-null java object, proceed to
// the next step.
//
// 2) Archives the referenced java object. If an archived copy of the
// current object already exists, updates the pointer in the archived
// copy of the referencing object to point to the current archived object.
// Otherwise, proceed to the next step.
//
// 3) Follows all references within the current java object and recursively
// archives the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of the referencing object to
// point to the current archived object.
//
// 5) The Klass of the current java object is added to the list of Klasses
// for loading and initializing before any object in the archived graph can
// be accessed at runtime.
//
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
                                                             const char* klass_name,
                                                             int field_offset,
                                                             const char* field_name,
                                                             bool is_closed_archive,
                                                             TRAPS) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();

  KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
  oop f = m->obj_field(field_offset);

  log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));

  if (!CompressedOops::is_null(f)) {
    if (log_is_enabled(Trace, cds, heap)) {
      LogTarget(Trace, cds, heap) log;
      LogStream out(log);
      f->print_on(&out);
    }

    oop af = archive_reachable_objects_from(1, subgraph_info, f,
                                            is_closed_archive, CHECK);

    if (af == NULL) {
      log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
                           klass_name, field_name);
    } else {
      // Note: the field value is not preserved in the archived mirror.
      // Record the field as a new subGraph entry point. The recorded
      // information is restored from the archive at runtime.
      subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
      log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
    }
  } else {
    // The field contains null; we still need to record the entry point,
    // so it can be restored at runtime.
    subgraph_info->add_subgraph_entry_field(field_offset, NULL, false);
  }
}

#ifndef PRODUCT
class VerifySharedOopClosure: public BasicOopIterateClosure {
 private:
  bool _is_archived;

 public:
  VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}

  void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
  void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      HeapShared::verify_reachable_objects_from(obj, _is_archived);
    }
  }
};

void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
  oop f = m->obj_field(field_offset);
  if (!CompressedOops::is_null(f)) {
    verify_subgraph_from(f);
  }
}

void HeapShared::verify_subgraph_from(oop orig_obj) {
  oop archived_obj = find_archived_heap_object(orig_obj);
  if (archived_obj == NULL) {
    // It's OK for the root of a subgraph not to be archived. See comments in
    // archive_reachable_objects_from().
    return;
  }

  // Verify that all objects reachable from orig_obj are archived.
  init_seen_objects_table();
  verify_reachable_objects_from(orig_obj, false);
  delete_seen_objects_table();

  // Note: we could also verify that all objects reachable from the archived
  // copy of orig_obj can only point to archived objects, with:
  //      init_seen_objects_table();
  //      verify_reachable_objects_from(archived_obj, true);
  //      delete_seen_objects_table();
  // but that's already done in G1HeapVerifier::verify_archive_regions so we
  // won't do it here.
}

void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
  _num_total_verifications ++;
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);

    if (is_archived) {
      assert(is_archived_object(obj), "must be");
      assert(find_archived_heap_object(obj) == NULL, "must be");
    } else {
      assert(!is_archived_object(obj), "must be");
      assert(find_archived_heap_object(obj) != NULL, "must be");
    }

    VerifySharedOopClosure walker(is_archived);
    obj->oop_iterate(&walker);
  }
}
#endif

HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;

bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
  return _seen_objects_table->get(obj) != NULL;
}

void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
  assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
  _seen_objects_table->put(obj, true);
  ++ _num_new_walked_objs;
}

void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name) {
  log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
  init_seen_objects_table();
  _num_new_walked_objs = 0;
  _num_new_archived_objs = 0;
  _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
}

void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
  int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
    _num_old_recorded_klasses;
  log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
                      "walked %d objs, archived %d new objs, recorded %d classes",
                      class_name, _num_new_walked_objs, _num_new_archived_objs,
                      num_new_recorded_klasses);

  delete_seen_objects_table();

  _num_total_subgraph_recordings ++;
  _num_total_walked_objs      += _num_new_walked_objs;
  _num_total_archived_objs    += _num_new_archived_objs;
  _num_total_recorded_klasses +=  num_new_recorded_klasses;
}

class ArchivableStaticFieldFinder: public FieldClosure {
  InstanceKlass* _ik;
  Symbol* _field_name;
  bool _found;
  int _offset;
public:
  ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
    _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}

  virtual void do_field(fieldDescriptor* fd) {
    if (fd->name() == _field_name) {
      assert(!_found, "fields cannot be overloaded");
      assert(fd->field_type() == T_OBJECT || fd->field_type() == T_ARRAY, "can archive only obj or array fields");
      _found = true;
      _offset = fd->offset();
    }
  }
  bool found()     { return _found;  }
  int offset()     { return _offset; }
};

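// Resolve each {klass_name, field_name} pair in the tables at the top of this
// file to an InstanceKlass* and a field offset, initializing the class first
// so its static fields hold the values that will be archived.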
void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
                                            int num, Thread* THREAD) {
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name =  SymbolTable::new_symbol(info->klass_name);
    TempNewSymbol field_name =  SymbolTable::new_symbol(info->field_name);

    Klass* k = SystemDictionary::resolve_or_null(klass_name, THREAD);
    assert(k != NULL && !HAS_PENDING_EXCEPTION, "class must exist");
    InstanceKlass* ik = InstanceKlass::cast(k);
    assert(ik->is_shared_boot_class(),
           "Only support boot classes");
    ik->initialize(THREAD);
    guarantee(!HAS_PENDING_EXCEPTION, "exception in initialize");

    ArchivableStaticFieldFinder finder(ik, field_name);
    ik->do_local_static_fields(&finder);
    assert(finder.found(), "field must exist");

    info->klass = ik;
    info->offset = finder.offset();
  }
}

void HeapShared::init_subgraph_entry_fields(Thread* THREAD) {
  _dump_time_subgraph_info_table = new (ResourceObj::C_HEAP, mtClass)DumpTimeKlassSubGraphInfoTable();

  init_subgraph_entry_fields(closed_archive_subgraph_entry_fields,
                             num_closed_archive_subgraph_entry_fields,
                             THREAD);
  init_subgraph_entry_fields(open_archive_subgraph_entry_fields,
                             num_open_archive_subgraph_entry_fields,
                             THREAD);
}

void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          int num, bool is_closed_archive,
                                          Thread* THREAD) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field.
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  int i;
  for (i = 0; i < num; ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name);

    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; i < num; i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }
      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name,
                                                  is_closed_archive, CHECK);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records in %s archive heap region = %d",
                      is_closed_archive ? "closed" : "open",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("  Verified %d references", _num_total_verifications);
#endif
}

// At dump-time, find the locations of all the non-null oop pointers in an
// archived heap region. This way we can quickly relocate all the pointers
// without using BasicOopIterateClosure at runtime.
class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
  narrowOop* _start;
  BitMap *_oopmap;
  int _num_total_oops;
  int _num_null_oops;
 public:
  FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap)
    : _start(start), _oopmap(oopmap), _num_total_oops(0),  _num_null_oops(0) {}

  virtual bool should_verify_oops(void) {
    return false;
  }
  virtual void do_oop(narrowOop* p) {
    _num_total_oops ++;
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      size_t idx = p - _start;
      _oopmap->set_bit(idx);
    } else {
      _num_null_oops ++;
    }
  }
  virtual void do_oop(oop *p) {
    ShouldNotReachHere();
  }
  int num_total_oops() const { return _num_total_oops; }
  int num_null_oops()  const { return _num_null_oops; }
};

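// Build a bitmap with one bit per narrowOop-sized slot in the region; a set
// bit marks a non-null embedded oop that must be patched at runtime (see
// PatchEmbeddedPointers below).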
ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
  assert(UseCompressedOops, "must be");
  size_t num_bits = region.byte_size() / sizeof(narrowOop);
  ResourceBitMap oopmap(num_bits);

  HeapWord* p   = region.start();
  HeapWord* end = region.end();
  FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap);

  int num_objs = 0;
  while (p < end) {
    oop o = (oop)p;
    o->oop_iterate(&finder);
    p += o->size();
    ++ num_objs;
  }

  log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d",
                      num_objs, finder.num_total_oops(), finder.num_null_oops());
  return oopmap;
}

// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

 public:
  PatchEmbeddedPointers(narrowOop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = HeapShared::decode_from_archive(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;
  }
};

void HeapShared::patch_archived_heap_embedded_pointers(MemRegion region, address oopmap,
                                                       size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);

#ifndef PRODUCT
  ResourceMark rm;
  ResourceBitMap checkBm = calculate_oopmap(region);
  assert(bm.is_same(checkBm), "sanity");
#endif

  PatchEmbeddedPointers patcher((narrowOop*)region.start());
  bm.iterate(&patcher);
}

#endif // INCLUDE_CDS_JAVA_HEAP