1 /*
   2  * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/javaClasses.inline.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "classfile/vmSymbols.hpp"
  30 #include "logging/log.hpp"
  31 #include "logging/logMessage.hpp"
  32 #include "logging/logStream.hpp"
  33 #include "memory/filemap.hpp"
  34 #include "memory/heapShared.inline.hpp"
  35 #include "memory/iterator.inline.hpp"
  36 #include "memory/metadataFactory.hpp"
  37 #include "memory/metaspaceClosure.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/compressedOops.inline.hpp"
  40 #include "oops/fieldStreams.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "runtime/fieldDescriptor.inline.hpp"
  43 #include "runtime/safepointVerifiers.hpp"
  44 #include "utilities/bitMap.inline.hpp"
  45 #if INCLUDE_G1GC
  46 #include "gc/g1/g1CollectedHeap.hpp"
  47 #endif
  48 
  49 #if INCLUDE_CDS_JAVA_HEAP
  50 
  51 bool HeapShared::_closed_archive_heap_region_mapped = false;
  52 bool HeapShared::_open_archive_heap_region_mapped = false;
  53 bool HeapShared::_archive_heap_region_fixed = false;
  54 
  55 address   HeapShared::_narrow_oop_base;
  56 int       HeapShared::_narrow_oop_shift;
  57 
  58 //
  59 // If you add new entries to the following tables, you should know what you're doing!
  60 //
  61 
  62 // Entry fields for shareable subgraphs archived in the closed archive heap
  63 // region. Warning: Objects in the subgraphs should not have reference fields
  64 // assigned at runtime.
  65 static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
  66   {"java/lang/Integer$IntegerCache",           "archivedCache"},
  67   {"java/lang/Long$LongCache",                 "archivedCache"},
  68   {"java/lang/Byte$ByteCache",                 "archivedCache"},
  69   {"java/lang/Short$ShortCache",               "archivedCache"},
  70   {"java/lang/Character$CharacterCache",       "archivedCache"},
  71   {"java/util/jar/Attributes$Name",            "KNOWN_NAMES"},
  72 };
  73 // Entry fields for subgraphs archived in the open archive heap region.
  74 static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
  75   {"jdk/internal/module/ArchivedModuleGraph",  "archivedModuleGraph"},
  76   {"java/util/ImmutableCollections$ListN",     "EMPTY_LIST"},
  77   {"java/util/ImmutableCollections$MapN",      "EMPTY_MAP"},
  78   {"java/util/ImmutableCollections$SetN",      "EMPTY_SET"},
  79   {"java/lang/module/Configuration",           "EMPTY_CONFIGURATION"},
  80 };
  81 
  82 const static int num_closed_archive_subgraph_entry_fields =
  83   sizeof(closed_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
  84 const static int num_open_archive_subgraph_entry_fields =
  85   sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
  86 
  87 ////////////////////////////////////////////////////////////////
  88 //
  89 // Java heap object archiving support
  90 //
  91 ////////////////////////////////////////////////////////////////
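// Called at runtime after the archive heap regions have been mapped; delegates to
// FileMapInfo to fix up the mapped G1 regions and then marks them as fixed.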
  92 void HeapShared::fixup_mapped_heap_regions() {
  93   FileMapInfo *mapinfo = FileMapInfo::current_info();
  94   mapinfo->fixup_mapped_heap_regions();
  95   set_archive_heap_region_fixed();
  96 }
  97 
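// Identity-hash based hash function for oops, used by the dump-time hashtables in
// this file (the identity hash is pre-computed in archive_heap_object()).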
  98 unsigned HeapShared::oop_hash(oop const& p) {
  99   assert(!p->mark()->has_bias_pattern(),
 100          "this object should never have been locked");  // so identity_hash won't safepoint
 101   unsigned hash = (unsigned)p->identity_hash();
 102   return hash;
 103 }
 104 
 105 HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
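// Returns the archived copy of 'obj' if it has already been copied into an archive
// heap region during this dump, or NULL otherwise.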
 106 oop HeapShared::find_archived_heap_object(oop obj) {
 107   assert(DumpSharedSpaces, "dump-time only");
 108   ArchivedObjectCache* cache = archived_object_cache();
 109   oop* p = cache->get(obj);
 110   if (p != NULL) {
 111     return *p;
 112   } else {
 113     return NULL;
 114   }
 115 }
 116 
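// Copies 'obj' into the archive region opened by the surrounding
// begin_archive_alloc_range() call, relocates its klass pointer and records the
// original->archived mapping in the ArchivedObjectCache. Returns NULL only if the
// object is too large to archive; exits the VM if the allocation itself fails.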
 117 oop HeapShared::archive_heap_object(oop obj, Thread* THREAD) {
 118   assert(DumpSharedSpaces, "dump-time only");
 119 
 120   oop ao = find_archived_heap_object(obj);
 121   if (ao != NULL) {
 122     // already archived
 123     return ao;
 124   }
 125 
 126   int len = obj->size();
 127   if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
 128     log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
 129                          p2i(obj), (size_t)obj->size());
 130     return NULL;
 131   }
 132 
 133   // Pre-compute object identity hash at CDS dump time.
 134   obj->identity_hash();
 135 
 136   oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
 137   if (archived_oop != NULL) {
 138     Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)archived_oop, len);
 139     MetaspaceShared::relocate_klass_ptr(archived_oop);
 140     ArchivedObjectCache* cache = archived_object_cache();
 141     cache->put(obj, archived_oop);
 142     log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT,
 143                          p2i(obj), p2i(archived_oop));
 144   } else {
 145     log_error(cds, heap)(
 146       "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
 147       p2i(obj));
 148     vm_exit(1);
 149   }
 150   return archived_oop;
 151 }
 152 
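// Runtime helper: decodes a narrowOop stored in the archive and asks G1 to
// materialize the archived object so that it becomes 'known' to the GC.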
 153 oop HeapShared::materialize_archived_object(narrowOop v) {
 154   assert(archive_heap_region_fixed(),
 155          "must be called after archive heap regions are fixed");
 156   if (!CompressedOops::is_null(v)) {
 157     oop obj = HeapShared::decode_from_archive(v);
 158     return G1CollectedHeap::heap()->materialize_archived_object(obj);
 159   }
 160   return NULL;
 161 }
 162 
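// Archives the mirror of every collected Klass and, for InstanceKlasses, the
// resolved_references array. Called while dumping the open archive heap region.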
 163 void HeapShared::archive_klass_objects(Thread* THREAD) {
 164   GrowableArray<Klass*>* klasses = MetaspaceShared::collected_klasses();
 165   assert(klasses != NULL, "sanity");
 166   for (int i = 0; i < klasses->length(); i++) {
 167     Klass* k = klasses->at(i);
 168 
 169     // archive mirror object
 170     java_lang_Class::archive_mirror(k, CHECK);
 171 
 172     // archive the resolved_references array
 173     if (k->is_instance_klass()) {
 174       InstanceKlass* ik = InstanceKlass::cast(k);
 175       ik->constants()->archive_resolved_references(THREAD);
 176     }
 177   }
 178 }
 179 
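// Top-level dump-time entry point: copies objects into the closed and open archive
// heap regions and returns the resulting MemRegions in 'closed' and 'open'.
// Requires UseG1GC, UseCompressedOops and UseCompressedClassPointers.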
 180 void HeapShared::archive_java_heap_objects(GrowableArray<MemRegion> *closed,
 181                                            GrowableArray<MemRegion> *open) {
 182   if (!is_heap_object_archiving_allowed()) {
 183     if (log_is_enabled(Info, cds)) {
 184       log_info(cds)(
 185         "Archived java heap is not supported as UseG1GC, "
 186         "UseCompressedOops and UseCompressedClassPointers are required. "
 187         "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
 188         BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
 189         BOOL_TO_STR(UseCompressedClassPointers));
 190     }
 191     return;
 192   }
 193 
 194   G1HeapVerifier::verify_ready_for_archiving();
 195 
 196   {
 197     NoSafepointVerifier nsv;
 198 
 199     // Cache for recording where the archived objects are copied to
 200     create_archived_object_cache();
 201 
 202     tty->print_cr("Dumping objects to closed archive heap region ...");
 203     NOT_PRODUCT(StringTable::verify());
 204     copy_closed_archive_heap_objects(closed);
 205 
 206     tty->print_cr("Dumping objects to open archive heap region ...");
 207     copy_open_archive_heap_objects(open);
 208 
 209     destroy_archived_object_cache();
 210   }
 211 
 212   G1HeapVerifier::verify_archive_regions();
 213 }
 214 
 215 void HeapShared::copy_closed_archive_heap_objects(
 216                                     GrowableArray<MemRegion> * closed_archive) {
 217   assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");
 218 
 219   Thread* THREAD = Thread::current();
 220   G1CollectedHeap::heap()->begin_archive_alloc_range();
 221 
 222   // Archive interned string objects
 223   StringTable::write_to_archive();
 224 
 225   archive_object_subgraphs(closed_archive_subgraph_entry_fields,
 226                            num_closed_archive_subgraph_entry_fields,
 227                            true /* is_closed_archive */, THREAD);
 228 
 229   G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive,
 230                                                    os::vm_allocation_granularity());
 231 }
 232 
 233 void HeapShared::copy_open_archive_heap_objects(
 234                                     GrowableArray<MemRegion> * open_archive) {
 235   assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");
 236 
 237   Thread* THREAD = Thread::current();
 238   G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);
 239 
 240   java_lang_Class::archive_basic_type_mirrors(THREAD);
 241 
 242   archive_klass_objects(THREAD);
 243 
 244   archive_object_subgraphs(open_archive_subgraph_entry_fields,
 245                            num_open_archive_subgraph_entry_fields,
 246                            false /* is_closed_archive */,
 247                            THREAD);
 248 
 249   G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
 250                                                    os::vm_allocation_granularity());
 251 }
 252 
 253 void HeapShared::init_narrow_oop_decoding(address base, int shift) {
 254   _narrow_oop_base = base;
 255   _narrow_oop_shift = shift;
 256 }
 257 
 258 //
 259 // Subgraph archiving support
 260 //
 261 HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
 262 HeapShared::RunTimeKlassSubGraphInfoTable   HeapShared::_run_time_subgraph_info_table;
 263 
 264 // Get the subgraph_info for Klass k. A new subgraph_info is created if
 265 // there is no existing one for k. The subgraph_info records the relocated
 266 // Klass* of the original k.
 267 KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
 268   assert(DumpSharedSpaces, "dump time only");
 269   Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
 270   KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
 271   if (info == NULL) {
 272     _dump_time_subgraph_info_table->put(relocated_k, KlassSubGraphInfo(relocated_k));
 273     info = _dump_time_subgraph_info_table->get(relocated_k);
 274     ++ _dump_time_subgraph_info_table->_count;
 275   }
 276   return info;
 277 }
 278 
 279 // Add an entry field to the current KlassSubGraphInfo.
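// Each entry is recorded as a triple: (static field offset, encoded oop of the
// archived value, is_closed_archive flag); see the "% 3" assertions elsewhere in
// this file.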
 280 void KlassSubGraphInfo::add_subgraph_entry_field(
 281       int static_field_offset, oop v, bool is_closed_archive) {
 282   assert(DumpSharedSpaces, "dump time only");
 283   if (_subgraph_entry_fields == NULL) {
 284     _subgraph_entry_fields =
 285       new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, true);
 286   }
 287   _subgraph_entry_fields->append((juint)static_field_offset);
 288   _subgraph_entry_fields->append(CompressedOops::encode(v));
 289   _subgraph_entry_fields->append(is_closed_archive ? 1 : 0);
 290 }
 291 
 292 // Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
 293 // Only objects of boot classes can be included in the sub-graph.
 294 void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k, Klass *relocated_k) {
 295   assert(DumpSharedSpaces, "dump time only");
 296   assert(relocated_k == MetaspaceShared::get_relocated_klass(orig_k),
 297          "must be the relocated Klass in the shared space");
 298 
 299   if (_subgraph_object_klasses == NULL) {
 300     _subgraph_object_klasses =
 301       new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, true);
 302   }
 303 
 304   assert(relocated_k->is_shared(), "must be a shared class");
 305 
 306   if (_k == relocated_k) {
 307     // Don't add the Klass containing the sub-graph to its own klass
 308     // initialization list.
 309     return;
 310   }
 311 
 312   if (relocated_k->is_instance_klass()) {
 313     assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(),
 314           "must be boot class");
 315     // SystemDictionary::xxx_klass() are not updated, need to check
 316     // the original Klass*
 317     if (orig_k == SystemDictionary::String_klass() ||
 318         orig_k == SystemDictionary::Object_klass()) {
 319       // Initialized early during VM initialization. No need to be added
 320       // to the sub-graph object class list.
 321       return;
 322     }
 323   } else if (relocated_k->is_objArray_klass()) {
 324     Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass();
 325     if (abk->is_instance_klass()) {
 326       assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
 327             "must be boot class");
 328     }
 329     if (relocated_k == Universe::objectArrayKlassObj()) {
 330       // Initialized early during Universe::genesis. No need to be added
 331       // to the list.
 332       return;
 333     }
 334   } else {
 335     assert(relocated_k->is_typeArray_klass(), "must be");
 336     // Primitive type arrays are created early during Universe::genesis.
 337     return;
 338   }
 339 
 340   if (log_is_enabled(Debug, cds, heap)) {
 341     if (!_subgraph_object_klasses->contains(relocated_k)) {
 342       ResourceMark rm;
 343       log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
 344     }
 345   }
 346 
 347   _subgraph_object_klasses->append_if_missing(relocated_k);
 348 }
 349 
 350 // Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
 351 void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
 352   _k = info->klass();
 353   _entry_field_records = NULL;
 354   _subgraph_object_klasses = NULL;
 355 
 356   // populate the entry fields
 357   GrowableArray<juint>* entry_fields = info->subgraph_entry_fields();
 358   if (entry_fields != NULL) {
 359     int num_entry_fields = entry_fields->length();
 360     assert(num_entry_fields % 3 == 0, "sanity");
 361     _entry_field_records =
 362       MetaspaceShared::new_ro_array<juint>(num_entry_fields);
 363     for (int i = 0 ; i < num_entry_fields; i++) {
 364       _entry_field_records->at_put(i, entry_fields->at(i));
 365     }
 366   }
 367 
 368   // the Klasses of the objects in the sub-graphs
 369   GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
 370   if (subgraph_object_klasses != NULL) {
 371     int num_subgraphs_klasses = subgraph_object_klasses->length();
 372     _subgraph_object_klasses =
 373       MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses);
 374     for (int i = 0; i < num_subgraphs_klasses; i++) {
 375       Klass* subgraph_k = subgraph_object_klasses->at(i);
 376       if (log_is_enabled(Info, cds, heap)) {
 377         ResourceMark rm;
 378         log_info(cds, heap)(
 379           "Archived object klass %s (%2d) => %s",
 380           _k->external_name(), i, subgraph_k->external_name());
 381       }
 382       _subgraph_object_klasses->at_put(i, subgraph_k);
 383     }
 384   }
 385 }
 386 
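// Helper for write_subgraph_info_table(): for every KlassSubGraphInfo that has entry
// fields or object klasses, allocates an ArchivedKlassSubGraphInfoRecord in the RO
// region and adds it to the compact hashtable, keyed by the (relocated) Klass*.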
 387 struct CopyKlassSubGraphInfoToArchive : StackObj {
 388   CompactHashtableWriter* _writer;
 389   CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}
 390 
 391   bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
 392     if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
 393       ArchivedKlassSubGraphInfoRecord* record =
 394         (ArchivedKlassSubGraphInfoRecord*)MetaspaceShared::read_only_space_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
 395       record->init(&info);
 396 
 397       unsigned int hash = primitive_hash<Klass*>(klass);
 398       u4 delta = MetaspaceShared::object_delta_u4(record);
 399       _writer->add(hash, delta);
 400     }
 401     return true; // keep on iterating
 402   }
 403 };
 404 
 405 // Build the records of archived subgraph infos, which include:
 406 // - Entry points to all subgraphs from the containing class mirror. The entry
 407 //   points are static fields in the mirror. For each entry point, the field
 408 //   offset, value and is_closed_archive flag are recorded in the sub-graph
 409 //   info. The value is stored back to the corresponding field at runtime.
 410 // - A list of klasses that need to be loaded/initialized before the archived
 411 //   java object sub-graph can be accessed at runtime.
 412 void HeapShared::write_subgraph_info_table() {
 413   // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
 414   DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
 415   CompactHashtableStats stats;
 416 
 417   _run_time_subgraph_info_table.reset();
 418 
 419   int num_buckets = CompactHashtableWriter::default_num_buckets(d_table->_count);
 420   CompactHashtableWriter writer(num_buckets, &stats);
 421   CopyKlassSubGraphInfoToArchive copy(&writer);
 422   d_table->iterate(&copy);
 423 
 424   writer.dump(&_run_time_subgraph_info_table, "subgraphs");
 425 }
 426 
 427 void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) {
 428   _run_time_subgraph_info_table.serialize_header(soc);
 429 }
 430 
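// Runtime side of subgraph archiving: looks up the archived record for 'k', loads
// and initializes the klasses recorded for the subgraph, then installs the archived
// entry field values into the corresponding static fields of k's mirror.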
 431 void HeapShared::initialize_from_archived_subgraph(Klass* k) {
 432   if (!open_archive_heap_region_mapped()) {
 433     return; // nothing to do
 434   }
 435   assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
 436 
 437   unsigned int hash = primitive_hash<Klass*>(k);
 438   const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
 439 
 440   // Initialize from archived data. Currently this is done only
 441   // during VM initialization time. No lock is needed.
 442   if (record != NULL) {
 443     Thread* THREAD = Thread::current();
 444 
 445     int i;
 446     // Load/link/initialize the klasses of the objects in the subgraph.
 447     // NULL class loader is used.
 448     Array<Klass*>* klasses = record->subgraph_object_klasses();
 449     if (klasses != NULL) {
 450       for (i = 0; i < klasses->length(); i++) {
 451         Klass* obj_k = klasses->at(i);
 452         Klass* resolved_k = SystemDictionary::resolve_or_null(
 453                                               (obj_k)->name(), THREAD);
 454         if (resolved_k != obj_k) {
 455           assert(!SystemDictionary::is_well_known_klass(resolved_k),
 456                  "shared well-known classes must not be replaced by JVMTI ClassFileLoadHook");
 457           ResourceMark rm(THREAD);
 458           log_info(cds, heap)("Failed to load subgraph because %s was not loaded from archive",
 459                               resolved_k->external_name());
 460           return;
 461         }
 462         if ((obj_k)->is_instance_klass()) {
 463           InstanceKlass* ik = InstanceKlass::cast(obj_k);
 464           ik->initialize(THREAD);
 465         } else if ((obj_k)->is_objArray_klass()) {
 466           ObjArrayKlass* oak = ObjArrayKlass::cast(obj_k);
 467           oak->initialize(THREAD);
 468         }
 469       }
 470     }
 471 
 472     if (HAS_PENDING_EXCEPTION) {
 473       CLEAR_PENDING_EXCEPTION;
 474       // None of the field values will be set if there was an exception.
 475       // The java code will not see any of the archived objects in the
 476       // subgraphs referenced from k in this case.
 477       return;
 478     }
 479 
 480     // Load the subgraph entry fields from the record and store them back to
 481     // the corresponding fields within the mirror.
 482     oop m = k->java_mirror();
 483     Array<juint>* entry_field_records = record->entry_field_records();
 484     if (entry_field_records != NULL) {
 485       int efr_len = entry_field_records->length();
 486       assert(efr_len % 3 == 0, "sanity");
 487       for (i = 0; i < efr_len;) {
 488         int field_offset = entry_field_records->at(i);
 489         narrowOop nv = entry_field_records->at(i+1);
 490         int is_closed_archive = entry_field_records->at(i+2);
 491         oop v;
 492         if (is_closed_archive == 0) {
 493           // It's an archived object in the open archive heap regions, not shared.
 494           // The object referenced by the field becomes 'known' by GC from this
 495           // point. All objects in the subgraph reachable from the object are
 496           // also 'known' by GC.
 497           v = materialize_archived_object(nv);
 498         } else {
 499           // Shared object in the closed archive heap regions. Decode directly.
 500           assert(!CompressedOops::is_null(nv), "shared object is null");
 501           v = HeapShared::decode_from_archive(nv);
 502         }
 503         m->obj_field_put(field_offset, v);
 504         i += 3;
 505 
 506         log_debug(cds, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
 507       }
 508 
 509       // Done. Java code can see the archived sub-graphs referenced from k's
 510       // mirror after this point.
 511       if (log_is_enabled(Info, cds, heap)) {
 512         ResourceMark rm;
 513         log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT,
 514                             k->external_name(), p2i(k));
 515       }
 516     }
 517   }
 518 }
 519 
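// Closure used by archive_reachable_objects_from(): visits each reference field of
// the original object, recursively archives the referenced object, and patches the
// corresponding field in the archived copy to point to the archived referent.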
 520 class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
 521   int _level;
 522   bool _is_closed_archive;
 523   bool _record_klasses_only;
 524   KlassSubGraphInfo* _subgraph_info;
 525   oop _orig_referencing_obj;
 526   oop _archived_referencing_obj;
 527   Thread* _thread;
 528  public:
 529   WalkOopAndArchiveClosure(int level,
 530                            bool is_closed_archive,
 531                            bool record_klasses_only,
 532                            KlassSubGraphInfo* subgraph_info,
 533                            oop orig, oop archived, TRAPS) :
 534     _level(level), _is_closed_archive(is_closed_archive),
 535     _record_klasses_only(record_klasses_only),
 536     _subgraph_info(subgraph_info),
 537     _orig_referencing_obj(orig), _archived_referencing_obj(archived),
 538     _thread(THREAD) {}
 539   void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
 540   void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
 541 
 542  protected:
 543   template <class T> void do_oop_work(T *p) {
 544     oop obj = RawAccess<>::oop_load(p);
 545     if (!CompressedOops::is_null(obj)) {
 546       assert(!HeapShared::is_archived_object(obj),
 547              "original objects must not point to archived objects");
 548 
 549       size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
 550       T* new_p = (T*)(address(_archived_referencing_obj) + field_delta);
 551       Thread* THREAD = _thread;
 552 
 553       if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
 554         ResourceMark rm;
 555         log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size %d %s", _level,
 556                              _orig_referencing_obj->klass()->external_name(), field_delta,
 557                              p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
 558         LogTarget(Trace, cds, heap) log;
 559         LogStream out(log);
 560         obj->print_on(&out);
 561       }
 562 
 563       oop archived = HeapShared::archive_reachable_objects_from(
 564           _level + 1, _subgraph_info, obj, _is_closed_archive, THREAD);
 565       assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1");
 566       assert(HeapShared::is_archived_object(archived), "must be");
 567 
 568       if (!_record_klasses_only) {
 569         // Update the reference in the archived copy of the referencing object.
 570         log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
 571                              _level, p2i(new_p), p2i(obj), p2i(archived));
 572         RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
 573       }
 574     }
 575   }
 576 };
 577 
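// Objects in the closed archive heap region must not have reference fields that can
// be modified at runtime, so warn about any non-final, non-static reference field.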
 578 void HeapShared::check_closed_archive_heap_region_object(InstanceKlass* k,
 579                                                          Thread* THREAD) {
 580   // Check fields in the object
 581   for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
 582     if (!fs.access_flags().is_static()) {
 583       BasicType ft = fs.field_descriptor().field_type();
 584       if (!fs.access_flags().is_final() && (ft == T_ARRAY || ft == T_OBJECT)) {
 585         ResourceMark rm(THREAD);
 586         log_warning(cds, heap)(
 587           "Please check reference field in %s instance in closed archive heap region: %s %s",
 588           k->external_name(), (fs.name())->as_C_string(),
 589           (fs.signature())->as_C_string());
 590       }
 591     }
 592   }
 593 }
 594 
 595 // (1) If orig_obj has not been archived yet, archive it.
 596 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
 597 //     trace all objects that are reachable from it, and make sure these objects are archived.
 598 // (3) Record the klasses of all orig_obj and all reachable objects.
 599 oop HeapShared::archive_reachable_objects_from(int level,
 600                                                KlassSubGraphInfo* subgraph_info,
 601                                                oop orig_obj,
 602                                                bool is_closed_archive,
 603                                                TRAPS) {
 604   assert(orig_obj != NULL, "must be");
 605   assert(!is_archived_object(orig_obj), "sanity");
 606 
 607   // java.lang.Class instances cannot be included in an archived
 608   // object sub-graph.
 609   if (java_lang_Class::is_instance(orig_obj)) {
 610     log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
 611     vm_exit(1);
 612   }
 613 
 614   oop archived_obj = find_archived_heap_object(orig_obj);
 615   if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
 616     // To save time, don't walk strings that are already archived. They just contain
 617     // pointers to a type array, whose klass doesn't need to be recorded.
 618     return archived_obj;
 619   }
 620 
 621   if (has_been_seen_during_subgraph_recording(orig_obj)) {
 622     // orig_obj has already been archived and traced. Nothing more to do.
 623     return archived_obj;
 624   } else {
 625     set_has_been_seen_during_subgraph_recording(orig_obj);
 626   }
 627 
 628   bool record_klasses_only = (archived_obj != NULL);
 629   if (archived_obj == NULL) {
 630     ++_num_new_archived_objs;
 631     archived_obj = archive_heap_object(orig_obj, THREAD);
 632     if (archived_obj == NULL) {
 633       // Skip archiving the sub-graph referenced from the current entry field.
 634       ResourceMark rm;
 635       log_error(cds, heap)(
 636         "Cannot archive the sub-graph referenced from %s object ("
 637         PTR_FORMAT ") size %d, skipped.",
 638         orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
 639       if (level == 1) {
 640         // Don't archive a subgraph root that's too big. For archived static fields, that's OK
 641         // as the Java code will take care of initializing this field dynamically.
 642         return NULL;
 643       } else {
 644         // We don't know how to handle an object that has been archived, but some of its reachable
 645         // objects cannot be archived. Bail out for now. We might need to fix this in the future if
 646         // we have a real use case.
 647         vm_exit(1);
 648       }
 649     }
 650   }
 651 
 652   assert(archived_obj != NULL, "must be");
 653   Klass *orig_k = orig_obj->klass();
 654   Klass *relocated_k = archived_obj->klass();
 655   subgraph_info->add_subgraph_object_klass(orig_k, relocated_k);
 656 
 657   WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
 658                                   subgraph_info, orig_obj, archived_obj, THREAD);
 659   orig_obj->oop_iterate(&walker);
 660   if (is_closed_archive && orig_k->is_instance_klass()) {
 661     check_closed_archive_heap_region_object(InstanceKlass::cast(orig_k), THREAD);
 662   }
 663   return archived_obj;
 664 }
 665 
 666 //
 667 // Start from the given static field in a java mirror and archive the
 668 // complete sub-graph of java heap objects that are reached directly
 669 // or indirectly from the starting object by following references.
 670 // Sub-graph archiving restrictions (current):
 671 //
 672 // - All classes of objects in the archived sub-graph (including the
 673 //   entry class) must be boot class only.
 674 // - No java.lang.Class instance (java mirror) can be included inside
 675 //   an archived sub-graph. Mirror can only be the sub-graph entry object.
 676 //
 677 // The Java heap object sub-graph archiving process (see
 678 // WalkOopAndArchiveClosure):
 679 //
 680 // 1) Java object sub-graph archiving starts from a given static field
 681 // within a Class instance (java mirror). If the static field is a
 682 // reference field and points to a non-null java object, proceed to
 683 // the next step.
 684 //
 685 // 2) Archives the referenced java object. If an archived copy of the
 686 // current object already exists, updates the pointer in the archived
 687 // copy of the referencing object to point to the current archived object.
 688 // Otherwise, proceed to the next step.
 689 //
 690 // 3) Follows all references within the current java object and recursively
 691 // archive the sub-graph of objects starting from each reference.
 692 //
 693 // 4) Updates the pointer in the archived copy of referencing object to
 694 // point to the current archived object.
 695 //
 696 // 5) The Klass of the current java object is added to the list of Klasses
 697 // for loading and initializing before any object in the archived graph can
 698 // be accessed at runtime.
 699 //
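// For example, for the java/lang/Integer$IntegerCache::archivedCache entry listed
// in closed_archive_subgraph_entry_fields above, this walks the Integer[] array,
// archives the array and every cached Integer object it references, records
// Integer and Integer[] in the subgraph's klass list, and stores the field offset
// together with the (encoded) archived array as a subgraph entry field.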
 700 void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
 701                                                              const char* klass_name,
 702                                                              int field_offset,
 703                                                              const char* field_name,
 704                                                              bool is_closed_archive,
 705                                                              TRAPS) {
 706   assert(DumpSharedSpaces, "dump time only");
 707   assert(k->is_shared_boot_class(), "must be boot class");
 708 
 709   oop m = k->java_mirror();
 710 
 711   KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
 712   oop f = m->obj_field(field_offset);
 713 
 714   log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));
 715 
 716   if (!CompressedOops::is_null(f)) {
 717     if (log_is_enabled(Trace, cds, heap)) {
 718       LogTarget(Trace, cds, heap) log;
 719       LogStream out(log);
 720       f->print_on(&out);
 721     }
 722 
 723     oop af = archive_reachable_objects_from(1, subgraph_info, f,
 724                                             is_closed_archive, CHECK);
 725 
 726     if (af == NULL) {
 727       log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
 728                            klass_name, field_name);
 729     } else {
 730       // Note: the field value is not preserved in the archived mirror.
 731       // Record the field as a new subGraph entry point. The recorded
 732       // information is restored from the archive at runtime.
 733       subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
 734       log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
 735     }
 736   } else {
 737     // The field contains null, we still need to record the entry point,
 738     // so it can be restored at runtime.
 739     subgraph_info->add_subgraph_entry_field(field_offset, NULL, false);
 740   }
 741 }
 742 
 743 #ifndef PRODUCT
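// Debug-only closure that follows every non-null reference from an object and checks
// via HeapShared::verify_reachable_objects_from() that the referent is consistent
// with being (or having) an archived copy.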
 744 class VerifySharedOopClosure: public BasicOopIterateClosure {
 745  private:
 746   bool _is_archived;
 747 
 748  public:
 749   VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}
 750 
 751   void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
 752   void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }
 753 
 754  protected:
 755   template <class T> void do_oop_work(T *p) {
 756     oop obj = RawAccess<>::oop_load(p);
 757     if (!CompressedOops::is_null(obj)) {
 758       HeapShared::verify_reachable_objects_from(obj, _is_archived);
 759     }
 760   }
 761 };
 762 
 763 void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
 764   assert(DumpSharedSpaces, "dump time only");
 765   assert(k->is_shared_boot_class(), "must be boot class");
 766 
 767   oop m = k->java_mirror();
 768   oop f = m->obj_field(field_offset);
 769   if (!CompressedOops::is_null(f)) {
 770     verify_subgraph_from(f);
 771   }
 772 }
 773 
 774 void HeapShared::verify_subgraph_from(oop orig_obj) {
 775   oop archived_obj = find_archived_heap_object(orig_obj);
 776   if (archived_obj == NULL) {
 777     // It's OK for the root of a subgraph to be not archived. See comments in
 778     // archive_reachable_objects_from().
 779     return;
 780   }
 781 
 782   // Verify that all objects reachable from orig_obj are archived.
 783   init_seen_objects_table();
 784   verify_reachable_objects_from(orig_obj, false);
 785   delete_seen_objects_table();
 786 
 787   // Note: we could also verify that all objects reachable from the archived
 788   // copy of orig_obj can only point to archived objects, with:
 789   //      init_seen_objects_table();
 790   //      verify_reachable_objects_from(archived_obj, true);
 791   //      delete_seen_objects_table();
 792   // but that's already done in G1HeapVerifier::verify_archive_regions so we
 793   // won't do it here.
 794 }
 795 
 796 void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
 797   _num_total_verifications ++;
 798   if (!has_been_seen_during_subgraph_recording(obj)) {
 799     set_has_been_seen_during_subgraph_recording(obj);
 800 
 801     if (is_archived) {
 802       assert(is_archived_object(obj), "must be");
 803       assert(find_archived_heap_object(obj) == NULL, "must be");
 804     } else {
 805       assert(!is_archived_object(obj), "must be");
 806       assert(find_archived_heap_object(obj) != NULL, "must be");
 807     }
 808 
 809     VerifySharedOopClosure walker(is_archived);
 810     obj->oop_iterate(&walker);
 811   }
 812 }
 813 #endif
 814 
 815 HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
 816 int HeapShared::_num_new_walked_objs;
 817 int HeapShared::_num_new_archived_objs;
 818 int HeapShared::_num_old_recorded_klasses;
 819 
 820 int HeapShared::_num_total_subgraph_recordings = 0;
 821 int HeapShared::_num_total_walked_objs = 0;
 822 int HeapShared::_num_total_archived_objs = 0;
 823 int HeapShared::_num_total_recorded_klasses = 0;
 824 int HeapShared::_num_total_verifications = 0;
 825 
 826 bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
 827   return _seen_objects_table->get(obj) != NULL;
 828 }
 829 
 830 void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
 831   assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
 832   _seen_objects_table->put(obj, true);
 833   ++ _num_new_walked_objs;
 834 }
 835 
 836 void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name) {
 837   log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
 838   init_seen_objects_table();
 839   _num_new_walked_objs = 0;
 840   _num_new_archived_objs = 0;
 841   _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
 842 }
 843 
 844 void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
 845   int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
 846     _num_old_recorded_klasses;
 847   log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
 848                       "walked %d objs, archived %d new objs, recorded %d classes",
 849                       class_name, _num_new_walked_objs, _num_new_archived_objs,
 850                       num_new_recorded_klasses);
 851 
 852   delete_seen_objects_table();
 853 
 854   _num_total_subgraph_recordings ++;
 855   _num_total_walked_objs      += _num_new_walked_objs;
 856   _num_total_archived_objs    += _num_new_archived_objs;
 857   _num_total_recorded_klasses +=  num_new_recorded_klasses;
 858 }
 859 
 860 class ArchivableStaticFieldFinder: public FieldClosure {
 861   InstanceKlass* _ik;
 862   Symbol* _field_name;
 863   bool _found;
 864   int _offset;
 865 public:
 866   ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
 867     _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}
 868 
 869   virtual void do_field(fieldDescriptor* fd) {
 870     if (fd->name() == _field_name) {
 871       assert(!_found, "fields cannot be overloaded");
 872       assert(fd->field_type() == T_OBJECT || fd->field_type() == T_ARRAY, "can archive only obj or array fields");
 873       _found = true;
 874       _offset = fd->offset();
 875     }
 876   }
 877   bool found()     { return _found;  }
 878   int offset()     { return _offset; }
 879 };
 880 
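// Resolves each {klass_name, field_name} pair in 'fields' to an InstanceKlass and a
// static field offset (initializing the class in the process), so the subgraphs can
// later be archived by archive_object_subgraphs().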
 881 void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
 882                                             int num, Thread* THREAD) {
 883   for (int i = 0; i < num; i++) {
 884     ArchivableStaticFieldInfo* info = &fields[i];
 885     TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name, THREAD);
 886     TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name, THREAD);
 887 
 888     Klass* k = SystemDictionary::resolve_or_null(klass_name, THREAD);
 889     assert(k != NULL && !HAS_PENDING_EXCEPTION, "class must exist");
 890     InstanceKlass* ik = InstanceKlass::cast(k);
 891     assert(ik->is_shared_boot_class(),
 892            "Only support boot classes");
 893     ik->initialize(THREAD);
 894     guarantee(!HAS_PENDING_EXCEPTION, "exception in initialize");
 895 
 896     ArchivableStaticFieldFinder finder(ik, field_name);
 897     ik->do_local_static_fields(&finder);
 898     assert(finder.found(), "field must exist");
 899 
 900     info->klass = ik;
 901     info->offset = finder.offset();
 902   }
 903 }
 904 
 905 void HeapShared::init_subgraph_entry_fields(Thread* THREAD) {
 906   _dump_time_subgraph_info_table = new (ResourceObj::C_HEAP, mtClass)DumpTimeKlassSubGraphInfoTable();
 907 
 908   init_subgraph_entry_fields(closed_archive_subgraph_entry_fields,
 909                              num_closed_archive_subgraph_entry_fields,
 910                              THREAD);
 911   init_subgraph_entry_fields(open_archive_subgraph_entry_fields,
 912                              num_open_archive_subgraph_entry_fields,
 913                              THREAD);
 914 }
 915 
 916 void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
 917                                           int num, bool is_closed_archive,
 918                                           Thread* THREAD) {
 919   _num_total_subgraph_recordings = 0;
 920   _num_total_walked_objs = 0;
 921   _num_total_archived_objs = 0;
 922   _num_total_recorded_klasses = 0;
 923   _num_total_verifications = 0;
 924 
 925   // For each class X that has one or more archived fields:
 926   // [1] Dump the subgraph of each archived field
 927   // [2] Create a list of all the classes of the objects that can be reached
 928   //     by any of these static fields.
 929   //     At runtime, these classes are initialized before X's archived fields
 930   //     are restored by HeapShared::initialize_from_archived_subgraph().
 931   int i;
 932   for (i = 0; i < num; ) {
 933     ArchivableStaticFieldInfo* info = &fields[i];
 934     const char* klass_name = info->klass_name;
 935     start_recording_subgraph(info->klass, klass_name);
 936 
 937     // If you have specified consecutive fields of the same klass in
 938     // fields[], these will be archived in the same
 939     // {start_recording_subgraph ... done_recording_subgraph} pass to
 940     // save time.
 941     for (; i < num; i++) {
 942       ArchivableStaticFieldInfo* f = &fields[i];
 943       if (f->klass_name != klass_name) {
 944         break;
 945       }
 946       archive_reachable_objects_from_static_field(f->klass, f->klass_name,
 947                                                   f->offset, f->field_name,
 948                                                   is_closed_archive, CHECK);
 949     }
 950     done_recording_subgraph(info->klass, klass_name);
 951   }
 952 
 953   log_info(cds, heap)("Archived subgraph records in %s archive heap region = %d",
 954                       is_closed_archive ? "closed" : "open",
 955                       _num_total_subgraph_recordings);
 956   log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
 957   log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
 958   log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);
 959 
 960 #ifndef PRODUCT
 961   for (int i = 0; i < num; i++) {
 962     ArchivableStaticFieldInfo* f = &fields[i];
 963     verify_subgraph_from_static_field(f->klass, f->offset);
 964   }
 965   log_info(cds, heap)("  Verified %d references", _num_total_verifications);
 966 #endif
 967 }
 968 
 969 // At dump-time, find the location of all the non-null oop pointers in an archived heap
 970 // region. This way we can quickly relocate all the pointers without using
 971 // BasicOopIterateClosure at runtime.
 972 class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
 973   narrowOop* _start;
 974   BitMap *_oopmap;
 975   int _num_total_oops;
 976   int _num_null_oops;
 977  public:
 978   FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap)
 979     : _start(start), _oopmap(oopmap), _num_total_oops(0),  _num_null_oops(0) {}
 980 
 981   virtual bool should_verify_oops(void) {
 982     return false;
 983   }
 984   virtual void do_oop(narrowOop* p) {
 985     _num_total_oops ++;
 986     narrowOop v = *p;
 987     if (!CompressedOops::is_null(v)) {
 988       size_t idx = p - _start;
 989       _oopmap->set_bit(idx);
 990     } else {
 991       _num_null_oops ++;
 992     }
 993   }
 994   virtual void do_oop(oop *p) {
 995     ShouldNotReachHere();
 996   }
 997   int num_total_oops() const { return _num_total_oops; }
 998   int num_null_oops()  const { return _num_null_oops; }
 999 };
1000 
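// Builds a bitmap with one bit per narrowOop-sized slot in 'region'; a set bit marks
// a non-null embedded oop that needs to be patched when the region is mapped at
// runtime (see patch_archived_heap_embedded_pointers()).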
1001 ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
1002   assert(UseCompressedOops, "must be");
1003   size_t num_bits = region.byte_size() / sizeof(narrowOop);
1004   ResourceBitMap oopmap(num_bits);
1005 
1006   HeapWord* p   = region.start();
1007   HeapWord* end = region.end();
1008   FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap);
1009 
1010   int num_objs = 0;
1011   while (p < end) {
1012     oop o = (oop)p;
1013     o->oop_iterate(&finder);
1014     p += o->size();
1015     ++ num_objs;
1016   }
1017 
1018   log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d",
1019                       num_objs, finder.num_total_oops(), finder.num_null_oops());
1020   return oopmap;
1021 }
1022 
1023 // Patch all the embedded oop pointers inside an archived heap region,
1024 // to be consistent with the runtime oop encoding.
1025 class PatchEmbeddedPointers: public BitMapClosure {
1026   narrowOop* _start;
1027 
1028  public:
1029   PatchEmbeddedPointers(narrowOop* start) : _start(start) {}
1030 
1031   bool do_bit(size_t offset) {
1032     narrowOop* p = _start + offset;
1033     narrowOop v = *p;
1034     assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
1035     oop o = HeapShared::decode_from_archive(v);
1036     RawAccess<IS_NOT_NULL>::oop_store(p, o);
1037     return true;
1038   }
1039 };
1040 
1041 void HeapShared::patch_archived_heap_embedded_pointers(MemRegion region, address oopmap,
1042                                                        size_t oopmap_size_in_bits) {
1043   BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);
1044 
1045 #ifndef PRODUCT
1046   ResourceMark rm;
1047   ResourceBitMap checkBm = calculate_oopmap(region);
1048   assert(bm.is_same(checkBm), "sanity");
1049 #endif
1050 
1051   PatchEmbeddedPointers patcher((narrowOop*)region.start());
1052   bm.iterate(&patcher);
1053 }
1054 
1055 #endif // INCLUDE_CDS_JAVA_HEAP