/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "utilities/bitMap.inline.hpp"

#if INCLUDE_CDS_JAVA_HEAP
address   HeapShared::_narrow_oop_base;
int       HeapShared::_narrow_oop_shift;
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
HeapShared::RunTimeKlassSubGraphInfoTable   HeapShared::_run_time_subgraph_info_table;

// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the relocated
// Klass* of the original k.
KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
  if (info == NULL) {
    _dump_time_subgraph_info_table->put(relocated_k, KlassSubGraphInfo(relocated_k));
    info = _dump_time_subgraph_info_table->get(relocated_k);
    ++ _dump_time_subgraph_info_table->_count;
  }
  return info;
}

// Add an entry field to the current KlassSubGraphInfo.
void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) {
  assert(DumpSharedSpaces, "dump time only");
  if (_subgraph_entry_fields == NULL) {
    _subgraph_entry_fields =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, true);
  }
  _subgraph_entry_fields->append((juint)static_field_offset);
  _subgraph_entry_fields->append(CompressedOops::encode(v));
}
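
// For illustration (hypothetical offsets): a class with two archived static
// fields at offsets 0x70 and 0x78 ends up with _subgraph_entry_fields laid
// out as four juints -- { 0x70, <narrowOop>, 0x78, <narrowOop> } -- i.e.,
// (offset, encoded value) pairs, which is why the length must stay even.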

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in the sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k, Klass *relocated_k) {
  assert(DumpSharedSpaces, "dump time only");
  assert(relocated_k == MetaspaceShared::get_relocated_klass(orig_k),
         "must be the relocated Klass in the shared space");

  if (_subgraph_object_klasses == NULL) {
    _subgraph_object_klasses =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, true);
  }

  assert(relocated_k->is_shared(), "must be a shared class");

  if (_k == relocated_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (relocated_k->is_instance_klass()) {
    assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(),
          "must be boot class");
    // The SystemDictionary::xxx_klass() pointers are not updated during
    // dumping, so check against the original Klass*.
    if (orig_k == SystemDictionary::String_klass() ||
        orig_k == SystemDictionary::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
  } else if (relocated_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
            "must be boot class");
    }
    if (relocated_k == Universe::objectArrayKlassObj()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(relocated_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(relocated_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(relocated_k);
}
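
// For illustration only (a hypothetical walk, not an exhaustive trace): when
// archiving java/lang/Integer$IntegerCache::archivedCache, the sub-graph walk
// records Integer[] and Integer here, while klasses such as String, Object,
// Object[] and the primitive array klasses are filtered out above because
// they are guaranteed to be initialized before any archived sub-graph is
// installed at runtime.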

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = info->klass();
  _entry_field_records = NULL;
  _subgraph_object_klasses = NULL;

  // populate the entry fields
  GrowableArray<juint>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != NULL) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 2 == 0, "sanity");
    _entry_field_records =
      MetaspaceShared::new_ro_array<juint>(num_entry_fields);
    for (int i = 0; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
  }

  // the Klasses of the objects in the sub-graphs
  GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
  if (subgraph_object_klasses != NULL) {
    int num_subgraphs_klasses = subgraph_object_klasses->length();
    _subgraph_object_klasses =
      MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses);
    for (int i = 0; i < num_subgraphs_klasses; i++) {
      Klass* subgraph_k = subgraph_object_klasses->at(i);
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)(
          "Archived object klass %s (%2d) => %s",
          _k->external_name(), i, subgraph_k->external_name());
      }
      _subgraph_object_klasses->at_put(i, subgraph_k);
    }
  }
}

struct CopyKlassSubGraphInfoToArchive : StackObj {
  CompactHashtableWriter* _writer;
  CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}

  bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
    if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
      ArchivedKlassSubGraphInfoRecord* record =
        (ArchivedKlassSubGraphInfoRecord*)MetaspaceShared::read_only_space_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
      record->init(&info);

      unsigned int hash = primitive_hash<Klass*>(klass);
      uintx deltax = MetaspaceShared::object_delta(record);
      guarantee(deltax <= MAX_SHARED_DELTA, "must not be");
      u4 delta = u4(deltax);
      _writer->add(hash, delta);
    }
    return true; // keep on iterating
  }
};

// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class mirror. The entry
//   points are static fields in the mirror. For each entry point, the field
//   offset and value are recorded in the sub-graph info. The values are stored
//   back to the corresponding fields at runtime.
// - A list of klasses that need to be loaded/initialized before the archived
//   java object sub-graphs can be accessed at runtime.
void HeapShared::create_hashtables() {
  // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
  DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
  CompactHashtableStats stats;

  _run_time_subgraph_info_table.reset();

  int num_buckets = CompactHashtableWriter::default_num_buckets(d_table->_count);
  CompactHashtableWriter writer(num_buckets, &stats);
  CopyKlassSubGraphInfoToArchive copy(&writer);
  _dump_time_subgraph_info_table->iterate(&copy);

  writer.dump(&_run_time_subgraph_info_table, "subgraphs");
}

// Read/write the headers of the hashtable(s) so they can be accessed quickly at runtime.
void HeapShared::serialize_hashtables(SerializeClosure* soc) {
  _run_time_subgraph_info_table.serialize(soc);
}

void HeapShared::initialize_from_archived_subgraph(Klass* k) {
  if (!MetaspaceShared::open_archive_heap_region_mapped()) {
    return; // nothing to do
  }

  unsigned int hash = primitive_hash<Klass*>(k);
  ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

  // Initialize from archived data. Currently this is done only
  // during VM initialization. No lock is needed.
  if (record != NULL) {
    Thread* THREAD = Thread::current();
    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm;
      log_info(cds, heap)("initialize_from_archived_subgraph " PTR_FORMAT " %s", p2i(k),
                          k->external_name());
    }

    int i;
    // Load/link/initialize the klasses of the objects in the subgraph.
    // The NULL class loader is used.
    Array<Klass*>* klasses = record->subgraph_object_klasses();
    if (klasses != NULL) {
      for (i = 0; i < klasses->length(); i++) {
        Klass* obj_k = klasses->at(i);
        Klass* resolved_k = SystemDictionary::resolve_or_null(
                                              (obj_k)->name(), THREAD);
        if (resolved_k != obj_k) {
          return;
        }
        if ((obj_k)->is_instance_klass()) {
          InstanceKlass* ik = InstanceKlass::cast(obj_k);
          ik->initialize(THREAD);
        } else if ((obj_k)->is_objArray_klass()) {
          ObjArrayKlass* oak = ObjArrayKlass::cast(obj_k);
          oak->initialize(THREAD);
        }
      }
    }

    if (HAS_PENDING_EXCEPTION) {
      CLEAR_PENDING_EXCEPTION;
      // None of the field values will be set if there was an exception.
      // The java code will not see any of the archived objects in the
      // subgraphs referenced from k in this case.
      return;
    }

    // Load the subgraph entry fields from the record and store them back to
    // the corresponding fields within the mirror.
    oop m = k->java_mirror();
    Array<juint>* entry_field_records = record->entry_field_records();
    if (entry_field_records != NULL) {
      int efr_len = entry_field_records->length();
      assert(efr_len % 2 == 0, "sanity");
      for (i = 0; i < efr_len;) {
        int field_offset = entry_field_records->at(i);
        // The object referenced by the field becomes 'known' by GC from this
        // point. All objects in the subgraph reachable from the object are
        // also 'known' by GC.
        oop v = MetaspaceShared::materialize_archived_object(
            entry_field_records->at(i+1));
        m->obj_field_put(field_offset, v);
        i += 2;

        log_debug(cds, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
      }

      // Done. Java code can see the archived sub-graphs referenced from k's
      // mirror after this point.
    }
  }
}
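
// Sketch of the expected calling pattern from the Java side (illustrative;
// see jdk.internal.misc.VM.initializeFromArchive(Class<?>), which reaches
// this method via JVM_InitializeFromArchive). A class with archived statics
// typically does, in its static initializer:
//
//     VM.initializeFromArchive(IntegerCache.class); // may restore archivedCache
//     if (archivedCache == null) {
//         // archive not mapped or restore failed: compute the cache normally
//     }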

class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
  int _level;
  bool _record_klasses_only;
  KlassSubGraphInfo* _subgraph_info;
  oop _orig_referencing_obj;
  oop _archived_referencing_obj;
  Thread* _thread;
 public:
  WalkOopAndArchiveClosure(int level, bool record_klasses_only,
                           KlassSubGraphInfo* subgraph_info,
                           oop orig, oop archived, TRAPS) :
    _level(level), _record_klasses_only(record_klasses_only),
    _subgraph_info(subgraph_info),
    _orig_referencing_obj(orig), _archived_referencing_obj(archived),
    _thread(THREAD) {}
  void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
  void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      assert(!MetaspaceShared::is_archive_object(obj),
             "original objects must not point to archived objects");

      size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
      T* new_p = (T*)(address(_archived_referencing_obj) + field_delta);
      Thread* THREAD = _thread;

      if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
        ResourceMark rm;
        log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size %d %s", _level,
                             _orig_referencing_obj->klass()->external_name(), field_delta,
                             p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
        LogTarget(Trace, cds, heap) log;
        LogStream out(log);
        obj->print_on(&out);
      }

      oop archived = HeapShared::archive_reachable_objects_from(_level + 1, _subgraph_info, obj, THREAD);
      assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1");
      assert(MetaspaceShared::is_archive_object(archived), "must be");

      if (!_record_klasses_only) {
        // Update the reference in the archived copy of the referencing object.
        log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
                             _level, p2i(new_p), p2i(obj), p2i(archived));
        RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
      }
    }
  }
};
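
// A worked example of the field_delta arithmetic above (hypothetical
// addresses): if the original object lives at 0x1000 and a reference field
// sits at 0x1010, field_delta is 0x10, so the same field in the archived
// copy at 0x7f0000 is patched at 0x7f0010. The archived copy is a bitwise
// clone of the original, so identical byte offsets hold for every field.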

// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of orig_obj and all reachable objects.
oop HeapShared::archive_reachable_objects_from(int level, KlassSubGraphInfo* subgraph_info, oop orig_obj, TRAPS) {
  assert(orig_obj != NULL, "must be");
  assert(!MetaspaceShared::is_archive_object(orig_obj), "sanity");

  // java.lang.Class instances cannot be included in an archived
  // object sub-graph.
  if (java_lang_Class::is_instance(orig_obj)) {
    log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
    vm_exit(1);
  }

  oop archived_obj = MetaspaceShared::find_archived_heap_object(orig_obj);
  if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
    // To save time, don't walk strings that are already archived. They just contain
    // pointers to a type array, whose klass doesn't need to be recorded.
    return archived_obj;
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return archived_obj;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool record_klasses_only = (archived_obj != NULL);
  if (archived_obj == NULL) {
    ++_num_new_archived_objs;
    archived_obj = MetaspaceShared::archive_heap_object(orig_obj, THREAD);
    if (archived_obj == NULL) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size %d, skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK,
        // as the Java code will take care of initializing this field dynamically.
        return NULL;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        vm_exit(1);
      }
    }
  }

  assert(archived_obj != NULL, "must be");
  Klass *orig_k = orig_obj->klass();
  Klass *relocated_k = archived_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k, relocated_k);

  WalkOopAndArchiveClosure walker(level, record_klasses_only, subgraph_info, orig_obj, archived_obj, THREAD);
  orig_obj->oop_iterate(&walker);
  return archived_obj;
}

//
// Start from the given static field in a java mirror and archive the
// complete sub-graph of java heap objects that are reached directly
// or indirectly from the starting object by following references.
// Sub-graph archiving restrictions (current):
//
// - All classes of objects in the archived sub-graph (including the
//   entry class) must be boot classes only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. A mirror can only be the sub-graph entry object.
//
// The Java heap object sub-graph archiving process (see
// WalkOopAndArchiveClosure):
//
// 1) Java object sub-graph archiving starts from a given static field
// within a Class instance (java mirror). If the static field is a
// reference field and points to a non-null java object, proceed to
// the next step.
//
// 2) Archives the referenced java object. If an archived copy of the
// current object already exists, updates the pointer in the archived
// copy of the referencing object to point to the current archived object.
// Otherwise, proceed to the next step.
//
// 3) Follows all references within the current java object and recursively
// archives the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of the referencing object to
// point to the current archived object.
//
// 5) The Klass of the current java object is added to the list of Klasses
// for loading and initializing before any object in the archived graph can
// be accessed at runtime.
//
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
                                                             const char* klass_name,
                                                             int field_offset,
                                                             const char* field_name,
                                                             TRAPS) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
  oop archived_m = MetaspaceShared::find_archived_heap_object(m);
  if (CompressedOops::is_null(archived_m)) {
    return;
  }

  KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
  oop f = m->obj_field(field_offset);

  log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));

  if (!CompressedOops::is_null(f)) {
    if (log_is_enabled(Trace, cds, heap)) {
      LogTarget(Trace, cds, heap) log;
      LogStream out(log);
      f->print_on(&out);
    }

    oop af = archive_reachable_objects_from(1, subgraph_info, f, CHECK);

    if (af == NULL) {
      log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
                           klass_name, field_name);
    } else {
      // Note: the field value is not preserved in the archived mirror.
      // Record the field as a new subGraph entry point. The recorded
      // information is restored from the archive at runtime.
      subgraph_info->add_subgraph_entry_field(field_offset, af);
      log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
    }
  } else {
    // The field contains null; we still need to record the entry point,
    // so it can be restored at runtime.
    subgraph_info->add_subgraph_entry_field(field_offset, NULL);
  }
}

#ifndef PRODUCT
class VerifySharedOopClosure: public BasicOopIterateClosure {
 private:
  bool _is_archived;

 public:
  VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}

  void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
  void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      HeapShared::verify_reachable_objects_from(obj, _is_archived);
    }
  }
};

void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
  oop archived_m = MetaspaceShared::find_archived_heap_object(m);
  if (CompressedOops::is_null(archived_m)) {
    return;
  }
  oop f = m->obj_field(field_offset);
  if (!CompressedOops::is_null(f)) {
    verify_subgraph_from(f);
  }
}

void HeapShared::verify_subgraph_from(oop orig_obj) {
  oop archived_obj = MetaspaceShared::find_archived_heap_object(orig_obj);
  if (archived_obj == NULL) {
    // It's OK for the root of a subgraph not to be archived. See comments in
    // archive_reachable_objects_from().
    return;
  }

  // Verify that all objects reachable from orig_obj are archived.
  init_seen_objects_table();
  verify_reachable_objects_from(orig_obj, false);
  delete_seen_objects_table();

  // Note: we could also verify that all objects reachable from the archived
  // copy of orig_obj can only point to archived objects, with:
  //      init_seen_objects_table();
  //      verify_reachable_objects_from(archived_obj, true);
  //      delete_seen_objects_table();
  // but that's already done in G1HeapVerifier::verify_archive_regions so we
  // won't do it here.
}

void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
  _num_total_verifications ++;
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);

    if (is_archived) {
      assert(MetaspaceShared::is_archive_object(obj), "must be");
      assert(MetaspaceShared::find_archived_heap_object(obj) == NULL, "must be");
    } else {
      assert(!MetaspaceShared::is_archive_object(obj), "must be");
      assert(MetaspaceShared::find_archived_heap_object(obj) != NULL, "must be");
    }

    VerifySharedOopClosure walker(is_archived);
    obj->oop_iterate(&walker);
  }
}
#endif

HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;

bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
  return _seen_objects_table->get(obj) != NULL;
}

void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
  assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
  _seen_objects_table->put(obj, true);
  ++ _num_new_walked_objs;
}

void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name) {
  log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
  init_seen_objects_table();
  _num_new_walked_objs = 0;
  _num_new_archived_objs = 0;
  _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
}

void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
  int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
    _num_old_recorded_klasses;
  log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
                      "walked %d objs, archived %d new objs, recorded %d classes",
                      class_name, _num_new_walked_objs, _num_new_archived_objs,
                      num_new_recorded_klasses);

  delete_seen_objects_table();

  _num_total_subgraph_recordings ++;
  _num_total_walked_objs      += _num_new_walked_objs;
  _num_total_archived_objs    += _num_new_archived_objs;
  _num_total_recorded_klasses += num_new_recorded_klasses;
}

struct ArchivableStaticFieldInfo {
  const char* klass_name;
  const char* field_name;
  InstanceKlass* klass;
  int offset;
  BasicType type;
};

// If you add new entries to this table, you should know what you're doing!
static ArchivableStaticFieldInfo archivable_static_fields[] = {
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedSystemModules"},
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedModuleFinder"},
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedMainModule"},
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedConfiguration"},
  {"java/util/ImmutableCollections$ListN",     "EMPTY_LIST"},
  {"java/util/ImmutableCollections$MapN",      "EMPTY_MAP"},
  {"java/util/ImmutableCollections$SetN",      "EMPTY_SET"},
  {"java/lang/Integer$IntegerCache",           "archivedCache"},
  {"java/lang/module/Configuration",           "EMPTY_CONFIGURATION"},
};
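
// Note: only klass_name and field_name are initialized statically above. The
// klass and offset fields are filled in at dump time by
// init_archivable_static_fields() once the classes have been resolved (the
// type field is currently unused in this file).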

const static int num_archivable_static_fields =
  sizeof(archivable_static_fields) / sizeof(ArchivableStaticFieldInfo);

class ArchivableStaticFieldFinder: public FieldClosure {
  InstanceKlass* _ik;
  Symbol* _field_name;
  bool _found;
  int _offset;
public:
  ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
    _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}

  virtual void do_field(fieldDescriptor* fd) {
    if (fd->name() == _field_name) {
      assert(!_found, "fields cannot be overloaded");
      assert(fd->field_type() == T_OBJECT || fd->field_type() == T_ARRAY, "can archive only obj or array fields");
      _found = true;
      _offset = fd->offset();
    }
  }
  bool found()     { return _found;  }
  int offset()     { return _offset; }
};

void HeapShared::init_archivable_static_fields(Thread* THREAD) {
  _dump_time_subgraph_info_table = new (ResourceObj::C_HEAP, mtClass) DumpTimeKlassSubGraphInfoTable();

  for (int i = 0; i < num_archivable_static_fields; i++) {
    ArchivableStaticFieldInfo* info = &archivable_static_fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name, THREAD);
    TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name, THREAD);

    Klass* k = SystemDictionary::resolve_or_null(klass_name, THREAD);
    assert(k != NULL && !HAS_PENDING_EXCEPTION, "class must exist");
    InstanceKlass* ik = InstanceKlass::cast(k);

    ArchivableStaticFieldFinder finder(ik, field_name);
    ik->do_local_static_fields(&finder);
    assert(finder.found(), "field must exist");

    info->klass = ik;
    info->offset = finder.offset();
  }
}

void HeapShared::archive_static_fields(Thread* THREAD) {
  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field.
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  int i;
  for (i = 0; i < num_archivable_static_fields; ) {
    ArchivableStaticFieldInfo* info = &archivable_static_fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name);

    // If you have specified consecutive fields of the same klass in
    // archivable_static_fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; i < num_archivable_static_fields; i++) {
      ArchivableStaticFieldInfo* f = &archivable_static_fields[i];
      if (f->klass_name != klass_name) {
        break;
      }
      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name, CHECK);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Performed %d subgraph recordings", _num_total_subgraph_recordings);
  log_info(cds, heap)("Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  for (int i = 0; i < num_archivable_static_fields; i++) {
    ArchivableStaticFieldInfo* f = &archivable_static_fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("Verified %d references", _num_total_verifications);
#endif
}

// At dump time, find the location of all the non-null oop pointers in an archived heap
// region. This way we can quickly relocate all the pointers without using
// BasicOopIterateClosure at runtime.
class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
  narrowOop* _start;
  BitMap* _oopmap;
  int _num_total_oops;
  int _num_null_oops;
 public:
  FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap)
    : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}

  virtual bool should_verify_oops(void) {
    return false;
  }
  virtual void do_oop(narrowOop* p) {
    _num_total_oops ++;
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      size_t idx = p - _start;
      _oopmap->set_bit(idx);
    } else {
      _num_null_oops ++;
    }
  }
  virtual void do_oop(oop* p) {
    ShouldNotReachHere();
  }
  int num_total_oops() const { return _num_total_oops; }
  int num_null_oops()  const { return _num_null_oops; }
};
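
// For example (hypothetical layout): with 4-byte narrowOops, a non-null
// embedded pointer at region.start() + 24 bytes has narrowOop index
// 24 / sizeof(narrowOop) == 6, so bit 6 is set in the oopmap. Null oops
// leave their bit clear and are skipped entirely at patch time.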

ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
  assert(UseCompressedOops, "must be");
  size_t num_bits = region.byte_size() / sizeof(narrowOop);
  ResourceBitMap oopmap(num_bits);

  HeapWord* p   = region.start();
  HeapWord* end = region.end();
  FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap);

  int num_objs = 0;
  while (p < end) {
    oop o = (oop)p;
    o->oop_iterate(&finder);
    p += o->size();
    ++ num_objs;
  }

  log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d",
                      num_objs, finder.num_total_oops(), finder.num_null_oops());
  return oopmap;
}

void HeapShared::init_narrow_oop_decoding(address base, int shift) {
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}
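
// Sketch of the decoding these parameters enable (see
// HeapShared::decode_from_archive in heapShared.inline.hpp; shown here under
// the usual base-plus-shift compressed oop encoding assumption):
//
//   oop o = (oop)(_narrow_oop_base + ((uintptr_t)v << _narrow_oop_shift));
//
// The dump-time base/shift may differ from the runtime CompressedOops
// settings, so archived values are decoded with these saved parameters and
// re-encoded for the runtime heap by the store in PatchEmbeddedPointers below.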

// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

 public:
  PatchEmbeddedPointers(narrowOop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = HeapShared::decode_from_archive(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;
  }
};

void HeapShared::patch_archived_heap_embedded_pointers(MemRegion region, address oopmap,
                                                       size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);

#ifndef PRODUCT
  ResourceMark rm;
  ResourceBitMap checkBm = calculate_oopmap(region);
  assert(bm.is_same(checkBm), "sanity");
#endif

  PatchEmbeddedPointers patcher((narrowOop*)region.start());
  bm.iterate(&patcher);
}

#endif // INCLUDE_CDS_JAVA_HEAP