/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "utilities/bitMap.inline.hpp"

#if INCLUDE_CDS_JAVA_HEAP
KlassSubGraphInfo* HeapShared::_subgraph_info_list = NULL;
int HeapShared::_num_archived_subgraph_info_records = 0;
Array<ArchivedKlassSubGraphInfoRecord>* HeapShared::_archived_subgraph_info_records = NULL;

KlassSubGraphInfo* HeapShared::find_subgraph_info(Klass* k) {
  KlassSubGraphInfo* info = _subgraph_info_list;
  while (info != NULL) {
    if (info->klass() == k) {
      return info;
    }
    info = info->next();
  }
  return NULL;
}

// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the relocated
// Klass* of the original k.
KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
  KlassSubGraphInfo* info = find_subgraph_info(relocated_k);
  if (info != NULL) {
    return info;
  }

  info = new KlassSubGraphInfo(relocated_k, _subgraph_info_list);
  _subgraph_info_list = info;
  return info;
}

address   HeapShared::_narrow_oop_base;
int       HeapShared::_narrow_oop_shift;

int HeapShared::num_of_subgraph_infos() {
  int num = 0;
  KlassSubGraphInfo* info = _subgraph_info_list;
  while (info != NULL) {
    num ++;
    info = info->next();
  }
  return num;
}

// Add an entry field to the current KlassSubGraphInfo.
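// Each entry is recorded as a pair of juints: the static field's offset in
// the mirror, followed by the compressed-oop encoding of the archived value
// (which may be null).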
void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) {
  assert(DumpSharedSpaces, "dump time only");
  if (_subgraph_entry_fields == NULL) {
    _subgraph_entry_fields =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, true);
  }
  _subgraph_entry_fields->append((juint)static_field_offset);
  _subgraph_entry_fields->append(CompressedOops::encode(v));
}

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in a sub-graph.
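// The recorded klasses are loaded and initialized at runtime before the
// archived sub-graph is restored (see initialize_from_archived_subgraph()).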
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k, Klass *relocated_k) {
  assert(DumpSharedSpaces, "dump time only");
  assert(relocated_k == MetaspaceShared::get_relocated_klass(orig_k),
         "must be the relocated Klass in the shared space");

  if (_subgraph_object_klasses == NULL) {
    _subgraph_object_klasses =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, true);
  }

  assert(relocated_k->is_shared(), "must be a shared class");

  if (_k == relocated_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (relocated_k->is_instance_klass()) {
    assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(),
          "must be boot class");
    // SystemDictionary::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == SystemDictionary::String_klass() ||
        orig_k == SystemDictionary::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
  } else if (relocated_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
            "must be boot class");
    }
    if (relocated_k == Universe::objectArrayKlassObj()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(relocated_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(relocated_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(relocated_k);
}

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
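// The record and the arrays it points to are allocated in the read-only (ro)
// region of the archive via MetaspaceShared::new_ro_array().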
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = info->klass();
  _next = NULL;
  _entry_field_records = NULL;
  _subgraph_object_klasses = NULL;

  // populate the entry fields
  GrowableArray<juint>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != NULL) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 2 == 0, "sanity");
    _entry_field_records =
      MetaspaceShared::new_ro_array<juint>(num_entry_fields);
    for (int i = 0 ; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
  }

  // the Klasses of the objects in the sub-graphs
  GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
  if (subgraph_object_klasses != NULL) {
    int num_subgraphs_klasses = subgraph_object_klasses->length();
    _subgraph_object_klasses =
      MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses);
    for (int i = 0; i < num_subgraphs_klasses; i++) {
      Klass* subgraph_k = subgraph_object_klasses->at(i);
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)(
          "Archived object klass %s (%2d) => %s",
          _k->external_name(), i, subgraph_k->external_name());
      }
      _subgraph_object_klasses->at_put(i, subgraph_k);
    }
  }
}

// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class mirror. The entry
//   points are static fields in the mirror. For each entry point, the field
//   offset and value are recorded in the sub-graph info. The values are stored
//   back to the corresponding fields at runtime.
// - A list of klasses that need to be loaded/initialized before the archived
//   java object sub-graph can be accessed at runtime.
//
// The records are saved in the archive file and reloaded at runtime.
//
// Layout of the archived subgraph info records:
//
// records_size | num_records | records*
// ArchivedKlassSubGraphInfoRecord | entry_fields | subgraph_object_klasses
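//
// For example (illustrative only), an archive with two records would lay out
// the ro space as: a 3-slot header (records_size, num_records, pointer to the
// records array), then the records array itself, then each record's
// entry_fields and subgraph_object_klasses arrays in the order the records
// were initialized, since all new_ro_array() allocations are sequential.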
size_t HeapShared::build_archived_subgraph_info_records(int num_records) {
  // remember the start address
  char* start_p = MetaspaceShared::read_only_space_top();

  // now populate the archived subgraph infos, which will be saved in the
  // archive file
  _archived_subgraph_info_records =
    MetaspaceShared::new_ro_array<ArchivedKlassSubGraphInfoRecord>(num_records);
  KlassSubGraphInfo* info = _subgraph_info_list;
  int i = 0;
  while (info != NULL) {
    assert(i < _archived_subgraph_info_records->length(), "sanity");
    ArchivedKlassSubGraphInfoRecord* record =
      _archived_subgraph_info_records->adr_at(i);
    record->init(info);
    info = info->next();
    i ++;
  }

  // _subgraph_info_list is no longer needed
  delete _subgraph_info_list;
  _subgraph_info_list = NULL;

  char* end_p = MetaspaceShared::read_only_space_top();
  size_t records_size = end_p - start_p;
  return records_size;
}

// Write the subgraph info records in the shared _ro region
void HeapShared::write_archived_subgraph_infos() {
  assert(DumpSharedSpaces, "dump time only");

  Array<intptr_t>* records_header = MetaspaceShared::new_ro_array<intptr_t>(3);

  _num_archived_subgraph_info_records = num_of_subgraph_infos();
  size_t records_size = build_archived_subgraph_info_records(
                             _num_archived_subgraph_info_records);

  // Now write the header information:
  // records_size, num_records, _archived_subgraph_info_records
  assert(records_header != NULL, "sanity");
  intptr_t* p = (intptr_t*)(records_header->data());
  *p = (intptr_t)records_size;
  p ++;
  *p = (intptr_t)_num_archived_subgraph_info_records;
  p ++;
  *p = (intptr_t)_archived_subgraph_info_records;
}

char* HeapShared::read_archived_subgraph_infos(char* buffer) {
  Array<intptr_t>* records_header = (Array<intptr_t>*)buffer;
  intptr_t* p = (intptr_t*)(records_header->data());
  size_t records_size = (size_t)(*p);
  p ++;
  _num_archived_subgraph_info_records = *p;
  p ++;
  _archived_subgraph_info_records =
    (Array<ArchivedKlassSubGraphInfoRecord>*)(*p);

  buffer = (char*)_archived_subgraph_info_records + records_size;
  return buffer;
}

void HeapShared::initialize_from_archived_subgraph(Klass* k) {
  if (!MetaspaceShared::open_archive_heap_region_mapped()) {
    return; // nothing to do
  }

  if (_num_archived_subgraph_info_records == 0) {
    return; // no subgraph info records
  }

  // Initialize from archived data. Currently this is done only
  // during VM initialization time. No lock is needed.
  Thread* THREAD = Thread::current();
  for (int i = 0; i < _archived_subgraph_info_records->length(); i++) {
    ArchivedKlassSubGraphInfoRecord* record = _archived_subgraph_info_records->adr_at(i);
    if (record->klass() == k) {
      // Found the archived subgraph info record for the requesting klass.
      // Load/link/initialize the klasses of the objects in the subgraph.
      // NULL class loader is used.
      Array<Klass*>* klasses = record->subgraph_object_klasses();
      if (klasses != NULL) {
        for (int j = 0; j < klasses->length(); j++) {
          Klass* obj_k = klasses->at(j);
          Klass* resolved_k = SystemDictionary::resolve_or_null(
                                                obj_k->name(), THREAD);
          if (resolved_k != obj_k) {
            return;
          }
          if (obj_k->is_instance_klass()) {
            InstanceKlass* ik = InstanceKlass::cast(obj_k);
            ik->initialize(THREAD);
          } else if (obj_k->is_objArray_klass()) {
            ObjArrayKlass* oak = ObjArrayKlass::cast(obj_k);
            oak->initialize(THREAD);
          }
        }
      }

      if (HAS_PENDING_EXCEPTION) {
        CLEAR_PENDING_EXCEPTION;
        // None of the field values will be set if there was an exception.
        // The java code will not see any of the archived objects in the
        // subgraphs referenced from k in this case.
        return;
      }

      // Load the subgraph entry fields from the record and store them back to
      // the corresponding fields within the mirror.
      oop m = k->java_mirror();
      Array<juint>* entry_field_records = record->entry_field_records();
      if (entry_field_records != NULL) {
        int efr_len = entry_field_records->length();
        assert(efr_len % 2 == 0, "sanity");
        for (int j = 0; j < efr_len; j += 2) {
          int field_offset = entry_field_records->at(j);
          // The object referenced by the field becomes 'known' by GC from this
          // point. All objects in the subgraph reachable from the object are
          // also 'known' by GC.
          oop v = MetaspaceShared::materialize_archived_object(
            entry_field_records->at(j+1));
          m->obj_field_put(field_offset, v);
        }
      }

      // Done. Java code can see the archived sub-graphs referenced from k's
      // mirror after this point.
      return;
    }
  }
}

class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
  int _level;
  bool _record_klasses_only;
  KlassSubGraphInfo* _subgraph_info;
  oop _orig_referencing_obj;
  oop _archived_referencing_obj;
  Thread* _thread;
 public:
  WalkOopAndArchiveClosure(int level, bool record_klasses_only,
                           KlassSubGraphInfo* subgraph_info,
                           oop orig, oop archived, TRAPS) :
    _level(level), _record_klasses_only(record_klasses_only),
    _subgraph_info(subgraph_info),
    _orig_referencing_obj(orig), _archived_referencing_obj(archived),
    _thread(THREAD) {}
  void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
  void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      assert(!MetaspaceShared::is_archive_object(obj),
             "original objects must not point to archived objects");

      size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
      T* new_p = (T*)(address(_archived_referencing_obj) + field_delta);
      Thread* THREAD = _thread;

      if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
        ResourceMark rm;
        log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size %d %s", _level,
                             _orig_referencing_obj->klass()->external_name(), field_delta,
                             p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
        LogTarget(Trace, cds, heap) log;
        LogStream out(log);
        obj->print_on(&out);
      }

      oop archived = HeapShared::archive_reachable_objects_from(_level + 1, _subgraph_info, obj, THREAD);
      assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1");
      assert(MetaspaceShared::is_archive_object(archived), "must be");

      if (!_record_klasses_only) {
        // Update the reference in the archived copy of the referencing object.
        log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
                             _level, p2i(new_p), p2i(obj), p2i(archived));
        RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
      }
    }
  }
};

// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of orig_obj and all reachable objects.
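// Returns the archived copy of orig_obj. May return NULL only when level == 1
// and orig_obj cannot be archived; the caller then skips that entry field.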
oop HeapShared::archive_reachable_objects_from(int level, KlassSubGraphInfo* subgraph_info, oop orig_obj, TRAPS) {
  assert(orig_obj != NULL, "must be");
  assert(!MetaspaceShared::is_archive_object(orig_obj), "sanity");

  // java.lang.Class instances cannot be included in an archived
  // object sub-graph.
  if (java_lang_Class::is_instance(orig_obj)) {
    log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
    vm_exit(1);
  }

  oop archived_obj = MetaspaceShared::find_archived_heap_object(orig_obj);
  if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
    // To save time, don't walk strings that are already archived. They just contain
    // pointers to a type array, whose klass doesn't need to be recorded.
    return archived_obj;
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return archived_obj;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool record_klasses_only = (archived_obj != NULL);
  if (archived_obj == NULL) {
    ++_num_new_archived_objs;
    archived_obj = MetaspaceShared::archive_heap_object(orig_obj, THREAD);
    if (archived_obj == NULL) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size %d, skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return NULL;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        vm_exit(1);
      }
    }
  }

  assert(archived_obj != NULL, "must be");
  Klass *orig_k = orig_obj->klass();
  Klass *relocated_k = archived_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k, relocated_k);

  WalkOopAndArchiveClosure walker(level, record_klasses_only, subgraph_info, orig_obj, archived_obj, THREAD);
  orig_obj->oop_iterate(&walker);
  return archived_obj;
}

//
// Start from the given static field in a java mirror and archive the
// complete sub-graph of java heap objects that are reached directly
// or indirectly from the starting object by following references.
// Sub-graph archiving restrictions (current):
//
// - All classes of objects in the archived sub-graph (including the
//   entry class) must be boot classes only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. A mirror can only be the sub-graph entry object.
//
// The Java heap object sub-graph archiving process (see
// WalkOopAndArchiveClosure):
//
// 1) Java object sub-graph archiving starts from a given static field
// within a Class instance (java mirror). If the static field is a
// reference field and points to a non-null java object, proceed to
// the next step.
//
// 2) Archives the referenced java object. If an archived copy of the
// current object already exists, updates the pointer in the archived
// copy of the referencing object to point to the current archived object.
// Otherwise, proceed to the next step.
//
// 3) Follows all references within the current java object and recursively
// archives the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of the referencing object to
// point to the current archived object.
//
// 5) The Klass of the current java object is added to the list of Klasses
// for loading and initializing before any object in the archived graph can
// be accessed at runtime.
//
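// For example (illustrative only), archiving java/lang/Integer$IntegerCache's
// archivedCache field (see archivable_static_fields[] below) copies the
// Integer[] array and every Integer it references into the archive heap
// region, records IntegerCache's field offset together with the archived
// array as the entry point, and records Integer[] and Integer as klasses to
// initialize before the sub-graph is restored at runtime.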
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
                                                             const char* klass_name,
                                                             int field_offset,
                                                             const char* field_name,
                                                             TRAPS) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
  oop archived_m = MetaspaceShared::find_archived_heap_object(m);
  if (CompressedOops::is_null(archived_m)) {
    return;
  }

  KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
  oop f = m->obj_field(field_offset);

  log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));

  if (!CompressedOops::is_null(f)) {
    if (log_is_enabled(Trace, cds, heap)) {
      LogTarget(Trace, cds, heap) log;
      LogStream out(log);
      f->print_on(&out);
    }

    oop af = archive_reachable_objects_from(1, subgraph_info, f, CHECK);

    if (af == NULL) {
      log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
                           klass_name, field_name);
    } else {
      // Note: the field value is not preserved in the archived mirror.
      // Record the field as a new subGraph entry point. The recorded
      // information is restored from the archive at runtime.
      subgraph_info->add_subgraph_entry_field(field_offset, af);
      log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
    }
  } else {
    // The field contains null, we still need to record the entry point,
    // so it can be restored at runtime.
    subgraph_info->add_subgraph_entry_field(field_offset, NULL);
  }
}

#ifndef PRODUCT
class VerifySharedOopClosure: public BasicOopIterateClosure {
 private:
  bool _is_archived;

 public:
  VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}

  void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
  void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      HeapShared::verify_reachable_objects_from(obj, _is_archived);
    }
  }
};

void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
  oop archived_m = MetaspaceShared::find_archived_heap_object(m);
  if (CompressedOops::is_null(archived_m)) {
    return;
  }
  oop f = m->obj_field(field_offset);
  if (!CompressedOops::is_null(f)) {
    verify_subgraph_from(f);
  }
}

void HeapShared::verify_subgraph_from(oop orig_obj) {
  oop archived_obj = MetaspaceShared::find_archived_heap_object(orig_obj);
  if (archived_obj == NULL) {
    // It's OK for the root of a subgraph not to be archived. See comments in
    // archive_reachable_objects_from().
    return;
  }

  // Verify that all objects reachable from orig_obj are archived.
  init_seen_objects_table();
  verify_reachable_objects_from(orig_obj, false);
  delete_seen_objects_table();

  // Note: we could also verify that all objects reachable from the archived
  // copy of orig_obj can only point to archived objects, with:
  //      init_seen_objects_table();
  //      verify_reachable_objects_from(archived_obj, true);
  //      delete_seen_objects_table();
  // but that's already done in G1HeapVerifier::verify_archive_regions so we
  // won't do it here.
}

void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
  _num_total_verifications ++;
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);

    if (is_archived) {
      assert(MetaspaceShared::is_archive_object(obj), "must be");
      assert(MetaspaceShared::find_archived_heap_object(obj) == NULL, "must be");
    } else {
      assert(!MetaspaceShared::is_archive_object(obj), "must be");
      assert(MetaspaceShared::find_archived_heap_object(obj) != NULL, "must be");
    }

    VerifySharedOopClosure walker(is_archived);
    obj->oop_iterate(&walker);
  }
}
#endif

HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;

bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
  return _seen_objects_table->get(obj) != NULL;
}

void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
  assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
  _seen_objects_table->put(obj, true);
  ++ _num_new_walked_objs;
}

void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name) {
  log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
  init_seen_objects_table();
  _num_new_walked_objs = 0;
  _num_new_archived_objs = 0;
  _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
}

void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
  int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
    _num_old_recorded_klasses;
  log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
                      "walked %d objs, archived %d new objs, recorded %d classes",
                      class_name, _num_new_walked_objs, _num_new_archived_objs,
                      num_new_recorded_klasses);

  delete_seen_objects_table();

  _num_total_subgraph_recordings ++;
  _num_total_walked_objs      += _num_new_walked_objs;
  _num_total_archived_objs    += _num_new_archived_objs;
  _num_total_recorded_klasses +=  num_new_recorded_klasses;
}

struct ArchivableStaticFieldInfo {
  const char* klass_name;
  const char* field_name;
  InstanceKlass* klass;
  int offset;
  BasicType type;
};

// If you add new entries to this table, you should know what you're doing!
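// Each entry names a boot class and a static reference field within it; the
// klass and offset fields are filled in at dump time by
// init_archivable_static_fields() below.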
static ArchivableStaticFieldInfo archivable_static_fields[] = {
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedSystemModules"},
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedModuleFinder"},
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedMainModule"},
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedConfiguration"},
  {"java/util/ImmutableCollections$ListN",     "EMPTY_LIST"},
  {"java/util/ImmutableCollections$MapN",      "EMPTY_MAP"},
  {"java/util/ImmutableCollections$SetN",      "EMPTY_SET"},
  {"java/lang/Integer$IntegerCache",           "archivedCache"},
  {"java/lang/module/Configuration",           "EMPTY_CONFIGURATION"},
};

const static int num_archivable_static_fields =
  sizeof(archivable_static_fields) / sizeof(ArchivableStaticFieldInfo);

class ArchivableStaticFieldFinder: public FieldClosure {
  InstanceKlass* _ik;
  Symbol* _field_name;
  bool _found;
  int _offset;
public:
  ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
    _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}

  virtual void do_field(fieldDescriptor* fd) {
    if (fd->name() == _field_name) {
      assert(!_found, "fields cannot be overloaded");
      assert(fd->field_type() == T_OBJECT || fd->field_type() == T_ARRAY, "can archive only obj or array fields");
      _found = true;
      _offset = fd->offset();
    }
  }
  bool found()     { return _found;  }
  int offset()     { return _offset; }
};

void HeapShared::init_archivable_static_fields(Thread* THREAD) {
  for (int i = 0; i < num_archivable_static_fields; i++) {
    ArchivableStaticFieldInfo* info = &archivable_static_fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name, THREAD);
    TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name, THREAD);

    Klass* k = SystemDictionary::resolve_or_null(klass_name, THREAD);
    assert(k != NULL && !HAS_PENDING_EXCEPTION, "class must exist");
    InstanceKlass* ik = InstanceKlass::cast(k);

    ArchivableStaticFieldFinder finder(ik, field_name);
    ik->do_local_static_fields(&finder);
    assert(finder.found(), "field must exist");

    info->klass = ik;
    info->offset = finder.offset();
  }
}

void HeapShared::archive_static_fields(Thread* THREAD) {
  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  int i;
  for (i = 0; i < num_archivable_static_fields; ) {
    ArchivableStaticFieldInfo* info = &archivable_static_fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name);

    // If you have specified consecutive fields of the same klass in
    // archivable_static_fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; i < num_archivable_static_fields; i++) {
      ArchivableStaticFieldInfo* f = &archivable_static_fields[i];
      if (f->klass_name != klass_name) {
        break;
      }
      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name, CHECK);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Performed subgraph records = %d times", _num_total_subgraph_recordings);
  log_info(cds, heap)("Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  for (int i = 0; i < num_archivable_static_fields; i++) {
    ArchivableStaticFieldInfo* f = &archivable_static_fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("Verified %d references", _num_total_verifications);
#endif
}

// At dump time, find the location of all the non-null oop pointers in an archived heap
// region. This way we can quickly relocate all the pointers without using
// BasicOopIterateClosure at runtime.
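// Each bit in the resulting bitmap corresponds to one narrowOop-sized slot in
// the region: bit i is set iff the narrowOop at region.start() + i is
// non-null and therefore needs to be patched at runtime.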
class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
  narrowOop* _start;
  BitMap *_oopmap;
  int _num_total_oops;
  int _num_null_oops;
 public:
  FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap)
    : _start(start), _oopmap(oopmap), _num_total_oops(0),  _num_null_oops(0) {}

  virtual bool should_verify_oops(void) {
    return false;
  }
  virtual void do_oop(narrowOop* p) {
    _num_total_oops ++;
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      size_t idx = p - _start;
      _oopmap->set_bit(idx);
    } else {
      _num_null_oops ++;
    }
  }
  virtual void do_oop(oop *p) {
    ShouldNotReachHere();
  }
  int num_total_oops() const { return _num_total_oops; }
  int num_null_oops()  const { return _num_null_oops; }
};

ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
  assert(UseCompressedOops, "must be");
  size_t num_bits = region.byte_size() / sizeof(narrowOop);
  ResourceBitMap oopmap(num_bits);

  HeapWord* p   = region.start();
  HeapWord* end = region.end();
  FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap);

  int num_objs = 0;
  while (p < end) {
    oop o = (oop)p;
    o->oop_iterate(&finder);
    p += o->size();
    ++ num_objs;
  }

  log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d",
                      num_objs, finder.num_total_oops(), finder.num_null_oops());
  return oopmap;
}

void HeapShared::init_narrow_oop_decoding(address base, int shift) {
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}

// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
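// Each set bit in the oopmap marks a non-null narrowOop that was encoded with
// the dump-time narrow-oop base/shift (saved via init_narrow_oop_decoding());
// do_bit() decodes it with HeapShared::decode_from_archive() and re-stores it
// using the current runtime encoding.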
class PatchEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

 public:
  PatchEmbeddedPointers(narrowOop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = HeapShared::decode_from_archive(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;
  }
};

void HeapShared::patch_archived_heap_embedded_pointers(MemRegion region, address oopmap,
                                                       size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);

#ifndef PRODUCT
  ResourceMark rm;
  ResourceBitMap checkBm = calculate_oopmap(region);
  assert(bm.is_same(checkBm), "sanity");
#endif

  PatchEmbeddedPointers patcher((narrowOop*)region.start());
  bm.iterate(&patcher);
}

#endif // INCLUDE_CDS_JAVA_HEAP