/*
 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/gcLocker.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "memory/archiveUtils.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/bitMap.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

bool HeapShared::_closed_archive_heap_region_mapped = false;
bool HeapShared::_open_archive_heap_region_mapped = false;
bool HeapShared::_archive_heap_region_fixed = false;

address HeapShared::_narrow_oop_base;
int     HeapShared::_narrow_oop_shift;

//
// If you add new entries to the following tables, you should know what you're doing!
//

// Entry fields for shareable subgraphs archived in the closed archive heap
// region. Warning: objects in the subgraphs should not have reference fields
// assigned at runtime.
static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",     0, "archivedCache"},
  {"java/lang/Long$LongCache",           0, "archivedCache"},
  {"java/lang/Byte$ByteCache",           0, "archivedCache"},
  {"java/lang/Short$ShortCache",         0, "archivedCache"},
  {"java/lang/Character$CharacterCache", 0, "archivedCache"},
  {"java/util/jar/Attributes$Name",      0, "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",         0, "constantBaseLocales"},
};
// Entry fields for subgraphs archived in the open archive heap region.
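// Each entry is {klass_name, full_module_graph_only, field_name}. Entries
// marked with 1 in the middle column are skipped by
// initialize_from_archived_subgraph() and archive_object_subgraphs() when the
// full module graph is disabled.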
static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/BuiltinClassLoader$ArchivedData",   1, "packageToModule"},
  {"jdk/internal/loader/BootLoader$ArchivedData",           1, "servicesCatalog"},
  {"jdk/internal/loader/ClassLoaders$ArchivedData",         1, "singleton"},
  {"jdk/internal/module/ModuleBootstrap$ArchivedBootLayer", 1, "archivedBootLayer"},
  {"jdk/internal/module/ArchivedModuleGraph",               0, "archivedModuleGraph"},
  {"java/util/ImmutableCollections",                        0, "archivedObjects"},
  {"java/lang/Module$ArchivedData",                         1, "singleton"},
  {"java/lang/module/Configuration",                        0, "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",                        0, "archivedCaches"},
};

const static int num_closed_archive_subgraph_entry_fields =
  sizeof(closed_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
const static int num_open_archive_subgraph_entry_fields =
  sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);

////////////////////////////////////////////////////////////////
//
// Java heap object archiving support
//
////////////////////////////////////////////////////////////////
void HeapShared::fixup_mapped_heap_regions() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();
  mapinfo->fixup_mapped_heap_regions();
  set_archive_heap_region_fixed();
  SystemDictionaryShared::update_archived_mirror_native_pointers();
}

unsigned HeapShared::oop_hash(oop const& p) {
  assert(!p->mark().has_bias_pattern(),
         "this object should never have been locked");  // so identity_hash won't safepoint
  unsigned hash = (unsigned)p->identity_hash();
  return hash;
}

static void reset_states(oop obj, TRAPS) {
  Handle h_obj(THREAD, obj);
  InstanceKlass* klass = InstanceKlass::cast(obj->klass());
  TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
  Symbol* method_sig = vmSymbols::void_method_signature();

  while (klass != NULL) {
    Method* method = klass->find_method(method_name, method_sig);
    if (method != NULL) {
      assert(method->is_private(), "must be");
      if (log_is_enabled(Debug, cds)) {
        ResourceMark rm(THREAD);
        log_debug(cds)("  calling %s", method->name_and_sig_as_C_string());
      }
      JavaValue result(T_VOID);
      JavaCalls::call_special(&result, h_obj, klass,
                              method_name, method_sig, CHECK);
    }
    klass = klass->java_super();
  }
}

void HeapShared::reset_archived_object_states(TRAPS) {
  log_debug(cds)("Resetting platform loader");
  reset_states(SystemDictionary::java_platform_loader(), THREAD);
  log_debug(cds)("Resetting system loader");
  reset_states(SystemDictionary::java_system_loader(), THREAD);
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
oop HeapShared::find_archived_heap_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");
  ArchivedObjectCache* cache = archived_object_cache();
  oop* p = cache->get(obj);
  if (p != NULL) {
    return *p;
  } else {
    return NULL;
  }
}

oop HeapShared::archive_heap_object(oop obj, Thread* THREAD) {
  assert(DumpSharedSpaces, "dump-time only");

  oop ao = find_archived_heap_object(obj);
  if (ao != NULL) {
    // already archived
    return ao;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: "
                         SIZE_FORMAT,
                         p2i(obj), (size_t)obj->size());
    return NULL;
  }

  // Pre-compute object identity hash at CDS dump time.
  obj->identity_hash();

  oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
  if (archived_oop != NULL) {
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
    MetaspaceShared::relocate_klass_ptr(archived_oop);
    // Clear age -- it might have been set if a GC happened during -Xshare:dump
    markWord mark = archived_oop->mark_raw();
    mark = mark.set_age(0);
    archived_oop->set_mark_raw(mark);
    ArchivedObjectCache* cache = archived_object_cache();
    cache->put(obj, archived_oop);
    log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT,
                         p2i(obj), p2i(archived_oop));
  } else {
    log_error(cds, heap)(
      "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
      p2i(obj));
    vm_exit(1);
  }
  return archived_oop;
}

oop HeapShared::materialize_archived_object(narrowOop v) {
  assert(archive_heap_region_fixed(),
         "must be called after archive heap regions are fixed");
  if (!CompressedOops::is_null(v)) {
    oop obj = HeapShared::decode_from_archive(v);
    return G1CollectedHeap::heap()->materialize_archived_object(obj);
  }
  return NULL;
}

void HeapShared::archive_klass_objects(Thread* THREAD) {
  GrowableArray<Klass*>* klasses = MetaspaceShared::collected_klasses();
  assert(klasses != NULL, "sanity");
  for (int i = 0; i < klasses->length(); i++) {
    Klass* k = klasses->at(i);

    // archive mirror object
    java_lang_Class::archive_mirror(k, CHECK);

    // archive the resolved_references array
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->archive_resolved_references(THREAD);
    }
  }
}

void HeapShared::run_full_gc_in_vm_thread() {
  if (is_heap_object_archiving_allowed()) {
    // Avoid fragmentation while archiving heap objects.
    // We do this inside a safepoint, so that no further allocation can happen after GC
    // has finished.
    if (GCLocker::is_active()) {
      // Just checking for safety ...
      // This should not happen during -Xshare:dump. If you see this, probably the Java core lib
      // has been modified such that JNI code is executed in some clean up threads after
      // we have finished class loading.
      log_warning(cds)("GC locker is held, unable to start extra compacting GC. This may produce suboptimal results.");
    } else {
      log_info(cds)("Run GC ...");
      Universe::heap()->collect_as_vm_thread(GCCause::_archive_time_gc);
      log_info(cds)("Run GC done");
    }
  }
}

void HeapShared::archive_java_heap_objects(GrowableArray<MemRegion> *closed,
                                           GrowableArray<MemRegion> *open) {
  if (!is_heap_object_archiving_allowed()) {
    log_info(cds)(
      "Archived java heap is not supported as UseG1GC, "
      "UseCompressedOops and UseCompressedClassPointers are required. "
      "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
      BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
      BOOL_TO_STR(UseCompressedClassPointers));
    return;
  }

  G1HeapVerifier::verify_ready_for_archiving();

  {
    NoSafepointVerifier nsv;

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    log_info(cds)("Dumping objects to closed archive heap region ...");
    NOT_PRODUCT(StringTable::verify());
    copy_closed_archive_heap_objects(closed);

    log_info(cds)("Dumping objects to open archive heap region ...");
    copy_open_archive_heap_objects(open);

    ClassLoaderData::init_archived_oops();

    destroy_archived_object_cache();
  }

  G1HeapVerifier::verify_archive_regions();
}

void HeapShared::copy_closed_archive_heap_objects(
                                    GrowableArray<MemRegion> * closed_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");

  Thread* THREAD = Thread::current();
  G1CollectedHeap::heap()->begin_archive_alloc_range();

  // Archive interned string objects
  StringTable::write_to_archive();

  archive_object_subgraphs(closed_archive_subgraph_entry_fields,
                           num_closed_archive_subgraph_entry_fields,
                           true /* is_closed_archive */, THREAD);

  G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive,
                                                   os::vm_allocation_granularity());
}

void HeapShared::copy_open_archive_heap_objects(
                                    GrowableArray<MemRegion> * open_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");

  Thread* THREAD = Thread::current();
  G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);

  java_lang_Class::archive_basic_type_mirrors(THREAD);

  archive_klass_objects(THREAD);

  archive_object_subgraphs(open_archive_subgraph_entry_fields,
                           num_open_archive_subgraph_entry_fields,
                           false /* is_closed_archive */,
                           THREAD);

  G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
                                                   os::vm_allocation_granularity());
}

void HeapShared::init_narrow_oop_decoding(address base, int shift) {
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}

//
// Subgraph archiving support
//
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
HeapShared::RunTimeKlassSubGraphInfoTable   HeapShared::_run_time_subgraph_info_table;

// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the relocated
// Klass* of the original k.
KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(DumpSharedSpaces, "dump time only");
  Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
  if (info == NULL) {
    _dump_time_subgraph_info_table->put(relocated_k, KlassSubGraphInfo(relocated_k));
    info = _dump_time_subgraph_info_table->get(relocated_k);
    ++ _dump_time_subgraph_info_table->_count;
  }
  return info;
}

// Add an entry field to the current KlassSubGraphInfo.
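// Each entry field is stored as a triplet of juints in _subgraph_entry_fields:
//
//   { static_field_offset, CompressedOops::encode(v), is_closed_archive ? 1 : 0 }
//
// ArchivedKlassSubGraphInfoRecord::init() copies these triplets verbatim into
// the read-only region, and initialize_from_archived_subgraph() decodes them
// in the same order at runtime.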
void KlassSubGraphInfo::add_subgraph_entry_field(
    int static_field_offset, oop v, bool is_closed_archive) {
  assert(DumpSharedSpaces, "dump time only");
  if (_subgraph_entry_fields == NULL) {
    _subgraph_entry_fields =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, mtClass);
  }
  _subgraph_entry_fields->append((juint)static_field_offset);
  _subgraph_entry_fields->append(CompressedOops::encode(v));
  _subgraph_entry_fields->append(is_closed_archive ? 1 : 0);
}

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in a sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k, Klass *relocated_k) {
  assert(DumpSharedSpaces, "dump time only");
  assert(relocated_k == MetaspaceShared::get_relocated_klass(orig_k),
         "must be the relocated Klass in the shared space");

  if (_subgraph_object_klasses == NULL) {
    _subgraph_object_klasses =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, mtClass);
  }

  assert(relocated_k->is_shared(), "must be a shared class");

  if (_k == relocated_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (relocated_k->is_instance_klass()) {
    assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(),
           "must be boot class");
    // SystemDictionary::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == SystemDictionary::String_klass() ||
        orig_k == SystemDictionary::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
  } else if (relocated_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
             "must be boot class");
    }
    if (relocated_k == Universe::objectArrayKlassObj()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(relocated_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(relocated_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(relocated_k);
}

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
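// The record is allocated in the read-only (RO) region of the CDS archive (see
// CopyKlassSubGraphInfoToArchive below). All embedded pointers are registered
// with ArchivePtrMarker so they can be relocated if the archive is mapped at a
// non-default address.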
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = info->klass();
  _entry_field_records = NULL;
  _subgraph_object_klasses = NULL;

  // populate the entry fields
  GrowableArray<juint>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != NULL) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 3 == 0, "sanity");
    _entry_field_records =
      MetaspaceShared::new_ro_array<juint>(num_entry_fields);
    for (int i = 0; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
  }

  // the Klasses of the objects in the sub-graphs
  GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
  if (subgraph_object_klasses != NULL) {
    int num_subgraphs_klasses = subgraph_object_klasses->length();
    _subgraph_object_klasses =
      MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses);
    for (int i = 0; i < num_subgraphs_klasses; i++) {
      Klass* subgraph_k = subgraph_object_klasses->at(i);
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)(
          "Archived object klass %s (%2d) => %s",
          _k->external_name(), i, subgraph_k->external_name());
      }
      _subgraph_object_klasses->at_put(i, subgraph_k);
      ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
    }
  }

  ArchivePtrMarker::mark_pointer(&_k);
  ArchivePtrMarker::mark_pointer(&_entry_field_records);
  ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
}

struct CopyKlassSubGraphInfoToArchive : StackObj {
  CompactHashtableWriter* _writer;
  CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}

  bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
    if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
      ArchivedKlassSubGraphInfoRecord* record =
        (ArchivedKlassSubGraphInfoRecord*)MetaspaceShared::read_only_space_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
      record->init(&info);

      unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(klass);
      u4 delta = MetaspaceShared::object_delta_u4(record);
      _writer->add(hash, delta);
    }
    return true; // keep on iterating
  }
};

// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class mirror. The entry
//   points are static fields in the mirror. For each entry point, the field
//   offset, value and is_closed_archive flag are recorded in the sub-graph
//   info. The value is stored back to the corresponding field at runtime.
// - A list of klasses that need to be loaded/initialized before the archived
//   java object sub-graph can be accessed at runtime.
void HeapShared::write_subgraph_info_table() {
  // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
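  // The table is keyed by the Klass' shared-dictionary hash, and each value is
  // a u4 delta (offset of the record within the archive) rather than a raw
  // pointer, so the archived table itself needs no pointer relocation.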
  DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
  CompactHashtableStats stats;

  _run_time_subgraph_info_table.reset();

  CompactHashtableWriter writer(d_table->_count, &stats);
  CopyKlassSubGraphInfoToArchive copy(&writer);
  d_table->iterate(&copy);

  writer.dump(&_run_time_subgraph_info_table, "subgraphs");
}

void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) {
  _run_time_subgraph_info_table.serialize_header(soc);
}

void HeapShared::initialize_from_archived_subgraph(Klass* k) {
  if (!open_archive_heap_region_mapped()) {
    return; // nothing to do
  }
  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  if (!MetaspaceShared::use_full_module_graph()) {
    for (int i = 0; i < num_open_archive_subgraph_entry_fields; i++) {
      const ArchivableStaticFieldInfo* info = &open_archive_subgraph_entry_fields[i];
      if (info->full_module_graph_only && k->name()->equals(info->klass_name)) {
        return;
      }
    }
  }

  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

  // Initialize from archived data. Currently this is done only
  // during VM initialization time. No lock is needed.
  if (record != NULL) {
    Thread* THREAD = Thread::current();

    int i;
    // Load/link/initialize the klasses of the objects in the subgraph.
    // NULL class loader is used.
    Array<Klass*>* klasses = record->subgraph_object_klasses();
    if (klasses != NULL) {
      for (i = 0; i < klasses->length(); i++) {
        Klass* obj_k = klasses->at(i);
        Klass* resolved_k = SystemDictionary::resolve_or_null(
          (obj_k)->name(), THREAD);
        if (resolved_k != obj_k) {
          assert(!SystemDictionary::is_well_known_klass(resolved_k),
                 "shared well-known classes must not be replaced by JVMTI ClassFileLoadHook");
          ResourceMark rm(THREAD);
          log_info(cds, heap)("Failed to load subgraph because %s was not loaded from archive",
                              resolved_k->external_name());
          return;
        }
        if ((obj_k)->is_instance_klass()) {
          InstanceKlass* ik = InstanceKlass::cast(obj_k);
          ik->initialize(THREAD);
        } else if ((obj_k)->is_objArray_klass()) {
          ObjArrayKlass* oak = ObjArrayKlass::cast(obj_k);
          oak->initialize(THREAD);
        }
      }
    }

    if (HAS_PENDING_EXCEPTION) {
      CLEAR_PENDING_EXCEPTION;
      // None of the field values will be set if there was an exception.
      // The java code will not see any of the archived objects in the
      // subgraphs referenced from k in this case.
      return;
    }

    // Load the subgraph entry fields from the record and store them back to
    // the corresponding fields within the mirror.
    oop m = k->java_mirror();
    Array<juint>* entry_field_records = record->entry_field_records();
    if (entry_field_records != NULL) {
      int efr_len = entry_field_records->length();
      assert(efr_len % 3 == 0, "sanity");
      for (i = 0; i < efr_len;) {
        int field_offset = entry_field_records->at(i);
        narrowOop nv = entry_field_records->at(i+1);
        int is_closed_archive = entry_field_records->at(i+2);
        oop v;
        if (is_closed_archive == 0) {
          // It's an archived object in the open archive heap regions, not shared.
          // The object referenced by the field becomes 'known' by GC from this
          // point.
          // All objects in the subgraph reachable from the object are also
          // 'known' by GC.
          v = materialize_archived_object(nv);
        } else {
          // Shared object in the closed archive heap regions. Decode directly.
          assert(!CompressedOops::is_null(nv), "shared object is null");
          v = HeapShared::decode_from_archive(nv);
        }
        m->obj_field_put(field_offset, v);
        i += 3;

        log_debug(cds, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
      }

      // Done. Java code can see the archived sub-graphs referenced from k's
      // mirror after this point.
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT,
                            k->external_name(), p2i(k));
      }
    }
  }
}

class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
  int _level;
  bool _is_closed_archive;
  bool _record_klasses_only;
  KlassSubGraphInfo* _subgraph_info;
  oop _orig_referencing_obj;
  oop _archived_referencing_obj;
  Thread* _thread;
 public:
  WalkOopAndArchiveClosure(int level,
                           bool is_closed_archive,
                           bool record_klasses_only,
                           KlassSubGraphInfo* subgraph_info,
                           oop orig, oop archived, TRAPS) :
    _level(level), _is_closed_archive(is_closed_archive),
    _record_klasses_only(record_klasses_only),
    _subgraph_info(subgraph_info),
    _orig_referencing_obj(orig), _archived_referencing_obj(archived),
    _thread(THREAD) {}
  void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
  void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      assert(!HeapShared::is_archived_object(obj),
             "original objects must not point to archived objects");

      size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
      T* new_p = (T*)(cast_from_oop<address>(_archived_referencing_obj) + field_delta);
      Thread* THREAD = _thread;

      if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
        ResourceMark rm;
        log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size %d %s", _level,
                             _orig_referencing_obj->klass()->external_name(), field_delta,
                             p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
        LogTarget(Trace, cds, heap) log;
        LogStream out(log);
        obj->print_on(&out);
      }

      oop archived = HeapShared::archive_reachable_objects_from(
        _level + 1, _subgraph_info, obj, _is_closed_archive, THREAD);
      assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1");
      assert(HeapShared::is_archived_object(archived), "must be");

      if (!_record_klasses_only) {
        // Update the reference in the archived copy of the referencing object.
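        // new_p addresses the same field, at the same field_delta, inside the
        // archived copy of the referencing object (computed above).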
        log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
                             _level, p2i(new_p), p2i(obj), p2i(archived));
        RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
      }
    }
  }
};

void HeapShared::check_closed_archive_heap_region_object(InstanceKlass* k,
                                                         Thread* THREAD) {
  // Check fields in the object
  for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
    if (!fs.access_flags().is_static()) {
      BasicType ft = fs.field_descriptor().field_type();
      if (!fs.access_flags().is_final() && is_reference_type(ft)) {
        ResourceMark rm(THREAD);
        log_warning(cds, heap)(
          "Please check reference field in %s instance in closed archive heap region: %s %s",
          k->external_name(), (fs.name())->as_C_string(),
          (fs.signature())->as_C_string());
      }
    }
  }
}

void HeapShared::check_module_oop(oop orig_module_obj) {
  assert(DumpSharedSpaces, "must be");
  assert(java_lang_Module::is_instance(orig_module_obj), "must be");
  ModuleEntry* orig_module_ent = java_lang_Module::module_entry_raw(orig_module_obj);
  if (orig_module_ent == NULL) {
    // These special Module objects are created in Java code. They are not
    // defined via Modules::define_module(), so they don't have a ModuleEntry:
    //     java.lang.Module::ALL_UNNAMED_MODULE
    //     java.lang.Module::EVERYONE_MODULE
    //     jdk.internal.loader.ClassLoaders$BootClassLoader::unnamedModule
    assert(java_lang_Module::name(orig_module_obj) == NULL, "must be unnamed");
    log_info(cds, heap)("Module oop with No ModuleEntry* @[" PTR_FORMAT "]", p2i(orig_module_obj));
  } else {
    ClassLoaderData* loader_data = orig_module_ent->loader_data();
    assert(loader_data->is_builtin_class_loader_data(), "must be");
  }
}


// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of orig_obj and all reachable objects.
oop HeapShared::archive_reachable_objects_from(int level,
                                               KlassSubGraphInfo* subgraph_info,
                                               oop orig_obj,
                                               bool is_closed_archive,
                                               TRAPS) {
  assert(orig_obj != NULL, "must be");
  assert(!is_archived_object(orig_obj), "sanity");

  if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
    // This object has injected fields that cannot be supported easily, so we disallow them for now.
    // If you get an error here, you probably made a change in the JDK library that has added
    // these objects that are referenced (directly or indirectly) by static fields.
    ResourceMark rm;
    log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name());
    vm_exit(1);
  }

  // java.lang.Class instances cannot be included in an archived object sub-graph. We only support
  // them as Klass::_archived_mirror because they need to be specially restored at run time.
  //
  // If you get an error here, you probably made a change in the JDK library that has added a Class
  // object that is referenced (directly or indirectly) by static fields.
  if (java_lang_Class::is_instance(orig_obj)) {
    log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
    vm_exit(1);
  }

  oop archived_obj = find_archived_heap_object(orig_obj);
  if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
    // To save time, don't walk strings that are already archived. They just contain
    // pointers to a type array, whose klass doesn't need to be recorded.
    return archived_obj;
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return archived_obj;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool record_klasses_only = (archived_obj != NULL);
  if (archived_obj == NULL) {
    ++_num_new_archived_objs;
    archived_obj = archive_heap_object(orig_obj, THREAD);
    if (archived_obj == NULL) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size %d, skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return NULL;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        vm_exit(1);
      }
    }

    if (java_lang_Module::is_instance(orig_obj)) {
      check_module_oop(orig_obj);
    }

    if (java_lang_Module::is_instance(orig_obj)) {
      java_lang_Module::set_module_entry(archived_obj, NULL);
      java_lang_Module::set_loader(archived_obj, NULL);
    } else if (java_lang_ClassLoader::is_instance(orig_obj)) {
      // class_data will be restored explicitly at run time.
      guarantee(orig_obj == SystemDictionary::java_platform_loader() ||
                orig_obj == SystemDictionary::java_system_loader() ||
                java_lang_ClassLoader::loader_data_raw(orig_obj) == NULL, "must be");
      java_lang_ClassLoader::release_set_loader_data(archived_obj, NULL);
    }
  }

  assert(archived_obj != NULL, "must be");
  Klass *orig_k = orig_obj->klass();
  Klass *relocated_k = archived_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k, relocated_k);

  WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
                                  subgraph_info, orig_obj, archived_obj, THREAD);
  orig_obj->oop_iterate(&walker);
  if (is_closed_archive && orig_k->is_instance_klass()) {
    check_closed_archive_heap_region_object(InstanceKlass::cast(orig_k), THREAD);
  }
  return archived_obj;
}

//
// Start from the given static field in a java mirror and archive the
// complete sub-graph of java heap objects that are reached directly
// or indirectly from the starting object by following references.
//
// Sub-graph archiving restrictions (current):
//
// - All classes of objects in the archived sub-graph (including the
//   entry class) must be boot class only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. Mirror can only be the sub-graph entry object.
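// - Interned strings that have already been archived (e.g., by
//   StringTable::write_to_archive()) are reused as-is; their sub-graphs are
//   not re-walked (see the java_lang_String check in
//   archive_reachable_objects_from() above).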
//
// The Java heap object sub-graph archiving process (see
// WalkOopAndArchiveClosure):
//
// 1) Java object sub-graph archiving starts from a given static field
//    within a Class instance (java mirror). If the static field is a
//    reference field and points to a non-null java object, proceed to
//    the next step.
//
// 2) Archives the referenced java object. If an archived copy of the
//    current object already exists, updates the pointer in the archived
//    copy of the referencing object to point to the current archived object.
//    Otherwise, proceed to the next step.
//
// 3) Follows all references within the current java object and recursively
//    archive the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of referencing object to
//    point to the current archived object.
//
// 5) The Klass of the current java object is added to the list of Klasses
//    for loading and initializing before any object in the archived graph can
//    be accessed at runtime.
//
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
                                                             const char* klass_name,
                                                             int field_offset,
                                                             const char* field_name,
                                                             bool is_closed_archive,
                                                             TRAPS) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();

  KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
  oop f = m->obj_field(field_offset);

  log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));

  if (!CompressedOops::is_null(f)) {
    if (log_is_enabled(Trace, cds, heap)) {
      LogTarget(Trace, cds, heap) log;
      LogStream out(log);
      f->print_on(&out);
    }

    oop af = archive_reachable_objects_from(1, subgraph_info, f,
                                            is_closed_archive, CHECK);

    if (af == NULL) {
      log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
                           klass_name, field_name);
    } else {
      // Note: the field value is not preserved in the archived mirror.
      // Record the field as a new subGraph entry point. The recorded
      // information is restored from the archive at runtime.
      subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
      log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
    }
  } else {
    // The field contains null, we still need to record the entry point,
    // so it can be restored at runtime.
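    // The null entry is encoded as {offset, null, 0}; at runtime,
    // materialize_archived_object() returns NULL for a null narrowOop, so the
    // mirror field is reset back to null.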
    subgraph_info->add_subgraph_entry_field(field_offset, NULL, false);
  }
}

#ifndef PRODUCT
class VerifySharedOopClosure: public BasicOopIterateClosure {
 private:
  bool _is_archived;

 public:
  VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}

  void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
  void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      HeapShared::verify_reachable_objects_from(obj, _is_archived);
    }
  }
};

void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
  oop f = m->obj_field(field_offset);
  if (!CompressedOops::is_null(f)) {
    verify_subgraph_from(f);
  }
}

void HeapShared::verify_subgraph_from(oop orig_obj) {
  oop archived_obj = find_archived_heap_object(orig_obj);
  if (archived_obj == NULL) {
    // It's OK for the root of a subgraph to be not archived. See comments in
    // archive_reachable_objects_from().
    return;
  }

  // Verify that all objects reachable from orig_obj are archived.
  init_seen_objects_table();
  verify_reachable_objects_from(orig_obj, false);
  delete_seen_objects_table();

  // Note: we could also verify that all objects reachable from the archived
  // copy of orig_obj can only point to archived objects, with:
  //      init_seen_objects_table();
  //      verify_reachable_objects_from(archived_obj, true);
  //      delete_seen_objects_table();
  // but that's already done in G1HeapVerifier::verify_archive_regions so we
  // won't do it here.
}

void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
  _num_total_verifications ++;
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);

    if (is_archived) {
      assert(is_archived_object(obj), "must be");
      assert(find_archived_heap_object(obj) == NULL, "must be");
    } else {
      assert(!is_archived_object(obj), "must be");
      assert(find_archived_heap_object(obj) != NULL, "must be");
    }

    VerifySharedOopClosure walker(is_archived);
    obj->oop_iterate(&walker);
  }
}
#endif

HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;

bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
  return _seen_objects_table->get(obj) != NULL;
}

void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
  assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
  _seen_objects_table->put(obj, true);
  ++ _num_new_walked_objs;
}

void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name) {
  log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
  init_seen_objects_table();
  _num_new_walked_objs = 0;
  _num_new_archived_objs = 0;
  _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
}

void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
  int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
    _num_old_recorded_klasses;
  log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
                      "walked %d objs, archived %d new objs, recorded %d classes",
                      class_name, _num_new_walked_objs, _num_new_archived_objs,
                      num_new_recorded_klasses);

  delete_seen_objects_table();

  _num_total_subgraph_recordings ++;
  _num_total_walked_objs      += _num_new_walked_objs;
  _num_total_archived_objs    += _num_new_archived_objs;
  _num_total_recorded_klasses += num_new_recorded_klasses;
}

class ArchivableStaticFieldFinder: public FieldClosure {
  InstanceKlass* _ik;
  Symbol* _field_name;
  bool _found;
  int _offset;
 public:
  ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
    _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}

  virtual void do_field(fieldDescriptor* fd) {
    if (fd->name() == _field_name) {
      assert(!_found, "fields cannot be overloaded");
      assert(is_reference_type(fd->field_type()), "can archive only fields that are references");
      _found = true;
      _offset = fd->offset();
    }
  }
  bool found() { return _found; }
  int offset() { return _offset; }
};

void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
                                            int num, Thread* THREAD) {
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    TempNewSymbol field_name =
      SymbolTable::new_symbol(info->field_name);

    Klass* k = SystemDictionary::resolve_or_null(klass_name, THREAD);
    assert(k != NULL && !HAS_PENDING_EXCEPTION, "class must exist");
    InstanceKlass* ik = InstanceKlass::cast(k);
    assert(InstanceKlass::cast(ik)->is_shared_boot_class(),
           "Only support boot classes");
    ik->initialize(THREAD);
    guarantee(!HAS_PENDING_EXCEPTION, "exception in initialize");

    ArchivableStaticFieldFinder finder(ik, field_name);
    ik->do_local_static_fields(&finder);
    assert(finder.found(), "field must exist");

    info->klass = ik;
    info->offset = finder.offset();
  }
}

void HeapShared::init_subgraph_entry_fields(Thread* THREAD) {
  _dump_time_subgraph_info_table = new (ResourceObj::C_HEAP, mtClass)DumpTimeKlassSubGraphInfoTable();

  init_subgraph_entry_fields(closed_archive_subgraph_entry_fields,
                             num_closed_archive_subgraph_entry_fields,
                             THREAD);
  init_subgraph_entry_fields(open_archive_subgraph_entry_fields,
                             num_open_archive_subgraph_entry_fields,
                             THREAD);
}

void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          int num, bool is_closed_archive,
                                          Thread* THREAD) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field.
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  int i;
  for (i = 0; i < num; ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name);

    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; i < num; i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }

      if (!info->full_module_graph_only || MetaspaceShared::use_full_module_graph()) {
        archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                    f->offset, f->field_name,
                                                    is_closed_archive, CHECK);
      }
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records in %s archive heap region = %d",
                      is_closed_archive ? "closed" : "open",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("  Verified %d references", _num_total_verifications);
#endif
}

// At dump-time, find the location of all the non-null oop pointers in an archived heap
// region.
// This way we can quickly relocate all the pointers without using
// BasicOopIterateClosure at runtime.
class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
  narrowOop* _start;
  BitMap *_oopmap;
  int _num_total_oops;
  int _num_null_oops;
 public:
  FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap)
    : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}

  virtual bool should_verify_oops(void) {
    return false;
  }
  virtual void do_oop(narrowOop* p) {
    _num_total_oops ++;
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      size_t idx = p - _start;
      _oopmap->set_bit(idx);
    } else {
      _num_null_oops ++;
    }
  }
  virtual void do_oop(oop *p) {
    ShouldNotReachHere();
  }
  int num_total_oops() const { return _num_total_oops; }
  int num_null_oops()  const { return _num_null_oops; }
};

ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
  assert(UseCompressedOops, "must be");
  size_t num_bits = region.byte_size() / sizeof(narrowOop);
  ResourceBitMap oopmap(num_bits);

  HeapWord* p   = region.start();
  HeapWord* end = region.end();
  FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap);

  int num_objs = 0;
  while (p < end) {
    oop o = (oop)p;
    o->oop_iterate(&finder);
    p += o->size();
    ++ num_objs;
  }

  log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d",
                      num_objs, finder.num_total_oops(), finder.num_null_oops());
  return oopmap;
}

// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

 public:
  PatchEmbeddedPointers(narrowOop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = HeapShared::decode_from_archive(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;
  }
};

void HeapShared::patch_archived_heap_embedded_pointers(MemRegion region, address oopmap,
                                                       size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);

#ifndef PRODUCT
  ResourceMark rm;
  ResourceBitMap checkBm = calculate_oopmap(region);
  assert(bm.is_same(checkBm), "sanity");
#endif

  PatchEmbeddedPointers patcher((narrowOop*)region.start());
  bm.iterate(&patcher);
}

#endif // INCLUDE_CDS_JAVA_HEAP