/*
 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/gcLocker.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "memory/archiveBuilder.hpp"
#include "memory/archiveUtils.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/bitMap.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

bool HeapShared::_closed_archive_heap_region_mapped = false;
bool HeapShared::_open_archive_heap_region_mapped = false;
bool HeapShared::_archive_heap_region_fixed = false;
address HeapShared::_narrow_oop_base;
int HeapShared::_narrow_oop_shift;
DumpedInternedStrings *HeapShared::_dumped_interned_strings = NULL;

//
// If you add new entries to the following tables, you should know what you're doing!
//

// Entry fields for shareable subgraphs archived in the closed archive heap
// region. Warning: Objects in the subgraphs should not have reference fields
// assigned at runtime.
static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache", "archivedCache"},
  {"java/lang/Long$LongCache", "archivedCache"},
  {"java/lang/Byte$ByteCache", "archivedCache"},
  {"java/lang/Short$ShortCache", "archivedCache"},
  {"java/lang/Character$CharacterCache", "archivedCache"},
  {"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale", "constantBaseLocales"},
};

// Entry fields for subgraphs archived in the open archive heap region.
static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
  {"java/util/ImmutableCollections", "archivedObjects"},
  {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger", "archivedCaches"},
};

// Entry fields for subgraphs archived in the open archive heap region (full module graph).
static ArchivableStaticFieldInfo fmg_open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
  {"jdk/internal/module/ArchivedBootLayer", "archivedBootLayer"},
  {"java/lang/Module$ArchivedData", "archivedData"},
};

const static int num_closed_archive_subgraph_entry_fields =
  sizeof(closed_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
const static int num_open_archive_subgraph_entry_fields =
  sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
const static int num_fmg_open_archive_subgraph_entry_fields =
  sizeof(fmg_open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);

static GrowableArrayCHeap<oop, mtClassShared>* _pending_roots = NULL;
static objArrayOop _dumptime_roots = NULL; // FIXME -- combine this with _runtime_roots??
static narrowOop _runtime_roots_narrow;
static OopHandle _runtime_roots;

////////////////////////////////////////////////////////////////
//
// Java heap object archiving support
//
////////////////////////////////////////////////////////////////
void HeapShared::fixup_mapped_heap_regions() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();
  mapinfo->fixup_mapped_heap_regions();
  set_archive_heap_region_fixed();
  if (is_mapped()) {
    _runtime_roots = OopHandle(Universe::vm_global(), HeapShared::materialize_archived_object(_runtime_roots_narrow));
    if (!MetaspaceShared::use_full_module_graph()) {
      // Need to remove all the archived java.lang.Module objects from HeapShared::roots().
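      // Those archived Module oops describe the full module graph, which is
      // not usable when the full module graph is disabled, so they must not
      // be handed out to Java code.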
      ClassLoaderDataShared::clear_archived_oops();
    }
  }
  SystemDictionaryShared::update_archived_mirror_native_pointers();
}

unsigned HeapShared::oop_hash(oop const& p) {
  assert(!p->mark().has_bias_pattern(),
         "this object should never have been locked"); // so identity_hash won't safepoint
  unsigned hash = (unsigned)p->identity_hash();
  return hash;
}

static void reset_states(oop obj, TRAPS) {
  Handle h_obj(THREAD, obj);
  InstanceKlass* klass = InstanceKlass::cast(obj->klass());
  TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
  Symbol* method_sig = vmSymbols::void_method_signature();

  while (klass != NULL) {
    Method* method = klass->find_method(method_name, method_sig);
    if (method != NULL) {
      assert(method->is_private(), "must be");
      if (log_is_enabled(Debug, cds)) {
        ResourceMark rm(THREAD);
        log_debug(cds)("  calling %s", method->name_and_sig_as_C_string());
      }
      JavaValue result(T_VOID);
      JavaCalls::call_special(&result, h_obj, klass,
                              method_name, method_sig, CHECK);
    }
    klass = klass->java_super();
  }
}

void HeapShared::reset_archived_object_states(TRAPS) {
  assert(DumpSharedSpaces, "dump-time only");
  log_debug(cds)("Resetting platform loader");
  reset_states(SystemDictionary::java_platform_loader(), THREAD);
  log_debug(cds)("Resetting system loader");
  reset_states(SystemDictionary::java_system_loader(), THREAD);
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
oop HeapShared::find_archived_heap_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");
  ArchivedObjectCache* cache = archived_object_cache();
  oop* p = cache->get(obj);
  if (p != NULL) {
    return *p;
  } else {
    return NULL;
  }
}

int HeapShared::append_root(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");

  // No GC should happen since we aren't scanning _pending_roots.
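  // _pending_roots is a C-heap array holding raw oops; GC does not visit it,
  // so a GC cycle here could leave stale pointers behind.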
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  if (_pending_roots == NULL) {
    _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
  }

  return _pending_roots->append(obj);
}

objArrayOop HeapShared::roots() {
  if (DumpSharedSpaces) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    return _dumptime_roots;
  } else {
    assert(UseSharedSpaces, "must be");
    objArrayOop roots = (objArrayOop)_runtime_roots.resolve();
    assert(roots != NULL, "should have been initialized");
    return roots;
  }
}

void HeapShared::set_roots(narrowOop roots) {
  assert(UseSharedSpaces, "runtime only");
  assert(open_archive_heap_region_mapped(), "must be");
  _runtime_roots_narrow = roots;
}

oop HeapShared::get_root(int index, bool clear) {
  assert(index >= 0, "sanity");
  if (DumpSharedSpaces) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    assert(_pending_roots != NULL, "sanity");
    return _pending_roots->at(index);
  } else {
    assert(UseSharedSpaces, "must be");
    assert(!_runtime_roots.is_empty(), "must have loaded shared heap");
    oop result = roots()->obj_at(index);
    if (clear) {
      clear_root(index);
    }
    return result;
  }
}

void HeapShared::clear_root(int index) {
  assert(index >= 0, "sanity");
  assert(UseSharedSpaces, "must be");
  if (open_archive_heap_region_mapped()) {
    if (log_is_enabled(Debug, cds, heap)) {
      oop old = roots()->obj_at(index);
      log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
    }
    roots()->obj_at_put(index, NULL);
  }
}

oop HeapShared::archive_heap_object(oop obj, Thread* THREAD) {
  assert(DumpSharedSpaces, "dump-time only");

  oop ao = find_archived_heap_object(obj);
  if (ao != NULL) {
    // already archived
    return ao;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), (size_t)obj->size());
    return NULL;
  }

  oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
  if (archived_oop != NULL) {
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
    MetaspaceShared::relocate_klass_ptr(archived_oop);
    // Reinitialize markword to remove age/marking/locking/etc.
    //
    // We need to retain the identity_hash, because it may have been used by some hashtables
    // in the shared heap. This also has the side effect of pre-initializing the
    // identity_hash for all shared objects, so they are less likely to be written
    // into during run time, increasing the potential of memory sharing.
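    // (If the mark word is never written at runtime, the page containing this
    // object can stay copy-on-write-shared across processes that map the same
    // archive.)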
    int hash_original = obj->identity_hash();
    archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original));
    assert(archived_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
    assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);

    ArchivedObjectCache* cache = archived_object_cache();
    cache->put(obj, archived_oop);
    if (log_is_enabled(Debug, cds, heap)) {
      ResourceMark rm;
      log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT " : %s",
                           p2i(obj), p2i(archived_oop), obj->klass()->external_name());
    }
  } else {
    log_error(cds, heap)(
      "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
      p2i(obj));
    vm_exit(1);
  }
  return archived_oop;
}

oop HeapShared::materialize_archived_object(narrowOop v) {
  assert(archive_heap_region_fixed(),
         "must be called after archive heap regions are fixed");
  if (!CompressedOops::is_null(v)) {
    oop obj = HeapShared::decode_from_archive(v);
    return G1CollectedHeap::heap()->materialize_archived_object(obj);
  }
  return NULL;
}

void HeapShared::archive_klass_objects(Thread* THREAD) {
  GrowableArray<Klass*>* klasses = MetaspaceShared::collected_klasses();
  assert(klasses != NULL, "sanity");
  for (int i = 0; i < klasses->length(); i++) {
    Klass* k = klasses->at(i);

    // archive mirror object
    java_lang_Class::archive_mirror(k, CHECK);

    // archive the resolved_references array
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->archive_resolved_references(THREAD);
    }
  }
}

void HeapShared::run_full_gc_in_vm_thread() {
  if (is_heap_object_archiving_allowed()) {
    // Avoid fragmentation while archiving heap objects.
    // We do this inside a safepoint, so that no further allocation can happen after GC
    // has finished.
    if (GCLocker::is_active()) {
      // Just checking for safety ...
      // This should not happen during -Xshare:dump. If you see this, probably the Java core lib
      // has been modified such that JNI code is executed in some clean up threads after
      // we have finished class loading.
      log_warning(cds)("GC locker is held, unable to start extra compacting GC. This may produce suboptimal results.");
    } else {
      log_info(cds)("Run GC ...");
      Universe::heap()->collect_as_vm_thread(GCCause::_archive_time_gc);
      log_info(cds)("Run GC done");
    }
  }
}

// Returns an objArray that contains all the roots of the archived objects
void HeapShared::archive_java_heap_objects(GrowableArray<MemRegion> *closed,
                                           GrowableArray<MemRegion> *open) {
  if (!is_heap_object_archiving_allowed()) {
    log_info(cds)(
      "Archived java heap is not supported as UseG1GC, "
      "UseCompressedOops and UseCompressedClassPointers are required. "
345 "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.", 346 BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops), 347 BOOL_TO_STR(UseCompressedClassPointers)); 348 return; 349 } 350 351 G1HeapVerifier::verify_ready_for_archiving(); 352 353 { 354 NoSafepointVerifier nsv; 355 356 // Cache for recording where the archived objects are copied to 357 create_archived_object_cache(); 358 359 log_info(cds)("Dumping objects to closed archive heap region ..."); 360 copy_closed_archive_heap_objects(closed); 361 362 log_info(cds)("Dumping objects to open archive heap region ..."); 363 copy_open_archive_heap_objects(open); 364 365 destroy_archived_object_cache(); 366 } 367 368 G1HeapVerifier::verify_archive_regions(); 369 } 370 371 void HeapShared::copy_closed_archive_heap_objects( 372 GrowableArray<MemRegion> * closed_archive) { 373 assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects"); 374 375 Thread* THREAD = Thread::current(); 376 G1CollectedHeap::heap()->begin_archive_alloc_range(); 377 378 // Archive interned string objects 379 StringTable::write_to_archive(_dumped_interned_strings); 380 381 archive_object_subgraphs(closed_archive_subgraph_entry_fields, 382 num_closed_archive_subgraph_entry_fields, 383 true /* is_closed_archive */, 384 false /* is_full_module_graph */, 385 THREAD); 386 387 G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive, 388 os::vm_allocation_granularity()); 389 } 390 391 void HeapShared::copy_open_archive_heap_objects( 392 GrowableArray<MemRegion> * open_archive) { 393 assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects"); 394 395 Thread* THREAD = Thread::current(); 396 G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */); 397 398 java_lang_Class::archive_basic_type_mirrors(THREAD); 399 400 archive_klass_objects(THREAD); 401 402 archive_object_subgraphs(open_archive_subgraph_entry_fields, 403 num_open_archive_subgraph_entry_fields, 404 false /* is_closed_archive */, 405 false /* is_full_module_graph */, 406 THREAD); 407 if (MetaspaceShared::use_full_module_graph()) { 408 archive_object_subgraphs(fmg_open_archive_subgraph_entry_fields, 409 num_fmg_open_archive_subgraph_entry_fields, 410 false /* is_closed_archive */, 411 true /* is_full_module_graph */, 412 THREAD); 413 ClassLoaderDataShared::init_archived_oops(); 414 } 415 416 copy_roots(); 417 418 G1CollectedHeap::heap()->end_archive_alloc_range(open_archive, 419 os::vm_allocation_granularity()); 420 } 421 422 // Copy _pending_archive_roots into an objArray 423 void HeapShared::copy_roots() { 424 int length = _pending_roots != NULL ? _pending_roots->length() : 0; 425 int size = objArrayOopDesc::object_size(length); 426 Klass *k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass 427 HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size); 428 429 memset(mem, 0, size * BytesPerWord); // Is this correct?? 
  {
    // This is copied from MemAllocator::finish
    if (UseBiasedLocking) {
      oopDesc::set_mark(mem, k->prototype_header());
    } else {
      oopDesc::set_mark(mem, markWord::prototype());
    }
    oopDesc::release_set_klass(mem, k);
  }
  {
    // This is copied from ObjArrayAllocator::initialize
    arrayOopDesc::set_length(mem, length);
  }

  _dumptime_roots = (objArrayOop)mem;
  for (int i = 0; i < length; i++) {
    _dumptime_roots->obj_at_put(i, _pending_roots->at(i));
  }
  log_info(cds)("archived obj roots[%d] = %d words, klass = %p, obj = %p", length, size, k, mem);
}

void HeapShared::init_narrow_oop_decoding(address base, int shift) {
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}

//
// Subgraph archiving support
//
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_table;

// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the relocated
// Klass* of the original k.
KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
  assert(DumpSharedSpaces, "dump time only");
  bool created;
  Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
  KlassSubGraphInfo* info =
    _dump_time_subgraph_info_table->put_if_absent(relocated_k, KlassSubGraphInfo(relocated_k, is_full_module_graph),
                                                  &created);
  assert(created, "must not initialize twice");
  return info;
}

KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(DumpSharedSpaces, "dump time only");
  Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
  assert(info != NULL, "must have been initialized");
  return info;
}

// Add an entry field to the current KlassSubGraphInfo.
void KlassSubGraphInfo::add_subgraph_entry_field(
    int static_field_offset, oop v, bool is_closed_archive) {
  assert(DumpSharedSpaces, "dump time only");
  if (_subgraph_entry_fields == NULL) {
    _subgraph_entry_fields =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, mtClass);
  }
  _subgraph_entry_fields->append(static_field_offset);
  _subgraph_entry_fields->append(HeapShared::append_root(v));
  _subgraph_entry_fields->append(is_closed_archive ? 1 : 0);
}

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k, Klass *relocated_k) {
  assert(DumpSharedSpaces, "dump time only");
  assert(relocated_k == MetaspaceShared::get_relocated_klass(orig_k),
         "must be the relocated Klass in the shared space");

  if (_subgraph_object_klasses == NULL) {
    _subgraph_object_klasses =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, mtClass);
  }

  assert(ArchiveBuilder::singleton()->is_in_buffer_space(relocated_k), "must be a shared class");

  if (_k == relocated_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (relocated_k->is_instance_klass()) {
    assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(),
           "must be boot class");
    // SystemDictionary::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == SystemDictionary::String_klass() ||
        orig_k == SystemDictionary::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
  } else if (relocated_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
             "must be boot class");
    }
    if (relocated_k == Universe::objectArrayKlassObj()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(relocated_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(relocated_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(relocated_k);
  _has_non_early_klasses |= is_non_early_klass(orig_k);
}

bool KlassSubGraphInfo::is_non_early_klass(Klass* k) {
  if (k->is_objArray_klass()) {
    k = ObjArrayKlass::cast(k)->bottom_klass();
  }
  if (k->is_instance_klass()) {
    if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) {
      ResourceMark rm;
      log_info(cds, heap)("non-early: %s", k->external_name());
      return true;
    } else {
      return false;
    }
  } else {
    return false;
  }
}

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
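// The entry fields are stored as flat triplets of ints:
//     [ field_offset, root_index, is_closed_archive, ... ]
// matching the order appended by KlassSubGraphInfo::add_subgraph_entry_field()
// and decoded at runtime by HeapShared::init_archived_fields_for().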
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = info->klass();
  _entry_field_records = NULL;
  _subgraph_object_klasses = NULL;
  _is_full_module_graph = info->is_full_module_graph();
  _has_non_early_klasses = info->has_non_early_klasses();

  if (_has_non_early_klasses) {
    ResourceMark rm;
    log_info(cds, heap)(
      "Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled",
      _k->external_name());
  }

  // populate the entry fields
  GrowableArray<int>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != NULL) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 3 == 0, "sanity");
    _entry_field_records =
      MetaspaceShared::new_ro_array<int>(num_entry_fields);
    for (int i = 0 ; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
  }

  // the Klasses of the objects in the sub-graphs
  GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
  if (subgraph_object_klasses != NULL) {
    int num_subgraphs_klasses = subgraph_object_klasses->length();
    _subgraph_object_klasses =
      MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses);
    for (int i = 0; i < num_subgraphs_klasses; i++) {
      Klass* subgraph_k = subgraph_object_klasses->at(i);
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)(
          "Archived object klass %s (%2d) => %s",
          _k->external_name(), i, subgraph_k->external_name());
      }
      _subgraph_object_klasses->at_put(i, subgraph_k);
      ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
    }
  }

  ArchivePtrMarker::mark_pointer(&_k);
  ArchivePtrMarker::mark_pointer(&_entry_field_records);
  ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
}

struct CopyKlassSubGraphInfoToArchive : StackObj {
  CompactHashtableWriter* _writer;
  CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}

  bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
    if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
      ArchivedKlassSubGraphInfoRecord* record =
        (ArchivedKlassSubGraphInfoRecord*)MetaspaceShared::read_only_space_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
      record->init(&info);

      unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(klass);
      u4 delta = MetaspaceShared::object_delta_u4(record);
      _writer->add(hash, delta);
    }
    return true; // keep on iterating
  }
};

// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class mirror. The entry
//   points are static fields in the mirror. For each entry point, the field
//   offset, value and is_closed_archive flag are recorded in the sub-graph
//   info. The value is stored back to the corresponding field at runtime.
// - A list of klasses that need to be loaded/initialized before archived
//   java object sub-graph can be accessed at runtime.
void HeapShared::write_subgraph_info_table() {
  // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
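  // The resulting CompactHashtable is keyed by the same hash that
  // SystemDictionaryShared uses for shared classes, so at runtime
  // resolve_or_init_classes_for_subgraph_of() can look up a record
  // without loading any classes first.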
  DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
  CompactHashtableStats stats;

  _run_time_subgraph_info_table.reset();

  CompactHashtableWriter writer(d_table->_count, &stats);
  CopyKlassSubGraphInfoToArchive copy(&writer);
  d_table->iterate(&copy);

  writer.dump(&_run_time_subgraph_info_table, "subgraphs");
}

void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) {
  _run_time_subgraph_info_table.serialize_header(soc);
}

static void verify_the_heap(Klass* k, const char* which) {
  if (VerifyArchivedFields) {
    ResourceMark rm;
    log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
                        which, k->external_name());
    VM_Verify verify_op;
    VMThread::execute(&verify_op);
#if 0
    // For some reason, this causes jtreg to lock up with
    // "jtreg -vmoptions:-XX:+VerifyArchivedFields HelloTest.java"
    if (is_init_completed()) {
      FlagSetting fs1(VerifyBeforeGC, true);
      FlagSetting fs2(VerifyDuringGC, true);
      FlagSetting fs3(VerifyAfterGC,  true);
      Universe::heap()->collect(GCCause::_java_lang_system_gc);
    }
#endif
  }
}

// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(TRAPS) {
  if (!is_mapped()) {
    return; // nothing to do
  }
  resolve_classes_for_subgraphs(closed_archive_subgraph_entry_fields,
                                num_closed_archive_subgraph_entry_fields,
                                CHECK);
  resolve_classes_for_subgraphs(open_archive_subgraph_entry_fields,
                                num_open_archive_subgraph_entry_fields,
                                CHECK);
  resolve_classes_for_subgraphs(fmg_open_archive_subgraph_entry_fields,
                                num_fmg_open_archive_subgraph_entry_fields,
                                CHECK);
}

void HeapShared::resolve_classes_for_subgraphs(ArchivableStaticFieldInfo fields[],
                                               int num, TRAPS) {
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
    assert(k != NULL && k->is_shared_boot_class(), "sanity");
    resolve_classes_for_subgraph_of(k, CHECK);
  }
}

void HeapShared::resolve_classes_for_subgraph_of(Klass* k, TRAPS) {
  const ArchivedKlassSubGraphInfoRecord* record = resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
  }
  if (record == NULL) {
    clear_archived_roots_of(k);
  }
}

void HeapShared::initialize_from_archived_subgraph(Klass* k, TRAPS) {
  if (!is_mapped()) {
    return; // nothing to do
  }

  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);

  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
    // None of the field values will be set if there was an exception when initializing the classes.
    // The java code will not see any of the archived objects in the
    // subgraphs referenced from k in this case.
    return;
  }

  if (record != NULL) {
    init_archived_fields_for(k, record, THREAD);
  }
}

const ArchivedKlassSubGraphInfoRecord*
HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) {
  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

  // Initialize from archived data. Currently this is done only
  // during VM initialization time. No lock is needed.
  if (record != NULL) {
    if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) {
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled",
                            k->external_name());
      }
      return NULL;
    }

    if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
                            k->external_name());
      }
      return NULL;
    }

    resolve_or_init(k, do_init, CHECK_NULL);

    // Load/link/initialize the klasses of the objects in the subgraph.
    // NULL class loader is used.
    Array<Klass*>* klasses = record->subgraph_object_klasses();
    if (klasses != NULL) {
      for (int i = 0; i < klasses->length(); i++) {
        resolve_or_init(klasses->at(i), do_init, CHECK_NULL);
      }
    }
  }

  return record;
}

void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
  if (!do_init) {
    if (k->class_loader_data() == NULL) {
      Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
      assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
    }
  } else {
    assert(k->class_loader_data() != NULL, "must have been resolved by HeapShared::resolve_classes");
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->initialize(CHECK);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* oak = ObjArrayKlass::cast(k);
      oak->initialize(CHECK);
    }
  }
}

void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record, TRAPS) {
  verify_the_heap(k, "before");

  // Load the subgraph entry fields from the record and store them back to
  // the corresponding fields within the mirror.
  oop m = k->java_mirror();
  Array<int>* entry_field_records = record->entry_field_records();
  if (entry_field_records != NULL) {
    int efr_len = entry_field_records->length();
    assert(efr_len % 3 == 0, "sanity");
    for (int i = 0; i < efr_len; i += 3) {
      int field_offset = entry_field_records->at(i);
      int root_index = entry_field_records->at(i+1);
      int is_closed_archive = entry_field_records->at(i+2);
      oop v = get_root(root_index, /*clear=*/true);
      m->obj_field_put(field_offset, v);
      log_debug(cds, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
    }

    // Done. Java code can see the archived sub-graphs referenced from k's
    // mirror after this point.
    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm;
      log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s",
                          k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "");
    }
  }

  verify_the_heap(k, "after ");
}

void HeapShared::clear_archived_roots_of(Klass* k) {
  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
  if (record != NULL) {
    Array<int>* entry_field_records = record->entry_field_records();
    if (entry_field_records != NULL) {
      int efr_len = entry_field_records->length();
      assert(efr_len % 3 == 0, "sanity");
      for (int i = 0; i < efr_len; i += 3) {
        int root_index = entry_field_records->at(i+1);
        clear_root(root_index);
      }
    }
  }
}

class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
  int _level;
  bool _is_closed_archive;
  bool _record_klasses_only;
  KlassSubGraphInfo* _subgraph_info;
  oop _orig_referencing_obj;
  oop _archived_referencing_obj;
  Thread* _thread;
 public:
  WalkOopAndArchiveClosure(int level,
                           bool is_closed_archive,
                           bool record_klasses_only,
                           KlassSubGraphInfo* subgraph_info,
                           oop orig, oop archived, TRAPS) :
    _level(level), _is_closed_archive(is_closed_archive),
    _record_klasses_only(record_klasses_only),
    _subgraph_info(subgraph_info),
    _orig_referencing_obj(orig), _archived_referencing_obj(archived),
    _thread(THREAD) {}
  void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
  void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      assert(!HeapShared::is_archived_object(obj),
             "original objects must not point to archived objects");

      size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
      T* new_p = (T*)(cast_from_oop<address>(_archived_referencing_obj) + field_delta);
      Thread* THREAD = _thread;

      if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
        ResourceMark rm;
        log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size %d %s", _level,
                             _orig_referencing_obj->klass()->external_name(), field_delta,
                             p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
        LogTarget(Trace, cds, heap) log;
        LogStream out(log);
        obj->print_on(&out);
      }

      oop archived = HeapShared::archive_reachable_objects_from(
          _level + 1, _subgraph_info, obj, _is_closed_archive, THREAD);
      assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1");
      assert(HeapShared::is_archived_object(archived), "must be");

      if (!_record_klasses_only) {
        // Update the reference in the archived copy of the referencing object.
        log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
                             _level, p2i(new_p), p2i(obj), p2i(archived));
        RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
      }
    }
  }
};

void HeapShared::check_closed_archive_heap_region_object(InstanceKlass* k,
                                                         Thread* THREAD) {
  // Check fields in the object
  for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
    if (!fs.access_flags().is_static()) {
      BasicType ft = fs.field_descriptor().field_type();
      if (!fs.access_flags().is_final() && is_reference_type(ft)) {
        ResourceMark rm(THREAD);
        log_warning(cds, heap)(
          "Please check reference field in %s instance in closed archive heap region: %s %s",
          k->external_name(), (fs.name())->as_C_string(),
          (fs.signature())->as_C_string());
      }
    }
  }
}

void HeapShared::check_module_oop(oop orig_module_obj) {
  assert(DumpSharedSpaces, "must be");
  assert(java_lang_Module::is_instance(orig_module_obj), "must be");
  ModuleEntry* orig_module_ent = java_lang_Module::module_entry_raw(orig_module_obj);
  if (orig_module_ent == NULL) {
    // These special Module objects are created in Java code. They are not
    // defined via Modules::define_module(), so they don't have a ModuleEntry:
    //     java.lang.Module::ALL_UNNAMED_MODULE
    //     java.lang.Module::EVERYONE_MODULE
    //     jdk.internal.loader.ClassLoaders$BootClassLoader::unnamedModule
    assert(java_lang_Module::name(orig_module_obj) == NULL, "must be unnamed");
    log_info(cds, heap)("Module oop with No ModuleEntry* @[" PTR_FORMAT "]", p2i(orig_module_obj));
  } else {
    ClassLoaderData* loader_data = orig_module_ent->loader_data();
    assert(loader_data->is_builtin_class_loader_data(), "must be");
  }
}


// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of orig_obj and all reachable objects.
oop HeapShared::archive_reachable_objects_from(int level,
                                               KlassSubGraphInfo* subgraph_info,
                                               oop orig_obj,
                                               bool is_closed_archive,
                                               TRAPS) {
  assert(orig_obj != NULL, "must be");
  assert(!is_archived_object(orig_obj), "sanity");

  if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
    // This object has injected fields that cannot be supported easily, so we disallow them for now.
    // If you get an error here, you probably made a change in the JDK library that has added
    // these objects that are referenced (directly or indirectly) by static fields.
    ResourceMark rm;
    log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name());
    vm_exit(1);
  }

  // java.lang.Class instances cannot be included in an archived object sub-graph. We only support
  // them as Klass::_archived_mirror because they need to be specially restored at run time.
  //
  // If you get an error here, you probably made a change in the JDK library that has added a Class
  // object that is referenced (directly or indirectly) by static fields.
  if (java_lang_Class::is_instance(orig_obj)) {
    log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
    vm_exit(1);
  }

  oop archived_obj = find_archived_heap_object(orig_obj);
  if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
    // To save time, don't walk strings that are already archived. They just contain
    // pointers to a type array, whose klass doesn't need to be recorded.
    return archived_obj;
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return archived_obj;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool record_klasses_only = (archived_obj != NULL);
  if (archived_obj == NULL) {
    ++_num_new_archived_objs;
    archived_obj = archive_heap_object(orig_obj, THREAD);
    if (archived_obj == NULL) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size %d, skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return NULL;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        vm_exit(1);
      }
    }

    if (java_lang_Module::is_instance(orig_obj)) {
      check_module_oop(orig_obj);
      java_lang_Module::set_module_entry(archived_obj, NULL);
      java_lang_Module::set_loader(archived_obj, NULL);
    } else if (java_lang_ClassLoader::is_instance(orig_obj)) {
      // class_data will be restored explicitly at run time.
      guarantee(orig_obj == SystemDictionary::java_platform_loader() ||
                orig_obj == SystemDictionary::java_system_loader() ||
                java_lang_ClassLoader::loader_data_raw(orig_obj) == NULL, "must be");
      java_lang_ClassLoader::release_set_loader_data(archived_obj, NULL);
    }
  }

  assert(archived_obj != NULL, "must be");
  Klass *orig_k = orig_obj->klass();
  Klass *relocated_k = archived_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k, relocated_k);

  WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
                                  subgraph_info, orig_obj, archived_obj, THREAD);
  orig_obj->oop_iterate(&walker);
  if (is_closed_archive && orig_k->is_instance_klass()) {
    check_closed_archive_heap_region_object(InstanceKlass::cast(orig_k), THREAD);
  }
  return archived_obj;
}

//
// Start from the given static field in a java mirror and archive the
// complete sub-graph of java heap objects that are reached directly
// or indirectly from the starting object by following references.
// Sub-graph archiving restrictions (current):
//
// - All classes of objects in the archived sub-graph (including the
//   entry class) must be boot class only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. Mirror can only be the sub-graph entry object.
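//
// For example, archiving java/lang/Integer$IntegerCache::archivedCache
// (see closed_archive_subgraph_entry_fields above) starts from the Integer[]
// held by that static field and pulls in every cached Integer it references;
// all of these are instances of boot classes, so the restrictions above hold.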
//
// The Java heap object sub-graph archiving process (see
// WalkOopAndArchiveClosure):
//
// 1) Java object sub-graph archiving starts from a given static field
// within a Class instance (java mirror). If the static field is a
// reference field and points to a non-null java object, proceed to
// the next step.
//
// 2) Archives the referenced java object. If an archived copy of the
// current object already exists, updates the pointer in the archived
// copy of the referencing object to point to the current archived object.
// Otherwise, proceed to the next step.
//
// 3) Follows all references within the current java object and recursively
// archives the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of the referencing object to
// point to the current archived object.
//
// 5) The Klass of the current java object is added to the list of Klasses
// for loading and initializing before any object in the archived graph can
// be accessed at runtime.
//
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
                                                             const char* klass_name,
                                                             int field_offset,
                                                             const char* field_name,
                                                             bool is_closed_archive,
                                                             TRAPS) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();

  KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
  oop f = m->obj_field(field_offset);

  log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));

  if (!CompressedOops::is_null(f)) {
    if (log_is_enabled(Trace, cds, heap)) {
      LogTarget(Trace, cds, heap) log;
      LogStream out(log);
      f->print_on(&out);
    }

    oop af = archive_reachable_objects_from(1, subgraph_info, f,
                                            is_closed_archive, CHECK);

    if (af == NULL) {
      log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
                           klass_name, field_name);
    } else {
      // Note: the field value is not preserved in the archived mirror.
      // Record the field as a new subGraph entry point. The recorded
      // information is restored from the archive at runtime.
      subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
      log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
    }
  } else {
    // The field contains null, we still need to record the entry point,
    // so it can be restored at runtime.
    subgraph_info->add_subgraph_entry_field(field_offset, NULL, false);
  }
}

#ifndef PRODUCT
class VerifySharedOopClosure: public BasicOopIterateClosure {
 private:
  bool _is_archived;

 public:
  VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}

  void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
  void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      HeapShared::verify_reachable_objects_from(obj, _is_archived);
    }
  }
};

void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
  oop f = m->obj_field(field_offset);
  if (!CompressedOops::is_null(f)) {
    verify_subgraph_from(f);
  }
}

void HeapShared::verify_subgraph_from(oop orig_obj) {
  oop archived_obj = find_archived_heap_object(orig_obj);
  if (archived_obj == NULL) {
    // It's OK for the root of a subgraph to be not archived. See comments in
    // archive_reachable_objects_from().
    return;
  }

  // Verify that all objects reachable from orig_obj are archived.
  init_seen_objects_table();
  verify_reachable_objects_from(orig_obj, false);
  delete_seen_objects_table();

  // Note: we could also verify that all objects reachable from the archived
  // copy of orig_obj can only point to archived objects, with:
  //      init_seen_objects_table();
  //      verify_reachable_objects_from(archived_obj, true);
  //      delete_seen_objects_table();
  // but that's already done in G1HeapVerifier::verify_archive_regions so we
  // won't do it here.
}

void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
  _num_total_verifications ++;
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);

    if (is_archived) {
      assert(is_archived_object(obj), "must be");
      assert(find_archived_heap_object(obj) == NULL, "must be");
    } else {
      assert(!is_archived_object(obj), "must be");
      assert(find_archived_heap_object(obj) != NULL, "must be");
    }

    VerifySharedOopClosure walker(is_archived);
    obj->oop_iterate(&walker);
  }
}
#endif

HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;

bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
  return _seen_objects_table->get(obj) != NULL;
}

void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
  assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
  _seen_objects_table->put(obj, true);
  ++ _num_new_walked_objs;
}

void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name, bool is_full_module_graph) {
  log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
  init_subgraph_info(k, is_full_module_graph);
  init_seen_objects_table();
  _num_new_walked_objs = 0;
  _num_new_archived_objs = 0;
  _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
}

void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
  int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
                                 _num_old_recorded_klasses;
  log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
                      "walked %d objs, archived %d new objs, recorded %d classes",
                      class_name, _num_new_walked_objs, _num_new_archived_objs,
                      num_new_recorded_klasses);

  delete_seen_objects_table();

  _num_total_subgraph_recordings ++;
  _num_total_walked_objs += _num_new_walked_objs;
  _num_total_archived_objs += _num_new_archived_objs;
  _num_total_recorded_klasses += num_new_recorded_klasses;
}

class ArchivableStaticFieldFinder: public FieldClosure {
  InstanceKlass* _ik;
  Symbol* _field_name;
  bool _found;
  int _offset;
 public:
  ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
    _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}

  virtual void do_field(fieldDescriptor* fd) {
    if (fd->name() == _field_name) {
      assert(!_found, "fields cannot be overloaded");
      assert(is_reference_type(fd->field_type()), "can archive only fields that are references");
      _found = true;
      _offset = fd->offset();
    }
  }
  bool found() { return _found; }
  int offset() { return _offset; }
};

void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
                                            int num, Thread* THREAD) {
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name);

    Klass* k = SystemDictionary::resolve_or_null(klass_name, THREAD);
    assert(k != NULL && !HAS_PENDING_EXCEPTION, "class must exist");
    InstanceKlass* ik = InstanceKlass::cast(k);
    assert(InstanceKlass::cast(ik)->is_shared_boot_class(),
           "Only support boot classes");
    ik->initialize(THREAD);
    guarantee(!HAS_PENDING_EXCEPTION, "exception in initialize");

    ArchivableStaticFieldFinder finder(ik, field_name);
    ik->do_local_static_fields(&finder);
    assert(finder.found(), "field must exist");

    info->klass = ik;
    info->offset = finder.offset();
  }
}

void HeapShared::init_subgraph_entry_fields(Thread* THREAD) {
  _dump_time_subgraph_info_table = new (ResourceObj::C_HEAP, mtClass)DumpTimeKlassSubGraphInfoTable();

  init_subgraph_entry_fields(closed_archive_subgraph_entry_fields,
                             num_closed_archive_subgraph_entry_fields,
                             THREAD);
  init_subgraph_entry_fields(open_archive_subgraph_entry_fields,
                             num_open_archive_subgraph_entry_fields,
                             THREAD);
  if (MetaspaceShared::use_full_module_graph()) {
    init_subgraph_entry_fields(fmg_open_archive_subgraph_entry_fields,
                               num_fmg_open_archive_subgraph_entry_fields,
                               THREAD);
  }
}

void HeapShared::init_for_dumping(Thread* THREAD) {
  _dumped_interned_strings = new (ResourceObj::C_HEAP, mtClass)DumpedInternedStrings();
  init_subgraph_entry_fields(THREAD);
}

void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          int num, bool is_closed_archive,
                                          bool is_full_module_graph,
                                          Thread* THREAD) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  int i;
  for (i = 0; i < num; ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; i < num; i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }

      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name,
                                                  is_closed_archive, CHECK);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records in %s archive heap region = %d",
                      is_closed_archive ? "closed" : "open",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("  Verified %d references", _num_total_verifications);
#endif
}

// Not all the strings in the global StringTable are dumped into the archive, because
// some of those strings may be only referenced by classes that are excluded from
// the archive. We need to explicitly mark the strings that are:
// [1] used by classes that WILL be archived;
// [2] included in the SharedArchiveConfigFile.
void HeapShared::add_to_dumped_interned_strings(oop string) {
  assert_at_safepoint(); // DumpedInternedStrings uses raw oops
  bool created;
  _dumped_interned_strings->put_if_absent(string, true, &created);
}

// At dump-time, find the location of all the non-null oop pointers in an archived heap
// region. This way we can quickly relocate all the pointers without using
// BasicOopIterateClosure at runtime.
class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
  narrowOop* _start;
  BitMap *_oopmap;
  int _num_total_oops;
  int _num_null_oops;
 public:
  FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap)
    : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}

  virtual bool should_verify_oops(void) {
    return false;
  }
  virtual void do_oop(narrowOop* p) {
    _num_total_oops ++;
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      size_t idx = p - _start;
      _oopmap->set_bit(idx);
    } else {
      _num_null_oops ++;
    }
  }
  virtual void do_oop(oop *p) {
    ShouldNotReachHere();
  }
  int num_total_oops() const { return _num_total_oops; }
  int num_null_oops() const { return _num_null_oops; }
};

ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
  assert(UseCompressedOops, "must be");
  size_t num_bits = region.byte_size() / sizeof(narrowOop);
  ResourceBitMap oopmap(num_bits);

  HeapWord* p = region.start();
  HeapWord* end = region.end();
  FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap);

  int num_objs = 0;
  while (p < end) {
    oop o = (oop)p;
    o->oop_iterate(&finder);
    p += o->size();
    ++ num_objs;
  }

  log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d",
                      num_objs, finder.num_total_oops(), finder.num_null_oops());
  return oopmap;
}

// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

 public:
  PatchEmbeddedPointers(narrowOop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = HeapShared::decode_from_archive(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;
  }
};

void HeapShared::patch_archived_heap_embedded_pointers(MemRegion region, address oopmap,
                                                       size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);

#ifndef PRODUCT
  ResourceMark rm;
  ResourceBitMap checkBm = calculate_oopmap(region);
  assert(bm.is_same(checkBm), "sanity");
#endif

  PatchEmbeddedPointers patcher((narrowOop*)region.start());
  bm.iterate(&patcher);
}

#endif // INCLUDE_CDS_JAVA_HEAP