/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/bitMap.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

bool HeapShared::_closed_archive_heap_region_mapped = false;
bool HeapShared::_open_archive_heap_region_mapped = false;
bool HeapShared::_archive_heap_region_fixed = false;

address HeapShared::_narrow_oop_base;
int     HeapShared::_narrow_oop_shift;

//
// If you add new entries to the following tables, you should know what you're doing!
//

// Entry fields for shareable subgraphs archived in the closed archive heap
// region. Warning: Objects in the subgraphs should not have reference fields
// assigned at runtime.
static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",           "archivedCache"},
  {"java/lang/Long$LongCache",                 "archivedCache"},
  {"java/lang/Byte$ByteCache",                 "archivedCache"},
  {"java/lang/Short$ShortCache",               "archivedCache"},
  {"java/lang/Character$CharacterCache",       "archivedCache"},
  {"java/util/jar/Attributes$Name",            "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",               "constantBaseLocales"},
};
// Entry fields for subgraphs archived in the open archive heap region.
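// Unlike the closed region, objects in these subgraphs may have reference
// fields assigned at runtime; they are materialized and become 'known' to
// the GC when first accessed (see initialize_from_archived_subgraph()).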
static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedModuleGraph"},
  {"java/util/ImmutableCollections$ListN",     "EMPTY_LIST"},
  {"java/util/ImmutableCollections$MapN",      "EMPTY_MAP"},
  {"java/util/ImmutableCollections$SetN",      "EMPTY_SET"},
  {"java/lang/module/Configuration",           "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",           "archivedCaches"},
  {"java/math/BigInteger",                     "archivedCaches"},
};

const static int num_closed_archive_subgraph_entry_fields =
  sizeof(closed_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
const static int num_open_archive_subgraph_entry_fields =
  sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);

////////////////////////////////////////////////////////////////
//
// Java heap object archiving support
//
////////////////////////////////////////////////////////////////
void HeapShared::fixup_mapped_heap_regions() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();
  mapinfo->fixup_mapped_heap_regions();
  set_archive_heap_region_fixed();
}

unsigned HeapShared::oop_hash(oop const& p) {
  assert(!p->mark()->has_bias_pattern(),
         "this object should never have been locked");  // so identity_hash won't safepoint
  unsigned hash = (unsigned)p->identity_hash();
  return hash;
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
oop HeapShared::find_archived_heap_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");
  ArchivedObjectCache* cache = archived_object_cache();
  oop* p = cache->get(obj);
  if (p != NULL) {
    return *p;
  } else {
    return NULL;
  }
}

oop HeapShared::archive_heap_object(oop obj, Thread* THREAD) {
  assert(DumpSharedSpaces, "dump-time only");

  oop ao = find_archived_heap_object(obj);
  if (ao != NULL) {
    // already archived
    return ao;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), (size_t)obj->size());
    return NULL;
  }

  // Pre-compute object identity hash at CDS dump time.
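  // Calling identity_hash() installs the hash value in obj's mark word, so the
  // archived copy made below already carries it and the header of the shared
  // object does not need to be updated at runtime.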
  obj->identity_hash();

  oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
  if (archived_oop != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)archived_oop, len);
    MetaspaceShared::relocate_klass_ptr(archived_oop);
    ArchivedObjectCache* cache = archived_object_cache();
    cache->put(obj, archived_oop);
    log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT,
                         p2i(obj), p2i(archived_oop));
  } else {
    log_error(cds, heap)(
      "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
      p2i(obj));
    vm_exit(1);
  }
  return archived_oop;
}

oop HeapShared::materialize_archived_object(narrowOop v) {
  assert(archive_heap_region_fixed(),
         "must be called after archive heap regions are fixed");
  if (!CompressedOops::is_null(v)) {
    oop obj = HeapShared::decode_from_archive(v);
    return G1CollectedHeap::heap()->materialize_archived_object(obj);
  }
  return NULL;
}

void HeapShared::archive_klass_objects(Thread* THREAD) {
  GrowableArray<Klass*>* klasses = MetaspaceShared::collected_klasses();
  assert(klasses != NULL, "sanity");
  for (int i = 0; i < klasses->length(); i++) {
    Klass* k = klasses->at(i);

    // archive mirror object
    java_lang_Class::archive_mirror(k, CHECK);

    // archive the resolved_references array
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->archive_resolved_references(THREAD);
    }
  }
}

void HeapShared::archive_java_heap_objects(GrowableArray<MemRegion> *closed,
                                           GrowableArray<MemRegion> *open) {
  if (!is_heap_object_archiving_allowed()) {
    if (log_is_enabled(Info, cds)) {
      log_info(cds)(
        "Archived java heap is not supported as UseG1GC, "
        "UseCompressedOops and UseCompressedClassPointers are required. "
        "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
        BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
        BOOL_TO_STR(UseCompressedClassPointers));
    }
    return;
  }

  G1HeapVerifier::verify_ready_for_archiving();

  {
    NoSafepointVerifier nsv;

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    tty->print_cr("Dumping objects to closed archive heap region ...");
    NOT_PRODUCT(StringTable::verify());
    copy_closed_archive_heap_objects(closed);

    tty->print_cr("Dumping objects to open archive heap region ...");
    copy_open_archive_heap_objects(open);

    destroy_archived_object_cache();
  }

  G1HeapVerifier::verify_archive_regions();
}

void HeapShared::copy_closed_archive_heap_objects(
                                    GrowableArray<MemRegion> * closed_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");

  Thread* THREAD = Thread::current();
  G1CollectedHeap::heap()->begin_archive_alloc_range();

  // Archive interned string objects
  StringTable::write_to_archive();

  archive_object_subgraphs(closed_archive_subgraph_entry_fields,
                           num_closed_archive_subgraph_entry_fields,
                           true /* is_closed_archive */, THREAD);

  G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive,
                                                   os::vm_allocation_granularity());
}

void HeapShared::copy_open_archive_heap_objects(
                                    GrowableArray<MemRegion> * open_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");

  Thread* THREAD = Thread::current();
  G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);

  java_lang_Class::archive_basic_type_mirrors(THREAD);

  archive_klass_objects(THREAD);

  archive_object_subgraphs(open_archive_subgraph_entry_fields,
                           num_open_archive_subgraph_entry_fields,
                           false /* is_closed_archive */,
                           THREAD);

  G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
                                                   os::vm_allocation_granularity());
}

void HeapShared::init_narrow_oop_decoding(address base, int shift) {
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}

//
// Subgraph archiving support
//
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
HeapShared::RunTimeKlassSubGraphInfoTable   HeapShared::_run_time_subgraph_info_table;

// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the relocated
// Klass* of the original k.
KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(DumpSharedSpaces, "dump time only");
  Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
  if (info == NULL) {
    _dump_time_subgraph_info_table->put(relocated_k, KlassSubGraphInfo(relocated_k));
    info = _dump_time_subgraph_info_table->get(relocated_k);
    ++ _dump_time_subgraph_info_table->_count;
  }
  return info;
}

// Add an entry field to the current KlassSubGraphInfo.
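// Each entry field is recorded as a triplet of juints: the field's offset in
// the mirror, the narrowOop-encoded archived value, and the is_closed_archive
// flag (see the three append calls below).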
void KlassSubGraphInfo::add_subgraph_entry_field(
    int static_field_offset, oop v, bool is_closed_archive) {
  assert(DumpSharedSpaces, "dump time only");
  if (_subgraph_entry_fields == NULL) {
    _subgraph_entry_fields =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, true);
  }
  _subgraph_entry_fields->append((juint)static_field_offset);
  _subgraph_entry_fields->append(CompressedOops::encode(v));
  _subgraph_entry_fields->append(is_closed_archive ? 1 : 0);
}

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k, Klass *relocated_k) {
  assert(DumpSharedSpaces, "dump time only");
  assert(relocated_k == MetaspaceShared::get_relocated_klass(orig_k),
         "must be the relocated Klass in the shared space");

  if (_subgraph_object_klasses == NULL) {
    _subgraph_object_klasses =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, true);
  }

  assert(relocated_k->is_shared(), "must be a shared class");

  if (_k == relocated_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (relocated_k->is_instance_klass()) {
    assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(),
           "must be boot class");
    // SystemDictionary::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == SystemDictionary::String_klass() ||
        orig_k == SystemDictionary::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
  } else if (relocated_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
             "must be boot class");
    }
    if (relocated_k == Universe::objectArrayKlassObj()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(relocated_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(relocated_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(relocated_k);
}

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
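// The record itself is allocated in the read-only (RO) region of the archive
// (see CopyKlassSubGraphInfoToArchive below), so all of its contents must be
// filled in here at dump time.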
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = info->klass();
  _entry_field_records = NULL;
  _subgraph_object_klasses = NULL;

  // populate the entry fields
  GrowableArray<juint>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != NULL) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 3 == 0, "sanity");
    _entry_field_records =
      MetaspaceShared::new_ro_array<juint>(num_entry_fields);
    for (int i = 0 ; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
  }

  // the Klasses of the objects in the sub-graphs
  GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
  if (subgraph_object_klasses != NULL) {
    int num_subgraphs_klasses = subgraph_object_klasses->length();
    _subgraph_object_klasses =
      MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses);
    for (int i = 0; i < num_subgraphs_klasses; i++) {
      Klass* subgraph_k = subgraph_object_klasses->at(i);
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)(
          "Archived object klass %s (%2d) => %s",
          _k->external_name(), i, subgraph_k->external_name());
      }
      _subgraph_object_klasses->at_put(i, subgraph_k);
    }
  }
}

struct CopyKlassSubGraphInfoToArchive : StackObj {
  CompactHashtableWriter* _writer;
  CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}

  bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
    if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
      ArchivedKlassSubGraphInfoRecord* record =
        (ArchivedKlassSubGraphInfoRecord*)MetaspaceShared::read_only_space_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
      record->init(&info);

      unsigned int hash = primitive_hash<Klass*>(klass);
      u4 delta = MetaspaceShared::object_delta_u4(record);
      _writer->add(hash, delta);
    }
    return true; // keep on iterating
  }
};

// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class mirror. The entry
//   points are static fields in the mirror. For each entry point, the field
//   offset, value and is_closed_archive flag are recorded in the sub-graph
//   info. The value is stored back to the corresponding field at runtime.
// - A list of klasses that need to be loaded/initialized before the archived
//   java object sub-graph can be accessed at runtime.
void HeapShared::write_subgraph_info_table() {
  // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
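  // The resulting compact hashtable maps the hash of each relocated Klass* to
  // the (u4) offset of its ArchivedKlassSubGraphInfoRecord within the archive;
  // see CopyKlassSubGraphInfoToArchive::do_entry() above.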
  DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
  CompactHashtableStats stats;

  _run_time_subgraph_info_table.reset();

  CompactHashtableWriter writer(d_table->_count, &stats);
  CopyKlassSubGraphInfoToArchive copy(&writer);
  d_table->iterate(&copy);

  writer.dump(&_run_time_subgraph_info_table, "subgraphs");
}

void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) {
  _run_time_subgraph_info_table.serialize_header(soc);
}

void HeapShared::initialize_from_archived_subgraph(Klass* k) {
  if (!open_archive_heap_region_mapped()) {
    return; // nothing to do
  }
  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  unsigned int hash = primitive_hash<Klass*>(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

  // Initialize from archived data. Currently this is done only
  // during VM initialization time. No lock is needed.
  if (record != NULL) {
    Thread* THREAD = Thread::current();

    int i;
    // Load/link/initialize the klasses of the objects in the subgraph.
    // NULL class loader is used.
    Array<Klass*>* klasses = record->subgraph_object_klasses();
    if (klasses != NULL) {
      for (i = 0; i < klasses->length(); i++) {
        Klass* obj_k = klasses->at(i);
        Klass* resolved_k = SystemDictionary::resolve_or_null(
                                              (obj_k)->name(), THREAD);
        if (resolved_k != obj_k) {
          assert(!SystemDictionary::is_well_known_klass(resolved_k),
                 "shared well-known classes must not be replaced by JVMTI ClassFileLoadHook");
          ResourceMark rm(THREAD);
          log_info(cds, heap)("Failed to load subgraph because %s was not loaded from archive",
                              resolved_k->external_name());
          return;
        }
        if ((obj_k)->is_instance_klass()) {
          InstanceKlass* ik = InstanceKlass::cast(obj_k);
          ik->initialize(THREAD);
        } else if ((obj_k)->is_objArray_klass()) {
          ObjArrayKlass* oak = ObjArrayKlass::cast(obj_k);
          oak->initialize(THREAD);
        }
      }
    }

    if (HAS_PENDING_EXCEPTION) {
      CLEAR_PENDING_EXCEPTION;
      // None of the field values will be set if there was an exception.
      // The java code will not see any of the archived objects in the
      // subgraphs referenced from k in this case.
      return;
    }

    // Load the subgraph entry fields from the record and store them back to
    // the corresponding fields within the mirror.
    oop m = k->java_mirror();
    Array<juint>* entry_field_records = record->entry_field_records();
    if (entry_field_records != NULL) {
      int efr_len = entry_field_records->length();
      assert(efr_len % 3 == 0, "sanity");
      for (i = 0; i < efr_len;) {
        int field_offset = entry_field_records->at(i);
        narrowOop nv = entry_field_records->at(i+1);
        int is_closed_archive = entry_field_records->at(i+2);
        oop v;
        if (is_closed_archive == 0) {
          // It's an archived object in the open archive heap regions, not shared.
          // The object referenced by the field becomes 'known' by GC from this
          // point. All objects in the subgraph reachable from the object are
          // also 'known' by GC.
          v = materialize_archived_object(nv);
        } else {
          // Shared object in the closed archive heap regions. Decode directly.
          assert(!CompressedOops::is_null(nv), "shared object is null");
          v = HeapShared::decode_from_archive(nv);
        }
        m->obj_field_put(field_offset, v);
        i += 3;

        log_debug(cds, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
      }

      // Done. Java code can see the archived sub-graphs referenced from k's
      // mirror after this point.
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT,
                            k->external_name(), p2i(k));
      }
    }
  }
}

class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
  int _level;
  bool _is_closed_archive;
  bool _record_klasses_only;
  KlassSubGraphInfo* _subgraph_info;
  oop _orig_referencing_obj;
  oop _archived_referencing_obj;
  Thread* _thread;
 public:
  WalkOopAndArchiveClosure(int level,
                           bool is_closed_archive,
                           bool record_klasses_only,
                           KlassSubGraphInfo* subgraph_info,
                           oop orig, oop archived, TRAPS) :
    _level(level), _is_closed_archive(is_closed_archive),
    _record_klasses_only(record_klasses_only),
    _subgraph_info(subgraph_info),
    _orig_referencing_obj(orig), _archived_referencing_obj(archived),
    _thread(THREAD) {}
  void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
  void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      assert(!HeapShared::is_archived_object(obj),
             "original objects must not point to archived objects");

      size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
      T* new_p = (T*)(address(_archived_referencing_obj) + field_delta);
      Thread* THREAD = _thread;

      if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
        ResourceMark rm;
        log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size %d %s", _level,
                             _orig_referencing_obj->klass()->external_name(), field_delta,
                             p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
        LogTarget(Trace, cds, heap) log;
        LogStream out(log);
        obj->print_on(&out);
      }

      oop archived = HeapShared::archive_reachable_objects_from(
          _level + 1, _subgraph_info, obj, _is_closed_archive, THREAD);
      assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1");
      assert(HeapShared::is_archived_object(archived), "must be");

      if (!_record_klasses_only) {
        // Update the reference in the archived copy of the referencing object.
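        // new_p (computed above) is the field at the same offset inside the
        // archived copy of the referencing object.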
        log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
                             _level, p2i(new_p), p2i(obj), p2i(archived));
        RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
      }
    }
  }
};

void HeapShared::check_closed_archive_heap_region_object(InstanceKlass* k,
                                                         Thread* THREAD) {
  // Check fields in the object
  for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
    if (!fs.access_flags().is_static()) {
      BasicType ft = fs.field_descriptor().field_type();
      if (!fs.access_flags().is_final() && (ft == T_ARRAY || ft == T_OBJECT)) {
        ResourceMark rm(THREAD);
        log_warning(cds, heap)(
          "Please check reference field in %s instance in closed archive heap region: %s %s",
          k->external_name(), (fs.name())->as_C_string(),
          (fs.signature())->as_C_string());
      }
    }
  }
}

// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of orig_obj and all reachable objects.
oop HeapShared::archive_reachable_objects_from(int level,
                                               KlassSubGraphInfo* subgraph_info,
                                               oop orig_obj,
                                               bool is_closed_archive,
                                               TRAPS) {
  assert(orig_obj != NULL, "must be");
  assert(!is_archived_object(orig_obj), "sanity");

  // java.lang.Class instances cannot be included in an archived
  // object sub-graph.
  if (java_lang_Class::is_instance(orig_obj)) {
    log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
    vm_exit(1);
  }

  oop archived_obj = find_archived_heap_object(orig_obj);
  if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
    // To save time, don't walk strings that are already archived. They just contain
    // pointers to a type array, whose klass doesn't need to be recorded.
    return archived_obj;
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return archived_obj;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool record_klasses_only = (archived_obj != NULL);
  if (archived_obj == NULL) {
    ++_num_new_archived_objs;
    archived_obj = archive_heap_object(orig_obj, THREAD);
    if (archived_obj == NULL) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size %d, skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return NULL;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        vm_exit(1);
      }
    }
  }

  assert(archived_obj != NULL, "must be");
  Klass *orig_k = orig_obj->klass();
  Klass *relocated_k = archived_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k, relocated_k);

  WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
                                  subgraph_info, orig_obj, archived_obj, THREAD);
  orig_obj->oop_iterate(&walker);
  if (is_closed_archive && orig_k->is_instance_klass()) {
    check_closed_archive_heap_region_object(InstanceKlass::cast(orig_k), THREAD);
  }
  return archived_obj;
}

//
// Start from the given static field in a java mirror and archive the
// complete sub-graph of java heap objects that are reached directly
// or indirectly from the starting object by following references.
// Sub-graph archiving restrictions (current):
//
// - All classes of objects in the archived sub-graph (including the
//   entry class) must be boot class only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. Mirror can only be the sub-graph entry object.
//
// The Java heap object sub-graph archiving process (see
// WalkOopAndArchiveClosure):
//
// 1) Java object sub-graph archiving starts from a given static field
//    within a Class instance (java mirror). If the static field is a
//    reference field and points to a non-null java object, proceed to
//    the next step.
//
// 2) Archives the referenced java object. If an archived copy of the
//    current object already exists, updates the pointer in the archived
//    copy of the referencing object to point to the current archived object.
//    Otherwise, proceed to the next step.
//
// 3) Follows all references within the current java object and recursively
//    archive the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of referencing object to
//    point to the current archived object.
//
// 5) The Klass of the current java object is added to the list of Klasses
//    for loading and initializing before any object in the archived graph can
//    be accessed at runtime.
//
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
                                                             const char* klass_name,
                                                             int field_offset,
                                                             const char* field_name,
                                                             bool is_closed_archive,
                                                             TRAPS) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();

  KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
  oop f = m->obj_field(field_offset);

  log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));

  if (!CompressedOops::is_null(f)) {
    if (log_is_enabled(Trace, cds, heap)) {
      LogTarget(Trace, cds, heap) log;
      LogStream out(log);
      f->print_on(&out);
    }

    oop af = archive_reachable_objects_from(1, subgraph_info, f,
                                            is_closed_archive, CHECK);

    if (af == NULL) {
      log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
                           klass_name, field_name);
    } else {
      // Note: the field value is not preserved in the archived mirror.
      // Record the field as a new subGraph entry point. The recorded
      // information is restored from the archive at runtime.
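      // 'af' is the archived copy of the field's value; add_subgraph_entry_field()
      // stores its narrowOop encoding together with the field offset.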
      subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
      log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
    }
  } else {
    // The field contains null, we still need to record the entry point,
    // so it can be restored at runtime.
    subgraph_info->add_subgraph_entry_field(field_offset, NULL, false);
  }
}

#ifndef PRODUCT
class VerifySharedOopClosure: public BasicOopIterateClosure {
 private:
  bool _is_archived;

 public:
  VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}

  void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
  void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      HeapShared::verify_reachable_objects_from(obj, _is_archived);
    }
  }
};

void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
  oop f = m->obj_field(field_offset);
  if (!CompressedOops::is_null(f)) {
    verify_subgraph_from(f);
  }
}

void HeapShared::verify_subgraph_from(oop orig_obj) {
  oop archived_obj = find_archived_heap_object(orig_obj);
  if (archived_obj == NULL) {
    // It's OK for the root of a subgraph to be not archived. See comments in
    // archive_reachable_objects_from().
    return;
  }

  // Verify that all objects reachable from orig_obj are archived.
  init_seen_objects_table();
  verify_reachable_objects_from(orig_obj, false);
  delete_seen_objects_table();

  // Note: we could also verify that all objects reachable from the archived
  // copy of orig_obj can only point to archived objects, with:
  //      init_seen_objects_table();
  //      verify_reachable_objects_from(archived_obj, true);
  //      delete_seen_objects_table();
  // but that's already done in G1HeapVerifier::verify_archive_regions so we
  // won't do it here.
}
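
// Walk obj and everything reachable from it. With is_archived == false, check
// that every original object has an archived copy; with is_archived == true,
// check that we are looking at the archived copies themselves.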
void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
  _num_total_verifications ++;
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);

    if (is_archived) {
      assert(is_archived_object(obj), "must be");
      assert(find_archived_heap_object(obj) == NULL, "must be");
    } else {
      assert(!is_archived_object(obj), "must be");
      assert(find_archived_heap_object(obj) != NULL, "must be");
    }

    VerifySharedOopClosure walker(is_archived);
    obj->oop_iterate(&walker);
  }
}
#endif

HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;

bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
  return _seen_objects_table->get(obj) != NULL;
}

void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
  assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
  _seen_objects_table->put(obj, true);
  ++ _num_new_walked_objs;
}

void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name) {
  log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
  init_seen_objects_table();
  _num_new_walked_objs = 0;
  _num_new_archived_objs = 0;
  _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
}

void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
  int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
    _num_old_recorded_klasses;
  log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
                      "walked %d objs, archived %d new objs, recorded %d classes",
                      class_name, _num_new_walked_objs, _num_new_archived_objs,
                      num_new_recorded_klasses);

  delete_seen_objects_table();

  _num_total_subgraph_recordings ++;
  _num_total_walked_objs      += _num_new_walked_objs;
  _num_total_archived_objs    += _num_new_archived_objs;
  _num_total_recorded_klasses += num_new_recorded_klasses;
}

class ArchivableStaticFieldFinder: public FieldClosure {
  InstanceKlass* _ik;
  Symbol* _field_name;
  bool _found;
  int _offset;
 public:
  ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
    _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}

  virtual void do_field(fieldDescriptor* fd) {
    if (fd->name() == _field_name) {
      assert(!_found, "fields cannot be overloaded");
      assert(fd->field_type() == T_OBJECT || fd->field_type() == T_ARRAY, "can archive only obj or array fields");
      _found = true;
      _offset = fd->offset();
    }
  }
  bool found() { return _found; }
  int offset() { return _offset; }
};

void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
                                            int num, Thread* THREAD) {
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    TempNewSymbol field_name =
        SymbolTable::new_symbol(info->field_name);

    Klass* k = SystemDictionary::resolve_or_null(klass_name, THREAD);
    assert(k != NULL && !HAS_PENDING_EXCEPTION, "class must exist");
    InstanceKlass* ik = InstanceKlass::cast(k);
    assert(InstanceKlass::cast(ik)->is_shared_boot_class(),
           "Only support boot classes");
    ik->initialize(THREAD);
    guarantee(!HAS_PENDING_EXCEPTION, "exception in initialize");

    ArchivableStaticFieldFinder finder(ik, field_name);
    ik->do_local_static_fields(&finder);
    assert(finder.found(), "field must exist");

    info->klass = ik;
    info->offset = finder.offset();
  }
}

void HeapShared::init_subgraph_entry_fields(Thread* THREAD) {
  _dump_time_subgraph_info_table = new (ResourceObj::C_HEAP, mtClass)DumpTimeKlassSubGraphInfoTable();

  init_subgraph_entry_fields(closed_archive_subgraph_entry_fields,
                             num_closed_archive_subgraph_entry_fields,
                             THREAD);
  init_subgraph_entry_fields(open_archive_subgraph_entry_fields,
                             num_open_archive_subgraph_entry_fields,
                             THREAD);
}

void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          int num, bool is_closed_archive,
                                          Thread* THREAD) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  int i;
  for (i = 0; i < num; ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name);

    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; i < num; i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }
      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name,
                                                  is_closed_archive, CHECK);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records in %s archive heap region = %d",
                      is_closed_archive ? "closed" : "open",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("  Verified %d references", _num_total_verifications);
#endif
}

// At dump-time, find the location of all the non-null oop pointers in an archived heap
// region. This way we can quickly relocate all the pointers without using
// BasicOopIterateClosure at runtime.
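// The oopmap has one bit per narrowOop-sized slot in the region; a set bit
// marks a slot that holds a non-null narrowOop needing relocation.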
class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
  narrowOop* _start;
  BitMap *_oopmap;
  int _num_total_oops;
  int _num_null_oops;
 public:
  FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap)
    : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}

  virtual bool should_verify_oops(void) {
    return false;
  }
  virtual void do_oop(narrowOop* p) {
    _num_total_oops ++;
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      size_t idx = p - _start;
      _oopmap->set_bit(idx);
    } else {
      _num_null_oops ++;
    }
  }
  virtual void do_oop(oop *p) {
    ShouldNotReachHere();
  }
  int num_total_oops() const { return _num_total_oops; }
  int num_null_oops()  const { return _num_null_oops; }
};

ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
  assert(UseCompressedOops, "must be");
  size_t num_bits = region.byte_size() / sizeof(narrowOop);
  ResourceBitMap oopmap(num_bits);

  HeapWord* p   = region.start();
  HeapWord* end = region.end();
  FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap);

  int num_objs = 0;
  while (p < end) {
    oop o = (oop)p;
    o->oop_iterate(&finder);
    p += o->size();
    ++ num_objs;
  }

  log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d",
                      num_objs, finder.num_total_oops(), finder.num_null_oops());
  return oopmap;
}

// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

 public:
  PatchEmbeddedPointers(narrowOop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = HeapShared::decode_from_archive(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;
  }
};

void HeapShared::patch_archived_heap_embedded_pointers(MemRegion region, address oopmap,
                                                       size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);

#ifndef PRODUCT
  ResourceMark rm;
  ResourceBitMap checkBm = calculate_oopmap(region);
  assert(bm.is_same(checkBm), "sanity");
#endif

  PatchEmbeddedPointers patcher((narrowOop*)region.start());
  bm.iterate(&patcher);
}

#endif // INCLUDE_CDS_JAVA_HEAP