/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/bitMap.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

bool HeapShared::_closed_archive_heap_region_mapped = false;
bool HeapShared::_open_archive_heap_region_mapped = false;
bool HeapShared::_archive_heap_region_fixed = false;

address HeapShared::_narrow_oop_base;
int     HeapShared::_narrow_oop_shift;

//
// If you add new entries to the following tables, you should know what you're doing!
//

// Entry fields for shareable subgraphs archived in the closed archive heap
// region. Warning: Objects in the subgraphs should not have reference fields
// assigned at runtime.
static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",     "archivedCache"},
  {"java/lang/Long$LongCache",           "archivedCache"},
  {"java/lang/Byte$ByteCache",           "archivedCache"},
  {"java/lang/Short$ShortCache",         "archivedCache"},
  {"java/lang/Character$CharacterCache", "archivedCache"},
};
// Entry fields for subgraphs archived in the open archive heap region.
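// (Unlike the closed regions above, objects in these subgraphs may have
// reference fields that are updated at runtime.)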
static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
  {"java/util/ImmutableCollections$ListN",    "EMPTY_LIST"},
  {"java/util/ImmutableCollections$MapN",     "EMPTY_MAP"},
  {"java/util/ImmutableCollections$SetN",     "EMPTY_SET"},
  {"java/lang/module/Configuration",          "EMPTY_CONFIGURATION"},
};

const static int num_closed_archive_subgraph_entry_fields =
  sizeof(closed_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
const static int num_open_archive_subgraph_entry_fields =
  sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);

////////////////////////////////////////////////////////////////
//
// Java heap object archiving support
//
////////////////////////////////////////////////////////////////
void HeapShared::fixup_mapped_heap_regions() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();
  mapinfo->fixup_mapped_heap_regions();
  set_archive_heap_region_fixed();
}

unsigned HeapShared::oop_hash(oop const& p) {
  assert(!p->mark()->has_bias_pattern(),
         "this object should never have been locked");  // so identity_hash won't safepoint
  unsigned hash = (unsigned)p->identity_hash();
  return hash;
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
oop HeapShared::find_archived_heap_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");
  ArchivedObjectCache* cache = archived_object_cache();
  oop* p = cache->get(obj);
  if (p != NULL) {
    return *p;
  } else {
    return NULL;
  }
}

oop HeapShared::archive_heap_object(oop obj, Thread* THREAD) {
  assert(DumpSharedSpaces, "dump-time only");

  oop ao = find_archived_heap_object(obj);
  if (ao != NULL) {
    // already archived
    return ao;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), (size_t)obj->size());
    return NULL;
  }

  // Pre-compute object identity hash at CDS dump time.
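  // The hash is installed in the object's mark word, so the archived copy made
  // below already carries it and a runtime identity_hash() call on the shared
  // object does not need to modify its header.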
  obj->identity_hash();

  oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
  if (archived_oop != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)archived_oop, len);
    MetaspaceShared::relocate_klass_ptr(archived_oop);
    ArchivedObjectCache* cache = archived_object_cache();
    cache->put(obj, archived_oop);
    log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT,
                         p2i(obj), p2i(archived_oop));
  } else {
    log_error(cds, heap)(
      "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
      p2i(obj));
    vm_exit(1);
  }
  return archived_oop;
}

oop HeapShared::materialize_archived_object(narrowOop v) {
  assert(archive_heap_region_fixed(),
         "must be called after archive heap regions are fixed");
  if (!CompressedOops::is_null(v)) {
    oop obj = HeapShared::decode_from_archive(v);
    return G1CollectedHeap::heap()->materialize_archived_object(obj);
  }
  return NULL;
}

void HeapShared::archive_klass_objects(Thread* THREAD) {
  GrowableArray<Klass*>* klasses = MetaspaceShared::collected_klasses();
  assert(klasses != NULL, "sanity");
  for (int i = 0; i < klasses->length(); i++) {
    Klass* k = klasses->at(i);

    // archive mirror object
    java_lang_Class::archive_mirror(k, CHECK);

    // archive the resolved_references array
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->archive_resolved_references(THREAD);
    }
  }
}

void HeapShared::archive_java_heap_objects(GrowableArray<MemRegion> *closed,
                                           GrowableArray<MemRegion> *open) {
  if (!is_heap_object_archiving_allowed()) {
    if (log_is_enabled(Info, cds)) {
      log_info(cds)(
        "Archived java heap is not supported as UseG1GC, "
        "UseCompressedOops and UseCompressedClassPointers are required. "
        "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
        BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
        BOOL_TO_STR(UseCompressedClassPointers));
    }
    return;
  }

  G1HeapVerifier::verify_ready_for_archiving();

  {
    NoSafepointVerifier nsv;

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    tty->print_cr("Dumping objects to closed archive heap region ...");
    NOT_PRODUCT(StringTable::verify());
    copy_closed_archive_heap_objects(closed);

    tty->print_cr("Dumping objects to open archive heap region ...");
    copy_open_archive_heap_objects(open);

    destroy_archived_object_cache();
  }

  G1HeapVerifier::verify_archive_regions();
}

void HeapShared::copy_closed_archive_heap_objects(
                                    GrowableArray<MemRegion> * closed_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");

  Thread* THREAD = Thread::current();
  G1CollectedHeap::heap()->begin_archive_alloc_range();

  // Archive interned string objects
  StringTable::write_to_archive();

  archive_object_subgraphs(closed_archive_subgraph_entry_fields,
                           num_closed_archive_subgraph_entry_fields,
                           true /* is_closed_archive */, THREAD);

  G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive,
                                                   os::vm_allocation_granularity());
}

void HeapShared::copy_open_archive_heap_objects(
                                    GrowableArray<MemRegion> * open_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");

  Thread* THREAD = Thread::current();
  G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);

  java_lang_Class::archive_basic_type_mirrors(THREAD);

  archive_klass_objects(THREAD);

  archive_object_subgraphs(open_archive_subgraph_entry_fields,
                           num_open_archive_subgraph_entry_fields,
                           false /* is_closed_archive */,
                           THREAD);

  G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
                                                   os::vm_allocation_granularity());
}

void HeapShared::init_narrow_oop_decoding(address base, int shift) {
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}

//
// Subgraph archiving support
//
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
HeapShared::RunTimeKlassSubGraphInfoTable   HeapShared::_run_time_subgraph_info_table;

// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the relocated
// Klass* of the original k.
KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(DumpSharedSpaces, "dump time only");
  Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
  if (info == NULL) {
    _dump_time_subgraph_info_table->put(relocated_k, KlassSubGraphInfo(relocated_k));
    info = _dump_time_subgraph_info_table->get(relocated_k);
    ++ _dump_time_subgraph_info_table->_count;
  }
  return info;
}

// Add an entry field to the current KlassSubGraphInfo.
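// Each entry is recorded as a triple appended to _subgraph_entry_fields:
//   { static field offset in the mirror, encoded (narrow) oop of the archived
//     value, is_closed_archive flag }.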
void KlassSubGraphInfo::add_subgraph_entry_field(
    int static_field_offset, oop v, bool is_closed_archive) {
  assert(DumpSharedSpaces, "dump time only");
  if (_subgraph_entry_fields == NULL) {
    _subgraph_entry_fields =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, true);
  }
  _subgraph_entry_fields->append((juint)static_field_offset);
  _subgraph_entry_fields->append(CompressedOops::encode(v));
  _subgraph_entry_fields->append(is_closed_archive ? 1 : 0);
}

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k, Klass *relocated_k) {
  assert(DumpSharedSpaces, "dump time only");
  assert(relocated_k == MetaspaceShared::get_relocated_klass(orig_k),
         "must be the relocated Klass in the shared space");

  if (_subgraph_object_klasses == NULL) {
    _subgraph_object_klasses =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, true);
  }

  assert(relocated_k->is_shared(), "must be a shared class");

  if (_k == relocated_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (relocated_k->is_instance_klass()) {
    assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(),
           "must be boot class");
    // SystemDictionary::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == SystemDictionary::String_klass() ||
        orig_k == SystemDictionary::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
  } else if (relocated_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
             "must be boot class");
    }
    if (relocated_k == Universe::objectArrayKlassObj()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(relocated_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(relocated_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(relocated_k);
}

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
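// Both the entry field triples and the klass list are copied into arrays
// allocated in the read-only (RO) region of the archive.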
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = info->klass();
  _entry_field_records = NULL;
  _subgraph_object_klasses = NULL;

  // populate the entry fields
  GrowableArray<juint>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != NULL) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 3 == 0, "sanity");
    _entry_field_records =
      MetaspaceShared::new_ro_array<juint>(num_entry_fields);
    for (int i = 0 ; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
  }

  // the Klasses of the objects in the sub-graphs
  GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
  if (subgraph_object_klasses != NULL) {
    int num_subgraphs_klasses = subgraph_object_klasses->length();
    _subgraph_object_klasses =
      MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses);
    for (int i = 0; i < num_subgraphs_klasses; i++) {
      Klass* subgraph_k = subgraph_object_klasses->at(i);
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)(
          "Archived object klass %s (%2d) => %s",
          _k->external_name(), i, subgraph_k->external_name());
      }
      _subgraph_object_klasses->at_put(i, subgraph_k);
    }
  }
}

struct CopyKlassSubGraphInfoToArchive : StackObj {
  CompactHashtableWriter* _writer;
  CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}

  bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
    if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
      ArchivedKlassSubGraphInfoRecord* record =
        (ArchivedKlassSubGraphInfoRecord*)MetaspaceShared::read_only_space_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
      record->init(&info);

      unsigned int hash = primitive_hash<Klass*>(klass);
      u4 delta = MetaspaceShared::object_delta_u4(record);
      _writer->add(hash, delta);
    }
    return true; // keep on iterating
  }
};

// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class mirror. The entry
//   points are static fields in the mirror. For each entry point, the field
//   offset, value and is_closed_archive flag are recorded in the sub-graph
//   info. The value is stored back to the corresponding field at runtime.
// - A list of klasses that need to be loaded/initialized before archived
//   java object sub-graph can be accessed at runtime.
void HeapShared::write_subgraph_info_table() {
  // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
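  // The table is keyed by the hash of the relocated Klass* pointer, which is
  // the same hash initialize_from_archived_subgraph() uses for lookup at runtime.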
  DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
  CompactHashtableStats stats;

  _run_time_subgraph_info_table.reset();

  int num_buckets = CompactHashtableWriter::default_num_buckets(d_table->_count);
  CompactHashtableWriter writer(num_buckets, &stats);
  CopyKlassSubGraphInfoToArchive copy(&writer);
  d_table->iterate(&copy);

  writer.dump(&_run_time_subgraph_info_table, "subgraphs");
}

void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) {
  _run_time_subgraph_info_table.serialize_header(soc);
}

void HeapShared::initialize_from_archived_subgraph(Klass* k) {
  if (!open_archive_heap_region_mapped()) {
    return; // nothing to do
  }
  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  unsigned int hash = primitive_hash<Klass*>(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

  // Initialize from archived data. Currently this is done only
  // during VM initialization time. No lock is needed.
  if (record != NULL) {
    Thread* THREAD = Thread::current();

    int i;
    // Load/link/initialize the klasses of the objects in the subgraph.
    // NULL class loader is used.
    Array<Klass*>* klasses = record->subgraph_object_klasses();
    if (klasses != NULL) {
      for (i = 0; i < klasses->length(); i++) {
        Klass* obj_k = klasses->at(i);
        Klass* resolved_k = SystemDictionary::resolve_or_null(
                                              (obj_k)->name(), THREAD);
        if (resolved_k != obj_k) {
          assert(!SystemDictionary::is_well_known_klass(resolved_k),
                 "shared well-known classes must not be replaced by JVMTI ClassFileLoadHook");
          ResourceMark rm(THREAD);
          log_info(cds, heap)("Failed to load subgraph because %s was not loaded from archive",
                              resolved_k->external_name());
          return;
        }
        if ((obj_k)->is_instance_klass()) {
          InstanceKlass* ik = InstanceKlass::cast(obj_k);
          ik->initialize(THREAD);
        } else if ((obj_k)->is_objArray_klass()) {
          ObjArrayKlass* oak = ObjArrayKlass::cast(obj_k);
          oak->initialize(THREAD);
        }
      }
    }

    if (HAS_PENDING_EXCEPTION) {
      CLEAR_PENDING_EXCEPTION;
      // None of the field values will be set if there was an exception.
      // The java code will not see any of the archived objects in the
      // subgraphs referenced from k in this case.
      return;
    }

    // Load the subgraph entry fields from the record and store them back to
    // the corresponding fields within the mirror.
    oop m = k->java_mirror();
    Array<juint>* entry_field_records = record->entry_field_records();
    if (entry_field_records != NULL) {
      int efr_len = entry_field_records->length();
      assert(efr_len % 3 == 0, "sanity");
      for (i = 0; i < efr_len;) {
        int field_offset = entry_field_records->at(i);
        narrowOop nv = entry_field_records->at(i+1);
        int is_closed_archive = entry_field_records->at(i+2);
        oop v;
        if (is_closed_archive == 0) {
          // It's an archived object in the open archive heap regions, not shared.
          // The object referenced by the field becomes 'known' by GC from this
          // point. All objects in the subgraph reachable from the object are
          // also 'known' by GC.
          v = materialize_archived_object(nv);
        } else {
          // Shared object in the closed archive heap regions. Decode directly.
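          // Objects in the closed regions are never modified at runtime, so no
          // materialization is needed; the narrow oop can be decoded as-is.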
          assert(!CompressedOops::is_null(nv), "shared object is null");
          v = HeapShared::decode_from_archive(nv);
        }
        m->obj_field_put(field_offset, v);
        i += 3;

        log_debug(cds, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
      }

      // Done. Java code can see the archived sub-graphs referenced from k's
      // mirror after this point.
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT,
                            k->external_name(), p2i(k));
      }
    }
  }
}

class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
  int _level;
  bool _is_closed_archive;
  bool _record_klasses_only;
  KlassSubGraphInfo* _subgraph_info;
  oop _orig_referencing_obj;
  oop _archived_referencing_obj;
  Thread* _thread;
 public:
  WalkOopAndArchiveClosure(int level,
                           bool is_closed_archive,
                           bool record_klasses_only,
                           KlassSubGraphInfo* subgraph_info,
                           oop orig, oop archived, TRAPS) :
    _level(level), _is_closed_archive(is_closed_archive),
    _record_klasses_only(record_klasses_only),
    _subgraph_info(subgraph_info),
    _orig_referencing_obj(orig), _archived_referencing_obj(archived),
    _thread(THREAD) {}
  void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
  void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      assert(!HeapShared::is_archived_object(obj),
             "original objects must not point to archived objects");

      size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
      T* new_p = (T*)(address(_archived_referencing_obj) + field_delta);
      Thread* THREAD = _thread;

      if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
        ResourceMark rm;
        log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size %d %s", _level,
                             _orig_referencing_obj->klass()->external_name(), field_delta,
                             p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
        LogTarget(Trace, cds, heap) log;
        LogStream out(log);
        obj->print_on(&out);
      }

      oop archived = HeapShared::archive_reachable_objects_from(
          _level + 1, _subgraph_info, obj, _is_closed_archive, THREAD);
      assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1");
      assert(HeapShared::is_archived_object(archived), "must be");

      if (!_record_klasses_only) {
        // Update the reference in the archived copy of the referencing object.
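        // new_p (computed from field_delta above) is the same field at the same
        // offset inside the archived copy, so the archived graph ends up
        // pointing only to archived objects.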
        log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
                             _level, p2i(new_p), p2i(obj), p2i(archived));
        RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
      }
    }
  }
};

void HeapShared::check_closed_archive_heap_region_object(InstanceKlass* k,
                                                         Thread* THREAD) {
  // Check fields in the object
  for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
    if (!fs.access_flags().is_static()) {
      BasicType ft = fs.field_descriptor().field_type();
      if (!fs.access_flags().is_final() && (ft == T_ARRAY || ft == T_OBJECT)) {
        ResourceMark rm(THREAD);
        log_warning(cds, heap)(
          "Please check reference field in %s instance in closed archive heap region: %s %s",
          k->external_name(), (fs.name())->as_C_string(),
          (fs.signature())->as_C_string());
      }
    }
  }
}

// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of all orig_obj and all reachable objects.
oop HeapShared::archive_reachable_objects_from(int level,
                                               KlassSubGraphInfo* subgraph_info,
                                               oop orig_obj,
                                               bool is_closed_archive,
                                               TRAPS) {
  assert(orig_obj != NULL, "must be");
  assert(!is_archived_object(orig_obj), "sanity");

  // java.lang.Class instances cannot be included in an archived
  // object sub-graph.
  if (java_lang_Class::is_instance(orig_obj)) {
    log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
    vm_exit(1);
  }

  oop archived_obj = find_archived_heap_object(orig_obj);
  if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
    // To save time, don't walk strings that are already archived. They just contain
    // pointers to a type array, whose klass doesn't need to be recorded.
    return archived_obj;
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return archived_obj;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool record_klasses_only = (archived_obj != NULL);
  if (archived_obj == NULL) {
    ++_num_new_archived_objs;
    archived_obj = archive_heap_object(orig_obj, THREAD);
    if (archived_obj == NULL) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size %d, skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return NULL;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        vm_exit(1);
      }
    }
  }

  assert(archived_obj != NULL, "must be");
  Klass *orig_k = orig_obj->klass();
  Klass *relocated_k = archived_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k, relocated_k);

  WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
                                  subgraph_info, orig_obj, archived_obj, THREAD);
  orig_obj->oop_iterate(&walker);
  if (is_closed_archive && orig_k->is_instance_klass()) {
    check_closed_archive_heap_region_object(InstanceKlass::cast(orig_k), THREAD);
  }
  return archived_obj;
}

//
// Start from the given static field in a java mirror and archive the
// complete sub-graph of java heap objects that are reached directly
// or indirectly from the starting object by following references.
// Sub-graph archiving restrictions (current):
//
// - All classes of objects in the archived sub-graph (including the
//   entry class) must be boot class only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. Mirror can only be the sub-graph entry object.
//
// The Java heap object sub-graph archiving process (see
// WalkOopAndArchiveClosure):
//
// 1) Java object sub-graph archiving starts from a given static field
//    within a Class instance (java mirror). If the static field is a
//    reference field and points to a non-null java object, proceed to
//    the next step.
//
// 2) Archives the referenced java object. If an archived copy of the
//    current object already exists, updates the pointer in the archived
//    copy of the referencing object to point to the current archived object.
//    Otherwise, proceed to the next step.
//
// 3) Follows all references within the current java object and recursively
//    archive the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of referencing object to
//    point to the current archived object.
//
// 5) The Klass of the current java object is added to the list of Klasses
//    for loading and initializing before any object in the archived graph can
//    be accessed at runtime.
//
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
                                                             const char* klass_name,
                                                             int field_offset,
                                                             const char* field_name,
                                                             bool is_closed_archive,
                                                             TRAPS) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();

  KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
  oop f = m->obj_field(field_offset);

  log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));

  if (!CompressedOops::is_null(f)) {
    if (log_is_enabled(Trace, cds, heap)) {
      LogTarget(Trace, cds, heap) log;
      LogStream out(log);
      f->print_on(&out);
    }

    oop af = archive_reachable_objects_from(1, subgraph_info, f,
                                            is_closed_archive, CHECK);

    if (af == NULL) {
      log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
                           klass_name, field_name);
    } else {
      // Note: the field value is not preserved in the archived mirror.
      // Record the field as a new subGraph entry point. The recorded
      // information is restored from the archive at runtime.
      subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
      log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
    }
  } else {
    // The field contains null, we still need to record the entry point,
    // so it can be restored at runtime.
    subgraph_info->add_subgraph_entry_field(field_offset, NULL, false);
  }
}

#ifndef PRODUCT
class VerifySharedOopClosure: public BasicOopIterateClosure {
 private:
  bool _is_archived;

 public:
  VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}

  void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
  void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      HeapShared::verify_reachable_objects_from(obj, _is_archived);
    }
  }
};

void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
  oop f = m->obj_field(field_offset);
  if (!CompressedOops::is_null(f)) {
    verify_subgraph_from(f);
  }
}

void HeapShared::verify_subgraph_from(oop orig_obj) {
  oop archived_obj = find_archived_heap_object(orig_obj);
  if (archived_obj == NULL) {
    // It's OK for the root of a subgraph to be not archived. See comments in
    // archive_reachable_objects_from().
    return;
  }

  // Verify that all objects reachable from orig_obj are archived.
  init_seen_objects_table();
  verify_reachable_objects_from(orig_obj, false);
  delete_seen_objects_table();

  // Note: we could also verify that all objects reachable from the archived
  // copy of orig_obj can only point to archived objects, with:
  //      init_seen_objects_table();
  //      verify_reachable_objects_from(archived_obj, true);
  //      delete_seen_objects_table();
  // but that's already done in G1HeapVerifier::verify_archive_regions so we
  // won't do it here.
}

void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
  _num_total_verifications ++;
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);

    if (is_archived) {
      assert(is_archived_object(obj), "must be");
      assert(find_archived_heap_object(obj) == NULL, "must be");
    } else {
      assert(!is_archived_object(obj), "must be");
      assert(find_archived_heap_object(obj) != NULL, "must be");
    }

    VerifySharedOopClosure walker(is_archived);
    obj->oop_iterate(&walker);
  }
}
#endif

HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;

bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
  return _seen_objects_table->get(obj) != NULL;
}

void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
  assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
  _seen_objects_table->put(obj, true);
  ++ _num_new_walked_objs;
}

void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name) {
  log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
  init_seen_objects_table();
  _num_new_walked_objs = 0;
  _num_new_archived_objs = 0;
  _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
}

void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
  int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
    _num_old_recorded_klasses;
  log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
                      "walked %d objs, archived %d new objs, recorded %d classes",
                      class_name, _num_new_walked_objs, _num_new_archived_objs,
                      num_new_recorded_klasses);

  delete_seen_objects_table();

  _num_total_subgraph_recordings ++;
  _num_total_walked_objs      += _num_new_walked_objs;
  _num_total_archived_objs    += _num_new_archived_objs;
  _num_total_recorded_klasses += num_new_recorded_klasses;
}

class ArchivableStaticFieldFinder: public FieldClosure {
  InstanceKlass* _ik;
  Symbol* _field_name;
  bool _found;
  int _offset;
 public:
  ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
    _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}

  virtual void do_field(fieldDescriptor* fd) {
    if (fd->name() == _field_name) {
      assert(!_found, "fields cannot be overloaded");
      assert(fd->field_type() == T_OBJECT || fd->field_type() == T_ARRAY, "can archive only obj or array fields");
      _found = true;
      _offset = fd->offset();
    }
  }
  bool found()  { return _found; }
  int offset()  { return _offset; }
};

void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
                                            int num, Thread* THREAD) {
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name, THREAD);
    TempNewSymbol field_name =
      SymbolTable::new_symbol(info->field_name, THREAD);

    Klass* k = SystemDictionary::resolve_or_null(klass_name, THREAD);
    assert(k != NULL && !HAS_PENDING_EXCEPTION, "class must exist");
    InstanceKlass* ik = InstanceKlass::cast(k);
    assert(InstanceKlass::cast(ik)->is_shared_boot_class(),
           "Only support boot classes");
    ik->initialize(THREAD);
    guarantee(!HAS_PENDING_EXCEPTION, "exception in initialize");

    ArchivableStaticFieldFinder finder(ik, field_name);
    ik->do_local_static_fields(&finder);
    assert(finder.found(), "field must exist");

    info->klass = ik;
    info->offset = finder.offset();
  }
}

void HeapShared::init_subgraph_entry_fields(Thread* THREAD) {
  _dump_time_subgraph_info_table = new (ResourceObj::C_HEAP, mtClass)DumpTimeKlassSubGraphInfoTable();

  init_subgraph_entry_fields(closed_archive_subgraph_entry_fields,
                             num_closed_archive_subgraph_entry_fields,
                             THREAD);
  init_subgraph_entry_fields(open_archive_subgraph_entry_fields,
                             num_open_archive_subgraph_entry_fields,
                             THREAD);
}

void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          int num, bool is_closed_archive,
                                          Thread* THREAD) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  int i;
  for (i = 0; i < num; ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name);

    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; i < num; i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }
      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name,
                                                  is_closed_archive, CHECK);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records in %s archive heap region = %d",
                      is_closed_archive ? "closed" : "open",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("  Verified %d references", _num_total_verifications);
#endif
}

// At dump-time, find the location of all the non-null oop pointers in an archived heap
// region. This way we can quickly relocate all the pointers without using
// BasicOopIterateClosure at runtime.
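// Each set bit in the resulting bitmap is the index, in narrowOop-sized words
// counted from the start of the region, of an embedded non-null pointer;
// PatchEmbeddedPointers below re-encodes exactly those slots at runtime.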
class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
  narrowOop* _start;
  BitMap *_oopmap;
  int _num_total_oops;
  int _num_null_oops;
 public:
  FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap)
    : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}

  virtual bool should_verify_oops(void) {
    return false;
  }
  virtual void do_oop(narrowOop* p) {
    _num_total_oops ++;
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      size_t idx = p - _start;
      _oopmap->set_bit(idx);
    } else {
      _num_null_oops ++;
    }
  }
  virtual void do_oop(oop *p) {
    ShouldNotReachHere();
  }
  int num_total_oops() const { return _num_total_oops; }
  int num_null_oops()  const { return _num_null_oops; }
};

ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
  assert(UseCompressedOops, "must be");
  size_t num_bits = region.byte_size() / sizeof(narrowOop);
  ResourceBitMap oopmap(num_bits);

  HeapWord* p   = region.start();
  HeapWord* end = region.end();
  FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap);

  int num_objs = 0;
  while (p < end) {
    oop o = (oop)p;
    o->oop_iterate(&finder);
    p += o->size();
    ++ num_objs;
  }

  log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d",
                      num_objs, finder.num_total_oops(), finder.num_null_oops());
  return oopmap;
}

// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

 public:
  PatchEmbeddedPointers(narrowOop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = HeapShared::decode_from_archive(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;
  }
};

void HeapShared::patch_archived_heap_embedded_pointers(MemRegion region, address oopmap,
                                                       size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);

#ifndef PRODUCT
  ResourceMark rm;
  ResourceBitMap checkBm = calculate_oopmap(region);
  assert(bm.is_same(checkBm), "sanity");
#endif

  PatchEmbeddedPointers patcher((narrowOop*)region.start());
  bm.iterate(&patcher);
}

#endif // INCLUDE_CDS_JAVA_HEAP