1 /* 2 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "utilities/bitMap.inline.hpp"

#if INCLUDE_CDS_JAVA_HEAP

// Head of the dump-time linked list of per-class subgraph info. Entries are
// prepended by get_subgraph_info() and consumed (then freed) by
// build_archived_subgraph_info_records().
KlassSubGraphInfo* HeapShared::_subgraph_info_list = NULL;

// Number of ArchivedKlassSubGraphInfoRecord entries; written into the archive
// header at dump time, read back at runtime.
int HeapShared::_num_archived_subgraph_info_records = 0;

// Read-only archived array holding one record per class that has archived
// object sub-graphs.
Array<ArchivedKlassSubGraphInfoRecord>* HeapShared::_archived_subgraph_info_records = NULL;

// Linear search of the dump-time subgraph info list; returns NULL if k has no
// entry yet. Currently there is only one class mirror (ArchivedModuleGraph)
// with archived sub-graphs, so the list is short.
KlassSubGraphInfo* HeapShared::find_subgraph_info(Klass* k) {
  KlassSubGraphInfo* info = _subgraph_info_list;
  while (info != NULL) {
    if (info->klass() == k) {
      return info;
    }
    info = info->next();
  }
  return NULL;
}

// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the relocated
// Klass* of the original k.
64 KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) { 65 Klass* relocated_k = MetaspaceShared::get_relocated_klass(k); 66 KlassSubGraphInfo* info = find_subgraph_info(relocated_k); 67 if (info != NULL) { 68 return info; 69 } 70 71 info = new KlassSubGraphInfo(relocated_k, _subgraph_info_list); 72 _subgraph_info_list = info; 73 return info; 74 } 75 76 address HeapShared::_narrow_oop_base; 77 int HeapShared::_narrow_oop_shift; 78 79 int HeapShared::num_of_subgraph_infos() { 80 int num = 0; 81 KlassSubGraphInfo* info = _subgraph_info_list; 82 while (info != NULL) { 83 num ++; 84 info = info->next(); 85 } 86 return num; 87 } 88 89 // Add an entry field to the current KlassSubGraphInfo. 90 void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) { 91 assert(DumpSharedSpaces, "dump time only"); 92 if (_subgraph_entry_fields == NULL) { 93 _subgraph_entry_fields = 94 new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, true); 95 } 96 _subgraph_entry_fields->append((juint)static_field_offset); 97 _subgraph_entry_fields->append(CompressedOops::encode(v)); 98 } 99 100 // Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs. 101 // Only objects of boot classes can be included in sub-graph. 102 void KlassSubGraphInfo::add_subgraph_klass(Klass* orig_k, Klass *relocated_k) { 103 assert(DumpSharedSpaces, "dump time only"); 104 assert(relocated_k == MetaspaceShared::get_relocated_klass(orig_k), 105 "must be the relocated Klass in the shared space"); 106 107 if (_subgraph_klasses == NULL) { 108 _subgraph_klasses = 109 new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, true); 110 } 111 112 assert(relocated_k->is_shared(), "must be a shared class"); 113 114 if (_k == relocated_k) { 115 // Don't add the Klass containing the sub-graph to it's own klass 116 // initialization list. 
117 return; 118 } 119 120 if (relocated_k->is_instance_klass()) { 121 assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(), 122 "must be boot class"); 123 // SystemDictionary::xxx_klass() are not updated, need to check 124 // the original Klass* 125 if (orig_k == SystemDictionary::String_klass() || 126 orig_k == SystemDictionary::Object_klass()) { 127 // Initialized early during VM initialization. No need to be added 128 // to the sub-graph object class list. 129 return; 130 } 131 } else if (relocated_k->is_objArray_klass()) { 132 Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass(); 133 if (abk->is_instance_klass()) { 134 assert(InstanceKlass::cast(abk)->is_shared_boot_class(), 135 "must be boot class"); 136 } 137 if (relocated_k == Universe::objectArrayKlassObj()) { 138 // Initialized early during Universe::genesis. No need to be added 139 // to the list. 140 return; 141 } 142 } else { 143 assert(relocated_k->is_typeArray_klass(), "must be"); 144 // Primitive type arrays are created early during Universe::genesis. 145 return; 146 } 147 148 if (log_is_enabled(Debug, cds, heap)) { 149 if (!_subgraph_klasses->contains(relocated_k)) { 150 ResourceMark rm; 151 log_debug(cds, heap)("Adding klass %s", orig_k->external_name()); 152 } 153 } 154 155 _subgraph_klasses->append_if_missing(relocated_k); 156 } 157 158 // Initialize an archived subgraph_info_record from the given KlassSubGraphInfo. 
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = info->klass();   // already the relocated Klass* (see get_subgraph_info)
  _next = NULL;
  _entry_field_records = NULL;
  _subgraph_klasses = NULL;

  // populate the entry fields: copied verbatim into a read-only archived array
  GrowableArray<juint>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != NULL) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 2 == 0, "sanity");  // (offset, value) pairs
    _entry_field_records =
      MetaspaceShared::new_ro_array<juint>(num_entry_fields);
    for (int i = 0 ; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
  }

  // the Klasses of the objects in the sub-graphs, copied into a read-only
  // archived array
  GrowableArray<Klass*>* subgraph_klasses = info->subgraph_klasses();
  if (subgraph_klasses != NULL) {
    int num_subgraphs_klasses = subgraph_klasses->length();
    _subgraph_klasses =
      MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses);
    for (int i = 0; i < num_subgraphs_klasses; i++) {
      Klass* subgraph_k = subgraph_klasses->at(i);
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)(
          "Archived object klass %s (%2d) => %s",
          _k->external_name(), i, subgraph_k->external_name());
      }
      _subgraph_klasses->at_put(i, subgraph_k);
    }
  }
}

// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class mirror. The entry
//   points are static fields in the mirror. For each entry point, the field
//   offset and value are recorded in the sub-graph info. The values are stored
//   back to the corresponding fields at runtime.
// - A list of klasses that need to be loaded/initialized before archived
//   java object sub-graph can be accessed at runtime.
//
// The records are saved in the archive file and reloaded at runtime. Currently
// there is only one class mirror (ArchivedModuleGraph) with archived sub-graphs.
206 // 207 // Layout of the archived subgraph info records: 208 // 209 // records_size | num_records | records* 210 // ArchivedKlassSubGraphInfoRecord | entry_fields | subgraph_klasses 211 size_t HeapShared::build_archived_subgraph_info_records(int num_records) { 212 // remember the start address 213 char* start_p = MetaspaceShared::read_only_space_top(); 214 215 // now populate the archived subgraph infos, which will be saved in the 216 // archive file 217 _archived_subgraph_info_records = 218 MetaspaceShared::new_ro_array<ArchivedKlassSubGraphInfoRecord>(num_records); 219 KlassSubGraphInfo* info = _subgraph_info_list; 220 int i = 0; 221 while (info != NULL) { 222 assert(i < _archived_subgraph_info_records->length(), "sanity"); 223 ArchivedKlassSubGraphInfoRecord* record = 224 _archived_subgraph_info_records->adr_at(i); 225 record->init(info); 226 info = info->next(); 227 i ++; 228 } 229 230 // _subgraph_info_list is no longer needed 231 delete _subgraph_info_list; 232 _subgraph_info_list = NULL; 233 234 char* end_p = MetaspaceShared::read_only_space_top(); 235 size_t records_size = end_p - start_p; 236 return records_size; 237 } 238 239 // Write the subgraph info records in the shared _ro region 240 void HeapShared::write_archived_subgraph_infos() { 241 assert(DumpSharedSpaces, "dump time only"); 242 243 Array<intptr_t>* records_header = MetaspaceShared::new_ro_array<intptr_t>(3); 244 245 _num_archived_subgraph_info_records = num_of_subgraph_infos(); 246 size_t records_size = build_archived_subgraph_info_records( 247 _num_archived_subgraph_info_records); 248 249 // Now write the header information: 250 // records_size, num_records, _archived_subgraph_info_records 251 assert(records_header != NULL, "sanity"); 252 intptr_t* p = (intptr_t*)(records_header->data()); 253 *p = (intptr_t)records_size; 254 p ++; 255 *p = (intptr_t)_num_archived_subgraph_info_records; 256 p ++; 257 *p = (intptr_t)_archived_subgraph_info_records; 258 } 259 260 char* 
HeapShared::read_archived_subgraph_infos(char* buffer) { 261 Array<intptr_t>* records_header = (Array<intptr_t>*)buffer; 262 intptr_t* p = (intptr_t*)(records_header->data()); 263 size_t records_size = (size_t)(*p); 264 p ++; 265 _num_archived_subgraph_info_records = *p; 266 p ++; 267 _archived_subgraph_info_records = 268 (Array<ArchivedKlassSubGraphInfoRecord>*)(*p); 269 270 buffer = (char*)_archived_subgraph_info_records + records_size; 271 return buffer; 272 } 273 274 void HeapShared::initialize_from_archived_subgraph(Klass* k) { 275 if (!MetaspaceShared::open_archive_heap_region_mapped()) { 276 return; // nothing to do 277 } 278 279 if (_num_archived_subgraph_info_records == 0) { 280 return; // no subgraph info records 281 } 282 283 // Initialize from archived data. Currently only ArchivedModuleGraph 284 // has archived object subgraphs, which is used during VM initialization 285 // time when bootstraping the system modules. No lock is needed. 286 Thread* THREAD = Thread::current(); 287 for (int i = 0; i < _archived_subgraph_info_records->length(); i++) { 288 ArchivedKlassSubGraphInfoRecord* record = _archived_subgraph_info_records->adr_at(i); 289 if (record->klass() == k) { 290 int i; 291 // Found the archived subgraph info record for the requesting klass. 292 // Load/link/initialize the klasses of the objects in the subgraph. 293 // NULL class loader is used. 
294 Array<Klass*>* klasses = record->subgraph_klasses(); 295 if (klasses != NULL) { 296 for (i = 0; i < klasses->length(); i++) { 297 Klass* obj_k = klasses->at(i); 298 Klass* resolved_k = SystemDictionary::resolve_or_null( 299 (obj_k)->name(), THREAD); 300 if (resolved_k != obj_k) { 301 return; 302 } 303 if ((obj_k)->is_instance_klass()) { 304 InstanceKlass* ik = InstanceKlass::cast(obj_k); 305 ik->initialize(THREAD); 306 } else if ((obj_k)->is_objArray_klass()) { 307 ObjArrayKlass* oak = ObjArrayKlass::cast(obj_k); 308 oak->initialize(THREAD); 309 } 310 } 311 } 312 313 if (HAS_PENDING_EXCEPTION) { 314 CLEAR_PENDING_EXCEPTION; 315 // None of the field value will be set if there was an exception. 316 // The java code will not see any of the archived objects in the 317 // subgraphs referenced from k in this case. 318 return; 319 } 320 321 // Load the subgraph entry fields from the record and store them back to 322 // the corresponding fields within the mirror. 323 oop m = k->java_mirror(); 324 Array<juint>* entry_field_records = record->entry_field_records(); 325 if (entry_field_records != NULL) { 326 int efr_len = entry_field_records->length(); 327 assert(efr_len % 2 == 0, "sanity"); 328 for (i = 0; i < efr_len;) { 329 int field_offset = entry_field_records->at(i); 330 // The object refereced by the field becomes 'known' by GC from this 331 // point. All objects in the subgraph reachable from the object are 332 // also 'known' by GC. 333 oop v = MetaspaceShared::materialize_archived_object( 334 entry_field_records->at(i+1)); 335 m->obj_field_put(field_offset, v); 336 i += 2; 337 } 338 } 339 340 // Done. Java code can see the archived sub-graphs referenced from k's 341 // mirror after this point. 
342 return; 343 } 344 } 345 } 346 347 class WalkOopAndArchiveClosure: public BasicOopIterateClosure { 348 int _level; 349 oop _orig_referencing_obj; 350 oop _archived_referencing_obj; 351 Thread* _thread; 352 public: 353 WalkOopAndArchiveClosure(int level, 354 oop orig, oop archived, TRAPS) : _level(level), 355 _orig_referencing_obj(orig), 356 _archived_referencing_obj(archived), 357 _thread(THREAD) {} 358 void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); } 359 void do_oop( oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); } 360 361 protected: 362 template <class T> void do_oop_work(T *p) { 363 oop obj = RawAccess<>::oop_load(p); 364 if (!CompressedOops::is_null(obj)) { 365 assert(!MetaspaceShared::is_archive_object(obj), 366 "original objects must not directly point to archived object"); 367 size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char)); 368 T* new_p = (T*)(address(_archived_referencing_obj) + field_delta); 369 370 // A java.lang.Class instance cannot be included in an archived 371 // object sub-graph. 
372 if (java_lang_Class::is_instance(obj)) { 373 log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", _level); 374 vm_exit(1); 375 } 376 377 if (log_is_enabled(Debug, cds, heap)) { 378 ResourceMark rm; 379 log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size %d %s", _level, 380 _orig_referencing_obj->klass()->external_name(), field_delta, 381 p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name()); 382 LogTarget(Trace, cds, heap) log; 383 LogStream out(log); 384 obj->print_on(&out); 385 } 386 387 oop archived = MetaspaceShared::find_archived_heap_object(obj); 388 if (archived == NULL) { 389 Thread* THREAD = _thread; 390 archived = HeapShared::archive_reachable_objects_from(_level + 1, obj, THREAD); 391 assert(archived != NULL, "VM should have exited"); 392 } 393 assert(MetaspaceShared::is_archive_object(archived), "must be"); 394 395 // Update the reference in the archived copy of the referencing object. 396 log_debug(cds, heap)("(%d) archiving oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT, 397 _level, p2i(new_p), p2i(obj), p2i(archived)); 398 RawAccess<IS_NOT_NULL>::oop_store(new_p, archived); 399 } 400 } 401 }; 402 403 oop HeapShared::archive_reachable_objects_from(int level, oop orig_obj, TRAPS) { 404 assert(orig_obj != NULL, "must be"); 405 assert(!MetaspaceShared::is_archive_object(orig_obj), "sanity"); 406 407 // get the archived copy of the field referenced object 408 oop archived_obj = MetaspaceShared::archive_heap_object(orig_obj, THREAD); 409 if (archived_obj == NULL) { 410 // Skip archiving the sub-graph referenced from the current entry field. 
411 ResourceMark rm; 412 log_error(cds, heap)( 413 "Cannot archive the sub-graph referenced from %s object (" 414 PTR_FORMAT ") size %d, skipped.", 415 orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize); 416 if (level == 1) { 417 // See runtime/appcds/cacheObject/ArchivedIntegerCacheTest.java 418 return NULL; 419 } else { 420 // We don't know how to handle an object that has been archived, but some of its reachable 421 // objects cannot be archived. Bail out for now. We might need to fix this in the future if 422 // we have a real use case. 423 vm_exit(1); 424 } 425 } 426 427 WalkOopAndArchiveClosure walker(level, orig_obj, archived_obj, THREAD); 428 orig_obj->oop_iterate(&walker); 429 assert(archived_obj != NULL, "must be"); 430 return archived_obj; 431 } 432 433 // 434 // Start from the given static field in a java mirror and archive the 435 // complete sub-graph of java heap objects that are reached directly 436 // or indirectly from the starting object by following references. 437 // Sub-graph archiving restrictions (current): 438 // 439 // - All classes of objects in the archived sub-graph (including the 440 // entry class) must be boot class only. 441 // - No java.lang.Class instance (java mirror) can be included inside 442 // an archived sub-graph. Mirror can only be the sub-graph entry object. 443 // 444 // The Java heap object sub-graph archiving process (see 445 // WalkOopAndArchiveClosure): 446 // 447 // 1) Java object sub-graph archiving starts from a given static field 448 // within a Class instance (java mirror). If the static field is a 449 // refererence field and points to a non-null java object, proceed to 450 // the next step. 451 // 452 // 2) Archives the referenced java object. If an archived copy of the 453 // current object already exists, updates the pointer in the archived 454 // copy of the referencing object to point to the current archived object. 455 // Otherwise, proceed to the next step. 
//
// 3) Follows all references within the current java object and recursively
//    archive the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of referencing object to
//    point to the current archived object.
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
                                                             const char* klass_name,
                                                             int field_offset,
                                                             const char* field_name,
                                                             TRAPS) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
  oop archived_m = MetaspaceShared::find_archived_heap_object(m);
  if (CompressedOops::is_null(archived_m)) {
    // The mirror itself was not archived; nothing to record.
    return;
  }

  KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
  oop f = m->obj_field(field_offset);

  log_debug(cds, heap)("Start archiving from: %s::%s ", klass_name, field_name);
  if (log_is_enabled(Trace, cds, heap) && !CompressedOops::is_null(f)) {
    LogTarget(Trace, cds, heap) log;
    LogStream out(log);
    f->print_on(&out);
  }

  if (!CompressedOops::is_null(f)) {
    // Archive the complete sub-graph reachable from the field value
    // (level 1 = entry point).
    oop af = archive_reachable_objects_from(1, f, CHECK);
    if (af == NULL) {
      log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
                           klass_name, field_name);
    } else {
      // Note: the field value is not preserved in the archived mirror.
      // Record the field as a new subGraph entry point. The recorded
      // information is restored from the archive at runtime.
      subgraph_info->add_subgraph_entry_field(field_offset, af);
      log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
    }
  } else {
    // The field contains null, we still need to record the entry point,
    // so it can be restored at runtime.
    subgraph_info->add_subgraph_entry_field(field_offset, NULL);
  }
}

// Dump-time table of objects already visited while recording subgraph
// klasses; allocated/freed by start/done_recording_subgraph_klasses().
HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;

// Returns true if obj was already visited during the current recording pass;
// otherwise marks it as seen and returns false.
bool HeapShared::has_been_seen_during_subgraph_klasses_recording(oop obj) {
  if (_seen_objects_table->get(obj) == NULL) {
    _seen_objects_table->put(obj, true);
    return false;
  } else {
    return true;
  }
}

// Set up the seen-objects table before recording the klasses reachable from
// k's archived fields. (k is currently unused beyond logging via class_name.)
void HeapShared::start_recording_subgraph_klasses(InstanceKlass *k, const char* class_name) {
  log_info(cds, heap)("Record classes for archived fields in %s", class_name);
  assert(_seen_objects_table == NULL, "must be");
  _seen_objects_table = new (ResourceObj::C_HEAP, mtClass)SeenObjectsTable();
}

// Tear down the seen-objects table once all fields of k have been recorded.
void HeapShared::done_recording_subgraph_klasses(InstanceKlass *k) {
  delete _seen_objects_table;
  _seen_objects_table = NULL;
}

// Record the klasses of all objects reachable from one static field of k
// into k's KlassSubGraphInfo.
void HeapShared::record_subgraph_klasses_from_static_field(InstanceKlass *k,
                                                           const char* klass_name,
                                                           int field_offset,
                                                           const char* field_name) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  log_debug(cds, heap)("Start recording from: %s::%s", klass_name, field_name);

  oop m = k->java_mirror();
  oop f = m->obj_field(field_offset);
  record_subgraph_klasses_for(get_subgraph_info(k), f);
}

// Closure that recurses into each reference of an object, recording the klass
// of every not-yet-seen reachable object into the subgraph info. The
// seen-objects table guarantees termination on cyclic graphs.
class RecordKlassesClosure: public BasicOopIterateClosure {
  KlassSubGraphInfo* _subgraph_info;
 public:
  RecordKlassesClosure(KlassSubGraphInfo* subgraph_info) :
    _subgraph_info(subgraph_info) {}
  void do_oop(narrowOop *p) { do_oop_work(p); }
  void do_oop(      oop *p) { do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      assert(!java_lang_Class::is_instance(obj), "should have been excluded");

      if (!HeapShared::has_been_seen_during_subgraph_klasses_recording(obj)) {
        HeapShared::record_subgraph_klasses_for(_subgraph_info, obj);
      }
    }
  }
};

// Record the klass of obj and, transitively, of everything obj references.
// Objects without an archived copy (e.g. skipped because they were too big)
// terminate the recursion.
void HeapShared::record_subgraph_klasses_for(KlassSubGraphInfo* subgraph_info, oop obj) {
  assert(!java_lang_Class::is_instance(obj), "should have been excluded");

  if (!CompressedOops::is_null(obj)) {
    oop archived = MetaspaceShared::find_archived_heap_object(obj);
    if (CompressedOops::is_null(archived)) {
      // Object was skipped because it was too big
      return;
    }

    // The archived copy's klass is the relocated Klass*.
    Klass *orig_k = obj->klass();
    Klass *relocated_k = archived->klass();
    subgraph_info->add_subgraph_klass(orig_k, relocated_k);

    RecordKlassesClosure walker(subgraph_info);
    obj->oop_iterate(&walker);
  }
}

// One row of the archivable_static_fields table below. klass/offset/type are
// filled in later by init_archivable_static_fields().
struct ArchivableStaticFieldInfo {
  const char* klass_name;
  const char* field_name;
  InstanceKlass* klass;
  int offset;
  BasicType type;
};

// If you add new entries to this table, you should know what you're doing!
// Note: record_subgraph_klasses() relies on entries of the same class being
// consecutive and sharing the identical klass_name string literal.
static ArchivableStaticFieldInfo archivable_static_fields[] = {
  {"jdk/internal/module/ArchivedModuleGraph", "archivedSystemModules"},
  {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleFinder"},
  {"jdk/internal/module/ArchivedModuleGraph", "archivedMainModule"},
  {"jdk/internal/module/ArchivedModuleGraph", "archivedConfiguration"},
  {"java/util/ImmutableCollections$ListN", "EMPTY_LIST"},
  {"java/util/ImmutableCollections$MapN", "EMPTY_MAP"},
  {"java/util/ImmutableCollections$SetN", "EMPTY_SET"},
  {"java/lang/Integer$IntegerCache", "archivedCache"},
  {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
};

const static int num_archivable_static_fields = sizeof(archivable_static_fields) / sizeof(ArchivableStaticFieldInfo);

// FieldClosure that locates a static field by name and remembers its offset
// and basic type.
class ArchivableStaticFieldFinder: public FieldClosure {
  InstanceKlass* _ik;
  Symbol* _field_name;
  bool _found;
  int _offset;
  BasicType _type;
 public:
  ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
    _ik(ik), _field_name(field_name), _found(false), _offset(-1), _type(T_ILLEGAL) {}

  virtual void do_field(fieldDescriptor* fd) {
    if (fd->name() == _field_name) {
      assert(!_found, "fields cannot be overloaded");
      _found = true;
      _offset = fd->offset();
      _type = fd->field_type();
      assert(_type == T_OBJECT || _type == T_ARRAY, "can archive only obj or array fields");
    }
  }
  bool found() { return _found; }
  int offset() { return _offset; }
  BasicType type() { return _type; }
};

// Resolve each table entry's class and locate the named static field,
// filling in the klass/offset/type columns of archivable_static_fields.
void HeapShared::init_archivable_static_fields(Thread* THREAD) {
  for (int i = 0; i < num_archivable_static_fields; i++) {
    ArchivableStaticFieldInfo* info = &archivable_static_fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name, THREAD);
    TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name, THREAD);

    Klass* k = SystemDictionary::resolve_or_null(klass_name, THREAD);
    assert(k != NULL && !HAS_PENDING_EXCEPTION, "class must exist");
    InstanceKlass* ik = InstanceKlass::cast(k);

    ArchivableStaticFieldFinder finder(ik, field_name);
    ik->do_local_static_fields(&finder);
    assert(finder.found(), "field must exist");

    info->klass = ik;
    info->offset = finder.offset();
    info->type = finder.type();

    assert(info->type == T_OBJECT || info->type == T_ARRAY, "must be");
  }
}

// Archive the object sub-graphs reachable from every field in the table,
// then record the klasses needed to restore them at runtime.
void HeapShared::archive_module_graph_objects(Thread* THREAD) {
  for (int i = 0; i < num_archivable_static_fields; i++) {
    ArchivableStaticFieldInfo* info = &archivable_static_fields[i];
    archive_reachable_objects_from_static_field(info->klass, info->klass_name,
                                                info->offset, info->field_name, CHECK);
  }

  record_subgraph_klasses();
}

// For each class X that has one or more archived fields, create a list of
// all the classes of the objects that can be reached by any of these static
// fields.
663 // 664 // At runtime, these classes are initialized before X's archived fields 665 // are restored by HeapShared::initialize_from_archived_subgraph(). 666 void HeapShared::record_subgraph_klasses() { 667 for (int i = 0; i < num_archivable_static_fields; ) { 668 ArchivableStaticFieldInfo* info = &archivable_static_fields[i]; 669 const char* klass_name = info->klass_name; 670 start_recording_subgraph_klasses(info->klass, klass_name); 671 672 int j = i; 673 while (j < num_archivable_static_fields) { 674 ArchivableStaticFieldInfo* f = &archivable_static_fields[j]; 675 if (f->klass_name == klass_name) { 676 record_subgraph_klasses_from_static_field(f->klass, f->klass_name, 677 f->offset, f->field_name); 678 j++; 679 } else { 680 break; 681 } 682 } 683 done_recording_subgraph_klasses(info->klass); 684 i = j; 685 } 686 } 687 688 // At dump-time, find the location of all the non-null oop pointers in an archived heap 689 // region. This way we can quickly relocate all the pointers without using 690 // BasicOopIterateClosure at runtime. 
691 class FindEmbeddedNonNullPointers: public BasicOopIterateClosure { 692 narrowOop* _start; 693 BitMap *_oopmap; 694 int _num_total_oops; 695 int _num_null_oops; 696 public: 697 FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap) 698 : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {} 699 700 virtual bool should_verify_oops(void) { 701 return false; 702 } 703 virtual void do_oop(narrowOop* p) { 704 _num_total_oops ++; 705 narrowOop v = *p; 706 if (!CompressedOops::is_null(v)) { 707 size_t idx = p - _start; 708 _oopmap->set_bit(idx); 709 } else { 710 _num_null_oops ++; 711 } 712 } 713 virtual void do_oop(oop *p) { 714 ShouldNotReachHere(); 715 } 716 int num_total_oops() const { return _num_total_oops; } 717 int num_null_oops() const { return _num_null_oops; } 718 }; 719 720 ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) { 721 assert(UseCompressedOops, "must be"); 722 size_t num_bits = region.byte_size() / sizeof(narrowOop); 723 ResourceBitMap oopmap(num_bits); 724 725 HeapWord* p = region.start(); 726 HeapWord* end = region.end(); 727 FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap); 728 729 int num_objs = 0; 730 while (p < end) { 731 oop o = (oop)p; 732 o->oop_iterate(&finder); 733 p += o->size(); 734 ++ num_objs; 735 } 736 737 log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d", 738 num_objs, finder.num_total_oops(), finder.num_null_oops()); 739 return oopmap; 740 } 741 742 void HeapShared::init_narrow_oop_decoding(address base, int shift) { 743 _narrow_oop_base = base; 744 _narrow_oop_shift = shift; 745 } 746 747 // Patch all the embedded oop pointers inside an archived heap region, 748 // to be consistent with the runtime oop encoding. 
class PatchEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;  // first narrowOop slot of the region being patched

 public:
  PatchEmbeddedPointers(narrowOop* start) : _start(start) {}

  // Called for each set bit in the oopmap; 'offset' is the index of a
  // narrowOop slot that held a non-null oop at dump time.
  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    // Decode with the dump-time encoding, then store through the normal oop
    // store path, which re-encodes with the runtime encoding.
    oop o = HeapShared::decode_with_archived_oop_encoding_mode(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;  // keep iterating
  }
};

void HeapShared::patch_archived_heap_embedded_pointers(MemRegion region, address oopmap,
                                                       size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);

#ifndef PRODUCT
  // In debug builds, re-derive the oopmap from the mapped region and verify
  // it matches the one stored in the archive.
  ResourceMark rm;
  ResourceBitMap checkBm = calculate_oopmap(region);
  assert(bm.is_same(checkBm), "sanity");
#endif

  PatchEmbeddedPointers patcher((narrowOop*)region.start());
  bm.iterate(&patcher);
}

#endif // INCLUDE_CDS_JAVA_HEAP