1 /* 2 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/javaClasses.inline.hpp" 27 #include "classfile/symbolTable.hpp" 28 #include "classfile/vmSymbols.hpp" 29 #include "logging/log.hpp" 30 #include "logging/logMessage.hpp" 31 #include "logging/logStream.hpp" 32 #include "memory/heapShared.inline.hpp" 33 #include "memory/iterator.inline.hpp" 34 #include "memory/metadataFactory.hpp" 35 #include "memory/metaspaceClosure.hpp" 36 #include "memory/metaspaceShared.hpp" 37 #include "memory/resourceArea.hpp" 38 #include "oops/compressedOops.inline.hpp" 39 #include "oops/oop.inline.hpp" 40 #include "runtime/fieldDescriptor.inline.hpp" 41 #include "utilities/bitMap.inline.hpp" 42 43 #if INCLUDE_CDS_JAVA_HEAP 44 KlassSubGraphInfo* HeapShared::_subgraph_info_list = NULL; 45 int HeapShared::_num_archived_subgraph_info_records = 0; 46 Array<ArchivedKlassSubGraphInfoRecord>* HeapShared::_archived_subgraph_info_records = NULL; 47 48 // Currently there is only one class mirror (ArchivedModuleGraph) with archived 49 // sub-graphs. 50 KlassSubGraphInfo* HeapShared::find_subgraph_info(Klass* k) { 51 KlassSubGraphInfo* info = _subgraph_info_list; 52 while (info != NULL) { 53 if (info->klass() == k) { 54 return info; 55 } 56 info = info->next(); 57 } 58 return NULL; 59 } 60 61 // Get the subgraph_info for Klass k. A new subgraph_info is created if 62 // there is no existing one for k. The subgraph_info records the relocated 63 // Klass* of the original k. 
// Returns the KlassSubGraphInfo for (the relocated copy of) k, creating a
// new record and prepending it to _subgraph_info_list if none exists yet.
// Lookup is keyed on the relocated Klass*, i.e. the copy of k that lives in
// the shared space at dump time.
KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
  KlassSubGraphInfo* info = find_subgraph_info(relocated_k);
  if (info != NULL) {
    return info;
  }

  // Not found: create a new record and make it the new list head.
  info = new KlassSubGraphInfo(relocated_k, _subgraph_info_list);
  _subgraph_info_list = info;
  return info;
}

// Narrow-oop encoding parameters captured for the archived heap region; set
// via init_narrow_oop_decoding() and used when decoding archived narrowOops.
address HeapShared::_narrow_oop_base;
int HeapShared::_narrow_oop_shift;

// Counts the entries in _subgraph_info_list (linear walk of the list).
int HeapShared::num_of_subgraph_infos() {
  int num = 0;
  KlassSubGraphInfo* info = _subgraph_info_list;
  while (info != NULL) {
    num ++;
    info = info->next();
  }
  return num;
}

// Add an entry field to the current KlassSubGraphInfo.
// Each entry field is recorded as a (static_field_offset, encoded oop) pair
// appended to _subgraph_entry_fields; v is the archived copy of the field's
// referenced object (may be NULL when the field holds null).
void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) {
  assert(DumpSharedSpaces, "dump time only");
  if (_subgraph_entry_fields == NULL) {
    // Lazily allocated, C-heap backed so it survives resource scopes.
    _subgraph_entry_fields =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, true);
  }
  _subgraph_entry_fields->append((juint)static_field_offset);
  // Store the compressed encoding; it is materialized back into an oop at
  // runtime (see initialize_from_archived_subgraph).
  _subgraph_entry_fields->append(CompressedOops::encode(v));
}

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in sub-graph.
// Klasses that are guaranteed to be initialized before any archived sub-graph
// is accessed (String, Object, Object[], primitive arrays) are filtered out
// and never appended to the list.
void KlassSubGraphInfo::add_subgraph_klass(Klass* orig_k, Klass *relocated_k) {
  assert(DumpSharedSpaces, "dump time only");
  assert(relocated_k == MetaspaceShared::get_relocated_klass(orig_k),
         "must be the relocated Klass in the shared space");

  if (_subgraph_klasses == NULL) {
    _subgraph_klasses =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, true);
  }

  assert(relocated_k->is_shared(), "must be a shared class");

  if (_k == relocated_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (relocated_k->is_instance_klass()) {
    assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(),
          "must be boot class");
    // SystemDictionary::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == SystemDictionary::String_klass() ||
        orig_k == SystemDictionary::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
  } else if (relocated_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
            "must be boot class");
    }
    if (relocated_k == Universe::objectArrayKlassObj()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(relocated_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  // Log only when the klass is about to be appended for the first time.
  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_klasses->contains(relocated_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_klasses->append_if_missing(relocated_k);
}

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
159 void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) { 160 _k = info->klass(); 161 _next = NULL; 162 _entry_field_records = NULL; 163 _subgraph_klasses = NULL; 164 165 // populate the entry fields 166 GrowableArray<juint>* entry_fields = info->subgraph_entry_fields(); 167 if (entry_fields != NULL) { 168 int num_entry_fields = entry_fields->length(); 169 assert(num_entry_fields % 2 == 0, "sanity"); 170 _entry_field_records = 171 MetaspaceShared::new_ro_array<juint>(num_entry_fields); 172 for (int i = 0 ; i < num_entry_fields; i++) { 173 _entry_field_records->at_put(i, entry_fields->at(i)); 174 } 175 } 176 177 // the Klasses of the objects in the sub-graphs 178 GrowableArray<Klass*>* subgraph_klasses = info->subgraph_klasses(); 179 if (subgraph_klasses != NULL) { 180 int num_subgraphs_klasses = subgraph_klasses->length(); 181 _subgraph_klasses = 182 MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses); 183 for (int i = 0; i < num_subgraphs_klasses; i++) { 184 Klass* subgraph_k = subgraph_klasses->at(i); 185 if (log_is_enabled(Info, cds, heap)) { 186 ResourceMark rm; 187 log_info(cds, heap)( 188 "Archived object klass (%d): %s in %s sub-graphs", 189 i, subgraph_k->external_name(), _k->external_name()); 190 } 191 _subgraph_klasses->at_put(i, subgraph_k); 192 } 193 } 194 } 195 196 // Build the records of archived subgraph infos, which include: 197 // - Entry points to all subgraphs from the containing class mirror. The entry 198 // points are static fields in the mirror. For each entry point, the field 199 // offset and value are recorded in the sub-graph info. The value are stored 200 // back to the corresponding field at runtime. 201 // - A list of klasses that need to be loaded/initialized before archived 202 // java object sub-graph can be accessed at runtime. 203 // 204 // The records are saved in the archive file and reloaded at runtime. Currently 205 // there is only one class mirror (ArchivedModuleGraph) with archived sub-graphs. 
//
// Layout of the archived subgraph info records:
//
// records_size | num_records | records*
// ArchivedKlassSubGraphInfoRecord | entry_fields | subgraph_klasses
//
// Converts the dump-time _subgraph_info_list into an Array of
// ArchivedKlassSubGraphInfoRecord in the read-only space and returns the
// number of bytes the records (plus their nested arrays) consumed there.
size_t HeapShared::build_archived_subgraph_info_records(int num_records) {
  // remember the start address
  char* start_p = MetaspaceShared::read_only_space_top();

  // now populate the archived subgraph infos, which will be saved in the
  // archive file
  _archived_subgraph_info_records =
    MetaspaceShared::new_ro_array<ArchivedKlassSubGraphInfoRecord>(num_records);
  KlassSubGraphInfo* info = _subgraph_info_list;
  int i = 0;
  while (info != NULL) {
    assert(i < _archived_subgraph_info_records->length(), "sanity");
    ArchivedKlassSubGraphInfoRecord* record =
      _archived_subgraph_info_records->adr_at(i);
    record->init(info);
    info = info->next();
    i ++;
  }

  // _subgraph_info_list is no longer needed
  // NOTE(review): this deletes only the list head; presumably
  // ~KlassSubGraphInfo releases the rest of the chain, or the one-shot
  // dump-time leak is accepted — confirm against KlassSubGraphInfo's dtor.
  delete _subgraph_info_list;
  _subgraph_info_list = NULL;

  // The size is measured by how much the read-only space top advanced,
  // which includes the nested entry-field/klass arrays allocated by init().
  char* end_p = MetaspaceShared::read_only_space_top();
  size_t records_size = end_p - start_p;
  return records_size;
}

// Write the subgraph info records in the shared _ro region
void HeapShared::write_archived_subgraph_infos() {
  assert(DumpSharedSpaces, "dump time only");

  // Allocate the 3-slot header BEFORE building the records so it precedes
  // them in the read-only space.
  Array<intptr_t>* records_header = MetaspaceShared::new_ro_array<intptr_t>(3);

  _num_archived_subgraph_info_records = num_of_subgraph_infos();
  size_t records_size = build_archived_subgraph_info_records(
                             _num_archived_subgraph_info_records);

  // Now write the header information:
  // records_size, num_records, _archived_subgraph_info_records
  assert(records_header != NULL, "sanity");
  intptr_t* p = (intptr_t*)(records_header->data());
  *p = (intptr_t)records_size;
  p ++;
  *p = (intptr_t)_num_archived_subgraph_info_records;
  p ++;
  *p = (intptr_t)_archived_subgraph_info_records;
}

// Runtime counterpart of write_archived_subgraph_infos(): parses the 3-slot
// header at 'buffer', caches the record count/pointer in statics, and returns
// the address just past the records (the new read position in the archive).
// The layout must match what write_archived_subgraph_infos() produced.
char*
HeapShared::read_archived_subgraph_infos(char* buffer) {
  Array<intptr_t>* records_header = (Array<intptr_t>*)buffer;
  intptr_t* p = (intptr_t*)(records_header->data());
  size_t records_size = (size_t)(*p);
  p ++;
  _num_archived_subgraph_info_records = *p;
  p ++;
  _archived_subgraph_info_records =
    (Array<ArchivedKlassSubGraphInfoRecord>*)(*p);

  buffer = (char*)_archived_subgraph_info_records + records_size;
  return buffer;
}

// Runtime entry point: if k has an archived sub-graph record, resolve and
// initialize all klasses the sub-graph needs, then materialize the archived
// objects and store them into k's mirror's static fields. Returns silently
// (leaving the fields untouched) on any mismatch or exception, in which case
// Java code simply does not see the archived objects.
void HeapShared::initialize_from_archived_subgraph(Klass* k) {
  if (!MetaspaceShared::open_archive_heap_region_mapped()) {
    return; // nothing to do
  }

  if (_num_archived_subgraph_info_records == 0) {
    return; // no subgraph info records
  }

  // Initialize from archived data. Currently only ArchivedModuleGraph
  // has archived object subgraphs, which is used during VM initialization
  // time when bootstraping the system modules. No lock is needed.
  Thread* THREAD = Thread::current();
  for (int i = 0; i < _archived_subgraph_info_records->length(); i++) {
    ArchivedKlassSubGraphInfoRecord* record = _archived_subgraph_info_records->adr_at(i);
    if (record->klass() == k) {
      // NOTE(review): this inner 'i' shadows the outer loop index; all code
      // below uses the inner one, and the function returns before the outer
      // loop resumes, so behavior is correct — but the shadowing is fragile.
      int i;
      // Found the archived subgraph info record for the requesting klass.
      // Load/link/initialize the klasses of the objects in the subgraph.
      // NULL class loader is used.
      Array<Klass*>* klasses = record->subgraph_klasses();
      if (klasses != NULL) {
        for (i = 0; i < klasses->length(); i++) {
          Klass* obj_k = klasses->at(i);
          Klass* resolved_k = SystemDictionary::resolve_or_null(
            (obj_k)->name(), THREAD);
          if (resolved_k != obj_k) {
            // The boot-loader resolution did not yield the archived Klass;
            // bail out without restoring any fields.
            return;
          }
          if ((obj_k)->is_instance_klass()) {
            InstanceKlass* ik = InstanceKlass::cast(obj_k);
            ik->initialize(THREAD);
          } else if ((obj_k)->is_objArray_klass()) {
            ObjArrayKlass* oak = ObjArrayKlass::cast(obj_k);
            oak->initialize(THREAD);
          }
        }
      }

      if (HAS_PENDING_EXCEPTION) {
        CLEAR_PENDING_EXCEPTION;
        // None of the field value will be set if there was an exception.
        // The java code will not see any of the archived objects in the
        // subgraphs referenced from k in this case.
        return;
      }

      // Load the subgraph entry fields from the record and store them back to
      // the corresponding fields within the mirror.
      oop m = k->java_mirror();
      Array<juint>* entry_field_records = record->entry_field_records();
      if (entry_field_records != NULL) {
        int efr_len = entry_field_records->length();
        assert(efr_len % 2 == 0, "sanity");
        // Entries are (field_offset, encoded oop) pairs; step by 2.
        for (i = 0; i < efr_len;) {
          int field_offset = entry_field_records->at(i);
          // The object refereced by the field becomes 'known' by GC from this
          // point. All objects in the subgraph reachable from the object are
          // also 'known' by GC.
          oop v = MetaspaceShared::materialize_archived_object(
            entry_field_records->at(i+1));
          m->obj_field_put(field_offset, v);
          i += 2;
        }
      }

      // Done. Java code can see the archived sub-graphs referenced from k's
      // mirror after this point.
      return;
    }
  }
}

// Dump-time closure: for each non-null reference field of an object being
// archived, recursively archives the referenced object (if not already
// archived) and patches the corresponding slot in the archived copy of the
// referencing object. Exits the VM on any object that cannot be archived.
class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
  int _level;                    // recursion depth, for logging only
  oop _orig_referencing_obj;     // heap object whose fields are being walked
  oop _archived_referencing_obj; // its already-archived copy, to be patched
 public:
  WalkOopAndArchiveClosure(int level,
                           oop orig, oop archived) : _level(level),
                                                     _orig_referencing_obj(orig),
                                                     _archived_referencing_obj(archived) {}
  void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
  void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      // A java.lang.Class instance cannot be included in an archived
      // object sub-graph.
      if (java_lang_Class::is_instance(obj)) {
        log_error(cds, heap)("Unknown java.lang.Class object is in the archived sub-graph");
        vm_exit(1);
      }

      if (log_is_enabled(Debug, cds, heap)) {
        LogTarget(Debug, cds, heap) log;
        LogStream ls(log);
        outputStream* out = &ls;
        ResourceMark rm;
        log.print("(%d) %s <--- referenced from: %s",
                  _level, obj->klass()->external_name(),
                  CompressedOops::is_null(_orig_referencing_obj) ?
                         "" : _orig_referencing_obj->klass()->external_name());
        obj->print_on(out);
      }

      assert(!MetaspaceShared::is_archive_object(obj),
             "original objects must not directly point to archived object");

      // Compute the address of the same field slot inside the archived copy:
      // the archived object has the same field layout, so the byte offset of
      // p within the original object applies directly.
      size_t field_delta = pointer_delta(
        p, _orig_referencing_obj, sizeof(char));
      T* new_p = (T*)(address(_archived_referencing_obj) + field_delta);
      oop archived = MetaspaceShared::find_archived_heap_object(obj);
      if (archived != NULL) {
        // There is an archived copy existing, update reference to point
        // to the archived copy
        RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
        log_debug(cds, heap)("--- found existing archived copy, store archived " PTR_FORMAT " in " PTR_FORMAT,
                             p2i(archived), p2i(new_p));
        return;
      }

      Thread* THREAD = Thread::current();
      // Archive the current oop before iterating through its references
      archived = MetaspaceShared::archive_heap_object(obj, THREAD);
      if (archived == NULL) {
        // Inside a sub-graph a failed archive is fatal (unlike the entry
        // field case, which merely skips the sub-graph).
        ResourceMark rm;
        LogTarget(Error, cds, heap) log_err;
        LogStream ls_err(log_err);
        outputStream* out_err = &ls_err;
        log_err.print("Failed to archive %s object ("
                      PTR_FORMAT "), size[" SIZE_FORMAT "] in sub-graph",
                      obj->klass()->external_name(), p2i(obj), (size_t)obj->size());
        obj->print_on(out_err);
        vm_exit(1);
      }
      assert(MetaspaceShared::is_archive_object(archived), "must be archived");
      log_debug(cds, heap)("=== archiving oop " PTR_FORMAT " ==> " PTR_FORMAT,
                           p2i(obj), p2i(archived));

      // Following the references in the current oop and archive any
      // encountered objects during the process
      WalkOopAndArchiveClosure walker(_level + 1, obj, archived);
      obj->oop_iterate(&walker);

      // Update the reference in the archived copy of the referencing object
      RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
      log_debug(cds,heap)("=== store archived " PTR_FORMAT " in " PTR_FORMAT,
                          p2i(archived), p2i(new_p));
    }
  }
};

//
// Start from the given static field in a java mirror and archive the
// complete sub-graph of java heap objects that are reached directly
// or indirectly from the starting object by following references.
// Currently, only ArchivedModuleGraph class instance (mirror) has archived
// object subgraphs. Sub-graph archiving restrictions (current):
//
// - All classes of objects in the archived sub-graph (including the
//   entry class) must be boot class only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. Mirror can only be the sub-graph entry object.
//
// The Java heap object sub-graph archiving process (see
// WalkOopAndArchiveClosure):
//
// 1) Java object sub-graph archiving starts from a given static field
// within a Class instance (java mirror). If the static field is a
// refererence field and points to a non-null java object, proceed to
// the next step.
//
// 2) Archives the referenced java object. If an archived copy of the
// current object already exists, updates the pointer in the archived
// copy of the referencing object to point to the current archived object.
// Otherwise, proceed to the next step.
//
// 3) Follows all references within the current java object and recursively
// archive the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of referencing object to
// point to the current archived object.
// Dump-time entry point for one static field: archives the object graph
// reachable from k's static field at field_offset and records the field as a
// sub-graph entry point in k's KlassSubGraphInfo. A field whose referent
// cannot be archived is skipped (logged, not fatal); a null field is still
// recorded so null can be restored at runtime.
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
                                                             const char* klass_name,
                                                             int field_offset,
                                                             const char* field_name,
                                                             TRAPS) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
  oop archived_m = MetaspaceShared::find_archived_heap_object(m);
  if (CompressedOops::is_null(archived_m)) {
    // The mirror itself was not archived; nothing to record.
    return;
  }

  // obtain k's subGraph Info
  KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);

  // get the object referenced by the field
  oop f = m->obj_field(field_offset);

  if (log_is_enabled(Debug, cds, heap)) {
    LogTarget(Debug, cds, heap) log;
    LogStream ls(log);
    outputStream* out = &ls;
    log.print("Start archiving from: %s::%s ", klass_name, field_name);
    if (!CompressedOops::is_null(f)) {
      f->print_on(out);
    } else {
      log.print("null");
    }
  }

  if (!CompressedOops::is_null(f)) {
    // get the archived copy of the field referenced object
    oop af = MetaspaceShared::archive_heap_object(f, THREAD);
    if (af == NULL) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_info(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size[" SIZE_FORMAT "], skipped.",
        f->klass()->external_name(), p2i(f), (size_t)f->size());
      return;
    }
    if (!MetaspaceShared::is_archive_object(f)) {
      // Walk f's references and archive everything reachable from it.
      WalkOopAndArchiveClosure walker(1, f, af);
      f->oop_iterate(&walker);
    }

    // The field value is not preserved in the archived mirror.
    // Record the field as a new subGraph entry point. The recorded
    // information is restored from the archive at runtime.
    subgraph_info->add_subgraph_entry_field(field_offset, af);
    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm;
      log_info(cds, heap)(
        "Archived the sub-graph referenced from %s object " PTR_FORMAT,
        f->klass()->external_name(), p2i(f));
    }
  } else {
    // The field contains null, we still need to record the entry point,
    // so it can be restored at runtime.
    subgraph_info->add_subgraph_entry_field(field_offset, NULL);
  }
}

// Scratch table used while recording sub-graph klasses, so each object is
// visited only once; created/destroyed by the start/done functions below.
HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;

// Returns true if obj was already visited during the current recording pass;
// otherwise marks it as visited and returns false.
bool HeapShared::has_been_seen_during_subgraph_klasses_recording(oop obj) {
  if (_seen_objects_table->get(obj) == NULL) {
    _seen_objects_table->put(obj, true);
    return false;
  } else {
    return true;
  }
}

// Begins a recording pass for class k: allocates the fresh seen-objects table.
// Must be paired with done_recording_subgraph_klasses().
void HeapShared::start_recording_subgraph_klasses(InstanceKlass *k, const char* class_name) {
  log_info(cds, heap)("Record classes for archived fields in %s", class_name);
  assert(_seen_objects_table == NULL, "must be");
  _seen_objects_table = new (ResourceObj::C_HEAP, mtClass)SeenObjectsTable();
}

// Ends a recording pass: releases the seen-objects table.
void HeapShared::done_recording_subgraph_klasses(InstanceKlass *k) {
  delete _seen_objects_table;
  _seen_objects_table = NULL;
}

// Records, into k's KlassSubGraphInfo, the klasses of all objects reachable
// from k's static field at field_offset. Must run between start/done
// recording calls (uses _seen_objects_table via record_subgraph_klasses_for).
void HeapShared::record_subgraph_klasses_from_static_field(InstanceKlass *k,
                                                           const char* klass_name,
                                                           int field_offset,
                                                           const char* field_name) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  if (log_is_enabled(Debug, cds, heap)) {
    LogTarget(Debug, cds, heap) log;
    LogStream ls(log);
    outputStream* out = &ls;
    log.print("Start recording from: %s::%s", klass_name, field_name);
  }

  oop m = k->java_mirror();
  oop f = m->obj_field(field_offset);
  record_subgraph_klasses_for(get_subgraph_info(k), f);
}

// Closure that recurses into each not-yet-seen referenced object and records
// its klass; the seen-objects table prevents revisiting shared objects.
class RecordKlassesClosure: public BasicOopIterateClosure {
  KlassSubGraphInfo* _subgraph_info;
 public:
  RecordKlassesClosure(KlassSubGraphInfo* subgraph_info) :
    _subgraph_info(subgraph_info) {}
  void do_oop(narrowOop *p) { do_oop_work(p); }
  void do_oop(      oop *p) { do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      // Mirrors were rejected during archiving, so none can appear here.
      assert(!java_lang_Class::is_instance(obj), "should have been excluded");

      if (!HeapShared::has_been_seen_during_subgraph_klasses_recording(obj)) {
        HeapShared::record_subgraph_klasses_for(_subgraph_info, obj);
      }
    }
  }
};

// Records obj's klass (both the original and its relocated/archived copy)
// into subgraph_info, then recurses over obj's references. Objects without an
// archived copy (skipped because too big) are ignored.
void HeapShared::record_subgraph_klasses_for(KlassSubGraphInfo* subgraph_info, oop obj) {
  assert(!java_lang_Class::is_instance(obj), "should have been excluded");

  if (!CompressedOops::is_null(obj)) {
    oop archived = MetaspaceShared::find_archived_heap_object(obj);
    if (CompressedOops::is_null(archived)) {
      // Object was skipped because it was too big
      return;
    }

    // The archived copy's klass is the relocated (shared-space) Klass*.
    Klass *orig_k = obj->klass();
    Klass *relocated_k = archived->klass();
    subgraph_info->add_subgraph_klass(orig_k, relocated_k);

    RecordKlassesClosure walker(subgraph_info);
    obj->oop_iterate(&walker);
  }
}

// One row of the archivable-fields table below: the first two members are
// filled statically; klass/offset/type are resolved at dump time by
// init_archivable_static_fields().
struct ArchivableStaticFieldInfo {
  const char* klass_name;
  const char* field_name;
  InstanceKlass* klass;
  int offset;
  BasicType type;
};

// If you add new entries to this table, you should know what you're doing!
// Table of all static fields whose referenced object graphs get archived.
// Only klass_name/field_name are initialized here; klass, offset and type
// are zero-initialized and filled in by init_archivable_static_fields().
// Entries for the same class must be adjacent (record_subgraph_klasses()
// relies on it).
static ArchivableStaticFieldInfo archivable_static_fields[] = {
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedSystemModules"},
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedModuleFinder"},
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedMainModule"},
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedConfiguration"},
  {"java/util/ImmutableCollections$ListN",     "EMPTY_LIST"},
  {"java/util/ImmutableCollections$MapN",      "EMPTY_MAP"},
  {"java/util/ImmutableCollections$SetN",      "EMPTY_SET"},
  {"java/lang/Integer$IntegerCache",           "archivedCache"},
  {"java/lang/module/Configuration",           "EMPTY_CONFIGURATION"},
};

const static int num_archivable_static_fields = sizeof(archivable_static_fields) / sizeof(ArchivableStaticFieldInfo);

// FieldClosure that locates a static field by name within one InstanceKlass
// and captures its offset and BasicType.
class ArchivableStaticFieldFinder: public FieldClosure {
  InstanceKlass* _ik;
  Symbol* _field_name;
  bool _found;
  int _offset;
  BasicType _type;
 public:
  ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
    _ik(ik), _field_name(field_name), _found(false), _offset(-1), _type(T_ILLEGAL) {}

  virtual void do_field(fieldDescriptor* fd) {
    if (fd->name() == _field_name) {
      assert(!_found, "fields cannot be overloaded");
      _found = true;
      _offset = fd->offset();
      _type = fd->field_type();
      assert(_type == T_OBJECT || _type == T_ARRAY, "can archive only obj or array fields");
    }
  }
  bool found() { return _found; }
  int offset() { return _offset; }
  BasicType type() { return _type; }
};

// Resolves each table entry's class and field at dump time, filling in the
// klass/offset/type members of archivable_static_fields. Every entry must
// resolve; missing classes/fields are assertion failures.
void HeapShared::init_archivable_static_fields(Thread* THREAD) {
  for (int i = 0; i < num_archivable_static_fields; i++) {
    ArchivableStaticFieldInfo* info = &archivable_static_fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name, THREAD);
    TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name, THREAD);

    Klass* k = SystemDictionary::resolve_or_null(klass_name, THREAD);
    assert(k != NULL && !HAS_PENDING_EXCEPTION, "class must exist");
    InstanceKlass* ik = InstanceKlass::cast(k);

    ArchivableStaticFieldFinder finder(ik, field_name);
    ik->do_local_static_fields(&finder);
    assert(finder.found(), "field must exist");

    info->klass = ik;
    info->offset = finder.offset();
    info->type = finder.type();

    assert(info->type == T_OBJECT || info->type == T_ARRAY, "must be");
  }
}

// Archives the object graphs of every field in archivable_static_fields,
// then records the klasses needed to restore them at runtime.
void HeapShared::archive_module_graph_objects(Thread* THREAD) {
  for (int i = 0; i < num_archivable_static_fields; i++) {
    ArchivableStaticFieldInfo* info = &archivable_static_fields[i];
    archive_reachable_objects_from_static_field(info->klass, info->klass_name,
                                                info->offset, info->field_name, CHECK);
  }

  record_subgraph_klasses();
}

// For each class X that has one or more archived fields, create a list of
// all the class of the objects that can be reached by any of these static
// fields.
//
// At runtime, these classes are initialized before X's archived fields
// are restored by HeapShared::initialize_from_archived_subgraph().
void HeapShared::record_subgraph_klasses() {
  // Groups consecutive table entries that belong to the same class so the
  // seen-objects table is shared across all of that class's fields.
  // NOTE(review): f->klass_name == klass_name compares const char* values —
  // this relies on same-class entries in the table using the identical string
  // literal (literal pooling within this TU); confirm if the table changes.
  for (int i = 0; i < num_archivable_static_fields; ) {
    ArchivableStaticFieldInfo* info = &archivable_static_fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph_klasses(info->klass, klass_name);

    int j = i;
    while (j < num_archivable_static_fields) {
      ArchivableStaticFieldInfo* f = &archivable_static_fields[j];
      if (f->klass_name == klass_name) {
        record_subgraph_klasses_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name);
        j++;
      } else {
        break;
      }
    }
    done_recording_subgraph_klasses(info->klass);
    i = j;
  }
}

// At dump-time, find the location of all the non-null oop pointers in an archived heap
// region. This way we can quickly relocate all the pointers without using
// BasicOopIterateClosure at runtime.
class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
  narrowOop* _start;      // base of the region; bit i covers _start + i
  BitMap *_oopmap;        // output: one bit per narrowOop slot, set if non-null
  int _num_total_oops;    // statistics for logging only
  int _num_null_oops;
 public:
  FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap)
    : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}

  virtual bool should_verify_oops(void) {
    return false;
  }
  virtual void do_oop(narrowOop* p) {
    _num_total_oops ++;
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      size_t idx = p - _start;
      _oopmap->set_bit(idx);
    } else {
      _num_null_oops ++;
    }
  }
  virtual void do_oop(oop *p) {
    // Archived heap regions are only dumped with compressed oops.
    ShouldNotReachHere();
  }
  int num_total_oops() const { return _num_total_oops; }
  int num_null_oops()  const { return _num_null_oops; }
};

// Walks every object in 'region' and returns a bitmap with one bit per
// narrowOop-sized slot, set where the slot holds a non-null embedded oop.
// Requires UseCompressedOops. The bitmap is resource-allocated.
ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
  assert(UseCompressedOops, "must be");
  size_t num_bits = region.byte_size() / sizeof(narrowOop);
  ResourceBitMap oopmap(num_bits);

  HeapWord* p   = region.start();
  HeapWord* end = region.end();
  FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap);

  // The region is a dense sequence of objects; advance object by object.
  int num_objs = 0;
  while (p < end) {
    oop o = (oop)p;
    o->oop_iterate(&finder);
    p += o->size();
    ++ num_objs;
  }

  log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d",
                      num_objs, finder.num_total_oops(), finder.num_null_oops());
  return oopmap;
}

// Records the narrow-oop encoding (base/shift) that was in effect when the
// archive was dumped, for use by decode_with_archived_oop_encoding_mode().
void HeapShared::init_narrow_oop_decoding(address base, int shift) {
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}

// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

 public:
  PatchEmbeddedPointers(narrowOop* start) : _start(start) {}

  // Called for each set bit: re-encode the slot's value from the dump-time
  // narrow-oop encoding to the current runtime encoding.
  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = HeapShared::decode_with_archived_oop_encoding_mode(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;  // keep iterating
  }
};

// Runtime counterpart of calculate_oopmap(): uses the archived bitmap to
// visit exactly the non-null embedded oop slots in 'region' and rewrite each
// one under the current runtime oop encoding.
void HeapShared::patch_archived_heap_embedded_pointers(MemRegion region, address oopmap,
                                                       size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);

#ifndef PRODUCT
  // In debug builds, verify the archived bitmap against a freshly computed one.
  ResourceMark rm;
  ResourceBitMap checkBm = calculate_oopmap(region);
  assert(bm.is_same(checkBm), "sanity");
#endif

  PatchEmbeddedPointers patcher((narrowOop*)region.start());
  bm.iterate(&patcher);
}

#endif // INCLUDE_CDS_JAVA_HEAP