/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/javaClasses.inline.hpp" 27 #include "classfile/symbolTable.hpp" 28 #include "classfile/vmSymbols.hpp" 29 #include "logging/log.hpp" 30 #include "logging/logMessage.hpp" 31 #include "logging/logStream.hpp" 32 #include "memory/heapShared.inline.hpp" 33 #include "memory/iterator.inline.hpp" 34 #include "memory/metadataFactory.hpp" 35 #include "memory/metaspaceClosure.hpp" 36 #include "memory/metaspaceShared.hpp" 37 #include "memory/resourceArea.hpp" 38 #include "oops/compressedOops.inline.hpp" 39 #include "oops/oop.inline.hpp" 40 #include "runtime/fieldDescriptor.inline.hpp" 41 #include "utilities/bitMap.inline.hpp" 42 43 #if INCLUDE_CDS_JAVA_HEAP 44 KlassSubGraphInfo* HeapShared::_subgraph_info_list = NULL; 45 int HeapShared::_num_archived_subgraph_info_records = 0; 46 Array<ArchivedKlassSubGraphInfoRecord>* HeapShared::_archived_subgraph_info_records = NULL; 47 48 // Currently there is only one class mirror (ArchivedModuleGraph) with archived 49 // sub-graphs. 50 KlassSubGraphInfo* HeapShared::find_subgraph_info(Klass* k) { 51 KlassSubGraphInfo* info = _subgraph_info_list; 52 while (info != NULL) { 53 if (info->klass() == k) { 54 return info; 55 } 56 info = info->next(); 57 } 58 return NULL; 59 } 60 61 // Get the subgraph_info for Klass k. A new subgraph_info is created if 62 // there is no existing one for k. The subgraph_info records the relocated 63 // Klass* of the original k. 
64 KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) { 65 Klass* relocated_k = MetaspaceShared::get_relocated_klass(k); 66 KlassSubGraphInfo* info = find_subgraph_info(relocated_k); 67 if (info != NULL) { 68 return info; 69 } 70 71 info = new KlassSubGraphInfo(relocated_k, _subgraph_info_list); 72 _subgraph_info_list = info; 73 return info; 74 } 75 76 address HeapShared::_narrow_oop_base; 77 int HeapShared::_narrow_oop_shift; 78 79 int HeapShared::num_of_subgraph_infos() { 80 int num = 0; 81 KlassSubGraphInfo* info = _subgraph_info_list; 82 while (info != NULL) { 83 num ++; 84 info = info->next(); 85 } 86 return num; 87 } 88 89 // Add an entry field to the current KlassSubGraphInfo. 90 void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) { 91 assert(DumpSharedSpaces, "dump time only"); 92 if (_subgraph_entry_fields == NULL) { 93 _subgraph_entry_fields = 94 new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, true); 95 } 96 _subgraph_entry_fields->append((juint)static_field_offset); 97 _subgraph_entry_fields->append(CompressedOops::encode(v)); 98 } 99 100 // Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs. 101 // Only objects of boot classes can be included in sub-graph. 102 void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k, Klass *relocated_k) { 103 assert(DumpSharedSpaces, "dump time only"); 104 assert(relocated_k == MetaspaceShared::get_relocated_klass(orig_k), 105 "must be the relocated Klass in the shared space"); 106 107 if (_subgraph_object_klasses == NULL) { 108 _subgraph_object_klasses = 109 new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, true); 110 } 111 112 assert(relocated_k->is_shared(), "must be a shared class"); 113 114 if (_k == relocated_k) { 115 // Don't add the Klass containing the sub-graph to it's own klass 116 // initialization list. 
117 return; 118 } 119 120 if (relocated_k->is_instance_klass()) { 121 assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(), 122 "must be boot class"); 123 // SystemDictionary::xxx_klass() are not updated, need to check 124 // the original Klass* 125 if (orig_k == SystemDictionary::String_klass() || 126 orig_k == SystemDictionary::Object_klass()) { 127 // Initialized early during VM initialization. No need to be added 128 // to the sub-graph object class list. 129 return; 130 } 131 } else if (relocated_k->is_objArray_klass()) { 132 Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass(); 133 if (abk->is_instance_klass()) { 134 assert(InstanceKlass::cast(abk)->is_shared_boot_class(), 135 "must be boot class"); 136 } 137 if (relocated_k == Universe::objectArrayKlassObj()) { 138 // Initialized early during Universe::genesis. No need to be added 139 // to the list. 140 return; 141 } 142 } else { 143 assert(relocated_k->is_typeArray_klass(), "must be"); 144 // Primitive type arrays are created early during Universe::genesis. 145 return; 146 } 147 148 _subgraph_object_klasses->append_if_missing(relocated_k); 149 } 150 151 // Initialize an archived subgraph_info_record from the given KlassSubGraphInfo. 
152 void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) { 153 _k = info->klass(); 154 _next = NULL; 155 _entry_field_records = NULL; 156 _subgraph_klasses = NULL; 157 158 // populate the entry fields 159 GrowableArray<juint>* entry_fields = info->subgraph_entry_fields(); 160 if (entry_fields != NULL) { 161 int num_entry_fields = entry_fields->length(); 162 assert(num_entry_fields % 2 == 0, "sanity"); 163 _entry_field_records = 164 MetaspaceShared::new_ro_array<juint>(num_entry_fields); 165 for (int i = 0 ; i < num_entry_fields; i++) { 166 _entry_field_records->at_put(i, entry_fields->at(i)); 167 } 168 } 169 170 // the Klasses of the objects in the sub-graphs 171 GrowableArray<Klass*>* subgraph_klasses = info->subgraph_object_klasses(); 172 if (subgraph_klasses != NULL) { 173 int num_subgraphs_klasses = subgraph_klasses->length(); 174 _subgraph_klasses = 175 MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses); 176 for (int i = 0; i < num_subgraphs_klasses; i++) { 177 Klass* subgraph_k = subgraph_klasses->at(i); 178 if (log_is_enabled(Info, cds, heap)) { 179 ResourceMark rm; 180 log_info(cds, heap)( 181 "Archived object klass (%d): %s in %s sub-graphs", 182 i, subgraph_k->external_name(), _k->external_name()); 183 } 184 _subgraph_klasses->at_put(i, subgraph_k); 185 } 186 } 187 } 188 189 // Build the records of archived subgraph infos, which include: 190 // - Entry points to all subgraphs from the containing class mirror. The entry 191 // points are static fields in the mirror. For each entry point, the field 192 // offset and value are recorded in the sub-graph info. The value are stored 193 // back to the corresponding field at runtime. 194 // - A list of klasses that need to be loaded/initialized before archived 195 // java object sub-graph can be accessed at runtime. 196 // 197 // The records are saved in the archive file and reloaded at runtime. 
Currently 198 // there is only one class mirror (ArchivedModuleGraph) with archived sub-graphs. 199 // 200 // Layout of the archived subgraph info records: 201 // 202 // records_size | num_records | records* 203 // ArchivedKlassSubGraphInfoRecord | entry_fields | subgraph_object_klasses 204 size_t HeapShared::build_archived_subgraph_info_records(int num_records) { 205 // remember the start address 206 char* start_p = MetaspaceShared::read_only_space_top(); 207 208 // now populate the archived subgraph infos, which will be saved in the 209 // archive file 210 _archived_subgraph_info_records = 211 MetaspaceShared::new_ro_array<ArchivedKlassSubGraphInfoRecord>(num_records); 212 KlassSubGraphInfo* info = _subgraph_info_list; 213 int i = 0; 214 while (info != NULL) { 215 assert(i < _archived_subgraph_info_records->length(), "sanity"); 216 ArchivedKlassSubGraphInfoRecord* record = 217 _archived_subgraph_info_records->adr_at(i); 218 record->init(info); 219 info = info->next(); 220 i ++; 221 } 222 223 // _subgraph_info_list is no longer needed 224 delete _subgraph_info_list; 225 _subgraph_info_list = NULL; 226 227 char* end_p = MetaspaceShared::read_only_space_top(); 228 size_t records_size = end_p - start_p; 229 return records_size; 230 } 231 232 // Write the subgraph info records in the shared _ro region 233 void HeapShared::write_archived_subgraph_infos() { 234 assert(DumpSharedSpaces, "dump time only"); 235 236 Array<intptr_t>* records_header = MetaspaceShared::new_ro_array<intptr_t>(3); 237 238 _num_archived_subgraph_info_records = num_of_subgraph_infos(); 239 size_t records_size = build_archived_subgraph_info_records( 240 _num_archived_subgraph_info_records); 241 242 // Now write the header information: 243 // records_size, num_records, _archived_subgraph_info_records 244 assert(records_header != NULL, "sanity"); 245 intptr_t* p = (intptr_t*)(records_header->data()); 246 *p = (intptr_t)records_size; 247 p ++; 248 *p = (intptr_t)_num_archived_subgraph_info_records; 
249 p ++; 250 *p = (intptr_t)_archived_subgraph_info_records; 251 } 252 253 char* HeapShared::read_archived_subgraph_infos(char* buffer) { 254 Array<intptr_t>* records_header = (Array<intptr_t>*)buffer; 255 intptr_t* p = (intptr_t*)(records_header->data()); 256 size_t records_size = (size_t)(*p); 257 p ++; 258 _num_archived_subgraph_info_records = *p; 259 p ++; 260 _archived_subgraph_info_records = 261 (Array<ArchivedKlassSubGraphInfoRecord>*)(*p); 262 263 buffer = (char*)_archived_subgraph_info_records + records_size; 264 return buffer; 265 } 266 267 void HeapShared::initialize_from_archived_subgraph(Klass* k) { 268 if (!MetaspaceShared::open_archive_heap_region_mapped()) { 269 return; // nothing to do 270 } 271 272 if (_num_archived_subgraph_info_records == 0) { 273 return; // no subgraph info records 274 } 275 276 // Initialize from archived data. Currently only ArchivedModuleGraph 277 // has archived object subgraphs, which is used during VM initialization 278 // time when bootstraping the system modules. No lock is needed. 279 Thread* THREAD = Thread::current(); 280 for (int i = 0; i < _archived_subgraph_info_records->length(); i++) { 281 ArchivedKlassSubGraphInfoRecord* record = _archived_subgraph_info_records->adr_at(i); 282 if (record->klass() == k) { 283 int i; 284 // Found the archived subgraph info record for the requesting klass. 285 // Load/link/initialize the klasses of the objects in the subgraph. 286 // NULL class loader is used. 
287 Array<Klass*>* klasses = record->subgraph_klasses(); 288 if (klasses != NULL) { 289 for (i = 0; i < klasses->length(); i++) { 290 Klass* obj_k = klasses->at(i); 291 Klass* resolved_k = SystemDictionary::resolve_or_null( 292 (obj_k)->name(), THREAD); 293 if (resolved_k != obj_k) { 294 return; 295 } 296 if ((obj_k)->is_instance_klass()) { 297 InstanceKlass* ik = InstanceKlass::cast(obj_k); 298 ik->initialize(THREAD); 299 } else if ((obj_k)->is_objArray_klass()) { 300 ObjArrayKlass* oak = ObjArrayKlass::cast(obj_k); 301 oak->initialize(THREAD); 302 } 303 } 304 } 305 306 if (HAS_PENDING_EXCEPTION) { 307 CLEAR_PENDING_EXCEPTION; 308 // None of the field value will be set if there was an exception. 309 // The java code will not see any of the archived objects in the 310 // subgraphs referenced from k in this case. 311 return; 312 } 313 314 // Load the subgraph entry fields from the record and store them back to 315 // the corresponding fields within the mirror. 316 oop m = k->java_mirror(); 317 Array<juint>* entry_field_records = record->entry_field_records(); 318 if (entry_field_records != NULL) { 319 int efr_len = entry_field_records->length(); 320 assert(efr_len % 2 == 0, "sanity"); 321 for (i = 0; i < efr_len;) { 322 int field_offset = entry_field_records->at(i); 323 // The object refereced by the field becomes 'known' by GC from this 324 // point. All objects in the subgraph reachable from the object are 325 // also 'known' by GC. 326 oop v = MetaspaceShared::materialize_archived_object( 327 entry_field_records->at(i+1)); 328 m->obj_field_put(field_offset, v); 329 i += 2; 330 } 331 } 332 333 // Done. Java code can see the archived sub-graphs referenced from k's 334 // mirror after this point. 
335 return; 336 } 337 } 338 } 339 340 class WalkOopAndArchiveClosure: public BasicOopIterateClosure { 341 int _level; 342 KlassSubGraphInfo* _subgraph_info; 343 oop _orig_referencing_obj; 344 oop _archived_referencing_obj; 345 public: 346 WalkOopAndArchiveClosure(int level, KlassSubGraphInfo* subgraph_info, 347 oop orig, oop archived) : _level(level), 348 _subgraph_info(subgraph_info), 349 _orig_referencing_obj(orig), 350 _archived_referencing_obj(archived) {} 351 void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); } 352 void do_oop( oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); } 353 354 protected: 355 template <class T> void do_oop_work(T *p) { 356 oop obj = RawAccess<>::oop_load(p); 357 if (!CompressedOops::is_null(obj)) { 358 // A java.lang.Class instance can not be included in an archived 359 // object sub-graph. 360 if (java_lang_Class::is_instance(obj)) { 361 log_error(cds, heap)("Unknown java.lang.Class object is in the archived sub-graph\n"); 362 vm_exit(1); 363 } 364 365 LogTarget(Debug, cds, heap) log; 366 LogStream ls(log); 367 outputStream* out = &ls; 368 { 369 ResourceMark rm; 370 log.print("(%d) %s <--- referenced from: %s", 371 _level, obj->klass()->external_name(), 372 CompressedOops::is_null(_orig_referencing_obj) ? 
373 "" : _orig_referencing_obj->klass()->external_name()); 374 obj->print_on(out); 375 } 376 377 if (MetaspaceShared::is_archive_object(obj)) { 378 // The current oop is an archived oop, nothing needs to be done 379 log.print("--- object is already archived ---"); 380 return; 381 } 382 383 size_t field_delta = pointer_delta( 384 p, _orig_referencing_obj, sizeof(char)); 385 T* new_p = (T*)(address(_archived_referencing_obj) + field_delta); 386 oop archived = MetaspaceShared::find_archived_heap_object(obj); 387 if (archived != NULL) { 388 // There is an archived copy existing, update reference to point 389 // to the archived copy 390 RawAccess<IS_NOT_NULL>::oop_store(new_p, archived); 391 log.print( 392 "--- found existing archived copy, store archived " PTR_FORMAT " in " PTR_FORMAT, 393 p2i(archived), p2i(new_p)); 394 return; 395 } 396 397 int l = _level + 1; 398 Thread* THREAD = Thread::current(); 399 // Archive the current oop before iterating through its references 400 archived = MetaspaceShared::archive_heap_object(obj, THREAD); 401 if (archived == NULL) { 402 ResourceMark rm; 403 LogTarget(Error, cds, heap) log_err; 404 LogStream ls_err(log_err); 405 outputStream* out_err = &ls_err; 406 log_err.print("Failed to archive %s object (" 407 PTR_FORMAT "), size[" SIZE_FORMAT "] in sub-graph", 408 obj->klass()->external_name(), p2i(obj), (size_t)obj->size()); 409 obj->print_on(out_err); 410 vm_exit(1); 411 } 412 assert(MetaspaceShared::is_archive_object(archived), "must be archived"); 413 log.print("=== archiving oop " PTR_FORMAT " ==> " PTR_FORMAT, 414 p2i(obj), p2i(archived)); 415 416 // Following the references in the current oop and archive any 417 // encountered objects during the process 418 WalkOopAndArchiveClosure walker(l, _subgraph_info, obj, archived); 419 obj->oop_iterate(&walker); 420 421 // Update the reference in the archived copy of the referencing object 422 RawAccess<IS_NOT_NULL>::oop_store(new_p, archived); 423 log.print("=== store archived " 
PTR_FORMAT " in " PTR_FORMAT, 424 p2i(archived), p2i(new_p)); 425 426 // Add the klass to the list of classes that need to be loaded before 427 // module system initialization 428 Klass *orig_k = obj->klass(); 429 Klass *relocated_k = archived->klass(); 430 _subgraph_info->add_subgraph_object_klass(orig_k, relocated_k); 431 } 432 } 433 }; 434 435 // 436 // Start from the given static field in a java mirror and archive the 437 // complete sub-graph of java heap objects that are reached directly 438 // or indirectly from the starting object by following references. 439 // Currently, only ArchivedModuleGraph class instance (mirror) has archived 440 // object subgraphs. Sub-graph archiving restrictions (current): 441 // 442 // - All classes of objects in the archived sub-graph (including the 443 // entry class) must be boot class only. 444 // - No java.lang.Class instance (java mirror) can be included inside 445 // an archived sub-graph. Mirror can only be the sub-graph entry object. 446 // 447 // The Java heap object sub-graph archiving process (see 448 // WalkOopAndArchiveClosure): 449 // 450 // 1) Java object sub-graph archiving starts from a given static field 451 // within a Class instance (java mirror). If the static field is a 452 // refererence field and points to a non-null java object, proceed to 453 // the next step. 454 // 455 // 2) Archives the referenced java object. If an archived copy of the 456 // current object already exists, updates the pointer in the archived 457 // copy of the referencing object to point to the current archived object. 458 // Otherwise, proceed to the next step. 459 // 460 // 3) Follows all references within the current java object and recursively 461 // archive the sub-graph of objects starting from each reference. 462 // 463 // 4) Updates the pointer in the archived copy of referencing object to 464 // point to the current archived object. 
465 // 466 // 5) The Klass of the current java object is added to the list of Klasses 467 // for loading and initialzing before any object in the archived graph can 468 // be accessed at runtime. 469 // 470 void HeapShared::archive_reachable_objects_from_static_field(Klass *k, 471 int field_offset, 472 BasicType field_type, 473 TRAPS) { 474 assert(DumpSharedSpaces, "dump time only"); 475 assert(k->is_instance_klass(), "sanity"); 476 assert(InstanceKlass::cast(k)->is_shared_boot_class(), 477 "must be boot class"); 478 479 oop m = k->java_mirror(); 480 oop archived_m = MetaspaceShared::find_archived_heap_object(m); 481 if (CompressedOops::is_null(archived_m)) { 482 return; 483 } 484 485 if (field_type == T_OBJECT || field_type == T_ARRAY) { 486 // obtain k's subGraph Info 487 KlassSubGraphInfo* subgraph_info = get_subgraph_info(k); 488 489 // get the object referenced by the field 490 oop f = m->obj_field(field_offset); 491 if (!CompressedOops::is_null(f)) { 492 LogTarget(Debug, cds, heap) log; 493 LogStream ls(log); 494 outputStream* out = &ls; 495 log.print("Start from: "); 496 f->print_on(out); 497 498 // get the archived copy of the field referenced object 499 oop af = MetaspaceShared::archive_heap_object(f, THREAD); 500 if (af == NULL) { 501 // Skip archiving the sub-graph referenced from the current entry field. 502 ResourceMark rm; 503 log_info(cds, heap)( 504 "Cannot archive the sub-graph referenced from %s object (" 505 PTR_FORMAT ") size[" SIZE_FORMAT "], skipped.", 506 f->klass()->external_name(), p2i(f), (size_t)f->size()); 507 return; 508 } 509 if (!MetaspaceShared::is_archive_object(f)) { 510 WalkOopAndArchiveClosure walker(1, subgraph_info, f, af); 511 f->oop_iterate(&walker); 512 } 513 514 // The field value is not preserved in the archived mirror. 515 // Record the field as a new subGraph entry point. The recorded 516 // information is restored from the archive at runtime. 
517 subgraph_info->add_subgraph_entry_field(field_offset, af); 518 Klass *relocated_k = af->klass(); 519 Klass *orig_k = f->klass(); 520 subgraph_info->add_subgraph_object_klass(orig_k, relocated_k); 521 ResourceMark rm; 522 log_info(cds, heap)( 523 "Archived the sub-graph referenced from %s object " PTR_FORMAT, 524 f->klass()->external_name(), p2i(f)); 525 } else { 526 // The field contains null, we still need to record the entry point, 527 // so it can be restored at runtime. 528 subgraph_info->add_subgraph_entry_field(field_offset, NULL); 529 } 530 } else { 531 ShouldNotReachHere(); 532 } 533 } 534 535 struct ArchivableStaticFieldInfo { 536 const char* class_name; 537 const char* field_name; 538 InstanceKlass* klass; 539 int offset; 540 BasicType type; 541 }; 542 543 // If you add new entries to this table, you should know what you're doing! 544 static ArchivableStaticFieldInfo archivable_static_fields[] = { 545 {"jdk/internal/module/ArchivedModuleGraph", "archivedSystemModules"}, 546 {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleFinder"}, 547 {"jdk/internal/module/ArchivedModuleGraph", "archivedMainModule"}, 548 {"jdk/internal/module/ArchivedModuleGraph", "archivedConfiguration"}, 549 {"java/util/ImmutableCollections$ListN", "EMPTY_LIST"}, 550 {"java/util/ImmutableCollections$MapN", "EMPTY_MAP"}, 551 {"java/util/ImmutableCollections$SetN", "EMPTY_SET"}, 552 {"java/lang/Integer$IntegerCache", "archivedCache"}, 553 {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"}, 554 }; 555 556 const static int num_archivable_static_fields = sizeof(archivable_static_fields) / sizeof(ArchivableStaticFieldInfo); 557 558 class ArchivableStaticFieldFinder: public FieldClosure { 559 InstanceKlass* _ik; 560 Symbol* _field_name; 561 bool _found; 562 int _offset; 563 BasicType _type; 564 public: 565 ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) : 566 _ik(ik), _field_name(field_name), _found(false), _offset(-1), _type(T_ILLEGAL) {} 567 568 
virtual void do_field(fieldDescriptor* fd) { 569 if (fd->name() == _field_name) { 570 assert(!_found, "fields cannot be overloaded"); 571 _found = true; 572 _offset = fd->offset(); 573 _type = fd->field_type(); 574 assert(_type == T_OBJECT || _type == T_ARRAY, "can archive only obj or array fields"); 575 } 576 } 577 bool found() { return _found; } 578 int offset() { return _offset; } 579 BasicType type() { return _type; } 580 }; 581 582 void HeapShared::init_archivable_static_fields(Thread* THREAD) { 583 for (int i = 0; i < num_archivable_static_fields; i++) { 584 ArchivableStaticFieldInfo* info = &archivable_static_fields[i]; 585 TempNewSymbol class_name = SymbolTable::new_symbol(info->class_name, THREAD); 586 TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name, THREAD); 587 588 Klass* k = SystemDictionary::resolve_or_null(class_name, THREAD); 589 assert(k != NULL && !HAS_PENDING_EXCEPTION, "class must exist"); 590 InstanceKlass* ik = InstanceKlass::cast(k); 591 592 ArchivableStaticFieldFinder finder(ik, field_name); 593 ik->do_local_static_fields(&finder); 594 assert(finder.found(), "field must exist"); 595 596 info->klass = ik; 597 info->offset = finder.offset(); 598 info->type = finder.type(); 599 } 600 } 601 602 void HeapShared::archive_module_graph_objects(Thread* THREAD) { 603 for (int i = 0; i < num_archivable_static_fields; i++) { 604 ArchivableStaticFieldInfo* info = &archivable_static_fields[i]; 605 archive_reachable_objects_from_static_field(info->klass, info->offset, info->type, CHECK); 606 } 607 } 608 609 // At dump-time, find the location of all the non-null oop pointers in an archived heap 610 // region. This way we can quickly relocate all the pointers without using 611 // BasicOopIterateClosure at runtime. 
612 class FindEmbeddedNonNullPointers: public BasicOopIterateClosure { 613 narrowOop* _start; 614 BitMap *_oopmap; 615 int _num_total_oops; 616 int _num_null_oops; 617 public: 618 FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap) 619 : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {} 620 621 virtual bool should_verify_oops(void) { 622 return false; 623 } 624 virtual void do_oop(narrowOop* p) { 625 _num_total_oops ++; 626 narrowOop v = *p; 627 if (!CompressedOops::is_null(v)) { 628 size_t idx = p - _start; 629 _oopmap->set_bit(idx); 630 } else { 631 _num_null_oops ++; 632 } 633 } 634 virtual void do_oop(oop *p) { 635 ShouldNotReachHere(); 636 } 637 int num_total_oops() const { return _num_total_oops; } 638 int num_null_oops() const { return _num_null_oops; } 639 }; 640 641 ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) { 642 assert(UseCompressedOops, "must be"); 643 size_t num_bits = region.byte_size() / sizeof(narrowOop); 644 ResourceBitMap oopmap(num_bits); 645 646 HeapWord* p = region.start(); 647 HeapWord* end = region.end(); 648 FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap); 649 650 int num_objs = 0; 651 while (p < end) { 652 oop o = (oop)p; 653 o->oop_iterate(&finder); 654 p += o->size(); 655 ++ num_objs; 656 } 657 658 log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d", 659 num_objs, finder.num_total_oops(), finder.num_null_oops()); 660 return oopmap; 661 } 662 663 void HeapShared::init_narrow_oop_decoding(address base, int shift) { 664 _narrow_oop_base = base; 665 _narrow_oop_shift = shift; 666 } 667 668 // Patch all the embedded oop pointers inside an archived heap region, 669 // to be consistent with the runtime oop encoding. 
670 class PatchEmbeddedPointers: public BitMapClosure { 671 narrowOop* _start; 672 673 public: 674 PatchEmbeddedPointers(narrowOop* start) : _start(start) {} 675 676 bool do_bit(size_t offset) { 677 narrowOop* p = _start + offset; 678 narrowOop v = *p; 679 assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time"); 680 oop o = HeapShared::decode_with_archived_oop_encoding_mode(v); 681 RawAccess<IS_NOT_NULL>::oop_store(p, o); 682 return true; 683 } 684 }; 685 686 void HeapShared::patch_archived_heap_embedded_pointers(MemRegion region, address oopmap, 687 size_t oopmap_size_in_bits) { 688 BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits); 689 690 #ifndef PRODUCT 691 ResourceMark rm; 692 ResourceBitMap checkBm = calculate_oopmap(region); 693 assert(bm.is_same(checkBm), "sanity"); 694 #endif 695 696 PatchEmbeddedPointers patcher((narrowOop*)region.start()); 697 bm.iterate(&patcher); 698 } 699 700 #endif // INCLUDE_CDS_JAVA_HEAP