31 #include "classfile/symbolTable.hpp"
32 #include "classfile/systemDictionary.hpp"
33 #include "classfile/systemDictionaryShared.hpp"
34 #include "classfile/vmSymbols.hpp"
35 #include "gc/shared/gcLocker.hpp"
36 #include "logging/log.hpp"
37 #include "logging/logMessage.hpp"
38 #include "logging/logStream.hpp"
39 #include "memory/archiveBuilder.hpp"
40 #include "memory/archiveUtils.hpp"
41 #include "memory/filemap.hpp"
42 #include "memory/heapShared.inline.hpp"
43 #include "memory/iterator.inline.hpp"
44 #include "memory/metadataFactory.hpp"
45 #include "memory/metaspaceClosure.hpp"
46 #include "memory/metaspaceShared.hpp"
47 #include "memory/resourceArea.hpp"
48 #include "memory/universe.hpp"
49 #include "oops/compressedOops.inline.hpp"
50 #include "oops/fieldStreams.inline.hpp"
51 #include "oops/oop.inline.hpp"
52 #include "runtime/fieldDescriptor.inline.hpp"
53 #include "runtime/javaCalls.hpp"
54 #include "runtime/safepointVerifiers.hpp"
55 #include "utilities/bitMap.inline.hpp"
56 #if INCLUDE_G1GC
57 #include "gc/g1/g1CollectedHeap.hpp"
58 #endif
59
60 #if INCLUDE_CDS_JAVA_HEAP
61
// State flags and decoding parameters for the archived (CDS) Java heap regions.
bool HeapShared::_closed_archive_heap_region_mapped = false; // set when the closed region maps successfully
bool HeapShared::_open_archive_heap_region_mapped = false;   // set when the open region maps successfully
bool HeapShared::_archive_heap_region_fixed = false;         // set by fixup_mapped_heap_regions()
address HeapShared::_narrow_oop_base;                        // narrow-oop base, set by init_narrow_oop_decoding()
int HeapShared::_narrow_oop_shift;                           // narrow-oop shift, set by init_narrow_oop_decoding()
DumpedInternedStrings *HeapShared::_dumped_interned_strings = NULL;
68
69 //
70 // If you add new entries to the following tables, you should know what you're doing!
71 //
72
87 {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
88 {"java/util/ImmutableCollections", "archivedObjects"},
89 {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
90 {"jdk/internal/math/FDBigInteger", "archivedCaches"},
91 };
92
// Entry fields for subgraphs archived in the open archive heap region (full module graph).
// These sub-graphs are only archived/restored when MetaspaceShared::use_full_module_graph()
// is true (see archive_object_subgraphs callers below).
static ArchivableStaticFieldInfo fmg_open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
  {"jdk/internal/module/ArchivedBootLayer", "archivedBootLayer"},
  {"java/lang/Module$ArchivedData", "archivedData"},
};
99
// Number of entries in each of the tables above, derived from the array sizes.
const static int num_closed_archive_subgraph_entry_fields =
  sizeof(closed_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
const static int num_open_archive_subgraph_entry_fields =
  sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
const static int num_fmg_open_archive_subgraph_entry_fields =
  sizeof(fmg_open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
106
107 ////////////////////////////////////////////////////////////////
108 //
109 // Java heap object archiving support
110 //
111 ////////////////////////////////////////////////////////////////
112 void HeapShared::fixup_mapped_heap_regions() {
113 FileMapInfo *mapinfo = FileMapInfo::current_info();
114 mapinfo->fixup_mapped_heap_regions();
115 set_archive_heap_region_fixed();
116 SystemDictionaryShared::update_archived_mirror_native_pointers();
117 }
118
// Hash function for ArchivedObjectCache: uses the object's identity hash.
unsigned HeapShared::oop_hash(oop const& p) {
  assert(!p->mark().has_bias_pattern(),
         "this object should never have been locked"); // so identity_hash() won't safepoint
  unsigned hash = (unsigned)p->identity_hash();
  return hash;
}
125
126 static void reset_states(oop obj, TRAPS) {
127 Handle h_obj(THREAD, obj);
128 InstanceKlass* klass = InstanceKlass::cast(obj->klass());
129 TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
130 Symbol* method_sig = vmSymbols::void_method_signature();
131
132 while (klass != NULL) {
133 Method* method = klass->find_method(method_name, method_sig);
134 if (method != NULL) {
135 assert(method->is_private(), "must be");
// Before dumping, call the (private) resetArchivedStates() method, if present,
// on the platform and system class loaders so they do not carry runtime-only
// state into the archive. See reset_states() above.
void HeapShared::reset_archived_object_states(TRAPS) {
  assert(DumpSharedSpaces, "dump-time only");
  log_debug(cds)("Resetting platform loader");
  reset_states(SystemDictionary::java_platform_loader(), THREAD);
  log_debug(cds)("Resetting system loader");
  reset_states(SystemDictionary::java_system_loader(), THREAD);
}
155
156 HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
157 oop HeapShared::find_archived_heap_object(oop obj) {
158 assert(DumpSharedSpaces, "dump-time only");
159 ArchivedObjectCache* cache = archived_object_cache();
160 oop* p = cache->get(obj);
161 if (p != NULL) {
162 return *p;
163 } else {
164 return NULL;
165 }
166 }
167
// Copy obj into the G1 archive space and record the original->archived mapping
// in the ArchivedObjectCache. Returns the archived copy; returns NULL if the
// object is too large for archive allocation; exits the VM if the archive
// region runs out of space.
oop HeapShared::archive_heap_object(oop obj, Thread* THREAD) {
  assert(DumpSharedSpaces, "dump-time only");

  oop ao = find_archived_heap_object(obj);
  if (ao != NULL) {
    // already archived
    return ao;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), (size_t)obj->size());
    return NULL;
  }

  oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
  if (archived_oop != NULL) {
    // Raw word copy, then fix the klass pointer to the relocated (archived) Klass.
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
    MetaspaceShared::relocate_klass_ptr(archived_oop);
    // Reinitialize markword to remove age/marking/locking/etc.
    //
    // We need to retain the identity_hash, because it may have been used by some hashtables
    // in the shared heap. This also has the side effect of pre-initializing the
    // identity_hash for all shared objects, so they are less likely to be written
    // into during run time, increasing the potential of memory sharing.
    int hash_original = obj->identity_hash();
    archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original));
    assert(archived_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
    assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);

    ArchivedObjectCache* cache = archived_object_cache();
    cache->put(obj, archived_oop);
    log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT,
                         p2i(obj), p2i(archived_oop));
  } else {
    log_error(cds, heap)(
      "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
      p2i(obj));
    vm_exit(1);
  }
  return archived_oop;
}
213
214 oop HeapShared::materialize_archived_object(narrowOop v) {
215 assert(archive_heap_region_fixed(),
216 "must be called after archive heap regions are fixed");
217 if (!CompressedOops::is_null(v)) {
218 oop obj = HeapShared::decode_from_archive(v);
219 return G1CollectedHeap::heap()->materialize_archived_object(obj);
220 }
221 return NULL;
222 }
223
224 void HeapShared::archive_klass_objects(Thread* THREAD) {
240
241 void HeapShared::run_full_gc_in_vm_thread() {
242 if (is_heap_object_archiving_allowed()) {
243 // Avoid fragmentation while archiving heap objects.
244 // We do this inside a safepoint, so that no further allocation can happen after GC
245 // has finished.
246 if (GCLocker::is_active()) {
247 // Just checking for safety ...
248 // This should not happen during -Xshare:dump. If you see this, probably the Java core lib
249 // has been modified such that JNI code is executed in some clean up threads after
250 // we have finished class loading.
251 log_warning(cds)("GC locker is held, unable to start extra compacting GC. This may produce suboptimal results.");
252 } else {
253 log_info(cds)("Run GC ...");
254 Universe::heap()->collect_as_vm_thread(GCCause::_archive_time_gc);
255 log_info(cds)("Run GC done");
256 }
257 }
258 }
259
260 void HeapShared::archive_java_heap_objects(GrowableArray<MemRegion> *closed,
261 GrowableArray<MemRegion> *open) {
262 if (!is_heap_object_archiving_allowed()) {
263 log_info(cds)(
264 "Archived java heap is not supported as UseG1GC, "
265 "UseCompressedOops and UseCompressedClassPointers are required."
266 "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
267 BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
268 BOOL_TO_STR(UseCompressedClassPointers));
269 return;
270 }
271
272 G1HeapVerifier::verify_ready_for_archiving();
273
274 {
275 NoSafepointVerifier nsv;
276
277 // Cache for recording where the archived objects are copied to
278 create_archived_object_cache();
279
280 log_info(cds)("Dumping objects to closed archive heap region ...");
281 copy_closed_archive_heap_objects(closed);
282
283 log_info(cds)("Dumping objects to open archive heap region ...");
284 copy_open_archive_heap_objects(open);
285
286 if (MetaspaceShared::use_full_module_graph()) {
287 ClassLoaderDataShared::init_archived_oops();
288 }
289
290 destroy_archived_object_cache();
291 }
292
293 G1HeapVerifier::verify_archive_regions();
294 }
295
296 void HeapShared::copy_closed_archive_heap_objects(
297 GrowableArray<MemRegion> * closed_archive) {
298 assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");
299
300 Thread* THREAD = Thread::current();
301 G1CollectedHeap::heap()->begin_archive_alloc_range();
302
303 // Archive interned string objects
304 StringTable::write_to_archive(_dumped_interned_strings);
305
306 archive_object_subgraphs(closed_archive_subgraph_entry_fields,
307 num_closed_archive_subgraph_entry_fields,
308 true /* is_closed_archive */,
309 false /* is_full_module_graph */,
318 assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");
319
320 Thread* THREAD = Thread::current();
321 G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);
322
323 java_lang_Class::archive_basic_type_mirrors(THREAD);
324
325 archive_klass_objects(THREAD);
326
327 archive_object_subgraphs(open_archive_subgraph_entry_fields,
328 num_open_archive_subgraph_entry_fields,
329 false /* is_closed_archive */,
330 false /* is_full_module_graph */,
331 THREAD);
332 if (MetaspaceShared::use_full_module_graph()) {
333 archive_object_subgraphs(fmg_open_archive_subgraph_entry_fields,
334 num_fmg_open_archive_subgraph_entry_fields,
335 false /* is_closed_archive */,
336 true /* is_full_module_graph */,
337 THREAD);
338 }
339
340 G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
341 os::vm_allocation_granularity());
342 }
343
// Record the narrow-oop encoding (base/shift) used for decoding archived
// heap references (see decode_from_archive() usage above).
void HeapShared::init_narrow_oop_decoding(address base, int shift) {
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}
348
349 //
350 // Subgraph archiving support
351 //
352 HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
353 HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_table;
354
355 // Get the subgraph_info for Klass k. A new subgraph_info is created if
356 // there is no existing one for k. The subgraph_info records the relocated
357 // Klass* of the original k.
358 KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
359 assert(DumpSharedSpaces, "dump time only");
360 bool created;
361 Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
362 KlassSubGraphInfo* info =
363 _dump_time_subgraph_info_table->put_if_absent(relocated_k, KlassSubGraphInfo(relocated_k, is_full_module_graph),
364 &created);
365 assert(created, "must not initialize twice");
366 return info;
367 }
368
369 KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
370 assert(DumpSharedSpaces, "dump time only");
371 Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
372 KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
373 assert(info != NULL, "must have been initialized");
374 return info;
375 }
376
// Add an entry field to the current KlassSubGraphInfo.
// Each entry is recorded as a triple of juints:
//   [static_field_offset, narrow-oop value of v, is_closed_archive ? 1 : 0]
// (consumed in triples by initialize_from_archived_subgraph()).
void KlassSubGraphInfo::add_subgraph_entry_field(
    int static_field_offset, oop v, bool is_closed_archive) {
  assert(DumpSharedSpaces, "dump time only");
  if (_subgraph_entry_fields == NULL) {
    // Lazily allocated, C-heap backed.
    _subgraph_entry_fields =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, mtClass);
  }
  _subgraph_entry_fields->append((juint)static_field_offset);
  _subgraph_entry_fields->append(CompressedOops::narrow_oop_value(v));
  _subgraph_entry_fields->append(is_closed_archive ? 1 : 0);
}
389
390 // Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
391 // Only objects of boot classes can be included in sub-graph.
392 void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k, Klass *relocated_k) {
393 assert(DumpSharedSpaces, "dump time only");
394 assert(relocated_k == MetaspaceShared::get_relocated_klass(orig_k),
395 "must be the relocated Klass in the shared space");
396
397 if (_subgraph_object_klasses == NULL) {
398 _subgraph_object_klasses =
399 new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, mtClass);
400 }
401
402 assert(ArchiveBuilder::singleton()->is_in_buffer_space(relocated_k), "must be a shared class");
403
404 if (_k == relocated_k) {
405 // Don't add the Klass containing the sub-graph to it's own klass
406 // initialization list.
426 }
427 if (relocated_k == Universe::objectArrayKlassObj()) {
428 // Initialized early during Universe::genesis. No need to be added
429 // to the list.
430 return;
431 }
432 } else {
433 assert(relocated_k->is_typeArray_klass(), "must be");
434 // Primitive type arrays are created early during Universe::genesis.
435 return;
436 }
437
438 if (log_is_enabled(Debug, cds, heap)) {
439 if (!_subgraph_object_klasses->contains(relocated_k)) {
440 ResourceMark rm;
441 log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
442 }
443 }
444
445 _subgraph_object_klasses->append_if_missing(relocated_k);
446 }
447
448 // Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
449 void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
450 _k = info->klass();
451 _entry_field_records = NULL;
452 _subgraph_object_klasses = NULL;
453 _is_full_module_graph = info->is_full_module_graph();
454
455 // populate the entry fields
456 GrowableArray<juint>* entry_fields = info->subgraph_entry_fields();
457 if (entry_fields != NULL) {
458 int num_entry_fields = entry_fields->length();
459 assert(num_entry_fields % 3 == 0, "sanity");
460 _entry_field_records =
461 MetaspaceShared::new_ro_array<juint>(num_entry_fields);
462 for (int i = 0 ; i < num_entry_fields; i++) {
463 _entry_field_records->at_put(i, entry_fields->at(i));
464 }
465 }
466
467 // the Klasses of the objects in the sub-graphs
468 GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
469 if (subgraph_object_klasses != NULL) {
470 int num_subgraphs_klasses = subgraph_object_klasses->length();
471 _subgraph_object_klasses =
472 MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses);
473 for (int i = 0; i < num_subgraphs_klasses; i++) {
474 Klass* subgraph_k = subgraph_object_klasses->at(i);
475 if (log_is_enabled(Info, cds, heap)) {
476 ResourceMark rm;
477 log_info(cds, heap)(
478 "Archived object klass %s (%2d) => %s",
479 _k->external_name(), i, subgraph_k->external_name());
480 }
481 _subgraph_object_klasses->at_put(i, subgraph_k);
514 // - A list of klasses that need to be loaded/initialized before archived
515 // java object sub-graph can be accessed at runtime.
516 void HeapShared::write_subgraph_info_table() {
517 // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
518 DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
519 CompactHashtableStats stats;
520
521 _run_time_subgraph_info_table.reset();
522
523 CompactHashtableWriter writer(d_table->_count, &stats);
524 CopyKlassSubGraphInfoToArchive copy(&writer);
525 d_table->iterate(©);
526
527 writer.dump(&_run_time_subgraph_info_table, "subgraphs");
528 }
529
// Read or write the run-time subgraph info table header, depending on whether
// the given SerializeClosure is a reading or writing closure.
void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) {
  _run_time_subgraph_info_table.serialize_header(soc);
}
533
// Restore the archived object sub-graph(s) rooted at static fields of k's
// mirror, using the ArchivedKlassSubGraphInfoRecord looked up in the run-time
// table. No-op when the open archive heap region is not mapped, or when k has
// no archived sub-graphs.
void HeapShared::initialize_from_archived_subgraph(Klass* k, TRAPS) {
  if (!open_archive_heap_region_mapped()) {
    return; // nothing to do
  }
  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

  // Initialize from archived data. Currently this is done only
  // during VM initialization time. No lock is needed.
  if (record != NULL) {
    // Skip records belonging to the full module graph when it is disabled.
    if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) {
      return;
    }

    int i;
    // Load/link/initialize the klasses of the objects in the subgraph.
    // NULL class loader is used.
    Array<Klass*>* klasses = record->subgraph_object_klasses();
    if (klasses != NULL) {
      for (i = 0; i < klasses->length(); i++) {
        Klass* obj_k = klasses->at(i);
        Klass* resolved_k = SystemDictionary::resolve_or_null(
                                              (obj_k)->name(), THREAD);
        if (resolved_k != obj_k) {
          // The class was re-defined (e.g. by a JVMTI agent), so the archived
          // sub-graph cannot be used; bail out without touching the mirror.
          assert(!SystemDictionary::is_well_known_klass(resolved_k),
                 "shared well-known classes must not be replaced by JVMTI ClassFileLoadHook");
          ResourceMark rm(THREAD);
          log_info(cds, heap)("Failed to load subgraph because %s was not loaded from archive",
                              resolved_k->external_name());
          return;
        }
        if ((obj_k)->is_instance_klass()) {
          InstanceKlass* ik = InstanceKlass::cast(obj_k);
          ik->initialize(THREAD);
        } else if ((obj_k)->is_objArray_klass()) {
          ObjArrayKlass* oak = ObjArrayKlass::cast(obj_k);
          oak->initialize(THREAD);
        }
      }
    }

    if (HAS_PENDING_EXCEPTION) {
      CLEAR_PENDING_EXCEPTION;
      // None of the field value will be set if there was an exception.
      // The java code will not see any of the archived objects in the
      // subgraphs referenced from k in this case.
      return;
    }

    // Load the subgraph entry fields from the record and store them back to
    // the corresponding fields within the mirror.
    oop m = k->java_mirror();
    Array<juint>* entry_field_records = record->entry_field_records();
    if (entry_field_records != NULL) {
      int efr_len = entry_field_records->length();
      assert(efr_len % 3 == 0, "sanity");
      // Records come in triples: [field_offset, narrowOop value, is_closed_archive].
      for (i = 0; i < efr_len;) {
        int field_offset = entry_field_records->at(i);
        narrowOop nv = CompressedOops::narrow_oop_cast(entry_field_records->at(i+1));
        int is_closed_archive = entry_field_records->at(i+2);
        oop v;
        if (is_closed_archive == 0) {
          // It's an archived object in the open archive heap regions, not shared.
          // The object referenced by the field becomes 'known' by GC from this
          // point. All objects in the subgraph reachable from the object are
          // also 'known' by GC.
          v = materialize_archived_object(nv);
        } else {
          // Shared object in the closed archive heap regions. Decode directly.
          assert(!CompressedOops::is_null(nv), "shared object is null");
          v = HeapShared::decode_from_archive(nv);
        }
        m->obj_field_put(field_offset, v);
        i += 3;

        log_debug(cds, heap)(" " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
      }

      // Done. Java code can see the archived sub-graphs referenced from k's
      // mirror after this point.
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT,
                            k->external_name(), p2i(k));
      }
    }
  }
}
624
625 class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
626 int _level;
627 bool _is_closed_archive;
628 bool _record_klasses_only;
629 KlassSubGraphInfo* _subgraph_info;
630 oop _orig_referencing_obj;
631 oop _archived_referencing_obj;
632 Thread* _thread;
633 public:
634 WalkOopAndArchiveClosure(int level,
635 bool is_closed_archive,
636 bool record_klasses_only,
637 KlassSubGraphInfo* subgraph_info,
638 oop orig, oop archived, TRAPS) :
639 _level(level), _is_closed_archive(is_closed_archive),
|
31 #include "classfile/symbolTable.hpp"
32 #include "classfile/systemDictionary.hpp"
33 #include "classfile/systemDictionaryShared.hpp"
34 #include "classfile/vmSymbols.hpp"
35 #include "gc/shared/gcLocker.hpp"
36 #include "logging/log.hpp"
37 #include "logging/logMessage.hpp"
38 #include "logging/logStream.hpp"
39 #include "memory/archiveBuilder.hpp"
40 #include "memory/archiveUtils.hpp"
41 #include "memory/filemap.hpp"
42 #include "memory/heapShared.inline.hpp"
43 #include "memory/iterator.inline.hpp"
44 #include "memory/metadataFactory.hpp"
45 #include "memory/metaspaceClosure.hpp"
46 #include "memory/metaspaceShared.hpp"
47 #include "memory/resourceArea.hpp"
48 #include "memory/universe.hpp"
49 #include "oops/compressedOops.inline.hpp"
50 #include "oops/fieldStreams.inline.hpp"
51 #include "oops/objArrayOop.hpp"
52 #include "oops/oop.inline.hpp"
53 #include "oops/oopHandle.hpp"
54 #include "runtime/fieldDescriptor.inline.hpp"
55 #include "runtime/init.hpp"
56 #include "runtime/javaCalls.hpp"
57 #include "runtime/safepointVerifiers.hpp"
58 #include "utilities/bitMap.inline.hpp"
59 #if INCLUDE_G1GC
60 #include "gc/g1/g1CollectedHeap.hpp"
61 #endif
62
63 #if INCLUDE_CDS_JAVA_HEAP
64
// State flags and decoding parameters for the archived (CDS) Java heap regions.
bool HeapShared::_closed_archive_heap_region_mapped = false; // set when the closed region maps successfully
bool HeapShared::_open_archive_heap_region_mapped = false;   // set when the open region maps successfully
bool HeapShared::_archive_heap_region_fixed = false;         // set by fixup_mapped_heap_regions()
address HeapShared::_narrow_oop_base;                        // narrow-oop base for decoding archived references
int HeapShared::_narrow_oop_shift;                           // narrow-oop shift for decoding archived references
DumpedInternedStrings *HeapShared::_dumped_interned_strings = NULL;
71
72 //
73 // If you add new entries to the following tables, you should know what you're doing!
74 //
75
90 {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
91 {"java/util/ImmutableCollections", "archivedObjects"},
92 {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
93 {"jdk/internal/math/FDBigInteger", "archivedCaches"},
94 };
95
// Entry fields for subgraphs archived in the open archive heap region (full module graph).
// Only archived/restored when MetaspaceShared::use_full_module_graph() is true.
static ArchivableStaticFieldInfo fmg_open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
  {"jdk/internal/module/ArchivedBootLayer", "archivedBootLayer"},
  {"java/lang/Module$ArchivedData", "archivedData"},
};
102
// Number of entries in each of the tables above, derived from the array sizes.
const static int num_closed_archive_subgraph_entry_fields =
  sizeof(closed_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
const static int num_open_archive_subgraph_entry_fields =
  sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
const static int num_fmg_open_archive_subgraph_entry_fields =
  sizeof(fmg_open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);

// Roots of the archived object sub-graphs:
// - _pending_roots: collected at dump time via append_root().
// - _dumptime_roots: the dump-time objArray holding the roots.
// - _runtime_roots_narrow: narrowOop of the archived roots array, recorded by
//   set_roots() and materialized in fixup_mapped_heap_regions().
// - _runtime_roots: OopHandle to the materialized runtime roots array.
static GrowableArrayCHeap<oop, mtClassShared>* _pending_roots = NULL;
static objArrayOop _dumptime_roots = NULL; // FIXME -- combine this with _runtime_roots??
static narrowOop _runtime_roots_narrow;
static OopHandle _runtime_roots;
114
115 ////////////////////////////////////////////////////////////////
116 //
117 // Java heap object archiving support
118 //
119 ////////////////////////////////////////////////////////////////
// Patch up the mapped heap regions after archive load; when the heap archive
// is mapped, materialize the roots array so HeapShared::roots() can be used.
void HeapShared::fixup_mapped_heap_regions() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();
  mapinfo->fixup_mapped_heap_regions();
  set_archive_heap_region_fixed();
  if (is_mapped()) {
    // Keep the roots array alive via a global OopHandle.
    _runtime_roots = OopHandle(Universe::vm_global(), HeapShared::materialize_archived_object(_runtime_roots_narrow));
    if (!MetaspaceShared::use_full_module_graph()) {
      // Need to remove all the archived java.lang.Module objects from HeapShared::roots().
      ClassLoaderDataShared::clear_archived_oops();
    }
  }
  SystemDictionaryShared::update_archived_mirror_native_pointers();
}
133
// Hash function for ArchivedObjectCache: uses the object's identity hash.
unsigned HeapShared::oop_hash(oop const& p) {
  assert(!p->mark().has_bias_pattern(),
         "this object should never have been locked"); // so identity_hash() won't safepoint
  unsigned hash = (unsigned)p->identity_hash();
  return hash;
}
140
141 static void reset_states(oop obj, TRAPS) {
142 Handle h_obj(THREAD, obj);
143 InstanceKlass* klass = InstanceKlass::cast(obj->klass());
144 TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
145 Symbol* method_sig = vmSymbols::void_method_signature();
146
147 while (klass != NULL) {
148 Method* method = klass->find_method(method_name, method_sig);
149 if (method != NULL) {
150 assert(method->is_private(), "must be");
// Before dumping, call the (private) resetArchivedStates() method, if present,
// on the platform and system class loaders so they do not carry runtime-only
// state into the archive. See reset_states() above.
void HeapShared::reset_archived_object_states(TRAPS) {
  assert(DumpSharedSpaces, "dump-time only");
  log_debug(cds)("Resetting platform loader");
  reset_states(SystemDictionary::java_platform_loader(), THREAD);
  log_debug(cds)("Resetting system loader");
  reset_states(SystemDictionary::java_system_loader(), THREAD);
}
170
171 HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
172 oop HeapShared::find_archived_heap_object(oop obj) {
173 assert(DumpSharedSpaces, "dump-time only");
174 ArchivedObjectCache* cache = archived_object_cache();
175 oop* p = cache->get(obj);
176 if (p != NULL) {
177 return *p;
178 } else {
179 return NULL;
180 }
181 }
182
183 int HeapShared::append_root(oop obj) {
184 assert(DumpSharedSpaces, "dump-time only");
185
186 // No GC should happen since we aren't scanning _pending_roots.
187 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
188
189 if (_pending_roots == NULL) {
190 _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
191 }
192
193 return _pending_roots->append(obj);
194 }
195
196 objArrayOop HeapShared::roots() {
197 if (DumpSharedSpaces) {
198 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
199 return _dumptime_roots;
200 } else {
201 assert(UseSharedSpaces, "must be");
202 objArrayOop roots = (objArrayOop)_runtime_roots.resolve();
203 assert(roots != NULL, "should have been initialized");
204 return roots;
205 }
206 }
207
// Record the narrowOop of the archived roots array; the actual oop is
// materialized later in fixup_mapped_heap_regions().
void HeapShared::set_roots(narrowOop roots) {
  assert(UseSharedSpaces, "runtime only");
  assert(open_archive_heap_region_mapped(), "must be");
  _runtime_roots_narrow = roots;
}
213
214 oop HeapShared::get_root(int index, bool clear) {
215 assert(index >= 0, "sanity");
216 if (DumpSharedSpaces) {
217 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
218 assert(_pending_roots != NULL, "sanity");
219 return _pending_roots->at(index);
220 } else {
221 assert(UseSharedSpaces, "must be");
222 assert(!_runtime_roots.is_empty(), "must have loaded shared heap");
223 oop result = roots()->obj_at(index);
224 if (clear) {
225 clear_root(index);
226 }
227 return result;
228 }
229 }
230
231 void HeapShared::clear_root(int index) {
232 assert(index >= 0, "sanity");
233 assert(UseSharedSpaces, "must be");
234 if (open_archive_heap_region_mapped()) {
235 if (log_is_enabled(Debug, cds, heap)) {
236 oop old = roots()->obj_at(index);
237 log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
238 }
239 roots()->obj_at_put(index, NULL);
240 }
241 }
242
// Copy obj into the G1 archive space and record the original->archived mapping
// in the ArchivedObjectCache. Returns the archived copy; returns NULL if the
// object is too large for archive allocation; exits the VM if the archive
// region runs out of space.
oop HeapShared::archive_heap_object(oop obj, Thread* THREAD) {
  assert(DumpSharedSpaces, "dump-time only");

  oop ao = find_archived_heap_object(obj);
  if (ao != NULL) {
    // already archived
    return ao;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), (size_t)obj->size());
    return NULL;
  }

  oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
  if (archived_oop != NULL) {
    // Raw word copy, then fix the klass pointer to the relocated (archived) Klass.
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
    MetaspaceShared::relocate_klass_ptr(archived_oop);
    // Reinitialize markword to remove age/marking/locking/etc.
    //
    // We need to retain the identity_hash, because it may have been used by some hashtables
    // in the shared heap. This also has the side effect of pre-initializing the
    // identity_hash for all shared objects, so they are less likely to be written
    // into during run time, increasing the potential of memory sharing.
    int hash_original = obj->identity_hash();
    archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original));
    assert(archived_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
    assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);

    ArchivedObjectCache* cache = archived_object_cache();
    cache->put(obj, archived_oop);
    if (log_is_enabled(Debug, cds, heap)) {
      ResourceMark rm;
      log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT " : %s",
                           p2i(obj), p2i(archived_oop), obj->klass()->external_name());
    }
  } else {
    log_error(cds, heap)(
      "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
      p2i(obj));
    vm_exit(1);
  }
  return archived_oop;
}
291
292 oop HeapShared::materialize_archived_object(narrowOop v) {
293 assert(archive_heap_region_fixed(),
294 "must be called after archive heap regions are fixed");
295 if (!CompressedOops::is_null(v)) {
296 oop obj = HeapShared::decode_from_archive(v);
297 return G1CollectedHeap::heap()->materialize_archived_object(obj);
298 }
299 return NULL;
300 }
301
302 void HeapShared::archive_klass_objects(Thread* THREAD) {
318
319 void HeapShared::run_full_gc_in_vm_thread() {
320 if (is_heap_object_archiving_allowed()) {
321 // Avoid fragmentation while archiving heap objects.
322 // We do this inside a safepoint, so that no further allocation can happen after GC
323 // has finished.
324 if (GCLocker::is_active()) {
325 // Just checking for safety ...
326 // This should not happen during -Xshare:dump. If you see this, probably the Java core lib
327 // has been modified such that JNI code is executed in some clean up threads after
328 // we have finished class loading.
329 log_warning(cds)("GC locker is held, unable to start extra compacting GC. This may produce suboptimal results.");
330 } else {
331 log_info(cds)("Run GC ...");
332 Universe::heap()->collect_as_vm_thread(GCCause::_archive_time_gc);
333 log_info(cds)("Run GC done");
334 }
335 }
336 }
337
338 // Returns an objArray that contains all the roots of the archived objects
339 void HeapShared::archive_java_heap_objects(GrowableArray<MemRegion> *closed,
340 GrowableArray<MemRegion> *open) {
341 if (!is_heap_object_archiving_allowed()) {
342 log_info(cds)(
343 "Archived java heap is not supported as UseG1GC, "
344 "UseCompressedOops and UseCompressedClassPointers are required."
345 "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
346 BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
347 BOOL_TO_STR(UseCompressedClassPointers));
348 return;
349 }
350
351 G1HeapVerifier::verify_ready_for_archiving();
352
353 {
354 NoSafepointVerifier nsv;
355
356 // Cache for recording where the archived objects are copied to
357 create_archived_object_cache();
358
359 log_info(cds)("Dumping objects to closed archive heap region ...");
360 copy_closed_archive_heap_objects(closed);
361
362 log_info(cds)("Dumping objects to open archive heap region ...");
363 copy_open_archive_heap_objects(open);
364
365 destroy_archived_object_cache();
366 }
367
368 G1HeapVerifier::verify_archive_regions();
369 }
370
371 void HeapShared::copy_closed_archive_heap_objects(
372 GrowableArray<MemRegion> * closed_archive) {
373 assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");
374
375 Thread* THREAD = Thread::current();
376 G1CollectedHeap::heap()->begin_archive_alloc_range();
377
378 // Archive interned string objects
379 StringTable::write_to_archive(_dumped_interned_strings);
380
381 archive_object_subgraphs(closed_archive_subgraph_entry_fields,
382 num_closed_archive_subgraph_entry_fields,
383 true /* is_closed_archive */,
384 false /* is_full_module_graph */,
393 assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");
394
395 Thread* THREAD = Thread::current();
396 G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);
397
398 java_lang_Class::archive_basic_type_mirrors(THREAD);
399
400 archive_klass_objects(THREAD);
401
402 archive_object_subgraphs(open_archive_subgraph_entry_fields,
403 num_open_archive_subgraph_entry_fields,
404 false /* is_closed_archive */,
405 false /* is_full_module_graph */,
406 THREAD);
407 if (MetaspaceShared::use_full_module_graph()) {
408 archive_object_subgraphs(fmg_open_archive_subgraph_entry_fields,
409 num_fmg_open_archive_subgraph_entry_fields,
410 false /* is_closed_archive */,
411 true /* is_full_module_graph */,
412 THREAD);
413 ClassLoaderDataShared::init_archived_oops();
414 }
415
416 copy_roots();
417
418 G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
419 os::vm_allocation_granularity());
420 }
421
// Copy _pending_roots into a newly allocated objArray in the archive region.
// The array object is constructed by hand (mark word, klass pointer, length)
// because the normal allocation paths cannot be used for archive allocations.
void HeapShared::copy_roots() {
  // A NULL _pending_roots (nothing recorded) still yields a zero-length array.
  int length = _pending_roots != NULL ? _pending_roots->length() : 0;
  int size = objArrayOopDesc::object_size(length);
  Klass *k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size);

  memset(mem, 0, size * BytesPerWord); // zero the whole object ('size' is in words, memset takes bytes)
  {
    // This is copied from MemAllocator::finish
    if (UseBiasedLocking) {
      oopDesc::set_mark(mem, k->prototype_header());
    } else {
      oopDesc::set_mark(mem, markWord::prototype());
    }
    // Release store: the klass must be visible before the object is published.
    oopDesc::release_set_klass(mem, k);
  }
  {
    // This is copied from ObjArrayAllocator::initialize
    arrayOopDesc::set_length(mem, length);
  }

  _dumptime_roots = (objArrayOop)mem;
  // Safe to dereference _pending_roots here: length is 0 when it is NULL,
  // so the loop body never executes in that case.
  for (int i = 0; i < length; i++) {
    _dumptime_roots->obj_at_put(i, _pending_roots->at(i));
  }
  log_info(cds)("archived obj roots[%d] = %d words, klass = %p, obj = %p", length, size, k, mem);
}
450
451 void HeapShared::init_narrow_oop_decoding(address base, int shift) {
452 _narrow_oop_base = base;
453 _narrow_oop_shift = shift;
454 }
455
456 //
457 // Subgraph archiving support
458 //
459 HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
460 HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_table;
461
462 // Get the subgraph_info for Klass k. A new subgraph_info is created if
463 // there is no existing one for k. The subgraph_info records the relocated
464 // Klass* of the original k.
465 KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
466 assert(DumpSharedSpaces, "dump time only");
467 bool created;
468 Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
469 KlassSubGraphInfo* info =
470 _dump_time_subgraph_info_table->put_if_absent(relocated_k, KlassSubGraphInfo(relocated_k, is_full_module_graph),
471 &created);
472 assert(created, "must not initialize twice");
473 return info;
474 }
475
476 KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
477 assert(DumpSharedSpaces, "dump time only");
478 Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
479 KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
480 assert(info != NULL, "must have been initialized");
481 return info;
482 }
483
484 // Add an entry field to the current KlassSubGraphInfo.
485 void KlassSubGraphInfo::add_subgraph_entry_field(
486 int static_field_offset, oop v, bool is_closed_archive) {
487 assert(DumpSharedSpaces, "dump time only");
488 if (_subgraph_entry_fields == NULL) {
489 _subgraph_entry_fields =
490 new(ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, mtClass);
491 }
492 _subgraph_entry_fields->append(static_field_offset);
493 _subgraph_entry_fields->append(HeapShared::append_root(v));
494 _subgraph_entry_fields->append(is_closed_archive ? 1 : 0);
495 }
496
497 // Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
498 // Only objects of boot classes can be included in sub-graph.
499 void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k, Klass *relocated_k) {
500 assert(DumpSharedSpaces, "dump time only");
501 assert(relocated_k == MetaspaceShared::get_relocated_klass(orig_k),
502 "must be the relocated Klass in the shared space");
503
504 if (_subgraph_object_klasses == NULL) {
505 _subgraph_object_klasses =
506 new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, mtClass);
507 }
508
509 assert(ArchiveBuilder::singleton()->is_in_buffer_space(relocated_k), "must be a shared class");
510
511 if (_k == relocated_k) {
512 // Don't add the Klass containing the sub-graph to it's own klass
513 // initialization list.
533 }
534 if (relocated_k == Universe::objectArrayKlassObj()) {
535 // Initialized early during Universe::genesis. No need to be added
536 // to the list.
537 return;
538 }
539 } else {
540 assert(relocated_k->is_typeArray_klass(), "must be");
541 // Primitive type arrays are created early during Universe::genesis.
542 return;
543 }
544
545 if (log_is_enabled(Debug, cds, heap)) {
546 if (!_subgraph_object_klasses->contains(relocated_k)) {
547 ResourceMark rm;
548 log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
549 }
550 }
551
552 _subgraph_object_klasses->append_if_missing(relocated_k);
553 _has_non_early_klasses |= is_non_early_klass(orig_k);
554 }
555
556 bool KlassSubGraphInfo::is_non_early_klass(Klass* k) {
557 if (k->is_objArray_klass()) {
558 k = ObjArrayKlass::cast(k)->bottom_klass();
559 }
560 if (k->is_instance_klass()) {
561 if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) {
562 ResourceMark rm;
563 log_info(cds, heap)("non-early: %s", k->external_name());
564 return true;
565 } else {
566 return false;
567 }
568 } else {
569 return false;
570 }
571 }
572
573 // Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
574 void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
575 _k = info->klass();
576 _entry_field_records = NULL;
577 _subgraph_object_klasses = NULL;
578 _is_full_module_graph = info->is_full_module_graph();
579 _has_non_early_klasses = info->has_non_early_klasses();
580
581 if (_has_non_early_klasses) {
582 ResourceMark rm;
583 log_info(cds, heap)(
584 "Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled",
585 _k->external_name());
586 }
587
588 // populate the entry fields
589 GrowableArray<int>* entry_fields = info->subgraph_entry_fields();
590 if (entry_fields != NULL) {
591 int num_entry_fields = entry_fields->length();
592 assert(num_entry_fields % 3 == 0, "sanity");
593 _entry_field_records =
594 MetaspaceShared::new_ro_array<int>(num_entry_fields);
595 for (int i = 0 ; i < num_entry_fields; i++) {
596 _entry_field_records->at_put(i, entry_fields->at(i));
597 }
598 }
599
600 // the Klasses of the objects in the sub-graphs
601 GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
602 if (subgraph_object_klasses != NULL) {
603 int num_subgraphs_klasses = subgraph_object_klasses->length();
604 _subgraph_object_klasses =
605 MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses);
606 for (int i = 0; i < num_subgraphs_klasses; i++) {
607 Klass* subgraph_k = subgraph_object_klasses->at(i);
608 if (log_is_enabled(Info, cds, heap)) {
609 ResourceMark rm;
610 log_info(cds, heap)(
611 "Archived object klass %s (%2d) => %s",
612 _k->external_name(), i, subgraph_k->external_name());
613 }
614 _subgraph_object_klasses->at_put(i, subgraph_k);
647 // - A list of klasses that need to be loaded/initialized before archived
648 // java object sub-graph can be accessed at runtime.
649 void HeapShared::write_subgraph_info_table() {
650 // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
651 DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
652 CompactHashtableStats stats;
653
654 _run_time_subgraph_info_table.reset();
655
656 CompactHashtableWriter writer(d_table->_count, &stats);
657 CopyKlassSubGraphInfoToArchive copy(&writer);
658 d_table->iterate(©);
659
660 writer.dump(&_run_time_subgraph_info_table, "subgraphs");
661 }
662
// Pass the run-time subgraph info table's header through 'soc'
// (whether this writes or reads depends on the SerializeClosure supplied).
void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) {
  _run_time_subgraph_info_table.serialize_header(soc);
}
666
667 static void verify_the_heap(Klass* k, const char* which) {
668 if (VerifyArchivedFields) {
669 ResourceMark rm;
670 log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
671 which, k->external_name());
672 VM_Verify verify_op;
673 VMThread::execute(&verify_op);
674 #if 0
675 // For some reason, this causes jtreg to lock up with
676 // "jtreg -vmoptions:-XX:+VerifyArchivedFields HelloTest.java"
677 if (is_init_completed()) {
678 FlagSetting fs1(VerifyBeforeGC, true);
679 FlagSetting fs2(VerifyDuringGC, true);
680 FlagSetting fs3(VerifyAfterGC, true);
681 Universe::heap()->collect(GCCause::_java_lang_system_gc);
682 }
683 #endif
684 }
685 }
686
687 // Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
688 // have a valid klass. I.e., oopDesc::klass() must have already been resolved.
689 //
690 // Note: if a ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
691 // ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
692 // this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
693 void HeapShared::resolve_classes(TRAPS) {
694 if (!is_mapped()) {
695 return; // nothing to do
696 }
697 resolve_classes_for_subgraphs(closed_archive_subgraph_entry_fields,
698 num_closed_archive_subgraph_entry_fields,
699 CHECK);
700 resolve_classes_for_subgraphs(open_archive_subgraph_entry_fields,
701 num_open_archive_subgraph_entry_fields,
702 CHECK);
703 resolve_classes_for_subgraphs(fmg_open_archive_subgraph_entry_fields,
704 num_fmg_open_archive_subgraph_entry_fields,
705 CHECK);
706 }
707
708 void HeapShared::resolve_classes_for_subgraphs(ArchivableStaticFieldInfo fields[],
709 int num, TRAPS) {
710 for (int i = 0; i < num; i++) {
711 ArchivableStaticFieldInfo* info = &fields[i];
712 TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
713 InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
714 assert(k != NULL && k->is_shared_boot_class(), "sanity");
715 resolve_classes_for_subgraph_of(k, CHECK);
716 }
717 }
718
719 void HeapShared::resolve_classes_for_subgraph_of(Klass* k, TRAPS) {
720 const ArchivedKlassSubGraphInfoRecord* record = resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
721 if (HAS_PENDING_EXCEPTION) {
722 CLEAR_PENDING_EXCEPTION;
723 }
724 if (record == NULL) {
725 clear_archived_roots_of(k);
726 }
727 }
728
729 void HeapShared::initialize_from_archived_subgraph(Klass* k, TRAPS) {
730 if (!is_mapped()) {
731 return; // nothing to do
732 }
733
734 const ArchivedKlassSubGraphInfoRecord* record =
735 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);
736
737 if (HAS_PENDING_EXCEPTION) {
738 CLEAR_PENDING_EXCEPTION;
739 // None of the field value will be set if there was an exception when initializing the classes.
740 // The java code will not see any of the archived objects in the
741 // subgraphs referenced from k in this case.
742 return;
743 }
744
745 if (record != NULL) {
746 init_archived_fields_for(k, record, THREAD);
747 }
748 }
749
750 const ArchivedKlassSubGraphInfoRecord*
751 HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) {
752 assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
753
754 unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(k);
755 const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
756
757 // Initialize from archived data. Currently this is done only
758 // during VM initialization time. No lock is needed.
759 if (record != NULL) {
760 if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) {
761 if (log_is_enabled(Info, cds, heap)) {
762 ResourceMark rm;
763 log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled",
764 k->external_name());
765 }
766 return NULL;
767 }
768
769 if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
770 if (log_is_enabled(Info, cds, heap)) {
771 ResourceMark rm;
772 log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
773 k->external_name());
774 }
775 return NULL;
776 }
777
778 resolve_or_init(k, do_init, CHECK_NULL);
779
780 // Load/link/initialize the klasses of the objects in the subgraph.
781 // NULL class loader is used.
782 Array<Klass*>* klasses = record->subgraph_object_klasses();
783 if (klasses != NULL) {
784 for (int i = 0; i < klasses->length(); i++) {
785 resolve_or_init(klasses->at(i), do_init, CHECK_NULL);
786 }
787 }
788 }
789
790 return record;
791 }
792
793 void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
794 if (!do_init) {
795 if (k->class_loader_data() == NULL) {
796 Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
797 assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
798 }
799 } else {
800 assert(k->class_loader_data() != NULL, "must have been resolved by HeapShared::resolve_classes");
801 if (k->is_instance_klass()) {
802 InstanceKlass* ik = InstanceKlass::cast(k);
803 ik->initialize(CHECK);
804 } else if (k->is_objArray_klass()) {
805 ObjArrayKlass* oak = ObjArrayKlass::cast(k);
806 oak->initialize(CHECK);
807 }
808 }
809 }
810
811 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record, TRAPS) {
812 verify_the_heap(k, "before");
813
814 // Load the subgraph entry fields from the record and store them back to
815 // the corresponding fields within the mirror.
816 oop m = k->java_mirror();
817 Array<int>* entry_field_records = record->entry_field_records();
818 if (entry_field_records != NULL) {
819 int efr_len = entry_field_records->length();
820 assert(efr_len % 3 == 0, "sanity");
821 for (int i = 0; i < efr_len; i += 3) {
822 int field_offset = entry_field_records->at(i);
823 int root_index = entry_field_records->at(i+1);
824 int is_closed_archive = entry_field_records->at(i+2);
825 oop v = get_root(root_index, /*clear=*/true);
826 m->obj_field_put(field_offset, v);
827 log_debug(cds, heap)(" " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
828 }
829
830 // Done. Java code can see the archived sub-graphs referenced from k's
831 // mirror after this point.
832 if (log_is_enabled(Info, cds, heap)) {
833 ResourceMark rm;
834 log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s",
835 k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "");
836 }
837 }
838
839 verify_the_heap(k, "after ");
840 }
841
842 void HeapShared::clear_archived_roots_of(Klass* k) {
843 unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(k);
844 const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
845 if (record != NULL) {
846 Array<int>* entry_field_records = record->entry_field_records();
847 if (entry_field_records != NULL) {
848 int efr_len = entry_field_records->length();
849 assert(efr_len % 3 == 0, "sanity");
850 for (int i = 0; i < efr_len; i += 3) {
851 int root_index = entry_field_records->at(i+1);
852 clear_root(root_index);
853 }
854 }
855 }
856 }
857
858 class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
859 int _level;
860 bool _is_closed_archive;
861 bool _record_klasses_only;
862 KlassSubGraphInfo* _subgraph_info;
863 oop _orig_referencing_obj;
864 oop _archived_referencing_obj;
865 Thread* _thread;
866 public:
867 WalkOopAndArchiveClosure(int level,
868 bool is_closed_archive,
869 bool record_klasses_only,
870 KlassSubGraphInfo* subgraph_info,
871 oop orig, oop archived, TRAPS) :
872 _level(level), _is_closed_archive(is_closed_archive),
|