diff a/src/hotspot/share/classfile/classLoaderDataShared.cpp b/src/hotspot/share/classfile/classLoaderDataShared.cpp --- a/src/hotspot/share/classfile/classLoaderDataShared.cpp +++ b/src/hotspot/share/classfile/classLoaderDataShared.cpp @@ -57,10 +57,11 @@ f->do_ptr((void**)&_packages); f->do_ptr((void**)&_modules); } void restore(ClassLoaderData* loader_data, bool do_entries, bool do_oops); + void clear_archived_oops(); }; static ArchivedClassLoaderData _archived_boot_loader_data; static ArchivedClassLoaderData _archived_platform_loader_data; static ArchivedClassLoaderData _archived_system_loader_data; @@ -121,10 +122,19 @@ modules->restore_archived_oops(loader_data, _modules); } } } +void ArchivedClassLoaderData::clear_archived_oops() { + assert(UseSharedSpaces, "must be"); + if (_modules != NULL) { + for (int i = 0; i < _modules->length(); i++) { + _modules->at(i)->clear_archived_oops(); + } + } +} + // ------------------------------ static ClassLoaderData* null_class_loader_data() { ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data(); assert(loader_data != NULL, "must be"); @@ -181,10 +191,17 @@ log_info(cds)("use_full_module_graph = true; java.base = " INTPTR_FORMAT, p2i(_archived_javabase_moduleEntry)); } } +void ClassLoaderDataShared::clear_archived_oops() { + assert(UseSharedSpaces && !MetaspaceShared::use_full_module_graph(), "must be"); + _archived_boot_loader_data.clear_archived_oops(); + _archived_platform_loader_data.clear_archived_oops(); + _archived_system_loader_data.clear_archived_oops(); +} + oop ClassLoaderDataShared::restore_archived_oops_for_null_class_loader_data() { assert(UseSharedSpaces && MetaspaceShared::use_full_module_graph(), "must be"); _archived_boot_loader_data.restore(null_class_loader_data(), false, true); return _archived_javabase_moduleEntry->module(); } diff a/src/hotspot/share/classfile/classLoaderDataShared.hpp b/src/hotspot/share/classfile/classLoaderDataShared.hpp --- a/src/hotspot/share/classfile/classLoaderDataShared.hpp +++ b/src/hotspot/share/classfile/classLoaderDataShared.hpp @@ -37,10 +37,11 @@ static void allocate_archived_tables(); static void iterate_symbols(MetaspaceClosure* closure); static void init_archived_tables(); static void init_archived_oops(); static void serialize(SerializeClosure* f); + static void clear_archived_oops(); static oop restore_archived_oops_for_null_class_loader_data(); static void restore_java_platform_loader_from_archive(ClassLoaderData* loader_data); static void restore_java_system_loader_from_archive(ClassLoaderData* loader_data); }; diff a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp --- a/src/hotspot/share/classfile/javaClasses.cpp +++ b/src/hotspot/share/classfile/javaClasses.cpp @@ -886,18 +886,18 @@ } } } } - if (k->is_shared() && k->has_raw_archived_mirror()) { + if (k->is_shared() && k->has_archived_mirror_index()) { if (HeapShared::open_archive_heap_region_mapped()) { bool present = restore_archived_mirror(k, Handle(), Handle(), Handle(), CHECK); assert(present, "Missing archived mirror for %s", k->external_name()); return; } else { k->clear_java_mirror_handle(); - k->clear_has_raw_archived_mirror(); + k->clear_archived_mirror_index(); } } create_mirror(k, Handle(), Handle(), Handle(), Handle(), CHECK); } @@ -1159,13 +1159,13 @@ oop java_lang_Class::archive_mirror(Klass* k, TRAPS) { assert(HeapShared::is_heap_object_archiving_allowed(), "HeapShared::is_heap_object_archiving_allowed() must be true"); // Mirror is already archived - 
if (k->has_raw_archived_mirror()) { - assert(k->archived_java_mirror_raw() != NULL, "no archived mirror"); - return k->archived_java_mirror_raw(); + if (k->has_archived_mirror_index()) { + assert(k->archived_java_mirror() != NULL, "no archived mirror"); + return k->archived_java_mirror(); } // No mirror oop mirror = k->java_mirror(); if (mirror == NULL) { @@ -1193,13 +1193,11 @@ archived_mirror = process_archived_mirror(k, mirror, archived_mirror, THREAD); if (archived_mirror == NULL) { return NULL; } - k->set_archived_java_mirror_raw(archived_mirror); - - k->set_has_raw_archived_mirror(); + k->set_archived_java_mirror(archived_mirror); ResourceMark rm; log_trace(cds, heap, mirror)( "Archived %s mirror object from " PTR_FORMAT " ==> " PTR_FORMAT, k->external_name(), p2i(mirror), p2i(archived_mirror)); @@ -1311,14 +1309,15 @@ assert(fixup_mirror_list() != NULL, "fixup_mirror_list not initialized"); fixup_mirror_list()->push(k); return true; } - oop m = HeapShared::materialize_archived_object(k->archived_java_mirror_raw_narrow()); - if (m == NULL) { - return false; - } + oop m = k->archived_java_mirror(); + assert(m != NULL, "must have stored non-null archived mirror"); + + // Sanity: clear it now to prevent re-initialization if any of the following fails + k->clear_archived_mirror_index(); // mirror is archived, restore log_debug(cds, mirror)("Archived mirror is: " PTR_FORMAT, p2i(m)); assert(HeapShared::is_archived_object(m), "must be archived mirror object"); assert(as_Klass(m) == k, "must be"); @@ -1340,11 +1339,10 @@ if (class_loader.not_null()) { set_class_loader(mirror(), class_loader()); } k->set_java_mirror(mirror); - k->clear_has_raw_archived_mirror(); set_mirror_module_field(k, mirror, module, THREAD); if (log_is_enabled(Trace, cds, heap, mirror)) { ResourceMark rm(THREAD); diff a/src/hotspot/share/classfile/moduleEntry.cpp b/src/hotspot/share/classfile/moduleEntry.cpp --- a/src/hotspot/share/classfile/moduleEntry.cpp +++ b/src/hotspot/share/classfile/moduleEntry.cpp @@ -465,11 +465,11 @@ assert(DumpSharedSpaces, "static dump only"); oop module_obj = module(); if (module_obj != NULL) { oop m = HeapShared::find_archived_heap_object(module_obj); assert(m != NULL, "sanity"); - _archived_module_narrow_oop = CompressedOops::encode(m); + _archived_module_index = HeapShared::append_root(m); } assert(shared_protection_domain() == NULL, "never set during -Xshare:dump"); // Clear handles and restore at run time. Handles cannot be archived. OopHandle null_handle; _module = null_handle; @@ -479,12 +479,12 @@ set_loader_data(loader_data); _reads = restore_growable_array((Array<ModuleEntry*>*)_reads); JFR_ONLY(INIT_ID(this);) } -void ModuleEntry::restore_archive_oops(ClassLoaderData* loader_data) { - Handle module_handle(Thread::current(), HeapShared::materialize_archived_object(_archived_module_narrow_oop)); +void ModuleEntry::restore_archived_oops(ClassLoaderData* loader_data) { + Handle module_handle(Thread::current(), HeapShared::get_root(_archived_module_index, /*clear=*/true)); assert(module_handle.not_null(), "archived module object must not be null"); set_module(loader_data->add_handle(module_handle)); // This was cleared to zero during dump time -- we didn't save the value // because it may be affected by archive relocation.
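Note: the ModuleEntry change above is one instance of the central refactoring in this patch. Metadata objects no longer cache a raw narrowOop pointing into the archived heap (which is invalidated when the mapped heap regions are relocated at run time); they cache an int index into the HeapShared::roots() objArray instead. A minimal sketch of the pattern using the append_root()/get_root() API introduced by this patch -- ExampleEntry is a hypothetical stand-in for ModuleEntry, not part of the patch:

#include "memory/heapShared.hpp"

class ExampleEntry {
  int _archived_obj_index; // slot in HeapShared::roots(); stable across heap relocation

public:
  // Dump time (in the VM thread): remember which roots() slot holds the archived object.
  void init_archived_oops(oop archived_obj) {
    _archived_obj_index = HeapShared::append_root(archived_obj);
  }

  // Run time: fetch the object exactly once. clear=true NULLs the roots() slot,
  // so from now on the caller (e.g. an OopHandle added to the ClassLoaderData)
  // is responsible for keeping the object alive.
  oop restore_archived_oops() {
    return HeapShared::get_root(_archived_obj_index, /*clear=*/true);
  }
};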
@@ -493,10 +493,14 @@ if (loader_data->class_loader() != NULL) { java_lang_Module::set_loader(module_handle(), loader_data->class_loader()); } } +void ModuleEntry::clear_archived_oops() { + HeapShared::clear_root(_archived_module_index); +} + static int compare_module_by_name(ModuleEntry* a, ModuleEntry* b) { assert(a == b || a->name() != b->name(), "no duplicated names"); return a->name()->fast_compare(b->name()); } @@ -559,11 +563,11 @@ void ModuleEntryTable::restore_archived_oops(ClassLoaderData* loader_data, Array<ModuleEntry*>* archived_modules) { assert(UseSharedSpaces, "runtime only"); for (int i = 0; i < archived_modules->length(); i++) { ModuleEntry* archived_entry = archived_modules->at(i); - archived_entry->restore_archive_oops(loader_data); + archived_entry->restore_archived_oops(loader_data); } } #endif // INCLUDE_CDS_JAVA_HEAP ModuleEntry* ModuleEntryTable::new_entry(unsigned int hash, Handle module_handle, diff a/src/hotspot/share/classfile/moduleEntry.hpp b/src/hotspot/share/classfile/moduleEntry.hpp --- a/src/hotspot/share/classfile/moduleEntry.hpp +++ b/src/hotspot/share/classfile/moduleEntry.hpp @@ -75,11 +75,11 @@ bool _can_read_all_unnamed; bool _has_default_read_edges; // JVMTI redefine/retransform support bool _must_walk_reads; // walk module's reads list at GC safepoints to purge out dead modules bool _is_open; // whether the packages in the module are all unqualifiedly exported bool _is_patched; // whether the module is patched via --patch-module - CDS_JAVA_HEAP_ONLY(narrowOop _archived_module_narrow_oop;) + CDS_JAVA_HEAP_ONLY(int _archived_module_index;) JFR_ONLY(DEFINE_TRACE_ID_FIELD;) enum {MODULE_READS_SIZE = 101}; // Initial size of list of modules that the module can read. public: @@ -200,11 +200,12 @@ void init_archived_oops(); static ModuleEntry* get_archived_entry(ModuleEntry* orig_entry); static Array<ModuleEntry*>* write_growable_array(GrowableArray<ModuleEntry*>* array); static GrowableArray<ModuleEntry*>* restore_growable_array(Array<ModuleEntry*>* archived_array); void load_from_archive(ClassLoaderData* loader_data); - void restore_archive_oops(ClassLoaderData* loader_data); + void restore_archived_oops(ClassLoaderData* loader_data); + void clear_archived_oops(); #endif }; // Iterator interface class ModuleClosure: public StackObj { diff a/src/hotspot/share/classfile/systemDictionary.cpp b/src/hotspot/share/classfile/systemDictionary.cpp --- a/src/hotspot/share/classfile/systemDictionary.cpp +++ b/src/hotspot/share/classfile/systemDictionary.cpp @@ -1975,12 +1975,16 @@ // Allocate private object used as system class loader lock oop lock_obj = oopFactory::new_intArray(0, CHECK); _system_loader_lock_obj = OopHandle(Universe::vm_global(), lock_obj); - // Initialize basic classes + // Resolve basic classes resolve_well_known_classes(CHECK); + // Resolve classes used by archived heap objects + if (UseSharedSpaces) { + HeapShared::resolve_classes(CHECK); + } } // Compact table of directions on the initialization of klasses: static const short wk_init_info[] = { #define WK_KLASS_INIT_INFO(name, symbol) \ diff a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp --- a/src/hotspot/share/classfile/systemDictionaryShared.cpp +++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp @@ -51,10 +51,11 @@ #include "oops/instanceKlass.hpp" #include "oops/klass.inline.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/typeArrayOop.inline.hpp" +#include "prims/jvmtiExport.hpp" #include "runtime/handles.inline.hpp" #include "runtime/java.hpp"
"runtime/java.hpp" #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp" #include "utilities/hashtable.inline.hpp" @@ -68,10 +69,11 @@ DEBUG_ONLY(bool SystemDictionaryShared::_no_class_loading_should_happen = false;) bool SystemDictionaryShared::_dump_in_progress = false; class DumpTimeSharedClassInfo: public CHeapObj { bool _excluded; + bool _is_early_klass; public: struct DTLoaderConstraint { Symbol* _name; char _loader_type1; char _loader_type2; @@ -112,10 +114,11 @@ _is_archived_lambda_proxy = false; _id = -1; _clsfile_size = -1; _clsfile_crc32 = -1; _excluded = false; + _is_early_klass = JvmtiExport::is_early_phase(); _verifier_constraints = NULL; _verifier_constraint_flags = NULL; _loader_constraints = NULL; } @@ -167,10 +170,14 @@ bool is_excluded() { // _klass may become NULL due to DynamicArchiveBuilder::set_to_null return _excluded || _failed_verification || _klass == NULL; } + // Was this class loaded while JvmtiExport::is_early_phase()==true + bool is_early_klass() { + return _is_early_klass; + } void set_failed_verification() { _failed_verification = true; } bool failed_verification() { @@ -1270,10 +1277,15 @@ } else { return false; } } +bool SystemDictionaryShared::is_early_klass(InstanceKlass* ik) { + DumpTimeSharedClassInfo* info = _dumptime_table->get(ik); + return (info != NULL) ? info->is_early_klass() : false; +} + void SystemDictionaryShared::warn_excluded(InstanceKlass* k, const char* reason) { ResourceMark rm; log_warning(cds)("Skipping %s: %s", k->name()->as_C_string(), reason); } @@ -2138,12 +2150,12 @@ #if INCLUDE_CDS_JAVA_HEAP class ArchivedMirrorPatcher { static void update(Klass* k) { - if (k->has_raw_archived_mirror()) { - oop m = HeapShared::materialize_archived_object(k->archived_java_mirror_raw_narrow()); + if (k->has_archived_mirror_index()) { + oop m = k->archived_java_mirror(); if (m != NULL) { java_lang_Class::update_archived_mirror_native_pointers(m); } } } diff a/src/hotspot/share/classfile/systemDictionaryShared.hpp b/src/hotspot/share/classfile/systemDictionaryShared.hpp --- a/src/hotspot/share/classfile/systemDictionaryShared.hpp +++ b/src/hotspot/share/classfile/systemDictionaryShared.hpp @@ -221,10 +221,11 @@ static bool _dump_in_progress; DEBUG_ONLY(static bool _no_class_loading_should_happen;) public: static bool is_hidden_lambda_proxy(InstanceKlass* ik); + static bool is_early_klass(InstanceKlass* k); // Was k loaded while JvmtiExport::is_early_phase()==true static Handle init_security_info(Handle class_loader, InstanceKlass* ik, PackageEntry* pkg_entry, TRAPS); static InstanceKlass* find_builtin_class(Symbol* class_name); static const RunTimeSharedClassInfo* find_record(RunTimeSharedDictionary* static_dict, RunTimeSharedDictionary* dynamic_dict, diff a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp --- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp @@ -1328,25 +1328,26 @@ bool is_marked_next(oop obj) const; // Determine if an object is dead, given the object and also // the region to which the object belongs. An object is dead // iff a) it was not allocated since the last mark, b) it - // is not marked, and c) it is not in an archive region. + // is not marked, and c) it is not in a closed archive region. 
bool is_obj_dead(const oop obj, const HeapRegion* hr) const { return - hr->is_obj_dead(obj, _cm->prev_mark_bitmap()) && - !hr->is_archive(); + hr->is_obj_dead(obj, _cm->prev_mark_bitmap()) && + !hr->is_closed_archive(); } // This function returns true when an object has been // around since the previous marking and hasn't yet - been marked during this marking, and is not in an archive region. + been marked during this marking, and is not in a closed archive region. bool is_obj_ill(const oop obj, const HeapRegion* hr) const { return !hr->obj_allocated_since_next_marking(obj) && !is_marked_next(obj) && - !hr->is_archive(); + !hr->is_closed_archive(); } // Determine if an object is dead, given only the object itself. // This will find the region to which the object belongs and // then call the region version of the same function. diff a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp --- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp @@ -280,11 +280,11 @@ } return is_obj_ill(obj, heap_region_containing(obj)); } inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const { - return !is_marked_next(obj) && !hr->is_archive(); + return !is_marked_next(obj) && !hr->is_closed_archive(); } inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const { return is_obj_dead_full(obj, heap_region_containing(obj)); }
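Note: the G1 liveness changes above encode the new invariant of this patch: closed-archive objects stay immortal, while open-archive objects now participate in marking, so archived objects whose HeapShared::roots() slots have been cleared can eventually become unreachable. A hedged, self-contained sketch of the rule (plain C++ illustration, not VM code):

struct RegionInfo {
  bool is_closed_archive; // immutable, cross-JVM-shareable region: objects never die
};

// Mirrors HeapRegion::is_obj_dead() after this patch: an object is dead iff it
// was allocated before the previous marking cycle, was not marked, and does
// not live in a closed archive region. Open-archive objects get no exemption.
static bool is_dead(const RegionInfo& region,
                    bool allocated_since_prev_marking,
                    bool marked_in_prev_bitmap) {
  if (region.is_closed_archive) {
    return false;
  }
  return !allocated_since_prev_marking && !marked_in_prev_bitmap;
}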
diff a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp --- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp +++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp @@ -66,11 +66,11 @@ T heap_oop = RawAccess<>::oop_load(p); if (!CompressedOops::is_null(heap_oop)) { oop obj = CompressedOops::decode_not_null(heap_oop); if (_g1h->is_obj_dead_cond(obj, _vo)) { Log(gc, verify) log; - log.error("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj)); + log.error("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT " vo %d", p2i(p), p2i(obj), _vo); ResourceMark rm; LogStream ls(log.error()); obj->print_on(&ls); _failures = true; } @@ -181,21 +181,30 @@ }; class VerifyLivenessOopClosure: public BasicOopIterateClosure { G1CollectedHeap* _g1h; VerifyOption _vo; + oop _o; public: - VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo): - _g1h(g1h), _vo(vo) + VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo, oop o): + _g1h(g1h), _vo(vo), _o(o) { } void do_oop(narrowOop *p) { do_oop_work(p); } void do_oop( oop *p) { do_oop_work(p); } template <class T> void do_oop_work(T *p) { oop obj = RawAccess<>::oop_load(p); guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo), - "Dead object referenced by a not dead object"); + "vo %d Dead object " PTR_FORMAT " (%s) referenced by a not dead object " PTR_FORMAT " (%s) _g1h->is_obj_dead_cond(obj, _vo) %d marked %d since prev mark %d since next mark %d", + _vo, + p2i(obj), _g1h->heap_region_containing(obj)->get_short_type_str(), + p2i(_o), _g1h->heap_region_containing(_o)->get_short_type_str(), + _g1h->is_obj_dead_cond(obj, _vo), + _g1h->concurrent_mark()->prev_mark_bitmap()->is_marked(_o), + _g1h->heap_region_containing(_o)->obj_allocated_since_prev_marking(_o), + _g1h->heap_region_containing(_o)->obj_allocated_since_next_marking(_o) + ); } }; class VerifyObjsInRegionClosure: public ObjectClosure { private: @@ -210,11 +219,11 @@ VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo) : _live_bytes(0), _hr(hr), _vo(vo) { _g1h = G1CollectedHeap::heap(); } void do_object(oop o) { - VerifyLivenessOopClosure isLive(_g1h, _vo); + VerifyLivenessOopClosure isLive(_g1h, _vo, o); assert(o != NULL, "Huh?"); if (!_g1h->is_obj_dead_cond(o, _vo)) { // If the object is alive according to the full gc mark, // then verify that the marking information agrees.
// Note we can't verify the contra-positive of the diff a/src/hotspot/share/gc/g1/heapRegion.inline.hpp b/src/hotspot/share/gc/g1/heapRegion.inline.hpp --- a/src/hotspot/share/gc/g1/heapRegion.inline.hpp +++ b/src/hotspot/share/gc/g1/heapRegion.inline.hpp @@ -164,11 +164,11 @@ inline bool HeapRegion::is_obj_dead(const oop obj, const G1CMBitMap* const prev_bitmap) const { assert(is_in_reserved(obj), "Object " PTR_FORMAT " must be in region", p2i(obj)); return !obj_allocated_since_prev_marking(obj) && !prev_bitmap->is_marked(obj) && - !is_open_archive(); + !is_closed_archive(); } inline size_t HeapRegion::block_size(const HeapWord *addr) const { if (addr == top()) { return pointer_delta(end(), addr); diff a/src/hotspot/share/gc/shared/gc_globals.hpp b/src/hotspot/share/gc/shared/gc_globals.hpp --- a/src/hotspot/share/gc/shared/gc_globals.hpp +++ b/src/hotspot/share/gc/shared/gc_globals.hpp @@ -535,10 +535,13 @@ "Verify memory system after GC") \ \ product(bool, VerifyDuringGC, false, DIAGNOSTIC, \ "Verify memory system during GC (between phases)") \ \ + product(bool, VerifyArchivedFields, trueInDebug, DIAGNOSTIC, \ + "Verify the heap when archived oop fields are loaded from the CDS archive") \ + \ product(ccstrlist, VerifyGCType, "", DIAGNOSTIC, \ "GC type(s) to verify when Verify*GC is enabled." \ "Available types are collector specific.") \ \ product(ccstrlist, VerifySubSet, "", DIAGNOSTIC, \ diff a/src/hotspot/share/memory/filemap.cpp b/src/hotspot/share/memory/filemap.cpp --- a/src/hotspot/share/memory/filemap.cpp +++ b/src/hotspot/share/memory/filemap.cpp @@ -243,10 +243,11 @@ _base_archive_name_size = 0; _base_archive_is_default = false; if (!DynamicDumpSharedSpaces) { set_shared_path_table(mapinfo->_shared_path_table); + CDS_JAVA_HEAP_ONLY(_heap_obj_roots = CompressedOops::encode(HeapShared::roots())); } } void SharedClassPathEntry::init_as_non_existent(const char* path, TRAPS) { _type = non_existent_entry; @@ -1828,10 +1829,11 @@ MetaspaceShared::first_open_archive_heap_region, MetaspaceShared::max_open_archive_heap_region, &num_open_archive_heap_ranges, true /* open */)) { HeapShared::set_open_archive_heap_region_mapped(); + HeapShared::set_roots(header()->heap_obj_roots()); } } } void FileMapInfo::map_heap_regions() { diff a/src/hotspot/share/memory/filemap.hpp b/src/hotspot/share/memory/filemap.hpp --- a/src/hotspot/share/memory/filemap.hpp +++ b/src/hotspot/share/memory/filemap.hpp @@ -233,11 +233,11 @@ bool _allow_archiving_with_java_agent; // setting of the AllowArchivingWithJavaAgent option bool _use_optimized_module_handling;// No module-relation VM options were specified, so we can skip // some expensive operations. bool _use_full_module_graph; // Can we use the full archived module graph?
size_t _ptrmap_size_in_bits; // Size of pointer relocation bitmap - + narrowOop _heap_obj_roots; // An objArray that stores all the roots of archived heap objects char* from_mapped_offset(size_t offset) const { return mapped_base_address() + offset; } void set_mapped_offset(char* p, size_t *offset) { assert(p >= mapped_base_address(), "sanity"); @@ -281,19 +281,22 @@ // FIXME: These should really return int jshort max_used_path_index() const { return _max_used_path_index; } jshort app_module_paths_start_index() const { return _app_module_paths_start_index; } jshort app_class_paths_start_index() const { return _app_class_paths_start_index; } jshort num_module_paths() const { return _num_module_paths; } + narrowOop heap_obj_roots() const { return _heap_obj_roots; } void set_has_platform_or_app_classes(bool v) { _has_platform_or_app_classes = v; } void set_cloned_vtables(char* p) { set_mapped_offset(p, &_cloned_vtables_offset); } void set_serialized_data(char* p) { set_mapped_offset(p, &_serialized_data_offset); } void set_base_archive_name_size(size_t s) { _base_archive_name_size = s; } void set_base_archive_is_default(bool b) { _base_archive_is_default = b; } void set_header_size(size_t s) { _header_size = s; } void set_ptrmap_size_in_bits(size_t s) { _ptrmap_size_in_bits = s; } void set_mapped_base_address(char* p) { _mapped_base_address = p; } + void set_heap_obj_roots(narrowOop r) { _heap_obj_roots = r; } + void set_i2i_entry_code_buffers(address p, size_t s) { set_mapped_offset((char*)p, &_i2i_entry_code_buffers_offset); _i2i_entry_code_buffers_size = s; } diff a/src/hotspot/share/memory/heapShared.cpp b/src/hotspot/share/memory/heapShared.cpp --- a/src/hotspot/share/memory/heapShared.cpp +++ b/src/hotspot/share/memory/heapShared.cpp @@ -46,12 +46,15 @@ #include "memory/metaspaceShared.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/compressedOops.inline.hpp" #include "oops/fieldStreams.inline.hpp" +#include "oops/objArrayOop.hpp" #include "oops/oop.inline.hpp" +#include "oops/oopHandle.hpp" #include "runtime/fieldDescriptor.inline.hpp" +#include "runtime/init.hpp" #include "runtime/javaCalls.hpp" #include "runtime/safepointVerifiers.hpp" #include "utilities/bitMap.inline.hpp" #if INCLUDE_G1GC #include "gc/g1/g1CollectedHeap.hpp" @@ -102,19 +105,31 @@ const static int num_open_archive_subgraph_entry_fields = sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo); const static int num_fmg_open_archive_subgraph_entry_fields = sizeof(fmg_open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo); +static GrowableArrayCHeap<oop, mtClassShared>* _pending_roots = NULL; +static objArrayOop _dumptime_roots = NULL; // dump time only; at run time the same array is accessed via _runtime_roots +static narrowOop _runtime_roots_narrow; +static OopHandle _runtime_roots; //////////////////////////////////////////////////////////////// // // Java heap object archiving support // //////////////////////////////////////////////////////////////// void HeapShared::fixup_mapped_heap_regions() { FileMapInfo *mapinfo = FileMapInfo::current_info(); mapinfo->fixup_mapped_heap_regions(); set_archive_heap_region_fixed(); + if (is_mapped()) { + _runtime_roots = OopHandle(Universe::vm_global(), HeapShared::materialize_archived_object(_runtime_roots_narrow)); + if (!MetaspaceShared::use_full_module_graph()) { + // Need to remove all the archived java.lang.Module objects from HeapShared::roots().
+ ClassLoaderDataShared::clear_archived_oops(); + } + } SystemDictionaryShared::update_archived_mirror_native_pointers(); } unsigned HeapShared::oop_hash(oop const& p) { assert(!p->mark().has_bias_pattern(), @@ -163,10 +178,70 @@ } else { return NULL; } } +int HeapShared::append_root(oop obj) { + assert(DumpSharedSpaces, "dump-time only"); + + // No GC should happen since we aren't scanning _pending_roots. + assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); + + if (_pending_roots == NULL) { + _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500); + } + + return _pending_roots->append(obj); +} + +objArrayOop HeapShared::roots() { + if (DumpSharedSpaces) { + assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); + return _dumptime_roots; + } else { + assert(UseSharedSpaces, "must be"); + objArrayOop roots = (objArrayOop)_runtime_roots.resolve(); + assert(roots != NULL, "should have been initialized"); + return roots; + } +} + +void HeapShared::set_roots(narrowOop roots) { + assert(UseSharedSpaces, "runtime only"); + assert(open_archive_heap_region_mapped(), "must be"); + _runtime_roots_narrow = roots; +} + +oop HeapShared::get_root(int index, bool clear) { + assert(index >= 0, "sanity"); + if (DumpSharedSpaces) { + assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); + assert(_pending_roots != NULL, "sanity"); + return _pending_roots->at(index); + } else { + assert(UseSharedSpaces, "must be"); + assert(!_runtime_roots.is_empty(), "must have loaded shared heap"); + oop result = roots()->obj_at(index); + if (clear) { + clear_root(index); + } + return result; + } +} + +void HeapShared::clear_root(int index) { + assert(index >= 0, "sanity"); + assert(UseSharedSpaces, "must be"); + if (open_archive_heap_region_mapped()) { + if (log_is_enabled(Debug, cds, heap)) { + oop old = roots()->obj_at(index); + log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old)); + } + roots()->obj_at_put(index, NULL); + } +} + oop HeapShared::archive_heap_object(oop obj, Thread* THREAD) { assert(DumpSharedSpaces, "dump-time only"); oop ao = find_archived_heap_object(obj); if (ao != NULL) { @@ -198,12 +273,15 @@ DEBUG_ONLY(int hash_archived = archived_oop->identity_hash()); assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived); ArchivedObjectCache* cache = archived_object_cache(); cache->put(obj, archived_oop); - log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT, - p2i(obj), p2i(archived_oop)); + if (log_is_enabled(Debug, cds, heap)) { + ResourceMark rm; + log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT " : %s", + p2i(obj), p2i(archived_oop), obj->klass()->external_name()); + } } else { log_error(cds, heap)( "Cannot allocate space for object " PTR_FORMAT " in archived heap region", p2i(obj)); vm_exit(1); @@ -255,10 +333,11 @@ log_info(cds)("Run GC done"); } } } +// Also builds the objArray that holds the roots of the archived heap objects (see HeapShared::roots()) void HeapShared::archive_java_heap_objects(GrowableArray<MemRegion> *closed, GrowableArray<MemRegion> *open) { if (!is_heap_object_archiving_allowed()) { log_info(cds)( "Archived java heap is not supported as UseG1GC, " @@ -281,14 +360,10 @@ copy_closed_archive_heap_objects(closed); log_info(cds)("Dumping objects to open archive heap region ..."); copy_open_archive_heap_objects(open); - if (MetaspaceShared::use_full_module_graph()) { - ClassLoaderDataShared::init_archived_oops(); - }
- destroy_archived_object_cache(); } G1HeapVerifier::verify_archive_regions(); } @@ -333,16 +408,48 @@ archive_object_subgraphs(fmg_open_archive_subgraph_entry_fields, num_fmg_open_archive_subgraph_entry_fields, false /* is_closed_archive */, true /* is_full_module_graph */, THREAD); + ClassLoaderDataShared::init_archived_oops(); } + copy_roots(); + G1CollectedHeap::heap()->end_archive_alloc_range(open_archive, os::vm_allocation_granularity()); } +// Copy _pending_roots into an objArray allocated in the open archive region +void HeapShared::copy_roots() { + int length = _pending_roots != NULL ? _pending_roots->length() : 0; + int size = objArrayOopDesc::object_size(length); + Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass + HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size); + + memset(mem, 0, size * BytesPerWord); // zero the allocation so all element slots start out NULL + { + // This is copied from MemAllocator::finish + if (UseBiasedLocking) { + oopDesc::set_mark(mem, k->prototype_header()); + } else { + oopDesc::set_mark(mem, markWord::prototype()); + } + oopDesc::release_set_klass(mem, k); + } + { + // This is copied from ObjArrayAllocator::initialize + arrayOopDesc::set_length(mem, length); + } + + _dumptime_roots = (objArrayOop)mem; + for (int i = 0; i < length; i++) { + _dumptime_roots->obj_at_put(i, _pending_roots->at(i)); + } + log_info(cds)("archived obj roots[%d] = %d words, klass = " PTR_FORMAT ", obj = " PTR_FORMAT, length, size, p2i(k), p2i(mem)); +} + void HeapShared::init_narrow_oop_decoding(address base, int shift) { _narrow_oop_base = base; _narrow_oop_shift = shift; } @@ -378,14 +485,14 @@ void KlassSubGraphInfo::add_subgraph_entry_field( int static_field_offset, oop v, bool is_closed_archive) { assert(DumpSharedSpaces, "dump time only"); if (_subgraph_entry_fields == NULL) { _subgraph_entry_fields = - new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, mtClass); + new(ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, mtClass); } - _subgraph_entry_fields->append((juint)static_field_offset); - _subgraph_entry_fields->append(CompressedOops::narrow_oop_value(v)); + _subgraph_entry_fields->append(static_field_offset); + _subgraph_entry_fields->append(HeapShared::append_root(v)); _subgraph_entry_fields->append(is_closed_archive ? 1 : 0); } // Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs. // Only objects of boot classes can be included in sub-graph. @@ -441,26 +548,52 @@ log_debug(cds, heap)("Adding klass %s", orig_k->external_name()); } } _subgraph_object_klasses->append_if_missing(relocated_k); + _has_non_early_klasses |= is_non_early_klass(orig_k); +} + +bool KlassSubGraphInfo::is_non_early_klass(Klass* k) { + if (k->is_objArray_klass()) { + k = ObjArrayKlass::cast(k)->bottom_klass(); + } + if (k->is_instance_klass()) { + if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) { + ResourceMark rm; + log_info(cds, heap)("non-early: %s", k->external_name()); + return true; + } else { + return false; + } + } else { + return false; + } } // Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) { _k = info->klass(); _entry_field_records = NULL; _subgraph_object_klasses = NULL; _is_full_module_graph = info->is_full_module_graph(); + _has_non_early_klasses = info->has_non_early_klasses(); + + if (_has_non_early_klasses) { + ResourceMark rm; + log_info(cds, heap)( + "Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled", + _k->external_name()); + } // populate the entry fields - GrowableArray<juint>* entry_fields = info->subgraph_entry_fields(); + GrowableArray<int>* entry_fields = info->subgraph_entry_fields(); if (entry_fields != NULL) { int num_entry_fields = entry_fields->length(); assert(num_entry_fields % 3 == 0, "sanity"); _entry_field_records = - MetaspaceShared::new_ro_array<juint>(num_entry_fields); + MetaspaceShared::new_ro_array<int>(num_entry_fields); for (int i = 0 ; i < num_entry_fields; i++) { _entry_field_records->at_put(i, entry_fields->at(i)); } } @@ -529,96 +662,196 @@ void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) { _run_time_subgraph_info_table.serialize_header(soc); } +static void verify_the_heap(Klass* k, const char* which) { + if (VerifyArchivedFields) { + ResourceMark rm; + log_info(cds, heap)("Verify heap %s initializing static field(s) in %s", + which, k->external_name()); + VM_Verify verify_op; + VMThread::execute(&verify_op); +#if 0 + // For some reason, this causes jtreg to lock up with + // "jtreg -vmoptions:-XX:+VerifyArchivedFields HelloTest.java" + if (is_init_completed()) { + FlagSetting fs1(VerifyBeforeGC, true); + FlagSetting fs2(VerifyDuringGC, true); + FlagSetting fs3(VerifyAfterGC, true); + Universe::heap()->collect(GCCause::_java_lang_system_gc); + } +#endif + } +} + +// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots() +// have a valid klass. I.e., oopDesc::klass() must have already been resolved. +// +// Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes and JVMTI +// ClassFileLoadHook is enabled, those classes may be dynamically replaced. In +// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
+void HeapShared::resolve_classes(TRAPS) { + if (!is_mapped()) { + return; // nothing to do + } + resolve_classes_for_subgraphs(closed_archive_subgraph_entry_fields, + num_closed_archive_subgraph_entry_fields, + CHECK); + resolve_classes_for_subgraphs(open_archive_subgraph_entry_fields, + num_open_archive_subgraph_entry_fields, + CHECK); + resolve_classes_for_subgraphs(fmg_open_archive_subgraph_entry_fields, + num_fmg_open_archive_subgraph_entry_fields, + CHECK); +} + +void HeapShared::resolve_classes_for_subgraphs(ArchivableStaticFieldInfo fields[], + int num, TRAPS) { + for (int i = 0; i < num; i++) { + ArchivableStaticFieldInfo* info = &fields[i]; + TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name); + InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name); + assert(k != NULL && k->is_shared_boot_class(), "sanity"); + resolve_classes_for_subgraph_of(k, CHECK); + } +} + +void HeapShared::resolve_classes_for_subgraph_of(Klass* k, TRAPS) { + const ArchivedKlassSubGraphInfoRecord* record = resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD); + if (HAS_PENDING_EXCEPTION) { + CLEAR_PENDING_EXCEPTION; + } + if (record == NULL) { + clear_archived_roots_of(k); + } +} + void HeapShared::initialize_from_archived_subgraph(Klass* k, TRAPS) { - if (!open_archive_heap_region_mapped()) { + if (!is_mapped()) { return; // nothing to do } + + const ArchivedKlassSubGraphInfoRecord* record = + resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD); + + if (HAS_PENDING_EXCEPTION) { + CLEAR_PENDING_EXCEPTION; + // None of the field values will be set if there was an exception when initializing the classes. + // The java code will not see any of the archived objects in the + // subgraphs referenced from k in this case. + return; + } + + if (record != NULL) { + init_archived_fields_for(k, record, THREAD); + } +} + +const ArchivedKlassSubGraphInfoRecord* +HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) { assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces"); unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(k); const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0); // Initialize from archived data. Currently this is done only // during VM initialization time. No lock is needed. if (record != NULL) { if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) { - return; + if (log_is_enabled(Info, cds, heap)) { + ResourceMark rm; + log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled", + k->external_name()); + } + return NULL; } - int i; + if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) { + if (log_is_enabled(Info, cds, heap)) { + ResourceMark rm; + log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled", + k->external_name()); + } + return NULL; + } + + resolve_or_init(k, do_init, CHECK_NULL); + // Load/link/initialize the klasses of the objects in the subgraph. // NULL class loader is used.
Array<Klass*>* klasses = record->subgraph_object_klasses(); if (klasses != NULL) { - for (i = 0; i < klasses->length(); i++) { - Klass* obj_k = klasses->at(i); - Klass* resolved_k = SystemDictionary::resolve_or_null( - (obj_k)->name(), THREAD); - if (resolved_k != obj_k) { - assert(!SystemDictionary::is_well_known_klass(resolved_k), - "shared well-known classes must not be replaced by JVMTI ClassFileLoadHook"); - ResourceMark rm(THREAD); - log_info(cds, heap)("Failed to load subgraph because %s was not loaded from archive", - resolved_k->external_name()); - return; - } - if ((obj_k)->is_instance_klass()) { - InstanceKlass* ik = InstanceKlass::cast(obj_k); - ik->initialize(THREAD); - } else if ((obj_k)->is_objArray_klass()) { - ObjArrayKlass* oak = ObjArrayKlass::cast(obj_k); - oak->initialize(THREAD); - } + for (int i = 0; i < klasses->length(); i++) { + resolve_or_init(klasses->at(i), do_init, CHECK_NULL); } } + } - if (HAS_PENDING_EXCEPTION) { - CLEAR_PENDING_EXCEPTION; - // None of the field value will be set if there was an exception. - // The java code will not see any of the archived objects in the - // subgraphs referenced from k in this case. - return + return record; +} + +void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) { + if (!do_init) { + if (k->class_loader_data() == NULL) { + Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK); + assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook"); + } + } else { + assert(k->class_loader_data() != NULL, "must have been resolved by HeapShared::resolve_classes"); + if (k->is_instance_klass()) { + InstanceKlass* ik = InstanceKlass::cast(k); + ik->initialize(CHECK); + } else if (k->is_objArray_klass()) { + ObjArrayKlass* oak = ObjArrayKlass::cast(k); + oak->initialize(CHECK); + } + } +} + +void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record, TRAPS) { + verify_the_heap(k, "before"); + + // Load the subgraph entry fields from the record and store them back to + // the corresponding fields within the mirror. + oop m = k->java_mirror(); + Array<int>* entry_field_records = record->entry_field_records(); + if (entry_field_records != NULL) { + int efr_len = entry_field_records->length(); + assert(efr_len % 3 == 0, "sanity"); + for (int i = 0; i < efr_len; i += 3) { + int field_offset = entry_field_records->at(i); + int root_index = entry_field_records->at(i+1); + int is_closed_archive = entry_field_records->at(i+2); + oop v = get_root(root_index, /*clear=*/true); + m->obj_field_put(field_offset, v); + log_debug(cds, heap)(" " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v)); } - // Load the subgraph entry fields from the record and store them back to - // the corresponding fields within the mirror. - oop m = k->java_mirror(); - Array<juint>* entry_field_records = record->entry_field_records(); + // Done. Java code can see the archived sub-graphs referenced from k's + // mirror after this point. + if (log_is_enabled(Info, cds, heap)) { + ResourceMark rm; + log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s", + k->external_name(), p2i(k), JvmtiExport::is_early_phase() ?
" (early)" : ""); + } + } + + verify_the_heap(k, "after "); +} + +void HeapShared::clear_archived_roots_of(Klass* k) { + unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(k); + const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0); + if (record != NULL) { + Array<int>* entry_field_records = record->entry_field_records(); if (entry_field_records != NULL) { int efr_len = entry_field_records->length(); assert(efr_len % 3 == 0, "sanity"); - for (i = 0; i < efr_len;) { - int field_offset = entry_field_records->at(i); - narrowOop nv = CompressedOops::narrow_oop_cast(entry_field_records->at(i+1)); - int is_closed_archive = entry_field_records->at(i+2); - oop v; - if (is_closed_archive == 0) { - // It's an archived object in the open archive heap regions, not shared. - // The object refereced by the field becomes 'known' by GC from this - // point. All objects in the subgraph reachable from the object are - // also 'known' by GC. - v = materialize_archived_object(nv); - } else { - // Shared object in the closed archive heap regions. Decode directly. - assert(!CompressedOops::is_null(nv), "shared object is null"); - v = HeapShared::decode_from_archive(nv); - } - m->obj_field_put(field_offset, v); - i += 3; - - log_debug(cds, heap)(" " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v)); - } - - // Done. Java code can see the archived sub-graphs referenced from k's - // mirror after this point. - if (log_is_enabled(Info, cds, heap)) { - ResourceMark rm; - log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT, - k->external_name(), p2i(k)); + for (int i = 0; i < efr_len; i += 3) { + int root_index = entry_field_records->at(i+1); + clear_root(root_index); } } } } diff a/src/hotspot/share/memory/heapShared.hpp b/src/hotspot/share/memory/heapShared.hpp --- a/src/hotspot/share/memory/heapShared.hpp +++ b/src/hotspot/share/memory/heapShared.hpp @@ -62,18 +62,27 @@ // object sub-graphs can be accessed at runtime. GrowableArray<Klass*>* _subgraph_object_klasses; // A list of _k's static fields as the entry points of archived sub-graphs. - // For each entry field, it is a tuple of field_offset, field_value and - // is_closed_archive flag. - GrowableArray<juint>* _subgraph_entry_fields; + // For each entry field, it is a tuple of field_offset, root_index (into + // HeapShared::roots()) and is_closed_archive flag. + GrowableArray<int>* _subgraph_entry_fields; + // Does this KlassSubGraphInfo belong to the archived full module graph bool _is_full_module_graph; + + // Does this KlassSubGraphInfo reference any classes that were loaded while + // JvmtiExport::is_early_phase()==false? If so, this KlassSubGraphInfo cannot be + // used at runtime if JVMTI ClassFileLoadHook is enabled.
+ bool _has_non_early_klasses; + static bool is_non_early_klass(Klass* k); + public: KlassSubGraphInfo(Klass* k, bool is_full_module_graph) : _k(k), _subgraph_object_klasses(NULL), _subgraph_entry_fields(NULL), - _is_full_module_graph(is_full_module_graph) {} + _is_full_module_graph(is_full_module_graph), + _has_non_early_klasses(false) {} ~KlassSubGraphInfo() { if (_subgraph_object_klasses != NULL) { delete _subgraph_object_klasses; } if (_subgraph_entry_fields != NULL) { @@ -83,45 +92,48 @@ Klass* klass() { return _k; } GrowableArray<Klass*>* subgraph_object_klasses() { return _subgraph_object_klasses; } - GrowableArray<juint>* subgraph_entry_fields() { + GrowableArray<int>* subgraph_entry_fields() { return _subgraph_entry_fields; } void add_subgraph_entry_field(int static_field_offset, oop v, bool is_closed_archive); void add_subgraph_object_klass(Klass *orig_k, Klass *relocated_k); int num_subgraph_object_klasses() { return _subgraph_object_klasses == NULL ? 0 : _subgraph_object_klasses->length(); } bool is_full_module_graph() const { return _is_full_module_graph; } + bool has_non_early_klasses() const { return _has_non_early_klasses; } }; // An archived record of object sub-graphs reachable from static // fields within _k's mirror. The record is reloaded from the archive // at runtime. class ArchivedKlassSubGraphInfoRecord { private: Klass* _k; bool _is_full_module_graph; + bool _has_non_early_klasses; // contains tuples of (field_offset, root_index, is_closed_archive) for each subgraph entry field - Array<juint>* _entry_field_records; + Array<int>* _entry_field_records; // klasses of objects in archived sub-graphs referenced from the entry points // (static fields) in the containing class Array<Klass*>* _subgraph_object_klasses; public: ArchivedKlassSubGraphInfoRecord() : _k(NULL), _entry_field_records(NULL), _subgraph_object_klasses(NULL) {} void init(KlassSubGraphInfo* info); Klass* klass() const { return _k; } - Array<juint>* entry_field_records() const { return _entry_field_records; } + Array<int>* entry_field_records() const { return _entry_field_records; } Array<Klass*>* subgraph_object_klasses() const { return _subgraph_object_klasses; } bool is_full_module_graph() const { return _is_full_module_graph; } + bool has_non_early_klasses() const { return _has_non_early_klasses; } }; #endif // INCLUDE_CDS_JAVA_HEAP class HeapShared: AllStatic { friend class VerifySharedOopClosure; @@ -252,11 +264,20 @@ static bool has_been_seen_during_subgraph_recording(oop obj); static void set_has_been_seen_during_subgraph_recording(oop obj); static void check_module_oop(oop orig_module_obj); - + static void copy_roots(); + + static void resolve_classes_for_subgraphs(ArchivableStaticFieldInfo fields[], + int num, TRAPS); + static void resolve_classes_for_subgraph_of(Klass* k, TRAPS); + static void clear_archived_roots_of(Klass* k); + static const ArchivedKlassSubGraphInfoRecord* + resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS); + static void resolve_or_init(Klass* k, bool do_init, TRAPS); + static void init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record, TRAPS); public: static void reset_archived_object_states(TRAPS); static void create_archived_object_cache() { _archived_object_cache = new (ResourceObj::C_HEAP, mtClass)ArchivedObjectCache(); @@ -293,10 +314,36 @@ bool is_closed_archive, TRAPS); static ResourceBitMap calculate_oopmap(MemRegion region); static void add_to_dumped_interned_strings(oop string); + + // We use the HeapShared::roots() array to make sure that objects stored in the + // archived heap regions are not
prematurely collected. These roots include: + // + // - mirrors of classes that have not yet been loaded. + // - ConstantPool::resolved_references() of classes that have not yet been loaded. + // - ArchivedKlassSubGraphInfoRecords that have not been initialized. + // - java.lang.Module objects that have not yet been added to the module graph. + // + // When a mirror M becomes referenced by a newly loaded class K, M will be removed + // from HeapShared::roots() via clear_root(), and K will be responsible for + // keeping M alive. + // + // Other types of roots are also cleared similarly when they become referenced. + + // Dump-time only. Returns the index of the root, which can be used at run time to read + // the root using get_root(index, ...). + static int append_root(oop obj); + + // Dump-time and runtime + static objArrayOop roots(); + static oop get_root(int index, bool clear=false); + + // Run-time only + static void set_roots(narrowOop roots); + static void clear_root(int index); #endif // INCLUDE_CDS_JAVA_HEAP public: static void run_full_gc_in_vm_thread() NOT_CDS_JAVA_HEAP_RETURN; @@ -325,15 +372,19 @@ } static bool open_archive_heap_region_mapped() { CDS_JAVA_HEAP_ONLY(return _open_archive_heap_region_mapped); NOT_CDS_JAVA_HEAP_RETURN_(false); } + static bool is_mapped() { + return closed_archive_heap_region_mapped() && open_archive_heap_region_mapped(); + } static void fixup_mapped_heap_regions() NOT_CDS_JAVA_HEAP_RETURN; inline static bool is_archived_object(oop p) NOT_CDS_JAVA_HEAP_RETURN_(false); + static void resolve_classes(TRAPS) NOT_CDS_JAVA_HEAP_RETURN; static void initialize_from_archived_subgraph(Klass* k, TRAPS) NOT_CDS_JAVA_HEAP_RETURN; // NarrowOops stored in the CDS archive may use a different encoding scheme // than CompressedOops::{base,shift} -- see FileMapInfo::map_heap_regions_impl. // To decode them, do not use CompressedOops::decode_not_null. Use this diff a/src/hotspot/share/memory/metaspaceShared.cpp b/src/hotspot/share/memory/metaspaceShared.cpp --- a/src/hotspot/share/memory/metaspaceShared.cpp +++ b/src/hotspot/share/memory/metaspaceShared.cpp @@ -1443,11 +1443,10 @@ // map_heap_regions() compares the current narrow oop and klass encodings // with the archived ones, so it must be done after all encodings are determined. static_mapinfo->map_heap_regions(); - disable_full_module_graph(); // Disabled temporarily for JDK-8253081 } }); log_info(cds)("optimized module handling: %s", MetaspaceShared::use_optimized_module_handling() ? "enabled" : "disabled"); log_info(cds)("full module graph: %s", MetaspaceShared::use_full_module_graph() ? "enabled" : "disabled"); } else { diff a/src/hotspot/share/oops/constantPool.cpp b/src/hotspot/share/oops/constantPool.cpp --- a/src/hotspot/share/oops/constantPool.cpp +++ b/src/hotspot/share/oops/constantPool.cpp @@ -369,10 +369,11 @@ _cache->archived_references() != NULL) { oop archived = _cache->archived_references(); // Create handle for the archived resolved reference array object Handle refs_handle(THREAD, archived); set_resolved_references(loader_data->add_handle(refs_handle)); + _cache->clear_archived_references(); } else #endif { // No mapped archived resolved reference array // Recreate the object array and add to ClassLoaderData.
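Note: the _cache->clear_archived_references() call added above completes the ownership hand-off protocol used throughout this patch. While a root index is still set, HeapShared::roots() keeps the archived object alive; once the object has been handed to a longer-lived owner (here, the OopHandle created via ClassLoaderData::add_handle()), the slot is cleared so the object is kept alive by its real owner and behaves like an ordinary heap object. A hedged sketch of the protocol -- the helper name is illustrative, not part of the patch:

#include "classfile/classLoaderData.hpp"
#include "memory/heapShared.hpp"
#include "runtime/handles.inline.hpp"

// Transfer ownership of an archived object from HeapShared::roots() to a CLD.
// Assumes the root slot at 'index' was filled at dump time via append_root().
static oop adopt_archived_root(ClassLoaderData* cld, int index, Thread* current) {
  oop obj = HeapShared::get_root(index);  // still pinned by roots() here
  Handle h(current, obj);
  cld->add_handle(h);                     // the CLD now keeps obj alive
  HeapShared::clear_root(index);          // roots()->obj_at_put(index, NULL)
  return obj;
}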
diff a/src/hotspot/share/oops/cpCache.cpp b/src/hotspot/share/oops/cpCache.cpp --- a/src/hotspot/share/oops/cpCache.cpp +++ b/src/hotspot/share/oops/cpCache.cpp @@ -772,19 +772,26 @@ set_reference_map(NULL); } #if INCLUDE_CDS_JAVA_HEAP oop ConstantPoolCache::archived_references() { - if (CompressedOops::is_null(_archived_references)) { + if (_archived_references_index < 0) { return NULL; } - return HeapShared::materialize_archived_object(_archived_references); + return HeapShared::get_root(_archived_references_index); +} + +void ConstantPoolCache::clear_archived_references() { + if (_archived_references_index >= 0) { + HeapShared::clear_root(_archived_references_index); + _archived_references_index = -1; + } } void ConstantPoolCache::set_archived_references(oop o) { assert(DumpSharedSpaces, "called only during dump time"); - _archived_references = CompressedOops::encode(o); + _archived_references_index = HeapShared::append_root(o); } #endif #if INCLUDE_JVMTI // RedefineClasses() API support: diff a/src/hotspot/share/oops/cpCache.hpp b/src/hotspot/share/oops/cpCache.hpp --- a/src/hotspot/share/oops/cpCache.hpp +++ b/src/hotspot/share/oops/cpCache.hpp @@ -1,7 +1,7 @@ /* - * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. @@ -415,11 +415,11 @@ // object index to original constant pool index OopHandle _resolved_references; Array<u2>* _reference_map; - // The narrowOop pointer to the archived resolved_references. Set at CDS dump - // time when caching java heap object is supported. + // The index of the archived resolved_references in the HeapShared::roots() array. + // Set at CDS dump time when caching java heap objects is supported.
- CDS_JAVA_HEAP_ONLY(narrowOop _archived_references;) + CDS_JAVA_HEAP_ONLY(int _archived_references_index;) // Sizing debug_only(friend class ClassVerifier;) // Constructor @@ -442,10 +442,11 @@ void metaspace_pointers_do(MetaspaceClosure* it); MetaspaceObj::Type type() const { return ConstantPoolCacheType; } oop archived_references() NOT_CDS_JAVA_HEAP_RETURN_(NULL); void set_archived_references(oop o) NOT_CDS_JAVA_HEAP_RETURN; + void clear_archived_references() NOT_CDS_JAVA_HEAP_RETURN; inline oop resolved_references(); void set_resolved_references(OopHandle s) { _resolved_references = s; } Array<u2>* reference_map() const { return _reference_map; } void set_reference_map(Array<u2>* o) { _reference_map = o; } diff a/src/hotspot/share/oops/cpCache.inline.hpp b/src/hotspot/share/oops/cpCache.inline.hpp --- a/src/hotspot/share/oops/cpCache.inline.hpp +++ b/src/hotspot/share/oops/cpCache.inline.hpp @@ -87,11 +87,11 @@ const intStack& inverse_index_map, const intStack& invokedynamic_inverse_index_map, const intStack& invokedynamic_references_map) : _length(length), _constant_pool(NULL) { - CDS_JAVA_HEAP_ONLY(_archived_references = narrowOop::null;) + CDS_JAVA_HEAP_ONLY(_archived_references_index = -1;) initialize(inverse_index_map, invokedynamic_inverse_index_map, invokedynamic_references_map); for (int i = 0; i < length; i++) { assert(entry_at(i)->is_f1_null(), "Failed to clear?"); } diff a/src/hotspot/share/oops/instanceMirrorKlass.inline.hpp b/src/hotspot/share/oops/instanceMirrorKlass.inline.hpp --- a/src/hotspot/share/oops/instanceMirrorKlass.inline.hpp +++ b/src/hotspot/share/oops/instanceMirrorKlass.inline.hpp @@ -50,11 +50,17 @@ if (Devirtualizer::do_metadata(closure)) { Klass* klass = java_lang_Class::as_Klass_raw(obj); // We'll get NULL for primitive mirrors. if (klass != NULL) { - if (klass->is_instance_klass() && klass->class_loader_data()->has_class_mirror_holder()) { + if (klass->class_loader_data() == NULL) { + // This is a mirror that belongs to a shared class that has not been loaded yet. + // It's only reachable via HeapShared::roots(). All of its fields should be zero + // so there's no need to scan. + assert(klass->is_shared(), "must be"); + return; + } else if (klass->is_instance_klass() && klass->class_loader_data()->has_class_mirror_holder()) { // A non-strong hidden class or an unsafe anonymous class doesn't have its own class loader, // so when handling the java mirror for the class we need to make sure its class // loader data is claimed, this is done by calling do_cld explicitly. // For non-anonymous classes the call to do_cld is made when the class // loader itself is handled. diff a/src/hotspot/share/oops/klass.cpp b/src/hotspot/share/oops/klass.cpp --- a/src/hotspot/share/oops/klass.cpp +++ b/src/hotspot/share/oops/klass.cpp @@ -201,12 +201,12 @@ // The constructor is also used from CppVtableCloner, // which doesn't zero out the memory before calling the constructor. Klass::Klass(KlassID id) : _id(id), _prototype_header(markWord::prototype()), _shared_class_path_index(-1) { - CDS_ONLY(_shared_class_flags = 0;) - CDS_JAVA_HEAP_ONLY(_archived_mirror = narrowOop::null;) + CDS_ONLY(_shared_class_flags = 0); + CDS_JAVA_HEAP_ONLY(_archived_mirror_index = -1); _primary_supers[0] = this; set_super_check_offset(in_bytes(primary_supers_offset())); } jint Klass::array_layout_helper(BasicType etype) { @@ -571,14 +571,13 @@ // If an exception happened during CDS restore, some of these fields may already be // set.
We leave the class on the CLD list, even if incomplete so that we don't // modify the CLD list outside a safepoint. if (class_loader_data() == NULL) { - // Restore class_loader_data to the null class loader data set_class_loader_data(loader_data); - // Add to null class loader list first before creating the mirror + // Add to class loader list first before creating the mirror // (same order as class file parsing) loader_data->add_class(this); } Handle loader(THREAD, loader_data->class_loader()); @@ -595,11 +594,11 @@ module_entry = ModuleEntryTable::javabase_moduleEntry(); } // Obtain java.lang.Module, if available Handle module_handle(THREAD, ((module_entry != NULL) ? module_entry->module() : (oop)NULL)); - if (this->has_raw_archived_mirror()) { + if (this->has_archived_mirror_index()) { ResourceMark rm(THREAD); log_debug(cds, mirror)("%s has archived mirror", external_name()); if (HeapShared::open_archive_heap_region_mapped()) { bool present = java_lang_Class::restore_archived_mirror(this, loader, module_handle, protection_domain, @@ -610,11 +609,11 @@ } // No archived mirror data log_debug(cds, mirror)("No archived mirror data for %s", external_name()); clear_java_mirror_handle(); - this->clear_has_raw_archived_mirror(); + this->clear_archived_mirror_index(); } // Only recreate it if not present. A previous attempt to restore may have // gotten an OOM later but keep the mirror if it was created. if (java_mirror() == NULL) { @@ -622,25 +621,26 @@ java_lang_Class::create_mirror(this, loader, module_handle, protection_domain, Handle(), CHECK); } } #if INCLUDE_CDS_JAVA_HEAP -// Used at CDS dump time to access the archived mirror. No GC barrier. -oop Klass::archived_java_mirror_raw() { - assert(has_raw_archived_mirror(), "must have raw archived mirror"); - return CompressedOops::decode(_archived_mirror); +oop Klass::archived_java_mirror() { + assert(has_archived_mirror_index(), "must have archived mirror"); + return HeapShared::get_root(_archived_mirror_index); } -narrowOop Klass::archived_java_mirror_raw_narrow() { - assert(has_raw_archived_mirror(), "must have raw archived mirror"); - return _archived_mirror; +void Klass::clear_archived_mirror_index() { + if (_archived_mirror_index >= 0) { + HeapShared::clear_root(_archived_mirror_index); + } + _archived_mirror_index = -1; } -// No GC barrier -void Klass::set_archived_java_mirror_raw(oop m) { +void Klass::set_archived_java_mirror(oop m) { assert(DumpSharedSpaces, "called only during dump time"); - _archived_mirror = CompressedOops::encode(m); + _archived_mirror_index = HeapShared::append_root(m); } #endif // INCLUDE_CDS_JAVA_HEAP Klass* Klass::array_klass_or_null(int rank) { EXCEPTION_MARK; diff a/src/hotspot/share/oops/klass.hpp b/src/hotspot/share/oops/klass.hpp --- a/src/hotspot/share/oops/klass.hpp +++ b/src/hotspot/share/oops/klass.hpp @@ -174,18 +174,15 @@ #if INCLUDE_CDS // Flags of the current shared class. u2 _shared_class_flags; enum { - _has_raw_archived_mirror = 1, _archived_lambda_proxy_is_available = 2 }; #endif - // The _archived_mirror is set at CDS dump time pointing to the cached mirror - // in the open archive heap region when archiving java object is supported.
- CDS_JAVA_HEAP_ONLY(narrowOop _archived_mirror;) + CDS_JAVA_HEAP_ONLY(int _archived_mirror_index;) protected: // Constructor Klass(KlassID id); @@ -260,13 +257,12 @@ // java mirror oop java_mirror() const; oop java_mirror_no_keepalive() const; void set_java_mirror(Handle m); - oop archived_java_mirror_raw() NOT_CDS_JAVA_HEAP_RETURN_(NULL); // no GC barrier - narrowOop archived_java_mirror_raw_narrow() NOT_CDS_JAVA_HEAP_RETURN_(narrowOop::null); // no GC barrier - void set_archived_java_mirror_raw(oop m) NOT_CDS_JAVA_HEAP_RETURN; // no GC barrier + oop archived_java_mirror() NOT_CDS_JAVA_HEAP_RETURN_(NULL); + void set_archived_java_mirror(oop m) NOT_CDS_JAVA_HEAP_RETURN; // Temporary mirror switch used by RedefineClasses void replace_java_mirror(oop mirror); // Set java mirror OopHandle to NULL for CDS @@ -305,21 +301,17 @@ void set_shared_classpath_index(int index) { _shared_class_path_index = index; }; - void set_has_raw_archived_mirror() { - CDS_ONLY(_shared_class_flags |= _has_raw_archived_mirror;) - } - void clear_has_raw_archived_mirror() { - CDS_ONLY(_shared_class_flags &= ~_has_raw_archived_mirror;) - } - bool has_raw_archived_mirror() const { - CDS_ONLY(return (_shared_class_flags & _has_raw_archived_mirror) != 0;) - NOT_CDS(return false;) + bool has_archived_mirror_index() const { + CDS_JAVA_HEAP_ONLY(return _archived_mirror_index >= 0); + NOT_CDS_JAVA_HEAP(return false); } + void clear_archived_mirror_index() NOT_CDS_JAVA_HEAP_RETURN; + void set_lambda_proxy_is_available() { CDS_ONLY(_shared_class_flags |= _archived_lambda_proxy_is_available;) } void clear_lambda_proxy_is_available() { CDS_ONLY(_shared_class_flags &= ~_archived_lambda_proxy_is_available;) @@ -532,11 +524,11 @@ virtual void remove_unshareable_info(); virtual void remove_java_mirror(); bool is_unshareable_info_restored() const { assert(is_shared(), "use this for shared classes only"); - if (has_raw_archived_mirror()) { + if (has_archived_mirror_index()) { - // _java_mirror is not a valid OopHandle but rather an encoded reference in the shared heap + // The archived mirror has not been restored from the heap archive yet, so _java_mirror is not usable return false; } else if (_java_mirror.ptr_raw() == NULL) { return false; } else { diff a/test/hotspot/jtreg/runtime/cds/serviceability/ReplaceCriticalClasses.java b/test/hotspot/jtreg/runtime/cds/serviceability/ReplaceCriticalClasses.java --- a/test/hotspot/jtreg/runtime/cds/serviceability/ReplaceCriticalClasses.java +++ b/test/hotspot/jtreg/runtime/cds/serviceability/ReplaceCriticalClasses.java @@ -170,14 +170,11 @@ agent); if (whitebox) { opts.addSuffix("-XX:+WhiteBoxAPI", "-Xbootclasspath/a:" + ClassFileInstaller.getJarPath("whitebox.jar")); } - if (subgraph) { - opts.addSuffix("-Xlog:cds,cds+heap"); - } - + opts.addSuffix("-Xlog:cds,cds+heap"); opts.addSuffix("ReplaceCriticalClasses", "child", shared, klassName);
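Note: the new VerifyArchivedFields flag is diagnostic, so product builds must unlock it first. A typical invocation to exercise the verification added in heapShared.cpp together with the expanded cds+heap logging (the command line is illustrative; any CDS-enabled program works):

java -XX:+UnlockDiagnosticVMOptions -XX:+VerifyArchivedFields -Xlog:cds+heap=info -version

With a default CDS archive this runs a VM_Verify safepoint operation before and after each archived subgraph is installed (see verify_the_heap() above), printing messages of the form "Verify heap before initializing static field(s) in <class name>".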