diff a/src/hotspot/share/memory/heapShared.cpp b/src/hotspot/share/memory/heapShared.cpp
--- a/src/hotspot/share/memory/heapShared.cpp
+++ b/src/hotspot/share/memory/heapShared.cpp
@@ -46,12 +46,15 @@
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "oops/fieldStreams.inline.hpp"
+#include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
+#include "oops/oopHandle.hpp"
 #include "runtime/fieldDescriptor.inline.hpp"
+#include "runtime/init.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/safepointVerifiers.hpp"
 #include "utilities/bitMap.inline.hpp"
 #if INCLUDE_G1GC
 #include "gc/g1/g1CollectedHeap.hpp"
@@ -102,19 +105,31 @@
 const static int num_open_archive_subgraph_entry_fields =
   sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
 const static int num_fmg_open_archive_subgraph_entry_fields =
   sizeof(fmg_open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);

+static GrowableArrayCHeap<oop, mtClassShared>* _pending_roots = NULL;
+static objArrayOop _dumptime_roots = NULL; // FIXME -- combine this with _runtime_roots??
+static narrowOop _runtime_roots_narrow;
+static OopHandle _runtime_roots;
+
 ////////////////////////////////////////////////////////////////
 //
 // Java heap object archiving support
 //
 ////////////////////////////////////////////////////////////////
 void HeapShared::fixup_mapped_heap_regions() {
   FileMapInfo *mapinfo = FileMapInfo::current_info();
   mapinfo->fixup_mapped_heap_regions();
   set_archive_heap_region_fixed();
+  if (is_mapped()) {
+    _runtime_roots = OopHandle(Universe::vm_global(), HeapShared::materialize_archived_object(_runtime_roots_narrow));
+    if (!MetaspaceShared::use_full_module_graph()) {
+      // Need to remove all the archived java.lang.Module objects from HeapShared::roots().
+      ClassLoaderDataShared::clear_archived_oops();
+    }
+  }
   SystemDictionaryShared::update_archived_mirror_native_pointers();
 }

 unsigned HeapShared::oop_hash(oop const& p) {
   assert(!p->mark().has_bias_pattern(),
@@ -163,10 +178,70 @@
   } else {
     return NULL;
   }
 }

+int HeapShared::append_root(oop obj) {
+  assert(DumpSharedSpaces, "dump-time only");
+
+  // No GC should happen since we aren't scanning _pending_roots.
+  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
+
+  if (_pending_roots == NULL) {
+    _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
+  }
+
+  return _pending_roots->append(obj);
+}
+
+objArrayOop HeapShared::roots() {
+  if (DumpSharedSpaces) {
+    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
+    return _dumptime_roots;
+  } else {
+    assert(UseSharedSpaces, "must be");
+    objArrayOop roots = (objArrayOop)_runtime_roots.resolve();
+    assert(roots != NULL, "should have been initialized");
+    return roots;
+  }
+}
+
+void HeapShared::set_roots(narrowOop roots) {
+  assert(UseSharedSpaces, "runtime only");
+  assert(open_archive_heap_region_mapped(), "must be");
+  _runtime_roots_narrow = roots;
+}
+
+oop HeapShared::get_root(int index, bool clear) {
+  assert(index >= 0, "sanity");
+  if (DumpSharedSpaces) {
+    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
+    assert(_pending_roots != NULL, "sanity");
+    return _pending_roots->at(index);
+  } else {
+    assert(UseSharedSpaces, "must be");
+    assert(!_runtime_roots.is_empty(), "must have loaded shared heap");
+    oop result = roots()->obj_at(index);
+    if (clear) {
+      clear_root(index);
+    }
+    return result;
+  }
+}
+
+void HeapShared::clear_root(int index) {
+  assert(index >= 0, "sanity");
+  assert(UseSharedSpaces, "must be");
+  if (open_archive_heap_region_mapped()) {
+    if (log_is_enabled(Debug, cds, heap)) {
+      oop old = roots()->obj_at(index);
+      log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
+    }
+    roots()->obj_at_put(index, NULL);
+  }
+}
+
 oop HeapShared::archive_heap_object(oop obj, Thread* THREAD) {
   assert(DumpSharedSpaces, "dump-time only");

   oop ao = find_archived_heap_object(obj);
   if (ao != NULL) {
@@ -198,12 +273,15 @@
     DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
     assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x",
            hash_original, hash_archived);

     ArchivedObjectCache* cache = archived_object_cache();
     cache->put(obj, archived_oop);
-    log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT,
-                         p2i(obj), p2i(archived_oop));
+    if (log_is_enabled(Debug, cds, heap)) {
+      ResourceMark rm;
+      log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT " : %s",
+                           p2i(obj), p2i(archived_oop), obj->klass()->external_name());
+    }
   } else {
     log_error(cds, heap)(
       "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
       p2i(obj));
     vm_exit(1);
@@ -255,10 +333,11 @@
       log_info(cds)("Run GC done");
     }
   }
 }

+// Returns an objArray that contains all the roots of the archived objects
 void HeapShared::archive_java_heap_objects(GrowableArray<MemRegion> *closed,
                                            GrowableArray<MemRegion> *open) {
   if (!is_heap_object_archiving_allowed()) {
     log_info(cds)(
       "Archived java heap is not supported as UseG1GC, "
@@ -281,14 +360,10 @@
     copy_closed_archive_heap_objects(closed);

     log_info(cds)("Dumping objects to open archive heap region ...");
     copy_open_archive_heap_objects(open);

-    if (MetaspaceShared::use_full_module_graph()) {
-      ClassLoaderDataShared::init_archived_oops();
-    }
-
     destroy_archived_object_cache();
   }

   G1HeapVerifier::verify_archive_regions();
 }
@@ -333,16 +408,48 @@
     archive_object_subgraphs(fmg_open_archive_subgraph_entry_fields,
                              num_fmg_open_archive_subgraph_entry_fields,
                              false /* is_closed_archive */,
                              true /* is_full_module_graph */,
                              THREAD);
+    ClassLoaderDataShared::init_archived_oops();
   }

+  copy_roots();
+
   G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
                                                    os::vm_allocation_granularity());
 }

+// Copy _pending_roots into an objArray
+void HeapShared::copy_roots() {
+  int length = _pending_roots != NULL ? _pending_roots->length() : 0;
+  int size = objArrayOopDesc::object_size(length);
+  Klass *k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
+  HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size);
+
+  memset(mem, 0, size * BytesPerWord); // Is this correct??
+  {
+    // This is copied from MemAllocator::finish
+    if (UseBiasedLocking) {
+      oopDesc::set_mark(mem, k->prototype_header());
+    } else {
+      oopDesc::set_mark(mem, markWord::prototype());
+    }
+    oopDesc::release_set_klass(mem, k);
+  }
+  {
+    // This is copied from ObjArrayAllocator::initialize
+    arrayOopDesc::set_length(mem, length);
+  }
+
+  _dumptime_roots = (objArrayOop)mem;
+  for (int i = 0; i < length; i++) {
+    _dumptime_roots->obj_at_put(i, _pending_roots->at(i));
+  }
+  log_info(cds)("archived obj roots[%d] = %d words, klass = %p, obj = %p", length, size, k, mem);
+}
+
 void HeapShared::init_narrow_oop_decoding(address base, int shift) {
   _narrow_oop_base = base;
   _narrow_oop_shift = shift;
 }

@@ -378,14 +485,14 @@
 void KlassSubGraphInfo::add_subgraph_entry_field(
     int static_field_offset, oop v, bool is_closed_archive) {
   assert(DumpSharedSpaces, "dump time only");
   if (_subgraph_entry_fields == NULL) {
     _subgraph_entry_fields =
-      new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, mtClass);
+      new(ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, mtClass);
   }
-  _subgraph_entry_fields->append((juint)static_field_offset);
-  _subgraph_entry_fields->append(CompressedOops::narrow_oop_value(v));
+  _subgraph_entry_fields->append(static_field_offset);
+  _subgraph_entry_fields->append(HeapShared::append_root(v));
+  _subgraph_entry_fields->append(is_closed_archive ? 1 : 0);
 }

 // Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
 // Only objects of boot classes can be included in sub-graph.
@@ -441,26 +548,52 @@
       log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
     }
   }

   _subgraph_object_klasses->append_if_missing(relocated_k);
+  _has_non_early_klasses |= is_non_early_klass(orig_k);
+}
+
+bool KlassSubGraphInfo::is_non_early_klass(Klass* k) {
+  if (k->is_objArray_klass()) {
+    k = ObjArrayKlass::cast(k)->bottom_klass();
+  }
+  if (k->is_instance_klass()) {
+    if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) {
+      ResourceMark rm;
+      log_info(cds, heap)("non-early: %s", k->external_name());
+      return true;
+    } else {
+      return false;
+    }
+  } else {
+    return false;
+  }
 }

 // Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
 void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
   _k = info->klass();
   _entry_field_records = NULL;
   _subgraph_object_klasses = NULL;
   _is_full_module_graph = info->is_full_module_graph();
+  _has_non_early_klasses = info->has_non_early_klasses();
+
+  if (_has_non_early_klasses) {
+    ResourceMark rm;
+    log_info(cds, heap)(
+      "Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled",
+      _k->external_name());
+  }

   // populate the entry fields
-  GrowableArray<juint>* entry_fields = info->subgraph_entry_fields();
+  GrowableArray<int>* entry_fields = info->subgraph_entry_fields();
   if (entry_fields != NULL) {
     int num_entry_fields = entry_fields->length();
     assert(num_entry_fields % 3 == 0, "sanity");
     _entry_field_records =
-      MetaspaceShared::new_ro_array<juint>(num_entry_fields);
+      MetaspaceShared::new_ro_array<int>(num_entry_fields);
     for (int i = 0 ; i < num_entry_fields; i++) {
       _entry_field_records->at_put(i, entry_fields->at(i));
     }
   }
@@ -529,96 +662,196 @@
 void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) {
   _run_time_subgraph_info_table.serialize_header(soc);
 }

+static void verify_the_heap(Klass* k, const char* which) {
+  if (VerifyArchivedFields) {
+    ResourceMark rm;
+    log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
+                        which, k->external_name());
+    VM_Verify verify_op;
+    VMThread::execute(&verify_op);
+#if 0
+    // For some reason, this causes jtreg to lock up with
+    // "jtreg -vmoptions:-XX:+VerifyArchivedFields HelloTest.java"
+    if (is_init_completed()) {
+      FlagSetting fs1(VerifyBeforeGC, true);
+      FlagSetting fs2(VerifyDuringGC, true);
+      FlagSetting fs3(VerifyAfterGC, true);
+      Universe::heap()->collect(GCCause::_java_lang_system_gc);
+    }
+#endif
+  }
+}
+
+// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
+// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
+//
+// Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
+// ClassFileLoadHook is enabled, it's possible for those classes to be dynamically replaced. In
+// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
+void HeapShared::resolve_classes(TRAPS) {
+  if (!is_mapped()) {
+    return; // nothing to do
+  }
+  resolve_classes_for_subgraphs(closed_archive_subgraph_entry_fields,
+                                num_closed_archive_subgraph_entry_fields,
+                                CHECK);
+  resolve_classes_for_subgraphs(open_archive_subgraph_entry_fields,
+                                num_open_archive_subgraph_entry_fields,
+                                CHECK);
+  resolve_classes_for_subgraphs(fmg_open_archive_subgraph_entry_fields,
+                                num_fmg_open_archive_subgraph_entry_fields,
+                                CHECK);
+}
+
+void HeapShared::resolve_classes_for_subgraphs(ArchivableStaticFieldInfo fields[],
+                                               int num, TRAPS) {
+  for (int i = 0; i < num; i++) {
+    ArchivableStaticFieldInfo* info = &fields[i];
+    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
+    InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
+    assert(k != NULL && k->is_shared_boot_class(), "sanity");
+    resolve_classes_for_subgraph_of(k, CHECK);
+  }
+}
+
+void HeapShared::resolve_classes_for_subgraph_of(Klass* k, TRAPS) {
+  const ArchivedKlassSubGraphInfoRecord* record = resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    CLEAR_PENDING_EXCEPTION;
+  }
+  if (record == NULL) {
+    clear_archived_roots_of(k);
+  }
+}
+
 void HeapShared::initialize_from_archived_subgraph(Klass* k, TRAPS) {
-  if (!open_archive_heap_region_mapped()) {
+  if (!is_mapped()) {
     return; // nothing to do
   }
+
+  const ArchivedKlassSubGraphInfoRecord* record =
+    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);
+
+  if (HAS_PENDING_EXCEPTION) {
+    CLEAR_PENDING_EXCEPTION;
+    // None of the field values will be set if there was an exception when initializing the classes.
+    // The java code will not see any of the archived objects in the
+    // subgraphs referenced from k in this case.
+    return;
+  }
+
+  if (record != NULL) {
+    init_archived_fields_for(k, record, THREAD);
+  }
+}
+
+const ArchivedKlassSubGraphInfoRecord*
+HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) {
   assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

   unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(k);
   const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

   // Initialize from archived data. Currently this is done only
   // during VM initialization time. No lock is needed.
   if (record != NULL) {
     if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) {
-      return;
+      if (log_is_enabled(Info, cds, heap)) {
+        ResourceMark rm;
+        log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled",
+                            k->external_name());
+      }
+      return NULL;
     }

-    int i;
+    if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
+      if (log_is_enabled(Info, cds, heap)) {
+        ResourceMark rm;
+        log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
+                            k->external_name());
+      }
+      return NULL;
+    }
+
+    resolve_or_init(k, do_init, CHECK_NULL);
+
     // Load/link/initialize the klasses of the objects in the subgraph.
     // NULL class loader is used.
     Array<Klass*>* klasses = record->subgraph_object_klasses();
     if (klasses != NULL) {
-      for (i = 0; i < klasses->length(); i++) {
-        Klass* obj_k = klasses->at(i);
-        Klass* resolved_k = SystemDictionary::resolve_or_null(
-                                              (obj_k)->name(), THREAD);
-        if (resolved_k != obj_k) {
-          assert(!SystemDictionary::is_well_known_klass(resolved_k),
-                 "shared well-known classes must not be replaced by JVMTI ClassFileLoadHook");
-          ResourceMark rm(THREAD);
-          log_info(cds, heap)("Failed to load subgraph because %s was not loaded from archive",
-                              resolved_k->external_name());
-          return;
-        }
-        if ((obj_k)->is_instance_klass()) {
-          InstanceKlass* ik = InstanceKlass::cast(obj_k);
-          ik->initialize(THREAD);
-        } else if ((obj_k)->is_objArray_klass()) {
-          ObjArrayKlass* oak = ObjArrayKlass::cast(obj_k);
-          oak->initialize(THREAD);
-        }
+      for (int i = 0; i < klasses->length(); i++) {
+        resolve_or_init(klasses->at(i), do_init, CHECK_NULL);
       }
     }
+  }

-    if (HAS_PENDING_EXCEPTION) {
-      CLEAR_PENDING_EXCEPTION;
-      // None of the field value will be set if there was an exception.
-      // The java code will not see any of the archived objects in the
-      // subgraphs referenced from k in this case.
-      return;
+  return record;
+}
+
+void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
+  if (!do_init) {
+    if (k->class_loader_data() == NULL) {
+      Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
+      assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
+    }
+  } else {
+    assert(k->class_loader_data() != NULL, "must have been resolved by HeapShared::resolve_classes");
+    if (k->is_instance_klass()) {
+      InstanceKlass* ik = InstanceKlass::cast(k);
+      ik->initialize(CHECK);
+    } else if (k->is_objArray_klass()) {
+      ObjArrayKlass* oak = ObjArrayKlass::cast(k);
+      oak->initialize(CHECK);
+    }
+  }
+}
+
+void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record, TRAPS) {
+  verify_the_heap(k, "before");
+
+  // Load the subgraph entry fields from the record and store them back to
+  // the corresponding fields within the mirror.
+  oop m = k->java_mirror();
+  Array<int>* entry_field_records = record->entry_field_records();
+  if (entry_field_records != NULL) {
+    int efr_len = entry_field_records->length();
+    assert(efr_len % 3 == 0, "sanity");
+    for (int i = 0; i < efr_len; i += 3) {
+      int field_offset = entry_field_records->at(i);
+      int root_index = entry_field_records->at(i+1);
+      int is_closed_archive = entry_field_records->at(i+2);
+      oop v = get_root(root_index, /*clear=*/true);
+      m->obj_field_put(field_offset, v);
+      log_debug(cds, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
     }

-    // Load the subgraph entry fields from the record and store them back to
-    // the corresponding fields within the mirror.
-    oop m = k->java_mirror();
-    Array<juint>* entry_field_records = record->entry_field_records();
+    // Done. Java code can see the archived sub-graphs referenced from k's
+    // mirror after this point.
+    if (log_is_enabled(Info, cds, heap)) {
+      ResourceMark rm;
+      log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s",
+                          k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "");
+    }
+  }
+
+  verify_the_heap(k, "after ");
+}
+
+void HeapShared::clear_archived_roots_of(Klass* k) {
+  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(k);
+  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
+  if (record != NULL) {
+    Array<int>* entry_field_records = record->entry_field_records();
     if (entry_field_records != NULL) {
       int efr_len = entry_field_records->length();
       assert(efr_len % 3 == 0, "sanity");
-      for (i = 0; i < efr_len;) {
-        int field_offset = entry_field_records->at(i);
-        narrowOop nv = CompressedOops::narrow_oop_cast(entry_field_records->at(i+1));
-        int is_closed_archive = entry_field_records->at(i+2);
-        oop v;
-        if (is_closed_archive == 0) {
-          // It's an archived object in the open archive heap regions, not shared.
-          // The object refereced by the field becomes 'known' by GC from this
-          // point. All objects in the subgraph reachable from the object are
-          // also 'known' by GC.
-          v = materialize_archived_object(nv);
-        } else {
-          // Shared object in the closed archive heap regions. Decode directly.
-          assert(!CompressedOops::is_null(nv), "shared object is null");
-          v = HeapShared::decode_from_archive(nv);
-        }
-        m->obj_field_put(field_offset, v);
-        i += 3;
-
-        log_debug(cds, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
-      }
-
-      // Done. Java code can see the archived sub-graphs referenced from k's
-      // mirror after this point.
-      if (log_is_enabled(Info, cds, heap)) {
-        ResourceMark rm;
-        log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT,
-                            k->external_name(), p2i(k));
+      for (int i = 0; i < efr_len; i += 3) {
+        int root_index = entry_field_records->at(i+1);
+        clear_root(root_index);
       }
     }
   }
 }
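
Editorial sketch (not part of the patch): the root handling added above can be read as a two-phase protocol. At dump time each archived entry-point oop is registered once via HeapShared::append_root() and only its integer index is written into the archived record; copy_roots() then materializes the pending list as the _dumptime_roots objArray. At run time the index is resolved against the single HeapShared::roots() objArray and the slot is cleared so the array no longer keeps the object reachable. The condensed pseudocode below is assembled from the functions in this diff and is illustrative only; it is not additional patch content and does not compile outside HotSpot.

    // Dump time (VM thread): KlassSubGraphInfo::add_subgraph_entry_field() now records
    // (field_offset, root_index, is_closed_archive) triples instead of narrowOops.
    int root_index = HeapShared::append_root(v);            // v = archived entry-point oop
    _subgraph_entry_fields->append(static_field_offset);
    _subgraph_entry_fields->append(root_index);
    _subgraph_entry_fields->append(is_closed_archive ? 1 : 0);

    // Run time (after the open archive heap region is mapped): init_archived_fields_for()
    // walks the triples, fetches each oop from HeapShared::roots() by index, and stores it
    // into the class mirror; get_root(..., /*clear=*/true) also nulls out the root slot.
    oop v = HeapShared::get_root(root_index, /*clear=*/true); // roots()->obj_at(root_index)
    k->java_mirror()->obj_field_put(field_offset, v);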