
src/hotspot/share/memory/heapShared.hpp

*** 62,18 ***
    // object sub-graphs can be accessed at runtime.
    GrowableArray<Klass*>* _subgraph_object_klasses;
    // A list of _k's static fields as the entry points of archived sub-graphs.
    // For each entry field, it is a tuple of field_offset, field_value and
    // is_closed_archive flag.
!   GrowableArray<juint>*  _subgraph_entry_fields;
  
    bool _is_full_module_graph;
   public:
    KlassSubGraphInfo(Klass* k, bool is_full_module_graph) :
      _k(k),  _subgraph_object_klasses(NULL),
      _subgraph_entry_fields(NULL),
!     _is_full_module_graph(is_full_module_graph) {}
    ~KlassSubGraphInfo() {
      if (_subgraph_object_klasses != NULL) {
        delete _subgraph_object_klasses;
      }
      if (_subgraph_entry_fields != NULL) {
--- 62,27 ---
    // object sub-graphs can be accessed at runtime.
    GrowableArray<Klass*>* _subgraph_object_klasses;
    // A list of _k's static fields as the entry points of archived sub-graphs.
    // For each entry field, it is a tuple of field_offset, field_value and
    // is_closed_archive flag.
!   GrowableArray<int>* _subgraph_entry_fields;
  
+   // Does this KlassSubGraphInfo belong to the archived full module graph?
    bool _is_full_module_graph;
+ 
+   // Does this KlassSubGraphInfo reference any classes that were loaded while
+   // JvmtiExport::is_early_phase() != true? If so, this KlassSubGraphInfo cannot be
+   // used at runtime if JVMTI ClassFileLoadHook is enabled.
+   bool _has_non_early_klasses;
+   static bool is_non_early_klass(Klass* k);
+ 
   public:
    KlassSubGraphInfo(Klass* k, bool is_full_module_graph) :
      _k(k),  _subgraph_object_klasses(NULL),
      _subgraph_entry_fields(NULL),
!     _is_full_module_graph(is_full_module_graph),
+     _has_non_early_klasses(false) {}
    ~KlassSubGraphInfo() {
      if (_subgraph_object_klasses != NULL) {
        delete _subgraph_object_klasses;
      }
      if (_subgraph_entry_fields != NULL) {

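The new _has_non_early_klasses flag is only useful if it gets set while the sub-graph is recorded at dump time. Below is a minimal sketch of that flow, assuming the flag is updated from add_subgraph_object_klass(); the body is illustrative, not the actual heapShared.cpp code.

    // Hedged sketch (not the real implementation): mark the record as soon as
    // any class in the sub-graph turns out to have been loaded after the early
    // JVMTI phase. Allocation of _subgraph_object_klasses is elided here.
    void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k, Klass* relocated_k) {
      assert(_subgraph_object_klasses != NULL, "allocated by the real implementation");
      if (is_non_early_klass(orig_k)) {
        // Such a record cannot be used at runtime when the JVMTI
        // ClassFileLoadHook is enabled.
        _has_non_early_klasses = true;
      }
      _subgraph_object_klasses->append_if_missing(relocated_k);
    }
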
*** 83,45 ***
  
    Klass* klass()            { return _k; }
    GrowableArray<Klass*>* subgraph_object_klasses() {
      return _subgraph_object_klasses;
    }
!   GrowableArray<juint>*  subgraph_entry_fields() {
      return _subgraph_entry_fields;
    }
    void add_subgraph_entry_field(int static_field_offset, oop v,
                                  bool is_closed_archive);
    void add_subgraph_object_klass(Klass *orig_k, Klass *relocated_k);
    int num_subgraph_object_klasses() {
      return _subgraph_object_klasses == NULL ? 0 :
             _subgraph_object_klasses->length();
    }
    bool is_full_module_graph() const { return _is_full_module_graph; }
  };
  
  // An archived record of object sub-graphs reachable from static
  // fields within _k's mirror. The record is reloaded from the archive
  // at runtime.
  class ArchivedKlassSubGraphInfoRecord {
   private:
    Klass* _k;
    bool _is_full_module_graph;
  
    // contains pairs of field offset and value for each subgraph entry field
!   Array<juint>* _entry_field_records;
  
    // klasses of objects in archived sub-graphs referenced from the entry points
    // (static fields) in the containing class
    Array<Klass*>* _subgraph_object_klasses;
   public:
    ArchivedKlassSubGraphInfoRecord() :
      _k(NULL), _entry_field_records(NULL), _subgraph_object_klasses(NULL) {}
    void init(KlassSubGraphInfo* info);
    Klass* klass() const { return _k; }
!   Array<juint>*  entry_field_records() const { return _entry_field_records; }
    Array<Klass*>* subgraph_object_klasses() const { return _subgraph_object_klasses; }
    bool is_full_module_graph() const { return _is_full_module_graph; }
  };
  #endif // INCLUDE_CDS_JAVA_HEAP
  
  class HeapShared: AllStatic {
    friend class VerifySharedOopClosure;
--- 92,48 ---
  
    Klass* klass()            { return _k; }
    GrowableArray<Klass*>* subgraph_object_klasses() {
      return _subgraph_object_klasses;
    }
!   GrowableArray<int>* subgraph_entry_fields() {
      return _subgraph_entry_fields;
    }
    void add_subgraph_entry_field(int static_field_offset, oop v,
                                  bool is_closed_archive);
    void add_subgraph_object_klass(Klass *orig_k, Klass *relocated_k);
    int num_subgraph_object_klasses() {
      return _subgraph_object_klasses == NULL ? 0 :
             _subgraph_object_klasses->length();
    }
    bool is_full_module_graph() const { return _is_full_module_graph; }
+   bool has_non_early_klasses() const { return _has_non_early_klasses; }
  };
  
  // An archived record of object sub-graphs reachable from static
  // fields within _k's mirror. The record is reloaded from the archive
  // at runtime.
  class ArchivedKlassSubGraphInfoRecord {
   private:
    Klass* _k;
    bool _is_full_module_graph;
+   bool _has_non_early_klasses;
  
    // contains pairs of field offset and value for each subgraph entry field
!   Array<int>* _entry_field_records;
  
    // klasses of objects in archived sub-graphs referenced from the entry points
    // (static fields) in the containing class
    Array<Klass*>* _subgraph_object_klasses;
   public:
    ArchivedKlassSubGraphInfoRecord() :
      _k(NULL), _entry_field_records(NULL), _subgraph_object_klasses(NULL) {}
    void init(KlassSubGraphInfo* info);
    Klass* klass() const { return _k; }
!   Array<int>* entry_field_records() const { return _entry_field_records; }
    Array<Klass*>* subgraph_object_klasses() const { return _subgraph_object_klasses; }
    bool is_full_module_graph() const { return _is_full_module_graph; }
+   bool has_non_early_klasses() const { return _has_non_early_klasses; }
  };
  #endif // INCLUDE_CDS_JAVA_HEAP
  
  class HeapShared: AllStatic {
    friend class VerifySharedOopClosure;

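With _entry_field_records changed from Array<juint> to Array<int>, the "value" half of each (field_offset, value) pair is assumed here to be a root index usable with HeapShared::get_root() (declared further down). A hedged sketch of how a runtime consumer might walk the records under that assumption; install_entry_fields() is illustrative, not the actual init_archived_fields_for() body.

    // Hedged sketch: install archived static-field values into the mirror of
    // the record's class, reading each value through the roots array.
    static void install_entry_fields(const ArchivedKlassSubGraphInfoRecord* record) {
      Array<int>* entries = record->entry_field_records();
      if (entries == NULL) {
        return;
      }
      oop mirror = record->klass()->java_mirror();
      for (int i = 0; i < entries->length(); i += 2) {
        int field_offset = entries->at(i);       // static field offset in the mirror
        int root_index   = entries->at(i + 1);   // assumed: index into HeapShared::roots()
        mirror->obj_field_put(field_offset, HeapShared::get_root(root_index));
      }
    }
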
*** 252,11 ***
  
    static bool has_been_seen_during_subgraph_recording(oop obj);
    static void set_has_been_seen_during_subgraph_recording(oop obj);
  
    static void check_module_oop(oop orig_module_obj);
! 
   public:
    static void reset_archived_object_states(TRAPS);
    static void create_archived_object_cache() {
      _archived_object_cache =
        new (ResourceObj::C_HEAP, mtClass)ArchivedObjectCache();
--- 264,20 ---
  
    static bool has_been_seen_during_subgraph_recording(oop obj);
    static void set_has_been_seen_during_subgraph_recording(oop obj);
  
    static void check_module_oop(oop orig_module_obj);
!   static void copy_roots();
+ 
+   static void resolve_classes_for_subgraphs(ArchivableStaticFieldInfo fields[],
+                                             int num, TRAPS);
+   static void resolve_classes_for_subgraph_of(Klass* k, TRAPS);
+   static void clear_archived_roots_of(Klass* k);
+   static const ArchivedKlassSubGraphInfoRecord*
+                resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS);
+   static void resolve_or_init(Klass* k, bool do_init, TRAPS);
+   static void init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record, TRAPS);
   public:
    static void reset_archived_object_states(TRAPS);
    static void create_archived_object_cache() {
      _archived_object_cache =
        new (ResourceObj::C_HEAP, mtClass)ArchivedObjectCache();

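The new private helpers split sub-graph handling into a "resolve" step (load the referenced classes only) and an "init" step (also install the archived field values), both funneled through resolve_or_init_classes_for_subgraph_of() with a do_init flag. A hedged sketch of the resolve-only path; the real bodies live in heapShared.cpp.

    // Hedged sketch: resolution is best-effort. If the sub-graph record cannot
    // be used, drop its roots so the archived objects are no longer pinned.
    void HeapShared::resolve_classes_for_subgraph_of(Klass* k, TRAPS) {
      const ArchivedKlassSubGraphInfoRecord* record =
        resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        CLEAR_PENDING_EXCEPTION;   // a failure just means the sub-graph won't be used
      }
      if (record == NULL) {
        clear_archived_roots_of(k);
      }
    }
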
*** 293,10 ***
--- 314,36 ---
                                              bool is_closed_archive,
                                              TRAPS);
  
    static ResourceBitMap calculate_oopmap(MemRegion region);
    static void add_to_dumped_interned_strings(oop string);
+ 
+   // We use the HeapShared::roots() array to make sure that objects stored in the
+   // archived heap regions are not prematurely collected. These roots include:
+   //
+   //    - mirrors of classes that have not yet been loaded.
+   //    - ConstantPool::resolved_references() of classes that have not yet been loaded.
+   //    - ArchivedKlassSubGraphInfoRecords that have not been initialized.
+   //    - java.lang.Module objects that have not yet been added to the module graph.
+   //
+   // When a mirror M becomes referenced by a newly loaded class K, M will be removed
+   // from HeapShared::roots() via clear_root(), and K will be responsible for
+   // keeping M alive.
+   //
+   // Other types of roots are also cleared similarly when they become referenced.
+ 
+   // Dump-time only. Returns the index of the root, which can be used at run time to read
+   // the root using get_root(index, ...).
+   static int append_root(oop obj);
+ 
+   // Dump-time and runtime
+   static objArrayOop roots();
+   static oop get_root(int index, bool clear=false);
+ 
+   // Run-time only
+   static void set_roots(narrowOop roots);
+   static void clear_root(int index);
  #endif // INCLUDE_CDS_JAVA_HEAP
  
   public:
    static void run_full_gc_in_vm_thread() NOT_CDS_JAVA_HEAP_RETURN;
  

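A hedged usage sketch of the roots() API documented above. The two helper functions are illustrative stand-ins for dump-time and runtime callers, not part of HeapShared.

    // Dump time: pin an archived object via the roots array and return the
    // index, which is what gets stored in the archive.
    static int pin_at_dump_time(oop archived_obj) {
      return HeapShared::append_root(archived_obj);
    }

    // Run time: read the pinned object and clear its slot in one step, on the
    // assumption that the caller is about to reference it from a loaded class
    // and will keep it alive from now on.
    static oop take_at_run_time(int index) {
      return HeapShared::get_root(index, /*clear=*/true);
    }
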
*** 325,15 ***
--- 372,19 ---
    }
    static bool open_archive_heap_region_mapped() {
      CDS_JAVA_HEAP_ONLY(return _open_archive_heap_region_mapped);
      NOT_CDS_JAVA_HEAP_RETURN_(false);
    }
+   static bool is_mapped() {
+     return closed_archive_heap_region_mapped() && open_archive_heap_region_mapped();
+   }
  
    static void fixup_mapped_heap_regions() NOT_CDS_JAVA_HEAP_RETURN;
  
    inline static bool is_archived_object(oop p) NOT_CDS_JAVA_HEAP_RETURN_(false);
  
+   static void resolve_classes(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
    static void initialize_from_archived_subgraph(Klass* k, TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
  
    // NarrowOops stored in the CDS archive may use a different encoding scheme
    // than CompressedOops::{base,shift} -- see FileMapInfo::map_heap_regions_impl.
    // To decode them, do not use CompressedOops::decode_not_null. Use this
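A hedged sketch of a typical caller-side guard built on the new is_mapped() and initialize_from_archived_subgraph() entry points from the last hunk; the function name below is illustrative.

    // Hedged sketch: archived heap objects are only usable when both the closed
    // and the open archive heap regions were mapped, so is_mapped() gates any
    // per-class work.
    static void maybe_init_from_archive(Klass* k, TRAPS) {
      if (!HeapShared::is_mapped()) {
        return;   // no archived java heap objects available in this run
      }
      // Installs the archived static-field sub-graphs of k.
      HeapShared::initialize_from_archived_subgraph(k, CHECK);
    }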