/*
 * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "logging/log.hpp"
#include "memory/archiveUtils.inline.hpp"
#include "memory/dynamicArchive.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.hpp"
#include "oops/objArrayKlass.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/bitMap.inline.hpp"

#ifndef O_BINARY       // if defined (Win32) use binary files.
#define O_BINARY 0     // otherwise do nothing.
#endif

class DynamicArchiveBuilder : ResourceObj {
  static unsigned my_hash(const address& a) {
    return primitive_hash<address>(a);
  }
  static bool my_equals(const address& a0, const address& a1) {
    return primitive_equals<address>(a0, a1);
  }
  typedef ResourceHashtable<
      address, address,
      DynamicArchiveBuilder::my_hash,   // solaris compiler doesn't like: primitive_hash<address>
      DynamicArchiveBuilder::my_equals, // solaris compiler doesn't like: primitive_equals<address>
      16384, ResourceObj::C_HEAP> RelocationTable;
  RelocationTable _new_loc_table;

  static intx _buffer_to_target_delta;

  DumpRegion* _current_dump_space;

  static size_t reserve_alignment() {
    return os::vm_allocation_granularity();
  }

  static const int _total_dump_regions = 3;
  int _num_dump_regions_used;

public:
  void mark_pointer(address* ptr_loc) {
    ArchivePtrMarker::mark_pointer(ptr_loc);
  }

  DumpRegion* current_dump_space() const {
    return _current_dump_space;
  }

  bool is_in_buffer_space(address p) const {
    return (_alloc_bottom <= p && p < (address)current_dump_space()->top());
  }

  template <typename T> bool is_in_target_space(T target_obj) const {
    address buff_obj = address(target_obj) - _buffer_to_target_delta;
    return is_in_buffer_space(buff_obj);
  }

  template <typename T> bool is_in_buffer_space(T obj) const {
    return is_in_buffer_space(address(obj));
  }

  template <typename T> T to_target_no_check(T obj) const {
    return (T)(address(obj) + _buffer_to_target_delta);
  }

  template <typename T> T to_target(T obj) const {
    assert(is_in_buffer_space(obj), "must be");
    return (T)(address(obj) + _buffer_to_target_delta);
  }
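  // A worked example for to_target(), with hypothetical addresses: if the
  // output buffer was reserved at 0x00007f8000000000 and the archive's target
  // (runtime) location is 0x0000000801000000, then
  //   _buffer_to_target_delta == 0x0000000801000000 - 0x00007f8000000000
  // and to_target((Symbol*)0x00007f8000000040) == (Symbol*)0x0000000801000040,
  // i.e., the address the copied Symbol will have once the archive is mapped.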

  template <typename T> T get_new_loc(T obj) {
    address* pp = _new_loc_table.get((address)obj);
    if (pp == NULL) {
      // Excluded klasses are not copied
      return NULL;
    } else {
      return (T)*pp;
    }
  }

  address get_new_loc(MetaspaceClosure::Ref* ref) {
    return get_new_loc(ref->obj());
  }

  template <typename T> bool has_new_loc(T obj) {
    address* pp = _new_loc_table.get((address)obj);
    return pp != NULL;
  }

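  // Comparator used when sorting Method arrays for the dynamic archive.
  // Symbol::fast_compare() relies on a stable, address-based ordering of
  // Symbols, so a Symbol that still lives in the output buffer must first be
  // translated to its target address before it can be compared against
  // Symbols in the base archive. A sketch with hypothetical addresses:
  //   a_name (base archive, already at its runtime address) == 0x0000000800123450
  //   b_name (output buffer)                                == 0x00007f8000000040
  //   b_name + _buffer_to_target_delta                      == 0x0000000801000040
  // After translation, both operands are in the same (runtime) address space,
  // so the resulting order remains valid when the archive is mapped.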
  static int dynamic_dump_method_comparator(Method* a, Method* b) {
    Symbol* a_name = a->name();
    Symbol* b_name = b->name();

    if (a_name == b_name) {
      return 0;
    }

    if (!MetaspaceShared::is_in_shared_metaspace(a_name)) {
      // a_name points to a Symbol in the top archive.
      // When this method is called, a_name is still pointing to the output space.
      // Translate it to point to the target space, so that it can be compared with
      // Symbols in the base archive.
      a_name = (Symbol*)(address(a_name) + _buffer_to_target_delta);
    }
    if (!MetaspaceShared::is_in_shared_metaspace(b_name)) {
      b_name = (Symbol*)(address(b_name) + _buffer_to_target_delta);
    }

    return a_name->fast_compare(b_name);
  }

protected:
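  // How a reference discovered during iteration is treated (see follow_ref()):
  //   make_a_copy - the object is copied into the output buffer;
  //   point_to_it - the object already lives in the base archive, so we keep
  //                 pointing at the existing copy;
  //   set_to_null - the object is excluded from the dynamic archive (excluded
  //                 classes, array klasses, MethodData), so references to it
  //                 are cleared.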
  enum FollowMode {
    make_a_copy, point_to_it, set_to_null
  };

public:
  void copy(MetaspaceClosure::Ref* ref, bool read_only) {
    int bytes = ref->size() * BytesPerWord;
    address old_obj = ref->obj();
    address new_obj = copy_impl(ref, read_only, bytes);

    assert(new_obj != NULL, "must be");
    assert(new_obj != old_obj, "must be");
    bool isnew = _new_loc_table.put(old_obj, new_obj);
    assert(isnew, "must be");
  }

  // Make a shallow copy of each eligible MetaspaceObj into the buffer.
  class ShallowCopier: public UniqueMetaspaceClosure {
    DynamicArchiveBuilder* _builder;
    bool _read_only;
  public:
    ShallowCopier(DynamicArchiveBuilder* shuffler, bool read_only)
      : _builder(shuffler), _read_only(read_only) {}

    virtual bool do_unique_ref(Ref* orig_obj, bool read_only) {
      // This method gets called on each *original* object
      // reachable from _builder->iterate_roots(). It is called
      // exactly once for each orig_obj.
      FollowMode mode = _builder->follow_ref(orig_obj);

      if (mode == point_to_it) {
        if (read_only == _read_only) {
          log_debug(cds, dynamic)("ptr : " PTR_FORMAT " %s", p2i(orig_obj->obj()),
                                  MetaspaceObj::type_name(orig_obj->msotype()));
          address p = orig_obj->obj();
          bool isnew = _builder->_new_loc_table.put(p, p);
          assert(isnew, "must be");
        }
        return false;
      }

      if (mode == set_to_null) {
        log_debug(cds, dynamic)("nul : " PTR_FORMAT " %s", p2i(orig_obj->obj()),
                                MetaspaceObj::type_name(orig_obj->msotype()));
        return false;
      }

      if (read_only == _read_only) {
        // Make a shallow copy of orig_obj in a buffer (maintained
        // by copy_impl() of DynamicArchiveBuilder).
        _builder->copy(orig_obj, read_only);
      }
      return true;
    }
  };

  // Relocate all embedded pointer fields within a MetaspaceObj's shallow copy
  class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
    DynamicArchiveBuilder* _builder;
  public:
    ShallowCopyEmbeddedRefRelocator(DynamicArchiveBuilder* shuffler)
      : _builder(shuffler) {}

    // This method gets called on each *original* object reachable
    // from _builder->iterate_roots(). It is called exactly once
    // for each orig_obj.
    virtual bool do_unique_ref(Ref* orig_ref, bool read_only) {
      FollowMode mode = _builder->follow_ref(orig_ref);

      if (mode == point_to_it) {
        // We did not make a copy of this object
        // and we have nothing to update
        assert(_builder->get_new_loc(orig_ref) == NULL ||
               _builder->get_new_loc(orig_ref) == orig_ref->obj(), "must be");
        return false;
      }

      if (mode == set_to_null) {
        // We did not make a copy of this object
        // and we have nothing to update
        assert(!_builder->has_new_loc(orig_ref->obj()), "must not be copied or pointed to");
        return false;
      }

      // - orig_obj points to the original object.
      // - new_obj points to the shallow copy (created by ShallowCopier)
      //   of orig_obj. new_obj is NULL if the orig_obj is excluded
      address orig_obj = orig_ref->obj();
      address new_obj  = _builder->get_new_loc(orig_ref);

      assert(new_obj != orig_obj, "must be");
#ifdef ASSERT
      if (new_obj == NULL) {
        if (orig_ref->msotype() == MetaspaceObj::ClassType) {
          Klass* k = (Klass*)orig_obj;
          assert(k->is_instance_klass() &&
                 SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(k)),
                 "orig_obj must be excluded Class");
        }
      }
#endif

      log_debug(cds, dynamic)("Relocating " PTR_FORMAT " %s", p2i(new_obj),
                              MetaspaceObj::type_name(orig_ref->msotype()));
      if (new_obj != NULL) {
        EmbeddedRefUpdater updater(_builder, orig_obj, new_obj);
        orig_ref->metaspace_pointers_do(&updater);
      }

      return true; // keep recursing until every object is visited exactly once.
    }

    virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) {
      assert(type == _method_entry_ref, "only special type allowed for now");
      address obj = ref->obj();
      address new_obj = _builder->get_new_loc(ref);
      size_t offset = pointer_delta(p, obj, sizeof(u1));
      intptr_t* new_p = (intptr_t*)(new_obj + offset);
      assert(*p == *new_p, "must be a copy");
      ArchivePtrMarker::mark_pointer((address*)new_p);
    }
  };

  class EmbeddedRefUpdater: public MetaspaceClosure {
    DynamicArchiveBuilder* _builder;
    address _orig_obj;
    address _new_obj;
  public:
    EmbeddedRefUpdater(DynamicArchiveBuilder* shuffler, address orig_obj, address new_obj) :
      _builder(shuffler), _orig_obj(orig_obj), _new_obj(new_obj) {}

    // This method gets called once for each pointer field F of orig_obj.
    // We update new_obj->F to point to the new location of orig_obj->F.
    //
    // Example: Klass*  0x100 is copied to 0x400
    //          Symbol* 0x200 is copied to 0x500
    //
    // Let orig_obj == 0x100; and
    //     new_obj  == 0x400; and
    //     ((Klass*)orig_obj)->_name == 0x200;
    // Then this function effectively assigns
    //     ((Klass*)new_obj)->_name = 0x500;
    virtual bool do_ref(Ref* ref, bool read_only) {
      address new_pointee = NULL;

      if (ref->not_null()) {
        address old_pointee = ref->obj();

        FollowMode mode = _builder->follow_ref(ref);
        if (mode == point_to_it) {
          new_pointee = old_pointee;
        } else if (mode == set_to_null) {
          new_pointee = NULL;
        } else {
          new_pointee = _builder->get_new_loc(old_pointee);
        }
      }

      const char* kind = MetaspaceObj::type_name(ref->msotype());
      // offset of this field inside the original object
      intx offset = (address)ref->addr() - _orig_obj;
      _builder->update_pointer((address*)(_new_obj + offset), new_pointee, kind, offset);

      // We can't mark the pointer here, because DynamicArchiveBuilder::sort_methods
      // may re-layout the [iv]tables, which would change the offset(s) in an InstanceKlass
      // that would contain pointers. Therefore, we must mark the pointers after
      // sort_methods(), using PointerMarker.
      return false; // Do not recurse.
    }
  };

  class ExternalRefUpdater: public MetaspaceClosure {
    DynamicArchiveBuilder* _builder;

  public:
    ExternalRefUpdater(DynamicArchiveBuilder* shuffler) : _builder(shuffler) {}

    virtual bool do_ref(Ref* ref, bool read_only) {
      // ref is a pointer that lives OUTSIDE of the buffer, but points to an object inside the buffer
      if (ref->not_null()) {
        address new_loc = _builder->get_new_loc(ref);
        const char* kind = MetaspaceObj::type_name(ref->msotype());
        _builder->update_pointer(ref->addr(), new_loc, kind, 0);
        _builder->mark_pointer(ref->addr());
      }
      return false; // Do not recurse.
    }
  };

  class PointerMarker: public UniqueMetaspaceClosure {
    DynamicArchiveBuilder* _builder;

  public:
    PointerMarker(DynamicArchiveBuilder* shuffler) : _builder(shuffler) {}

    virtual bool do_unique_ref(Ref* ref, bool read_only) {
      if (_builder->is_in_buffer_space(ref->obj())) {
        EmbeddedRefMarker ref_marker(_builder);
        ref->metaspace_pointers_do(&ref_marker);
        return true; // keep recursing until every buffered object is visited exactly once.
      } else {
        return false;
      }
    }
  };

  class EmbeddedRefMarker: public MetaspaceClosure {
    DynamicArchiveBuilder* _builder;

  public:
    EmbeddedRefMarker(DynamicArchiveBuilder* shuffler) : _builder(shuffler) {}
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        _builder->mark_pointer(ref->addr());
      }
      return false; // Do not recurse.
    }
  };

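  // update_pointer() also preserves the low "tag" bits of the old value.
  // A sketch with hypothetical values: if *addr == 0x00007f8000000041
  // (pointer 0x00007f8000000040 with mask bit 0x01 set -- see the comments
  // above MetaspaceClosure::obj()) and the pointee's new location is
  // 0x0000000801000040, the field is updated to 0x0000000801000041, so the
  // tag survives the relocation.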
  void update_pointer(address* addr, address value, const char* kind, uintx offset, bool is_mso_pointer=true) {
    // Propagate the mask bits to the new value -- see comments above MetaspaceClosure::obj()
    if (is_mso_pointer) {
      const uintx FLAG_MASK = 0x03;
      uintx mask_bits = uintx(*addr) & FLAG_MASK;
      value = (address)(uintx(value) | mask_bits);
    }

    if (*addr != value) {
      log_debug(cds, dynamic)("Update (%18s*) %3d [" PTR_FORMAT "] " PTR_FORMAT " -> " PTR_FORMAT,
                              kind, int(offset), p2i(addr), p2i(*addr), p2i(value));
      *addr = value;
    }
  }

private:
  GrowableArray<Symbol*>* _symbols; // symbols to dump
  GrowableArray<InstanceKlass*>* _klasses; // klasses to dump

  void append(InstanceKlass* k) { _klasses->append(k); }
  void append(Symbol* s)        { _symbols->append(s); }

  class GatherKlassesAndSymbols : public UniqueMetaspaceClosure {
    DynamicArchiveBuilder* _builder;
    bool _read_only;

  public:
    GatherKlassesAndSymbols(DynamicArchiveBuilder* builder)
      : _builder(builder) {}

    virtual bool do_unique_ref(Ref* ref, bool read_only) {
      if (_builder->follow_ref(ref) != make_a_copy) {
        return false;
      }
      if (ref->msotype() == MetaspaceObj::ClassType) {
        Klass* klass = (Klass*)ref->obj();
        assert(klass->is_klass(), "must be");
        if (klass->is_instance_klass()) {
          InstanceKlass* ik = InstanceKlass::cast(klass);
          assert(!SystemDictionaryShared::is_excluded_class(ik), "must be");
          _builder->append(ik);
          _builder->_estimated_metaspaceobj_bytes += BytesPerWord; // See RunTimeSharedClassInfo::get_for()
        }
      } else if (ref->msotype() == MetaspaceObj::SymbolType) {
        _builder->append((Symbol*)ref->obj());
      }

      int bytes = ref->size() * BytesPerWord;
      _builder->_estimated_metaspaceobj_bytes += bytes;

      return true;
    }
  };

  FollowMode follow_ref(MetaspaceClosure::Ref *ref) {
    address obj = ref->obj();
    if (MetaspaceShared::is_in_shared_metaspace(obj)) {
      // Don't dump existing shared metadata again.
      return point_to_it;
    } else if (ref->msotype() == MetaspaceObj::MethodDataType) {
      return set_to_null;
    } else {
      if (ref->msotype() == MetaspaceObj::ClassType) {
        Klass* klass = (Klass*)ref->obj();
        assert(klass->is_klass(), "must be");
        if (klass->is_instance_klass()) {
          InstanceKlass* ik = InstanceKlass::cast(klass);
          if (SystemDictionaryShared::is_excluded_class(ik)) {
            ResourceMark rm;
            log_debug(cds, dynamic)("Skipping class (excluded): %s", klass->external_name());
            return set_to_null;
          }
        } else if (klass->is_array_klass()) {
          // Don't support archiving of array klasses for now.
          ResourceMark rm;
          log_debug(cds, dynamic)("Skipping class (array): %s", klass->external_name());
          return set_to_null;
        }
      }

      return make_a_copy;
    }
  }

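  // For an InstanceKlass, copy_impl() lays out the copy in the buffer as
  //   [ one pointer-sized slot ][ InstanceKlass body ... ]
  // The extra slot in front is later used to record the klass's
  // RunTimeSharedClassInfo*, so at run time we can go from an InstanceKlass*
  // to its RunTimeSharedClassInfo* by reading the word immediately below the
  // klass (see RunTimeSharedClassInfo::get_for() in systemDictionaryShared.cpp).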
  address copy_impl(MetaspaceClosure::Ref* ref, bool read_only, int bytes) {
    if (ref->msotype() == MetaspaceObj::ClassType) {
      // Save a pointer immediately in front of an InstanceKlass, so
      // we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo*
      // without building another hashtable. See RunTimeSharedClassInfo::get_for()
      // in systemDictionaryShared.cpp.
      address obj = ref->obj();
      Klass* klass = (Klass*)obj;
      if (klass->is_instance_klass()) {
        SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
        current_dump_space()->allocate(sizeof(address), BytesPerWord);
      }
    }
    address p = (address)current_dump_space()->allocate(bytes);
    address obj = ref->obj();
    log_debug(cds, dynamic)("COPY: " PTR_FORMAT " ==> " PTR_FORMAT " %5d %s",
                            p2i(obj), p2i(p), bytes,
                            MetaspaceObj::type_name(ref->msotype()));
    memcpy(p, obj, bytes);
    intptr_t* cloned_vtable = MetaspaceShared::fix_cpp_vtable_for_dynamic_archive(ref->msotype(), p);
    if (cloned_vtable != NULL) {
      update_pointer((address*)p, (address)cloned_vtable, "vtb", 0, /*is_mso_pointer*/false);
      mark_pointer((address*)p);
    }

    return (address)p;
  }

  DynamicArchiveHeader *_header;
  address _alloc_bottom;
  address _last_verified_top;
  size_t _other_region_used_bytes;

  // Conservative estimate for number of bytes needed for:
  size_t _estimated_metaspaceobj_bytes;  // all archived MetaspaceObj's.
  size_t _estimated_hashtable_bytes;     // symbol table and dictionaries
  size_t _estimated_trampoline_bytes;    // method entry trampolines

  size_t estimate_archive_size();
  size_t estimate_trampoline_size();
  size_t estimate_class_file_size();
  address reserve_space_and_init_buffer_to_target_delta();
  void init_header(address addr);
  void release_header();
  void make_trampolines();
  void make_klasses_shareable();
  void sort_methods(InstanceKlass* ik) const;
  void set_symbols_permanent();
  void relocate_buffer_to_target();
  void write_archive(char* serialized_data);

  void init_first_dump_space(address reserved_bottom) {
    DumpRegion* mc_space = MetaspaceShared::misc_code_dump_space();
    DumpRegion* rw_space = MetaspaceShared::read_write_dump_space();

    // Use the same MC->RW->RO ordering as in the base archive.
    MetaspaceShared::init_shared_dump_space(mc_space);
    _current_dump_space = mc_space;
    _last_verified_top = reserved_bottom;
    _num_dump_regions_used = 1;
  }

  void reserve_buffers_for_trampolines() {
    size_t n = _estimated_trampoline_bytes;
    assert(n >= SharedRuntime::trampoline_size(), "don't want to be empty");
    MetaspaceShared::misc_code_space_alloc(n);
  }

public:
  DynamicArchiveBuilder() {
    _klasses = new (ResourceObj::C_HEAP, mtClass) GrowableArray<InstanceKlass*>(100, true, mtInternal);
    _symbols = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Symbol*>(1000, true, mtInternal);

    _estimated_metaspaceobj_bytes = 0;
    _estimated_hashtable_bytes = 0;
    _estimated_trampoline_bytes = 0;

    _num_dump_regions_used = 0;
  }

  void start_dump_space(DumpRegion* next) {
    address bottom = _last_verified_top;
    address top = (address)(current_dump_space()->top());
    _other_region_used_bytes += size_t(top - bottom);

    MetaspaceShared::pack_dump_space(current_dump_space(), next, MetaspaceShared::shared_rs());
    _current_dump_space = next;
    _num_dump_regions_used++;

    _last_verified_top = (address)(current_dump_space()->top());
  }

  void verify_estimate_size(size_t estimate, const char* which) {
    address bottom = _last_verified_top;
    address top = (address)(current_dump_space()->top());
    size_t used = size_t(top - bottom) + _other_region_used_bytes;
    int diff = int(estimate) - int(used);

    log_info(cds)("%s estimate = " SIZE_FORMAT " used = " SIZE_FORMAT "; diff = %d bytes", which, estimate, used, diff);
    assert(diff >= 0, "Estimate is too small");

    _last_verified_top = top;
    _other_region_used_bytes = 0;
  }

  // Do this before and after the archive dump to see if any corruption
  // is caused by dynamic dumping.
  void verify_universe(const char* info) {
    if (VerifyBeforeExit) {
      log_info(cds)("Verify %s", info);
      HandleMark hm;
      // Among other things, this ensures that Eden top is correct.
      Universe::heap()->prepare_for_verify();
      Universe::verify(info);
    }
  }

  void doit() {
    verify_universe("Before CDS dynamic dump");
    DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);
    SystemDictionaryShared::check_excluded_classes();

    {
      ResourceMark rm;
      GatherKlassesAndSymbols gatherer(this);

      SystemDictionaryShared::dumptime_classes_do(&gatherer);
      SymbolTable::metaspace_pointers_do(&gatherer);
      FileMapInfo::metaspace_pointers_do(&gatherer);

      gatherer.finish();
    }

    // rw space starts ...
    address reserved_bottom = reserve_space_and_init_buffer_to_target_delta();
    init_header(reserved_bottom);

    CHeapBitMap ptrmap;
    ArchivePtrMarker::initialize(&ptrmap, (address*)reserved_bottom, (address*)current_dump_space()->top());

    reserve_buffers_for_trampolines();
    verify_estimate_size(_estimated_trampoline_bytes, "Trampolines");

    start_dump_space(MetaspaceShared::read_write_dump_space());

    log_info(cds, dynamic)("Copying %d klasses and %d symbols",
                           _klasses->length(), _symbols->length());

    {
      assert(current_dump_space() == MetaspaceShared::read_write_dump_space(),
             "Current dump space is not rw space");
      // shallow-copy RW objects, if necessary
      ResourceMark rm;
      ShallowCopier rw_copier(this, false);
      iterate_roots(&rw_copier);
    }

    // ro space starts ...
    DumpRegion* ro_space = MetaspaceShared::read_only_dump_space();
    {
      start_dump_space(ro_space);

      // shallow-copy RO objects, if necessary
      ResourceMark rm;
      ShallowCopier ro_copier(this, true);
      iterate_roots(&ro_copier);
    }

    {
      log_info(cds)("Relocating embedded pointers ... ");
      ResourceMark rm;
      ShallowCopyEmbeddedRefRelocator emb_reloc(this);
      iterate_roots(&emb_reloc);
    }

    {
      log_info(cds)("Relocating external roots ... ");
      ResourceMark rm;
      ExternalRefUpdater ext_reloc(this);
      iterate_roots(&ext_reloc);
    }

    verify_estimate_size(_estimated_metaspaceobj_bytes, "MetaspaceObjs");

    char* serialized_data;
    {
      set_symbols_permanent();

      // Write the symbol table and system dictionaries to the RO space.
      // Note that these tables still point to the *original* objects
      // (because they were not processed by ExternalRefUpdater), so
      // they would need to call DynamicArchive::original_to_target() to
      // get the correct addresses.
      assert(current_dump_space() == ro_space, "Must be RO space");
      SymbolTable::write_to_archive(false);
      SystemDictionaryShared::write_to_archive(false);

      serialized_data = ro_space->top();
      WriteClosure wc(ro_space);
      SymbolTable::serialize_shared_table_header(&wc, false);
      SystemDictionaryShared::serialize_dictionary_headers(&wc, false);
    }

    verify_estimate_size(_estimated_hashtable_bytes, "Hashtables");

    make_trampolines();
    make_klasses_shareable();

    {
      log_info(cds)("Adjust lambda proxy class dictionary");
      SystemDictionaryShared::adjust_lambda_proxy_class_dictionary();
    }

    {
      log_info(cds)("Final relocation of pointers ... ");
      ResourceMark rm;
      PointerMarker marker(this);
      iterate_roots(&marker);
      relocate_buffer_to_target();
    }

    write_archive(serialized_data);
    release_header();

    assert(_num_dump_regions_used == _total_dump_regions, "must be");
    verify_universe("After CDS dynamic dump");
  }

  void iterate_roots(MetaspaceClosure* it) {
    int i;
    int num_klasses = _klasses->length();
    for (i = 0; i < num_klasses; i++) {
      it->push(&_klasses->at(i));
    }

    int num_symbols = _symbols->length();
    for (i = 0; i < num_symbols; i++) {
      it->push(&_symbols->at(i));
    }

    FileMapInfo::metaspace_pointers_do(it);

    // Do not call these again, as we have already collected all the classes and symbols
    // that we want to archive. Also, these calls would corrupt the tables when
    // ExternalRefUpdater is used.
    //
    // SystemDictionaryShared::dumptime_classes_do(it);
    // SymbolTable::metaspace_pointers_do(it);

    it->finish();
  }
};

intx DynamicArchiveBuilder::_buffer_to_target_delta;


size_t DynamicArchiveBuilder::estimate_archive_size() {
  // size of the symbol table and two dictionaries, plus the RunTimeSharedClassInfo's
  _estimated_hashtable_bytes = 0;
  _estimated_hashtable_bytes += SymbolTable::estimate_size_for_archive();
  _estimated_hashtable_bytes += SystemDictionaryShared::estimate_size_for_archive();

  _estimated_trampoline_bytes = estimate_trampoline_size();

  size_t total = 0;

  total += _estimated_metaspaceobj_bytes;
  total += _estimated_hashtable_bytes;
  total += _estimated_trampoline_bytes;

  // allow fragmentation at the end of each dump region
  total += _total_dump_regions * reserve_alignment();

  return align_up(total, reserve_alignment());
}

address DynamicArchiveBuilder::reserve_space_and_init_buffer_to_target_delta() {
  size_t total = estimate_archive_size();
  ReservedSpace rs(total);
  if (!rs.is_reserved()) {
    log_error(cds, dynamic)("Failed to reserve %d bytes of output buffer.", (int)total);
    vm_direct_exit(0);
  }

  address buffer_base = (address)rs.base();
  log_info(cds, dynamic)("Reserved output buffer space at    : " PTR_FORMAT " [%d bytes]",
                         p2i(buffer_base), (int)total);
  MetaspaceShared::set_shared_rs(rs);

  // At run time, we will mmap the dynamic archive at target_space_bottom.
  // However, at dump time, we may not be able to write into the target_space,
  // as it's occupied by dynamically loaded Klasses. So we allocate a buffer
  // at an arbitrary location chosen by the OS. We will write all the dynamically
  // archived classes into this buffer. At the final stage of dumping, we relocate
  // all pointers that are inside the buffer_space to point to their (runtime)
  // target location inside the target_space.
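  // A sketch with hypothetical addresses: if the base archive ends at
  // 0x0000000801000000, then target_space_bottom == 0x0000000801000000 (after
  // alignment); if the OS happens to place the buffer at 0x00007f8000000000,
  // then _buffer_to_target_delta == 0x0000000801000000 - 0x00007f8000000000,
  // and every buffered pointer is later shifted by this delta.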
  address target_space_bottom =
    (address)align_up(MetaspaceShared::shared_metaspace_top(), reserve_alignment());
  _buffer_to_target_delta = intx(target_space_bottom) - intx(buffer_base);

  log_info(cds, dynamic)("Target archive space at            : " PTR_FORMAT, p2i(target_space_bottom));
  log_info(cds, dynamic)("Buffer-space to target-space delta : " PTR_FORMAT, p2i((address)_buffer_to_target_delta));

  return buffer_base;
}

void DynamicArchiveBuilder::init_header(address reserved_bottom) {
  _alloc_bottom = reserved_bottom;
  _last_verified_top = reserved_bottom;
  _other_region_used_bytes = 0;

  init_first_dump_space(reserved_bottom);

  FileMapInfo* mapinfo = new FileMapInfo(false);
  assert(FileMapInfo::dynamic_info() == mapinfo, "must be");
  _header = mapinfo->dynamic_header();

  Thread* THREAD = Thread::current();
  FileMapInfo* base_info = FileMapInfo::current_info();
  _header->set_base_header_crc(base_info->crc());
  for (int i = 0; i < MetaspaceShared::n_regions; i++) {
    _header->set_base_region_crc(i, base_info->space_crc(i));
  }
  _header->populate(base_info, os::vm_allocation_granularity());
}

void DynamicArchiveBuilder::release_header() {
  // We temporarily allocated a dynamic FileMapInfo for dumping, which makes it appear we
  // have mapped a dynamic archive, but we actually have not. We are in a safepoint now.
  // Let's free it so that if class loading happens after we leave the safepoint, nothing
  // bad will happen.
  assert(SafepointSynchronize::is_at_safepoint(), "must be");
  FileMapInfo *mapinfo = FileMapInfo::dynamic_info();
  assert(mapinfo != NULL && _header == mapinfo->dynamic_header(), "must be");
  delete mapinfo;
  assert(!DynamicArchive::is_mapped(), "must be");
  _header = NULL;
}

size_t DynamicArchiveBuilder::estimate_trampoline_size() {
  size_t total = 0;
  size_t each_method_bytes =
    align_up(SharedRuntime::trampoline_size(), BytesPerWord) +
    align_up(sizeof(AdapterHandlerEntry*), BytesPerWord);

  for (int i = 0; i < _klasses->length(); i++) {
    InstanceKlass* ik = _klasses->at(i);
    Array<Method*>* methods = ik->methods();
    total += each_method_bytes * methods->length();
  }
  if (total == 0) {
    // We have nothing to archive, but let's avoid having an empty region.
    total = SharedRuntime::trampoline_size();
  }
  return total;
}

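// For each archived method, make_trampolines() carves two consecutive slots
// out of the mc region:
//   [ c2i entry trampoline: SharedRuntime::trampoline_size() bytes ]
//   [ AdapterHandlerEntry* slot: one pointer, initially NULL       ]
// The method's from-compiled entry and adapter trampoline are then set to the
// *target* addresses of these slots via to_target().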
void DynamicArchiveBuilder::make_trampolines() {
  DumpRegion* mc_space = MetaspaceShared::misc_code_dump_space();
  char* p = mc_space->base();
  for (int i = 0; i < _klasses->length(); i++) {
    InstanceKlass* ik = _klasses->at(i);
    Array<Method*>* methods = ik->methods();
    for (int j = 0; j < methods->length(); j++) {
      Method* m = methods->at(j);
      address c2i_entry_trampoline = (address)p;
      p += SharedRuntime::trampoline_size();
      assert(p >= mc_space->base() && p <= mc_space->top(), "must be");
      m->set_from_compiled_entry(to_target(c2i_entry_trampoline));

      AdapterHandlerEntry** adapter_trampoline = (AdapterHandlerEntry**)p;
      p += sizeof(AdapterHandlerEntry*);
      assert(p >= mc_space->base() && p <= mc_space->top(), "must be");
      *adapter_trampoline = NULL;
      m->set_adapter_trampoline(to_target(adapter_trampoline));
    }
  }

  guarantee(p <= mc_space->top(), "Estimate of trampoline size is insufficient");
}

void DynamicArchiveBuilder::make_klasses_shareable() {
  int i, count = _klasses->length();

  InstanceKlass::disable_method_binary_search();
  for (i = 0; i < count; i++) {
    InstanceKlass* ik = _klasses->at(i);
    sort_methods(ik);
  }

  for (i = 0; i < count; i++) {
    InstanceKlass* ik = _klasses->at(i);
    ik->assign_class_loader_type();

    MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread::current(), ik);
    ik->remove_unshareable_info();

    assert(ik->array_klasses() == NULL, "sanity");

    if (log_is_enabled(Debug, cds, dynamic)) {
      ResourceMark rm;
      log_debug(cds, dynamic)("klasses[%4i] = " PTR_FORMAT " %s", i, p2i(to_target(ik)), ik->external_name());
    }
  }
}

// The address order of the copied Symbols may be different from when the original
// klasses were created. Re-sort all the method tables. See Method::sort_methods().
void DynamicArchiveBuilder::sort_methods(InstanceKlass* ik) const {
  assert(ik != NULL, "DynamicArchiveBuilder currently doesn't support dumping the base archive");
  if (MetaspaceShared::is_in_shared_metaspace(ik)) {
    // We have reached a supertype that's already in the base archive
    return;
  }

  if (ik->java_mirror() == NULL) {
    // NULL mirror means this class has already been visited and methods are already sorted
    return;
  }
  ik->remove_java_mirror();

  if (log_is_enabled(Debug, cds, dynamic)) {
    ResourceMark rm;
    log_debug(cds, dynamic)("sorting methods for " PTR_FORMAT " %s", p2i(to_target(ik)), ik->external_name());
  }

  // Make sure all supertypes have been sorted
  sort_methods(ik->java_super());
  Array<InstanceKlass*>* interfaces = ik->local_interfaces();
  int len = interfaces->length();
  for (int i = 0; i < len; i++) {
    sort_methods(interfaces->at(i));
  }

#ifdef ASSERT
  if (ik->methods() != NULL) {
    for (int m = 0; m < ik->methods()->length(); m++) {
      Symbol* name = ik->methods()->at(m)->name();
      assert(MetaspaceShared::is_in_shared_metaspace(name) || is_in_buffer_space(name), "must be");
    }
  }
  if (ik->default_methods() != NULL) {
    for (int m = 0; m < ik->default_methods()->length(); m++) {
      Symbol* name = ik->default_methods()->at(m)->name();
      assert(MetaspaceShared::is_in_shared_metaspace(name) || is_in_buffer_space(name), "must be");
    }
  }
#endif

  Thread* THREAD = Thread::current();
  Method::sort_methods(ik->methods(), /*set_idnums=*/true, dynamic_dump_method_comparator);
  if (ik->default_methods() != NULL) {
    Method::sort_methods(ik->default_methods(), /*set_idnums=*/false, dynamic_dump_method_comparator);
  }
  ik->vtable().initialize_vtable(true, THREAD); assert(!HAS_PENDING_EXCEPTION, "cannot fail");
  ik->itable().initialize_itable(true, THREAD); assert(!HAS_PENDING_EXCEPTION, "cannot fail");
}

void DynamicArchiveBuilder::set_symbols_permanent() {
  int count = _symbols->length();
  for (int i = 0; i < count; i++) {
    Symbol* s = _symbols->at(i);
    s->set_permanent();

    if (log_is_enabled(Trace, cds, dynamic)) {
      ResourceMark rm;
      log_trace(cds, dynamic)("symbols[%4i] = " PTR_FORMAT " %s", i, p2i(to_target(s)), s->as_quoted_ascii());
    }
  }
}

class RelocateBufferToTarget: public BitMapClosure {
  DynamicArchiveBuilder *_builder;
  address* _buffer_bottom;
  intx _buffer_to_target_delta;
 public:
  RelocateBufferToTarget(DynamicArchiveBuilder* builder, address* bottom, intx delta) :
    _builder(builder), _buffer_bottom(bottom), _buffer_to_target_delta(delta) {}

  bool do_bit(size_t offset) {
    address* p = _buffer_bottom + offset;
    assert(_builder->is_in_buffer_space(p), "pointer must live in buffer space");

    address old_ptr = *p;
    if (_builder->is_in_buffer_space(old_ptr)) {
      address new_ptr = old_ptr + _buffer_to_target_delta;
      log_trace(cds, dynamic)("Final patch: @%6d [" PTR_FORMAT " -> " PTR_FORMAT "] " PTR_FORMAT " => " PTR_FORMAT,
                              (int)offset, p2i(p), p2i(_builder->to_target(p)),
                              p2i(old_ptr), p2i(new_ptr));
      *p = new_ptr;
    }

    return true; // keep iterating
  }
};

void DynamicArchiveBuilder::relocate_buffer_to_target() {
  RelocateBufferToTarget patcher(this, (address*)_alloc_bottom, _buffer_to_target_delta);
  ArchivePtrMarker::ptrmap()->iterate(&patcher);

  Array<u8>* table = FileMapInfo::saved_shared_path_table().table();
  SharedPathTable runtime_table(to_target(table), FileMapInfo::shared_path_table().size());
  _header->set_shared_path_table(runtime_table);

  address relocatable_base = (address)SharedBaseAddress;
  address relocatable_end = (address)(current_dump_space()->top()) + _buffer_to_target_delta;

  intx addr_delta = MetaspaceShared::final_delta();
  if (addr_delta == 0) {
    ArchivePtrMarker::compact(relocatable_base, relocatable_end);
  } else {
    // The base archive is NOT mapped at Arguments::default_SharedBaseAddress() (due to ASLR).
    // This means that the current content of the dynamic archive is based on a random
    // address. Let's relocate all the pointers, so that it can be mapped to
    // Arguments::default_SharedBaseAddress() without runtime relocation.
    //
    // Note: both the base and dynamic archive are written with
    // FileMapHeader::_shared_base_address == Arguments::default_SharedBaseAddress()

    // Patch all pointers that are marked by ptrmap within this region,
    // where we have just dumped all the metaspace data.
    address patch_base = (address)_alloc_bottom;
    address patch_end  = (address)current_dump_space()->top();

    // the current value of the pointers to be patched must be within this
    // range (i.e., must point to either the base archive (as currently mapped),
    // or to the (targeted address of) the top archive)
    address valid_old_base = relocatable_base;
    address valid_old_end  = relocatable_end;
    size_t base_plus_top_size = valid_old_end - valid_old_base;
    size_t top_size = patch_end - patch_base;
    size_t base_size = base_plus_top_size - top_size;
    assert(base_plus_top_size > base_size, "no overflow");
    assert(base_plus_top_size > top_size, "no overflow");

    // after patching, the pointers must point inside this range
    // (the requested location of the archive, as mapped at runtime).
    address valid_new_base = (address)Arguments::default_SharedBaseAddress();
    address valid_new_end  = valid_new_base + base_plus_top_size;
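
    // A sketch with hypothetical sizes: if the base archive occupies 16 MB and
    // the top (dynamic) archive 4 MB, then top_size == 4 MB, base_size == 16 MB
    // and base_plus_top_size == 20 MB. A marked pointer must currently fall
    // inside the 20 MB [valid_old_base, valid_old_end) window, and is shifted
    // by addr_delta into [valid_new_base, valid_new_end).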

    log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT "] to "
                   "[" INTPTR_FORMAT " - " INTPTR_FORMAT "], delta = " INTX_FORMAT " bytes",
                   p2i(patch_base + base_size), p2i(patch_end),
                   p2i(valid_new_base + base_size), p2i(valid_new_end), addr_delta);

    SharedDataRelocator<true> patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
                                      valid_new_base, valid_new_end, addr_delta, ArchivePtrMarker::ptrmap());
    ArchivePtrMarker::ptrmap()->iterate(&patcher);
    ArchivePtrMarker::compact(patcher.max_non_null_offset());
  }
}

void DynamicArchiveBuilder::write_archive(char* serialized_data) {
  int num_klasses = _klasses->length();
  int num_symbols = _symbols->length();

  _header->set_serialized_data(to_target(serialized_data));

  FileMapInfo* dynamic_info = FileMapInfo::dynamic_info();
  assert(dynamic_info != NULL, "Sanity");

  // Now write the archived data including the file offsets.
  const char* archive_name = Arguments::GetSharedDynamicArchivePath();
  dynamic_info->open_for_write(archive_name);
  MetaspaceShared::write_core_archive_regions(dynamic_info, NULL, NULL);
  dynamic_info->set_final_requested_base((char*)Arguments::default_SharedBaseAddress());
  dynamic_info->set_header_crc(dynamic_info->compute_header_crc());
  dynamic_info->write_header();
  dynamic_info->close();

  address base = to_target(_alloc_bottom);
  address top  = address(current_dump_space()->top()) + _buffer_to_target_delta;
  size_t file_size = pointer_delta(top, base, sizeof(char));

  base += MetaspaceShared::final_delta();
  top += MetaspaceShared::final_delta();
  log_info(cds, dynamic)("Written dynamic archive " PTR_FORMAT " - " PTR_FORMAT
                         " [" SIZE_FORMAT " bytes header, " SIZE_FORMAT " bytes total]",
                         p2i(base), p2i(top), _header->header_size(), file_size);
  log_info(cds, dynamic)("%d klasses; %d symbols", num_klasses, num_symbols);
}


class VM_PopulateDynamicDumpSharedSpace: public VM_Operation {
  DynamicArchiveBuilder* _builder;
public:
  VM_PopulateDynamicDumpSharedSpace(DynamicArchiveBuilder* builder) : _builder(builder) {}
  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit() {
    ResourceMark rm;
    if (SystemDictionaryShared::empty_dumptime_table()) {
      log_warning(cds, dynamic)("There is no class to be included in the dynamic archive.");
      return;
    }
    if (AllowArchivingWithJavaAgent) {
      warning("This archive was created with AllowArchivingWithJavaAgent. It should be used "
              "for testing purposes only and should not be used in a production environment");
    }
    FileMapInfo::check_nonempty_dir_in_shared_path_table();

    _builder->doit();
  }
};


void DynamicArchive::dump() {
  if (Arguments::GetSharedDynamicArchivePath() == NULL) {
    log_warning(cds, dynamic)("SharedDynamicArchivePath is not specified");
    return;
  }

  DynamicArchiveBuilder builder;
  _builder = &builder;
  VM_PopulateDynamicDumpSharedSpace op(&builder);
  VMThread::execute(&op);
  _builder = NULL;
}

address DynamicArchive::original_to_buffer_impl(address orig_obj) {
  assert(DynamicDumpSharedSpaces, "must be");
  address buff_obj = _builder->get_new_loc(orig_obj);
  assert(buff_obj != NULL, "orig_obj must be used by the dynamic archive");
  assert(buff_obj != orig_obj, "call this only when you know orig_obj must be copied and not just referenced");
  assert(_builder->is_in_buffer_space(buff_obj), "must be");
  return buff_obj;
}

address DynamicArchive::buffer_to_target_impl(address buff_obj) {
  assert(DynamicDumpSharedSpaces, "must be");
  assert(_builder->is_in_buffer_space(buff_obj), "must be");
  return _builder->to_target(buff_obj);
}

address DynamicArchive::original_to_target_impl(address orig_obj) {
  assert(DynamicDumpSharedSpaces, "must be");
  if (MetaspaceShared::is_in_shared_metaspace(orig_obj)) {
    // This happens when the top archive points to a Symbol* in the base archive.
    return orig_obj;
  }
  address buff_obj = _builder->get_new_loc(orig_obj);
  assert(buff_obj != NULL, "orig_obj must be used by the dynamic archive");
  if (buff_obj == orig_obj) {
    // We are storing a pointer to an original object into the dynamic buffer. E.g.,
    // a Symbol* that is used by both the base and top archives.
    assert(MetaspaceShared::is_in_shared_metaspace(orig_obj), "must be");
    return orig_obj;
  } else {
    return _builder->to_target(buff_obj);
  }
}

uintx DynamicArchive::object_delta_uintx(void* buff_obj) {
  assert(DynamicDumpSharedSpaces, "must be");
  address target_obj = _builder->to_target_no_check(address(buff_obj));
  assert(uintx(target_obj) >= SharedBaseAddress, "must be");
  return uintx(target_obj) - SharedBaseAddress;
}

bool DynamicArchive::is_in_target_space(void *obj) {
  assert(DynamicDumpSharedSpaces, "must be");
  return _builder->is_in_target_space(obj);
}


DynamicArchiveBuilder* DynamicArchive::_builder = NULL;


bool DynamicArchive::validate(FileMapInfo* dynamic_info) {
  // Check if the recorded base archive matches with the current one
  FileMapInfo* base_info = FileMapInfo::current_info();
  DynamicArchiveHeader* dynamic_header = dynamic_info->dynamic_header();

  // Check the header crc
  if (dynamic_header->base_header_crc() != base_info->crc()) {
    FileMapInfo::fail_continue("Archive header checksum verification failed.");
    return false;
  }

  // Check each space's crc
  for (int i = 0; i < MetaspaceShared::n_regions; i++) {
    if (dynamic_header->base_region_crc(i) != base_info->space_crc(i)) {
      FileMapInfo::fail_continue("Archive region #%d checksum verification failed.", i);
      return false;
    }
  }

  // Validate the dynamic archived shared path table, and set the global
  // _shared_path_table to that.
  if (!dynamic_info->validate_shared_path_table()) {
    return false;
  }
  return true;
}