1 /*
   2  * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "jvm.h"
  27 #include "classfile/classLoaderData.inline.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "classfile/systemDictionary.hpp"
  30 #include "classfile/systemDictionaryShared.hpp"
  31 #include "logging/log.hpp"
  32 #include "memory/archiveUtils.hpp"
  33 #include "memory/dynamicArchive.hpp"
  34 #include "memory/metadataFactory.hpp"
  35 #include "memory/metaspace.hpp"
  36 #include "memory/metaspaceClosure.hpp"
  37 #include "memory/metaspaceShared.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/compressedOops.hpp"
  40 #include "oops/objArrayKlass.hpp"
  41 #include "prims/jvmtiRedefineClasses.hpp"
  42 #include "runtime/handles.inline.hpp"
  43 #include "runtime/os.inline.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/vmThread.hpp"
  46 #include "runtime/vmOperations.hpp"
  47 #include "utilities/bitMap.inline.hpp"
  48 
   49 #ifndef O_BINARY       // Windows defines O_BINARY for opening files in binary mode.
   50 #define O_BINARY 0     // Make it a no-op on platforms that do not define it.
  51 #endif
  52 
  53 class DynamicArchiveBuilder : ResourceObj {
  54   static unsigned my_hash(const address& a) {
  55     return primitive_hash<address>(a);
  56   }
  57   static bool my_equals(const address& a0, const address& a1) {
  58     return primitive_equals<address>(a0, a1);
  59   }
  60   typedef ResourceHashtable<
  61       address, address,
  62       DynamicArchiveBuilder::my_hash,   // solaris compiler doesn't like: primitive_hash<address>
  63       DynamicArchiveBuilder::my_equals, // solaris compiler doesn't like: primitive_equals<address>
  64       16384, ResourceObj::C_HEAP> RelocationTable;
  65   RelocationTable _new_loc_table;
  66 
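  // Objects are first copied into a temporary "buffer" space reserved at an address
  // chosen by the OS; at run time the archive is mapped at the "target" address just
  // above the base archive. _buffer_to_target_delta is (target - buffer), so
  // to_target(p) == p + _buffer_to_target_delta. For example (addresses made up for
  // illustration), with the buffer reserved at 0x00007f0000400000 and the target
  // space starting at 0x0000000800400000, every marked pointer written into the
  // buffer is eventually rebased by that delta in relocate_buffer_to_target().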
  67   intx _buffer_to_target_delta;
  68 
  69   DumpRegion* _current_dump_space;
  70 
  71   static size_t reserve_alignment() {
  72     return Metaspace::reserve_alignment();
  73   }
  74 
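  // The dynamic archive is written as three dump regions: rw, ro and mc
  // (see doit() and write_regions() below).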
  75   static const int _total_dump_regions = 3;
  76   int _num_dump_regions_used;
  77 
  78 public:
  79   void mark_pointer(address* ptr_loc) {
  80     ArchivePtrMarker::mark_pointer(ptr_loc);
  81   }
  82 
  83   DumpRegion* current_dump_space() const {
  84     return _current_dump_space;
  85   }
  86 
  87   bool is_in_buffer_space(address p) const {
  88     return (_alloc_bottom <= p && p < (address)current_dump_space()->top());
  89   }
  90 
  91   template <typename T> bool is_in_target_space(T target_obj) const {
  92     address buff_obj = address(target_obj) - _buffer_to_target_delta;
  93     return is_in_buffer_space(buff_obj);
  94   }
  95 
  96   template <typename T> bool is_in_buffer_space(T obj) const {
  97     return is_in_buffer_space(address(obj));
  98   }
  99 
 100   template <typename T> T to_target_no_check(T obj) const {
 101     return (T)(address(obj) + _buffer_to_target_delta);
 102   }
 103 
 104   template <typename T> T to_target(T obj) const {
 105     assert(is_in_buffer_space(obj), "must be");
 106     return (T)(address(obj) + _buffer_to_target_delta);
 107   }
 108 
 109   template <typename T> T get_new_loc(T obj) {
 110     address* pp = _new_loc_table.get((address)obj);
 111     if (pp == NULL) {
 112       // Excluded klasses are not copied
 113       return NULL;
 114     } else {
 115       return (T)*pp;
 116     }
 117   }
 118 
 119   address get_new_loc(MetaspaceClosure::Ref* ref) {
 120     return get_new_loc(ref->obj());
 121   }
 122 
 123   template <typename T> bool has_new_loc(T obj) {
 124     address* pp = _new_loc_table.get((address)obj);
 125     return pp != NULL;
 126   }
 127 
 128   static intx _method_comparator_name_delta;
 129 
 130   static int dynamic_dump_method_comparator(Method* a, Method* b) {
 131     Symbol* a_name = a->name();
 132     Symbol* b_name = b->name();
 133 
 134     if (!MetaspaceShared::is_in_shared_metaspace(a_name)) {
 135       // a_name points to a Symbol in the top archive.
  136       // When this method is called, a_name still points into the output buffer.
  137       // Translate it to its target (runtime) address, so that it can be compared
  138       // with Symbols in the base archive (Symbol::fast_compare() orders by address).
 139       a_name = (Symbol*)(address(a_name) + _method_comparator_name_delta);
 140     }
 141     if (!MetaspaceShared::is_in_shared_metaspace(b_name)) {
 142       b_name = (Symbol*)(address(b_name) + _method_comparator_name_delta);
 143     }
 144 
 145     return a_name->fast_compare(b_name);
 146   }
 147 
 148 protected:
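  // How a reference discovered during iteration is handled -- decided by follow_ref():
  //   make_a_copy  -- the object is archivable; shallow-copy it into the buffer.
  //   point_to_it  -- the object already lives in the base archive; keep pointing at it.
  //   set_to_null  -- the object is excluded (MethodData, excluded or array classes);
  //                   references to it are cleared in the copy.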
 149   enum FollowMode {
 150     make_a_copy, point_to_it, set_to_null
 151   };
 152 
 153 public:
 154   void copy(MetaspaceClosure::Ref* ref, bool read_only) {
 155     int bytes = ref->size() * BytesPerWord;
 156     address old_obj = ref->obj();
 157     address new_obj = copy_impl(ref, read_only, bytes);
 158 
 159     assert(new_obj != NULL, "must be");
 160     assert(new_obj != old_obj, "must be");
 161     bool isnew = _new_loc_table.put(old_obj, new_obj);
 162     assert(isnew, "must be");
 163   }
 164 
 165   // Make a shallow copy of each eligible MetaspaceObj into the buffer.
 166   class ShallowCopier: public UniqueMetaspaceClosure {
 167     DynamicArchiveBuilder* _builder;
 168     bool _read_only;
 169   public:
 170     ShallowCopier(DynamicArchiveBuilder* shuffler, bool read_only)
 171       : _builder(shuffler), _read_only(read_only) {}
 172 
 173     virtual bool do_unique_ref(Ref* orig_obj, bool read_only) {
 174       // This method gets called on each *original* object
  175       // reachable from _builder->iterate_roots(). Each orig_obj is
  176       // visited exactly once.
 177       FollowMode mode = _builder->follow_ref(orig_obj);
 178 
 179       if (mode == point_to_it) {
 180         if (read_only == _read_only) {
 181           log_debug(cds, dynamic)("ptr : " PTR_FORMAT " %s", p2i(orig_obj->obj()),
 182                                   MetaspaceObj::type_name(orig_obj->msotype()));
 183           address p = orig_obj->obj();
 184           bool isnew = _builder->_new_loc_table.put(p, p);
 185           assert(isnew, "must be");
 186         }
 187         return false;
 188       }
 189 
 190       if (mode == set_to_null) {
 191         log_debug(cds, dynamic)("nul : " PTR_FORMAT " %s", p2i(orig_obj->obj()),
 192                                 MetaspaceObj::type_name(orig_obj->msotype()));
 193         return false;
 194       }
 195 
 196       if (read_only == _read_only) {
 197         // Make a shallow copy of orig_obj in a buffer (maintained
 198         // by copy_impl in a subclass of DynamicArchiveBuilder).
 199         _builder->copy(orig_obj, read_only);
 200       }
 201       return true;
 202     }
 203   };
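  // ShallowCopier is run twice from doit(): first with read_only == false while the
  // rw region is the current dump space, then with read_only == true after switching
  // to the ro region, so writable and read-only copies land in separate regions.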
 204 
 205   // Relocate all embedded pointer fields within a MetaspaceObj's shallow copy
 206   class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
 207     DynamicArchiveBuilder* _builder;
 208   public:
 209     ShallowCopyEmbeddedRefRelocator(DynamicArchiveBuilder* shuffler)
 210       : _builder(shuffler) {}
 211 
 212     // This method gets called on each *original* object reachable
  213     // from _builder->iterate_roots(). Each orig_obj is
  214     // visited exactly once.
 215     virtual bool do_unique_ref(Ref* orig_ref, bool read_only) {
 216       FollowMode mode = _builder->follow_ref(orig_ref);
 217 
 218       if (mode == point_to_it) {
 219         // We did not make a copy of this object
 220         // and we have nothing to update
 221         assert(_builder->get_new_loc(orig_ref) == NULL ||
 222                _builder->get_new_loc(orig_ref) == orig_ref->obj(), "must be");
 223         return false;
 224       }
 225 
 226       if (mode == set_to_null) {
 227         // We did not make a copy of this object
 228         // and we have nothing to update
 229         assert(!_builder->has_new_loc(orig_ref->obj()), "must not be copied or pointed to");
 230         return false;
 231       }
 232 
 233       // - orig_obj points to the original object.
 234       // - new_obj points to the shallow copy (created by ShallowCopier)
 235       //   of orig_obj. new_obj is NULL if the orig_obj is excluded
 236       address orig_obj = orig_ref->obj();
 237       address new_obj  = _builder->get_new_loc(orig_ref);
 238 
 239       assert(new_obj != orig_obj, "must be");
 240 #ifdef ASSERT
 241       if (new_obj == NULL) {
 242         if (orig_ref->msotype() == MetaspaceObj::ClassType) {
 243           Klass* k = (Klass*)orig_obj;
 244           assert(k->is_instance_klass() &&
 245                  SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(k)),
 246                  "orig_obj must be excluded Class");
 247         }
 248       }
 249 #endif
 250 
 251       log_debug(cds, dynamic)("Relocating " PTR_FORMAT " %s", p2i(new_obj),
 252                               MetaspaceObj::type_name(orig_ref->msotype()));
 253       if (new_obj != NULL) {
 254         EmbeddedRefUpdater updater(_builder, orig_obj, new_obj);
 255         orig_ref->metaspace_pointers_do(&updater);
 256       }
 257 
 258       return true; // keep recursing until every object is visited exactly once.
 259     }
 260 
 261     virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) {
 262       assert(type == _method_entry_ref, "only special type allowed for now");
 263       address obj = ref->obj();
 264       address new_obj = _builder->get_new_loc(ref);
 265       size_t offset = pointer_delta(p, obj,  sizeof(u1));
 266       intptr_t* new_p = (intptr_t*)(new_obj + offset);
 267       assert(*p == *new_p, "must be a copy");
 268       ArchivePtrMarker::mark_pointer((address*)new_p);
 269     }
 270   };
 271 
 272   class EmbeddedRefUpdater: public MetaspaceClosure {
 273     DynamicArchiveBuilder* _builder;
 274     address _orig_obj;
 275     address _new_obj;
 276   public:
 277     EmbeddedRefUpdater(DynamicArchiveBuilder* shuffler, address orig_obj, address new_obj) :
 278       _builder(shuffler), _orig_obj(orig_obj), _new_obj(new_obj) {}
 279 
 280     // This method gets called once for each pointer field F of orig_obj.
 281     // We update new_obj->F to point to the new location of orig_obj->F.
 282     //
 283     // Example: Klass*  0x100 is copied to 0x400
 284     //          Symbol* 0x200 is copied to 0x500
 285     //
 286     // Let orig_obj == 0x100; and
 287     //     new_obj  == 0x400; and
 288     //     ((Klass*)orig_obj)->_name == 0x200;
 289     // Then this function effectively assigns
 290     //     ((Klass*)new_obj)->_name = 0x500;
 291     virtual bool do_ref(Ref* ref, bool read_only) {
 292       address new_pointee = NULL;
 293 
 294       if (ref->not_null()) {
 295         address old_pointee = ref->obj();
 296 
 297         FollowMode mode = _builder->follow_ref(ref);
 298         if (mode == point_to_it) {
 299           new_pointee = old_pointee;
 300         } else if (mode == set_to_null) {
 301           new_pointee = NULL;
 302         } else {
 303           new_pointee = _builder->get_new_loc(old_pointee);
 304         }
 305       }
 306 
 307       const char* kind = MetaspaceObj::type_name(ref->msotype());
 308       // offset of this field inside the original object
 309       intx offset = (address)ref->addr() - _orig_obj;
 310       _builder->update_pointer((address*)(_new_obj + offset), new_pointee, kind, offset);
 311 
 312       // We can't mark the pointer here, because DynamicArchiveBuilder::sort_methods
  313       // may re-layout the [iv]tables, which would change the offsets of the pointer
  314       // fields inside an InstanceKlass. Therefore, we must mark the pointers after
 315       // sort_methods(), using PointerMarker.
 316       return false; // Do not recurse.
 317     }
 318   };
 319 
 320   class ExternalRefUpdater: public MetaspaceClosure {
 321     DynamicArchiveBuilder* _builder;
 322 
 323   public:
 324     ExternalRefUpdater(DynamicArchiveBuilder* shuffler) : _builder(shuffler) {}
 325 
 326     virtual bool do_ref(Ref* ref, bool read_only) {
 327       // ref is a pointer that lives OUTSIDE of the buffer, but points to an object inside the buffer
 328       if (ref->not_null()) {
 329         address new_loc = _builder->get_new_loc(ref);
 330         const char* kind = MetaspaceObj::type_name(ref->msotype());
 331         _builder->update_pointer(ref->addr(), new_loc, kind, 0);
 332         _builder->mark_pointer(ref->addr());
 333       }
 334       return false; // Do not recurse.
 335     }
 336   };
 337 
 338   class PointerMarker: public UniqueMetaspaceClosure {
 339     DynamicArchiveBuilder* _builder;
 340 
 341   public:
 342     PointerMarker(DynamicArchiveBuilder* shuffler) : _builder(shuffler) {}
 343 
 344     virtual bool do_unique_ref(Ref* ref, bool read_only) {
 345       if (_builder->is_in_buffer_space(ref->obj())) {
 346         EmbeddedRefMarker ref_marker(_builder);
 347         ref->metaspace_pointers_do(&ref_marker);
 348         return true; // keep recursing until every buffered object is visited exactly once.
 349       } else {
 350         return false;
 351       }
 352     }
 353   };
 354 
 355   class EmbeddedRefMarker: public MetaspaceClosure {
 356     DynamicArchiveBuilder* _builder;
 357 
 358   public:
 359     EmbeddedRefMarker(DynamicArchiveBuilder* shuffler) : _builder(shuffler) {}
 360     virtual bool do_ref(Ref* ref, bool read_only) {
 361       if (ref->not_null()) {
 362         _builder->mark_pointer(ref->addr());
 363       }
 364       return false; // Do not recurse.
 365     }
 366   };
 367 
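  // Write 'value' into the pointer field at 'addr' of a buffered copy. The low two
  // bits of a MetaspaceObj pointer may carry tag bits (see the comments above
  // MetaspaceClosure::obj()); they are preserved when the new value is installed.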
 368   void update_pointer(address* addr, address value, const char* kind, uintx offset, bool is_mso_pointer=true) {
  369     // Propagate the mask bits to the new value -- see comments above MetaspaceClosure::obj()
 370     if (is_mso_pointer) {
 371       const uintx FLAG_MASK = 0x03;
 372       uintx mask_bits = uintx(*addr) & FLAG_MASK;
 373       value = (address)(uintx(value) | mask_bits);
 374     }
 375 
 376     if (*addr != value) {
 377       log_debug(cds, dynamic)("Update (%18s*) %3d [" PTR_FORMAT "] " PTR_FORMAT " -> " PTR_FORMAT,
 378                               kind, int(offset), p2i(addr), p2i(*addr), p2i(value));
 379       *addr = value;
 380     }
 381   }
 382 
 383 private:
 384   GrowableArray<Symbol*>* _symbols; // symbols to dump
 385   GrowableArray<InstanceKlass*>* _klasses; // klasses to dump
 386 
 387   void append(InstanceKlass* k) { _klasses->append(k); }
 388   void append(Symbol* s)        { _symbols->append(s); }
 389 
 390   class GatherKlassesAndSymbols : public UniqueMetaspaceClosure {
 391     DynamicArchiveBuilder* _builder;
 392     bool _read_only;
 393 
 394   public:
 395     GatherKlassesAndSymbols(DynamicArchiveBuilder* builder)
 396       : _builder(builder) {}
 397 
 398     virtual bool do_unique_ref(Ref* ref, bool read_only) {
 399       if (_builder->follow_ref(ref) != make_a_copy) {
 400         return false;
 401       }
 402       if (ref->msotype() == MetaspaceObj::ClassType) {
 403         Klass* klass = (Klass*)ref->obj();
 404         assert(klass->is_klass(), "must be");
 405         if (klass->is_instance_klass()) {
 406           InstanceKlass* ik = InstanceKlass::cast(klass);
 407           assert(!SystemDictionaryShared::is_excluded_class(ik), "must be");
 408           _builder->append(ik);
  409           _builder->_estimated_metaspaceobj_bytes += BytesPerWord; // See RunTimeSharedClassInfo::get_for()
 410         }
 411       } else if (ref->msotype() == MetaspaceObj::SymbolType) {
 412         _builder->append((Symbol*)ref->obj());
 413       }
 414 
 415       int bytes = ref->size() * BytesPerWord;
  416       _builder->_estimated_metaspaceobj_bytes += bytes;
 417 
 418       return true;
 419     }
 420   };
 421 
 422   FollowMode follow_ref(MetaspaceClosure::Ref *ref) {
 423     address obj = ref->obj();
 424     if (MetaspaceShared::is_in_shared_metaspace(obj)) {
 425       // Don't dump existing shared metadata again.
 426       return point_to_it;
 427     } else if (ref->msotype() == MetaspaceObj::MethodDataType) {
 428       return set_to_null;
 429     } else {
 430       if (ref->msotype() == MetaspaceObj::ClassType) {
 431         Klass* klass = (Klass*)ref->obj();
 432         assert(klass->is_klass(), "must be");
 433         if (klass->is_instance_klass()) {
 434           InstanceKlass* ik = InstanceKlass::cast(klass);
 435           if (SystemDictionaryShared::is_excluded_class(ik)) {
 436             ResourceMark rm;
 437             log_debug(cds, dynamic)("Skipping class (excluded): %s", klass->external_name());
 438             return set_to_null;
 439           }
 440         } else if (klass->is_array_klass()) {
 441           // Don't support archiving of array klasses for now.
 442           ResourceMark rm;
 443           log_debug(cds, dynamic)("Skipping class (array): %s", klass->external_name());
 444           return set_to_null;
 445         }
 446       }
 447 
 448       return make_a_copy;
 449     }
 450   }
 451 
 452   address copy_impl(MetaspaceClosure::Ref* ref, bool read_only, int bytes) {
 453     if (ref->msotype() == MetaspaceObj::ClassType) {
 454       // Save a pointer immediate in front of an InstanceKlass, so
 455       // we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo*
 456       // without building another hashtable. See RunTimeSharedClassInfo::get_for()
 457       // in systemDictionaryShared.cpp.
 458       address obj = ref->obj();
 459       Klass* klass = (Klass*)obj;
 460       if (klass->is_instance_klass()) {
 461         SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
 462         current_dump_space()->allocate(sizeof(address), BytesPerWord);
 463       }
 464     }
 465     address p = (address)current_dump_space()->allocate(bytes);
 466     address obj = ref->obj();
 467     log_debug(cds, dynamic)("COPY: " PTR_FORMAT " ==> " PTR_FORMAT " %5d %s",
 468                             p2i(obj), p2i(p), bytes,
 469                             MetaspaceObj::type_name(ref->msotype()));
 470     memcpy(p, obj, bytes);
 471     intptr_t* cloned_vtable = MetaspaceShared::fix_cpp_vtable_for_dynamic_archive(ref->msotype(), p);
 472     if (cloned_vtable != NULL) {
 473       update_pointer((address*)p, (address)cloned_vtable, "vtb", 0, /*is_mso_pointer*/false);
 474       mark_pointer((address*)p);
 475     }
 476 
 477     return (address)p;
 478   }
 479 
 480   DynamicArchiveHeader *_header;
 481   address _alloc_bottom;
 482   address _last_verified_top;
 483   size_t _other_region_used_bytes;
 484 
 485   // Conservative estimate for number of bytes needed for:
  486   size_t _estimated_metaspaceobj_bytes;   // all archived MetaspaceObj's.
 487   size_t _estimated_hashtable_bytes;     // symbol table and dictionaries
 488   size_t _estimated_trampoline_bytes;    // method entry trampolines
 489 
 490   size_t estimate_archive_size();
 491   size_t estimate_trampoline_size();
 492   size_t estimate_class_file_size();
 493   address reserve_space_and_init_buffer_to_target_delta();
 494   void init_header(address addr);
 495   void make_trampolines();
 496   void make_klasses_shareable();
 497   void sort_methods(InstanceKlass* ik) const;
 498   void set_symbols_permanent();
 499   void relocate_buffer_to_target();
 500   void write_archive(char* serialized_data_start);
 501   void write_regions(FileMapInfo* dynamic_info);
 502 
 503   void init_first_dump_space(address reserved_bottom) {
 504     address first_space_base = reserved_bottom;
 505     DumpRegion* rw_space = MetaspaceShared::read_write_dump_space();
 506     MetaspaceShared::init_shared_dump_space(rw_space, first_space_base);
 507     _current_dump_space = rw_space;
 508     _last_verified_top = first_space_base;
 509     _num_dump_regions_used = 1;
 510   }
 511 
 512 public:
 513   DynamicArchiveBuilder() {
 514     _klasses = new (ResourceObj::C_HEAP, mtClass) GrowableArray<InstanceKlass*>(100, true, mtInternal);
 515     _symbols = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Symbol*>(1000, true, mtInternal);
 516 
  517     _estimated_metaspaceobj_bytes = 0;
 518     _estimated_hashtable_bytes = 0;
 519     _estimated_trampoline_bytes = 0;
 520 
 521     _num_dump_regions_used = 0;
 522   }
 523 
 524   void start_dump_space(DumpRegion* next) {
 525     address bottom = _last_verified_top;
 526     address top = (address)(current_dump_space()->top());
 527     _other_region_used_bytes += size_t(top - bottom);
 528 
 529     MetaspaceShared::pack_dump_space(current_dump_space(), next, MetaspaceShared::shared_rs());
 530     _current_dump_space = next;
 531     _num_dump_regions_used ++;
 532 
 533     _last_verified_top = (address)(current_dump_space()->top());
 534   }
 535 
 536   void verify_estimate_size(size_t estimate, const char* which) {
 537     address bottom = _last_verified_top;
 538     address top = (address)(current_dump_space()->top());
 539     size_t used = size_t(top - bottom) + _other_region_used_bytes;
 540     int diff = int(estimate) - int(used);
 541 
 542     log_info(cds)("%s estimate = " SIZE_FORMAT " used = " SIZE_FORMAT "; diff = %d bytes", which, estimate, used, diff);
 543     assert(diff >= 0, "Estimate is too small");
 544 
 545     _last_verified_top = top;
 546     _other_region_used_bytes = 0;
 547   }
 548 
 549   // Do this before and after the archive dump to see if any corruption
 550   // is caused by dynamic dumping.
 551   void verify_universe(const char* info) {
 552     if (VerifyBeforeExit) {
 553       log_info(cds)("Verify %s", info);
 554       HandleMark hm;
 555       // Among other things, this ensures that Eden top is correct.
 556       Universe::heap()->prepare_for_verify();
 557       Universe::verify(info);
 558     }
 559   }
 560 
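  // Main driver, executed inside the VM_PopulateDynamicDumpSharedSpace safepoint
  // operation:
  //   1. gather all archivable klasses and symbols (GatherKlassesAndSymbols);
  //   2. reserve the output buffer and initialize the dynamic archive header;
  //   3. shallow-copy rw objects, then ro objects, into the buffer;
  //   4. relocate embedded and external pointers to the copies;
  //   5. write the symbol table and dictionaries, then the serialized headers;
  //   6. make method trampolines (mc region) and make the klasses shareable;
  //   7. mark all archived pointers, relocate the buffer to the target address,
  //      and write the archive file.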
 561   void doit() {
 562     verify_universe("Before CDS dynamic dump");
 563     DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);
 564     SystemDictionaryShared::check_excluded_classes();
 565 
 566     {
 567       ResourceMark rm;
 568       GatherKlassesAndSymbols gatherer(this);
 569 
 570       SystemDictionaryShared::dumptime_classes_do(&gatherer);
 571       SymbolTable::metaspace_pointers_do(&gatherer);
 572       FileMapInfo::metaspace_pointers_do(&gatherer);
 573 
 574       gatherer.finish();
 575     }
 576 
 577     // rw space starts ...
 578     address reserved_bottom = reserve_space_and_init_buffer_to_target_delta();
 579     init_header(reserved_bottom);
 580 
 581     CHeapBitMap ptrmap;
 582     ArchivePtrMarker::initialize(&ptrmap, (address*)reserved_bottom, (address*)current_dump_space()->top());
 583 
 584     verify_estimate_size(sizeof(DynamicArchiveHeader), "header");
 585 
 586     log_info(cds, dynamic)("Copying %d klasses and %d symbols",
 587                            _klasses->length(), _symbols->length());
 588 
 589     {
 590       assert(current_dump_space() == MetaspaceShared::read_write_dump_space(),
 591              "Current dump space is not rw space");
 592       // shallow-copy RW objects, if necessary
 593       ResourceMark rm;
 594       ShallowCopier rw_copier(this, false);
 595       iterate_roots(&rw_copier);
 596     }
 597 
 598     // ro space starts ...
 599     DumpRegion* ro_space = MetaspaceShared::read_only_dump_space();
 600     {
 601       start_dump_space(ro_space);
 602 
 603       // shallow-copy RO objects, if necessary
 604       ResourceMark rm;
 605       ShallowCopier ro_copier(this, true);
 606       iterate_roots(&ro_copier);
 607     }
 608 
 609     {
 610       log_info(cds)("Relocating embedded pointers ... ");
 611       ResourceMark rm;
 612       ShallowCopyEmbeddedRefRelocator emb_reloc(this);
 613       iterate_roots(&emb_reloc);
 614     }
 615 
 616     {
 617       log_info(cds)("Relocating external roots ... ");
 618       ResourceMark rm;
 619       ExternalRefUpdater ext_reloc(this);
 620       iterate_roots(&ext_reloc);
 621     }
 622 
  623     verify_estimate_size(_estimated_metaspaceobj_bytes, "MetaspaceObjs");
 624 
 625     char* serialized_data_start;
 626     {
 627       set_symbols_permanent();
 628 
 629       // Write the symbol table and system dictionaries to the RO space.
 630       // Note that these tables still point to the *original* objects
 631       // (because they were not processed by ExternalRefUpdater), so
 632       // they would need to call DynamicArchive::original_to_target() to
 633       // get the correct addresses.
 634       assert(current_dump_space() == ro_space, "Must be RO space");
 635       SymbolTable::write_to_archive(false);
 636       SystemDictionaryShared::write_to_archive(false);
 637 
 638       serialized_data_start = ro_space->top();
 639       WriteClosure wc(ro_space);
 640       SymbolTable::serialize_shared_table_header(&wc, false);
 641       SystemDictionaryShared::serialize_dictionary_headers(&wc, false);
 642     }
 643 
 644     verify_estimate_size(_estimated_hashtable_bytes, "Hashtables");
 645 
 646     // mc space starts ...
 647     {
 648       start_dump_space(MetaspaceShared::misc_code_dump_space());
 649       make_trampolines();
 650     }
 651 
 652     verify_estimate_size(_estimated_trampoline_bytes, "Trampolines");
 653 
 654     make_klasses_shareable();
 655 
 656     {
 657       log_info(cds)("Final relocation of pointers ... ");
 658       ResourceMark rm;
 659       PointerMarker marker(this);
 660       iterate_roots(&marker);
 661       relocate_buffer_to_target();
 662     }
 663 
 664     write_archive(serialized_data_start);
 665 
 666     assert(_num_dump_regions_used == _total_dump_regions, "must be");
 667     verify_universe("After CDS dynamic dump");
 668   }
 669 
 670   void iterate_roots(MetaspaceClosure* it) {
 671     int i;
 672     int num_klasses = _klasses->length();
 673     for (i = 0; i < num_klasses; i++) {
 674       it->push(&_klasses->at(i));
 675     }
 676 
 677     int num_symbols = _symbols->length();
 678     for (i = 0; i < num_symbols; i++) {
 679       it->push(&_symbols->at(i));
 680     }
 681 
 682     FileMapInfo::metaspace_pointers_do(it);
 683 
 684     // Do not call these again, as we have already collected all the classes and symbols
 685     // that we want to archive. Also, these calls would corrupt the tables when
 686     // ExternalRefUpdater is used.
 687     //
 688     // SystemDictionaryShared::dumptime_classes_do(it);
 689     // SymbolTable::metaspace_pointers_do(it);
 690 
 691     it->finish();
 692   }
 693 };
 694 
 695 intx DynamicArchiveBuilder::_method_comparator_name_delta;
 696 
 697 
 698 size_t DynamicArchiveBuilder::estimate_archive_size() {
 699   // size of the symbol table and two dictionaries, plus the RunTimeSharedClassInfo's
 700   _estimated_hashtable_bytes = 0;
 701   _estimated_hashtable_bytes += SymbolTable::estimate_size_for_archive();
 702   _estimated_hashtable_bytes += SystemDictionaryShared::estimate_size_for_archive();
 703 
 704   _estimated_trampoline_bytes = estimate_trampoline_size();
 705 
 706   size_t total = 0;
 707 
  708   total += _estimated_metaspaceobj_bytes;
 709   total += _estimated_hashtable_bytes;
 710   total += _estimated_trampoline_bytes;
 711 
 712   // allow fragmentation at the end of each dump region
 713   total += _total_dump_regions * reserve_alignment();
 714 
 715   return align_up(total, reserve_alignment());
 716 }
 717 
 718 address DynamicArchiveBuilder::reserve_space_and_init_buffer_to_target_delta() {
 719   size_t total = estimate_archive_size();
 720   ReservedSpace rs = MetaspaceShared::reserve_shared_space(total);
 721   if (!rs.is_reserved()) {
 722     log_error(cds, dynamic)("Failed to reserve %d bytes of output buffer.", (int)total);
 723     vm_direct_exit(0);
 724   }
 725 
 726   address buffer_base = (address)rs.base();
 727   log_info(cds, dynamic)("Reserved output buffer space at    : " PTR_FORMAT " [%d bytes]",
 728                          p2i(buffer_base), (int)total);
 729   MetaspaceShared::set_shared_rs(rs);
 730 
 731   // At run time, we will mmap the dynamic archive at target_space_bottom.
 732   // However, at dump time, we may not be able to write into the target_space,
 733   // as it's occupied by dynamically loaded Klasses. So we allocate a buffer
 734   // at an arbitrary location chosen by the OS. We will write all the dynamically
 735   // archived classes into this buffer. At the final stage of dumping, we relocate
 736   // all pointers that are inside the buffer_space to point to their (runtime)
  737   // target location inside the target_space.
 738   address target_space_bottom =
 739     (address)align_up(MetaspaceShared::shared_metaspace_top(), reserve_alignment());
 740   _buffer_to_target_delta = intx(target_space_bottom) - intx(buffer_base);
 741 
 742   log_info(cds, dynamic)("Target archive space at            : " PTR_FORMAT, p2i(target_space_bottom));
 743   log_info(cds, dynamic)("Buffer-space to target-space delta : " PTR_FORMAT, p2i((address)_buffer_to_target_delta));
 744 
 745   return buffer_base;
 746 }
 747 
 748 void DynamicArchiveBuilder::init_header(address reserved_bottom) {
 749   _alloc_bottom = reserved_bottom;
 750   _last_verified_top = reserved_bottom;
 751   _other_region_used_bytes = 0;
 752 
 753   init_first_dump_space(reserved_bottom);
 754 
 755   FileMapInfo* mapinfo = new FileMapInfo(false);
 756   _header = mapinfo->dynamic_header();
 757 
 758   Thread* THREAD = Thread::current();
 759   FileMapInfo* base_info = FileMapInfo::current_info();
 760   _header->set_base_header_crc(base_info->crc());
 761   for (int i = 0; i < MetaspaceShared::n_regions; i++) {
 762     _header->set_base_region_crc(i, base_info->space_crc(i));
 763   }
 764   _header->populate(base_info, os::vm_allocation_granularity());
 765 }
 766 
 767 size_t DynamicArchiveBuilder::estimate_trampoline_size() {
 768   size_t total = 0;
 769   size_t each_method_bytes =
 770     align_up(SharedRuntime::trampoline_size(), BytesPerWord) +
 771     align_up(sizeof(AdapterHandlerEntry*), BytesPerWord);
 772 
 773   for (int i = 0; i < _klasses->length(); i++) {
 774     InstanceKlass* ik = _klasses->at(i);
 775     Array<Method*>* methods = ik->methods();
 776     total += each_method_bytes * methods->length();
 777   }
 778   if (total == 0) {
 779     // We have nothing to archive, but let's avoid having an empty region.
 780     total = SharedRuntime::trampoline_size();
 781   }
 782   return total;
 783 }
 784 
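// For every archived Method, allocate a c2i entry trampoline and a slot for an
// AdapterHandlerEntry* in the mc region. The trampoline addresses are stored in the
// Method (translated to their target addresses); the AdapterHandlerEntry* slot is
// written as NULL at dump time and is expected to be filled in at run time.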
 785 void DynamicArchiveBuilder::make_trampolines() {
 786   for (int i = 0; i < _klasses->length(); i++) {
 787     InstanceKlass* ik = _klasses->at(i);
 788     Array<Method*>* methods = ik->methods();
 789     for (int j = 0; j < methods->length(); j++) {
 790       Method* m = methods->at(j);
 791       address c2i_entry_trampoline =
 792         (address)MetaspaceShared::misc_code_space_alloc(SharedRuntime::trampoline_size());
 793       m->set_from_compiled_entry(to_target(c2i_entry_trampoline));
 794       AdapterHandlerEntry** adapter_trampoline =
 795         (AdapterHandlerEntry**)MetaspaceShared::misc_code_space_alloc(sizeof(AdapterHandlerEntry*));
 796       *adapter_trampoline = NULL;
 797       m->set_adapter_trampoline(to_target(adapter_trampoline));
 798     }
 799   }
 800 
 801   if (MetaspaceShared::misc_code_dump_space()->used() == 0) {
 802     // We have nothing to archive, but let's avoid having an empty region.
 803     MetaspaceShared::misc_code_space_alloc(SharedRuntime::trampoline_size());
 804   }
 805 }
 806 
 807 void DynamicArchiveBuilder::make_klasses_shareable() {
 808   int i, count = _klasses->length();
 809 
 810   InstanceKlass::disable_method_binary_search();
 811   _method_comparator_name_delta = _buffer_to_target_delta;
 812   for (i = 0; i < count; i++) {
 813     InstanceKlass* ik = _klasses->at(i);
 814     sort_methods(ik);
 815   }
 816 
 817   for (i = 0; i < count; i++) {
 818     InstanceKlass* ik = _klasses->at(i);
 819     ClassLoaderData *cld = ik->class_loader_data();
 820     if (cld->is_boot_class_loader_data()) {
 821       ik->set_class_loader_type(ClassLoader::BOOT_LOADER);
 822     }
 823     else if (cld->is_platform_class_loader_data()) {
 824       ik->set_class_loader_type(ClassLoader::PLATFORM_LOADER);
 825     }
 826     else if (cld->is_system_class_loader_data()) {
 827       ik->set_class_loader_type(ClassLoader::APP_LOADER);
 828     }
 829 
 830     MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(ik);
 831     ik->remove_unshareable_info();
 832 
 833     assert(ik->array_klasses() == NULL, "sanity");
 834 
 835     if (log_is_enabled(Debug, cds, dynamic)) {
 836       ResourceMark rm;
 837       log_debug(cds, dynamic)("klasses[%4i] = " PTR_FORMAT " %s", i, p2i(to_target(ik)), ik->external_name());
 838     }
 839   }
 840 }
 841 
  842 // The address order of the copied Symbols may be different from when the original
  843 // klasses were created. Re-sort all the method tables. See Method::sort_methods().
 844 void DynamicArchiveBuilder::sort_methods(InstanceKlass* ik) const {
 845   assert(ik != NULL, "DynamicArchiveBuilder currently doesn't support dumping the base archive");
 846   if (MetaspaceShared::is_in_shared_metaspace(ik)) {
 847     // We have reached a supertype that's already in the base archive
 848     return;
 849   }
 850 
 851   if (ik->java_mirror() == NULL) {
 852     // NULL mirror means this class has already been visited and methods are already sorted
 853     return;
 854   }
 855   ik->remove_java_mirror();
 856 
 857   if (log_is_enabled(Debug, cds, dynamic)) {
 858     ResourceMark rm;
 859     log_debug(cds, dynamic)("sorting methods for " PTR_FORMAT " %s", p2i(to_target(ik)), ik->external_name());
 860   }
 861 
 862   // Make sure all supertypes have been sorted
 863   sort_methods(ik->java_super());
 864   Array<InstanceKlass*>* interfaces = ik->local_interfaces();
 865   int len = interfaces->length();
 866   for (int i = 0; i < len; i++) {
 867     sort_methods(interfaces->at(i));
 868   }
 869 
 870 #ifdef ASSERT
 871   if (ik->methods() != NULL) {
 872     for (int m = 0; m < ik->methods()->length(); m++) {
 873       Symbol* name = ik->methods()->at(m)->name();
 874       assert(MetaspaceShared::is_in_shared_metaspace(name) || is_in_buffer_space(name), "must be");
 875     }
 876   }
 877   if (ik->default_methods() != NULL) {
 878     for (int m = 0; m < ik->default_methods()->length(); m++) {
 879       Symbol* name = ik->default_methods()->at(m)->name();
 880       assert(MetaspaceShared::is_in_shared_metaspace(name) || is_in_buffer_space(name), "must be");
 881     }
 882   }
 883 #endif
 884 
 885   Thread* THREAD = Thread::current();
 886   Method::sort_methods(ik->methods(), /*set_idnums=*/true, dynamic_dump_method_comparator);
 887   if (ik->default_methods() != NULL) {
 888     Method::sort_methods(ik->default_methods(), /*set_idnums=*/false, dynamic_dump_method_comparator);
 889   }
 890   ik->vtable().initialize_vtable(true, THREAD); assert(!HAS_PENDING_EXCEPTION, "cannot fail");
 891   ik->itable().initialize_itable(true, THREAD); assert(!HAS_PENDING_EXCEPTION, "cannot fail");
 892 }
 893 
 894 void DynamicArchiveBuilder::set_symbols_permanent() {
 895   int count = _symbols->length();
 896   for (int i=0; i<count; i++) {
 897     Symbol* s = _symbols->at(i);
 898     s->set_permanent();
 899 
 900     if (log_is_enabled(Trace, cds, dynamic)) {
 901       ResourceMark rm;
 902       log_trace(cds, dynamic)("symbols[%4i] = " PTR_FORMAT " %s", i, p2i(to_target(s)), s->as_quoted_ascii());
 903     }
 904   }
 905 }
 906 
 907 class RelocateBufferToTarget: public BitMapClosure {
 908   DynamicArchiveBuilder *_builder;
 909   address* _buffer_bottom;
 910   intx _buffer_to_target_delta;
 911  public:
 912   RelocateBufferToTarget(DynamicArchiveBuilder* builder, address* bottom, intx delta) :
 913     _builder(builder), _buffer_bottom(bottom), _buffer_to_target_delta(delta) {}
 914 
 915   bool do_bit(size_t offset) {
 916     address* p = _buffer_bottom + offset;
 917     assert(_builder->is_in_buffer_space(p), "pointer must live in buffer space");
 918 
 919     address old_ptr = *p;
 920     if (_builder->is_in_buffer_space(old_ptr)) {
 921       address new_ptr = old_ptr + _buffer_to_target_delta;
 922       log_trace(cds, dynamic)("Final patch: @%6d [" PTR_FORMAT " -> " PTR_FORMAT "] " PTR_FORMAT " => " PTR_FORMAT,
 923                               (int)offset, p2i(p), p2i(_builder->to_target(p)),
 924                               p2i(old_ptr), p2i(new_ptr));
 925       *p = new_ptr;
 926     }
 927 
 928     return true; // keep iterating
 929   }
 930 };
 931 
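// Relocation happens in two steps. First, every marked pointer that points into the
// buffer is rebased by _buffer_to_target_delta so that it points into the target space.
// Then, if the base archive is not currently mapped at the default SharedBaseAddress
// (MetaspaceShared::final_delta() != 0), the marked pointers in the dumped region are
// patched once more so that the archive file is laid out for the default base address.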
 932 void DynamicArchiveBuilder::relocate_buffer_to_target() {
 933   RelocateBufferToTarget patcher(this, (address*)_alloc_bottom, _buffer_to_target_delta);
 934   ArchivePtrMarker::ptrmap()->iterate(&patcher);
 935 
 936   Array<u8>* table = FileMapInfo::shared_path_table().table();
 937   SharedPathTable runtime_table(to_target(table), FileMapInfo::shared_path_table().size());
 938   _header->set_shared_path_table(runtime_table);
 939 
 940   address relocatable_base = (address)SharedBaseAddress;
 941   address relocatable_end = (address)(current_dump_space()->top()) + _buffer_to_target_delta;
 942 
 943   ArchivePtrMarker::compact(relocatable_base, relocatable_end);
 944 
 945   intx addr_delta = MetaspaceShared::final_delta();
 946   if (addr_delta != 0) {
 947     // Patch all pointers that are marked by ptrmap within this region,
 948     // where we have just dumped all the metaspace data.
 949     address patch_base = (address)_alloc_bottom;
 950     address patch_end  = (address)current_dump_space()->top();
 951 
 952     // debug only -- the current value of the pointers to be patched must be within this
 953     // range (i.e., must point to either the top archive (as currently mapped), or to the
 954     // (targeted address of) the top archive)
 955     address valid_old_base = relocatable_base;
 956     address valid_old_end  = relocatable_end;
 957     size_t base_plus_top_size = valid_old_end - valid_old_base;
 958     size_t top_size = patch_end - patch_base;
 959     size_t base_size = base_plus_top_size - top_size;
 960     assert(base_plus_top_size > base_size, "no overflow");
 961     assert(base_plus_top_size > top_size, "no overflow");
 962     
 963     // debug only -- after patching, the pointers must point inside this range
 964     // (the requested location of the archive, as mapped at runtime).
 965     address valid_new_base = (address)Arguments::default_SharedBaseAddress();
 966     address valid_new_end  = valid_new_base + base_plus_top_size;
 967 
 968     log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT "] to "
 969                    "[" INTPTR_FORMAT " - " INTPTR_FORMAT "], delta = " INTX_FORMAT " bytes", p2i(patch_base + base_size), p2i(patch_end),
 970                    p2i(valid_new_base + base_size), p2i(valid_new_end), addr_delta);
 971 
 972     SharedDataRelocator patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
 973                                 valid_new_base, valid_new_end, addr_delta);
 974     ArchivePtrMarker::ptrmap()->iterate(&patcher);
 975   }
 976 }
 977 
 978 void DynamicArchiveBuilder::write_regions(FileMapInfo* dynamic_info) {
 979   dynamic_info->write_region(MetaspaceShared::rw,
 980                              MetaspaceShared::read_write_dump_space()->base(),
 981                              MetaspaceShared::read_write_dump_space()->used(),
 982                              /*read_only=*/false,/*allow_exec=*/false);
 983   dynamic_info->write_region(MetaspaceShared::ro,
 984                              MetaspaceShared::read_only_dump_space()->base(),
 985                              MetaspaceShared::read_only_dump_space()->used(),
 986                              /*read_only=*/true, /*allow_exec=*/false);
 987   dynamic_info->write_region(MetaspaceShared::mc,
 988                              MetaspaceShared::misc_code_dump_space()->base(),
 989                              MetaspaceShared::misc_code_dump_space()->used(),
 990                              /*read_only=*/false,/*allow_exec=*/true);
 991   dynamic_info->write_bitmap_region(ArchivePtrMarker::ptrmap());
 992 }
 993 
 994 void DynamicArchiveBuilder::write_archive(char* serialized_data_start) {
 995   int num_klasses = _klasses->length();
 996   int num_symbols = _symbols->length();
 997 
 998   _header->set_serialized_data_start(to_target(serialized_data_start));
 999 
1000   FileMapInfo* dynamic_info = FileMapInfo::dynamic_info();
1001   assert(dynamic_info != NULL, "Sanity");
1002 
1003   // Now write the archived data including the file offsets.
1004   const char* archive_name = Arguments::GetSharedDynamicArchivePath();
1005   dynamic_info->open_for_write(archive_name);
1006   write_regions(dynamic_info);
1007   dynamic_info->set_final_requested_base((char*)Arguments::default_SharedBaseAddress());
1008   dynamic_info->set_header_crc(dynamic_info->compute_header_crc());
1009   dynamic_info->write_header();
1010   dynamic_info->close();
1011 
1012   address base = to_target(_alloc_bottom);
1013   address top  = address(current_dump_space()->top()) + _buffer_to_target_delta;
1014   size_t file_size = pointer_delta(top, base, sizeof(char));
1015 
1016   base += MetaspaceShared::final_delta();
1017   top += MetaspaceShared::final_delta();
1018   log_info(cds, dynamic)("Written dynamic archive " PTR_FORMAT " - " PTR_FORMAT
1019                          " [" SIZE_FORMAT " bytes header, " SIZE_FORMAT " bytes total]",
1020                          p2i(base), p2i(top), _header->header_size(), file_size);
1021   log_info(cds, dynamic)("%d klasses; %d symbols", num_klasses, num_symbols);
1022 }
1023 
1024 
1025 class VM_PopulateDynamicDumpSharedSpace: public VM_Operation {
1026   DynamicArchiveBuilder* _builder;
1027 public:
1028   VM_PopulateDynamicDumpSharedSpace(DynamicArchiveBuilder* builder) : _builder(builder) {}
1029   VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
1030   void doit() {
1031     ResourceMark rm;
1032     if (SystemDictionaryShared::empty_dumptime_table()) {
1033       log_warning(cds, dynamic)("There is no class to be included in the dynamic archive.");
1034       return;
1035     }
1036     if (AllowArchivingWithJavaAgent) {
1037       warning("This archive was created with AllowArchivingWithJavaAgent. It should be used "
1038               "for testing purposes only and should not be used in a production environment");
1039     }
1040     FileMapInfo::check_nonempty_dir_in_shared_path_table();
1041 
1042     _builder->doit();
1043   }
1044 };
1045 
1046 
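// Entry point for dynamic dumping (triggered, for example, by -XX:ArchiveClassesAtExit,
// which sets the SharedDynamicArchivePath). The actual work is done inside a safepoint
// by VM_PopulateDynamicDumpSharedSpace.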
1047 void DynamicArchive::dump() {
1048   if (Arguments::GetSharedDynamicArchivePath() == NULL) {
1049     log_warning(cds, dynamic)("SharedDynamicArchivePath is not specified");
1050     return;
1051   }
1052 
1053   DynamicArchiveBuilder builder;
1054   _builder = &builder;
1055   VM_PopulateDynamicDumpSharedSpace op(&builder);
1056   VMThread::execute(&op);
1057   _builder = NULL;
1058 }
1059 
1060 address DynamicArchive::original_to_buffer_impl(address orig_obj) {
1061   assert(DynamicDumpSharedSpaces, "must be");
1062   address buff_obj = _builder->get_new_loc(orig_obj);
1063   assert(buff_obj != NULL, "orig_obj must be used by the dynamic archive");
1064   assert(buff_obj != orig_obj, "call this only when you know orig_obj must be copied and not just referenced");
1065   assert(_builder->is_in_buffer_space(buff_obj), "must be");
1066   return buff_obj;
1067 }
1068 
1069 address DynamicArchive::buffer_to_target_impl(address buff_obj) {
1070   assert(DynamicDumpSharedSpaces, "must be");
1071   assert(_builder->is_in_buffer_space(buff_obj), "must be");
1072   return _builder->to_target(buff_obj);
1073 }
1074 
1075 address DynamicArchive::original_to_target_impl(address orig_obj) {
1076   assert(DynamicDumpSharedSpaces, "must be");
1077   if (MetaspaceShared::is_in_shared_metaspace(orig_obj)) {
1078     // This happens when the top archive points to a Symbol* in the base archive.
1079     return orig_obj;
1080   }
1081   address buff_obj = _builder->get_new_loc(orig_obj);
1082   assert(buff_obj != NULL, "orig_obj must be used by the dynamic archive");
1083   if (buff_obj == orig_obj) {
1084     // We are storing a pointer to an original object into the dynamic buffer. E.g.,
 1085     // a Symbol* that is used by both the base and top archives.
1086     assert(MetaspaceShared::is_in_shared_metaspace(orig_obj), "must be");
1087     return orig_obj;
1088   } else {
1089     return _builder->to_target(buff_obj);
1090   }
1091 }
1092 
1093 uintx DynamicArchive::object_delta_uintx(void* buff_obj) {
1094   assert(DynamicDumpSharedSpaces, "must be");
1095   address target_obj = _builder->to_target_no_check(address(buff_obj));
1096   assert(uintx(target_obj) >= SharedBaseAddress, "must be");
1097   return uintx(target_obj) - SharedBaseAddress;
1098 }
1099 
1100 bool DynamicArchive::is_in_target_space(void *obj) {
1101   assert(DynamicDumpSharedSpaces, "must be");
1102   return _builder->is_in_target_space(obj);
1103 }
1104 
1105 
1106 DynamicArchiveBuilder* DynamicArchive::_builder = NULL;
1107 
1108 
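// Verify that this dynamic archive was created against the base archive currently in
// use: compare the recorded base header CRC and the per-region CRCs, then validate the
// shared path table recorded in the dynamic archive.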
1109 bool DynamicArchive::validate(FileMapInfo* dynamic_info) {
 1110   // Check whether the recorded base archive matches the current one
1111   FileMapInfo* base_info = FileMapInfo::current_info();
1112   DynamicArchiveHeader* dynamic_header = dynamic_info->dynamic_header();
1113 
1114   // Check the header crc
1115   if (dynamic_header->base_header_crc() != base_info->crc()) {
1116     FileMapInfo::fail_continue("Archive header checksum verification failed.");
1117     return false;
1118   }
1119 
1120   // Check each space's crc
1121   for (int i = 0; i < MetaspaceShared::n_regions; i++) {
1122     if (dynamic_header->base_region_crc(i) != base_info->space_crc(i)) {
1123       FileMapInfo::fail_continue("Archive region #%d checksum verification failed.", i);
1124       return false;
1125     }
1126   }
1127 
 1128   // Validate the dynamic archive's shared path table, and set the global
 1129   // _shared_path_table to it.
1130   if (!dynamic_info->validate_shared_path_table()) {
1131     return false;
1132   }
1133   return true;
1134 }