/*
 * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "logging/log.hpp"
#include "memory/archiveBuilder.hpp"
#include "memory/archiveUtils.inline.hpp"
#include "memory/dynamicArchive.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"


class DynamicArchiveBuilder : public ArchiveBuilder {
public:
  static intx _buffer_to_target_delta;
  DumpRegion* _current_dump_space;

  static size_t reserve_alignment() {
    return os::vm_allocation_granularity();
  }

  static const int _total_dump_regions = 3;
  int _num_dump_regions_used;

public:
  void mark_pointer(address* ptr_loc) {
    ArchivePtrMarker::mark_pointer(ptr_loc);
  }

  DumpRegion* current_dump_space() const {
    return _current_dump_space;
  }

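  // Three address spaces are involved when dumping the dynamic archive:
  //   "original" - the live metadata objects being dumped (e.g., an InstanceKlass in a
  //                ClassLoaderData graph);
  //   "buffer"   - the temporary output buffer, reserved at an arbitrary OS-chosen address,
  //                into which those objects are copied at dump time;
  //   "target"   - the address at which the buffer contents are expected to be mapped at
  //                run time, i.e., buffer address + _buffer_to_target_delta.
  // The helpers below convert and sanity-check addresses between these spaces.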
  bool is_in_buffer_space(address p) const {
    return (_alloc_bottom <= p && p < (address)current_dump_space()->top());
  }

  template <typename T> bool is_in_target_space(T target_obj) const {
    address buff_obj = address(target_obj) - _buffer_to_target_delta;
    return is_in_buffer_space(buff_obj);
  }

  template <typename T> bool is_in_buffer_space(T obj) const {
    return is_in_buffer_space(address(obj));
  }

  template <typename T> T to_target_no_check(T obj) const {
    return (T)(address(obj) + _buffer_to_target_delta);
  }

  template <typename T> T to_target(T obj) const {
    assert(is_in_buffer_space(obj), "must be");
    return (T)(address(obj) + _buffer_to_target_delta);
  }

  template <typename T> T get_dumped_addr(T obj) {
    return (T)ArchiveBuilder::get_dumped_addr((address)obj);
  }

  static int dynamic_dump_method_comparator(Method* a, Method* b) {
    Symbol* a_name = a->name();
    Symbol* b_name = b->name();

    if (a_name == b_name) {
      return 0;
    }

    if (!MetaspaceShared::is_in_shared_metaspace(a_name)) {
      // a_name points to a Symbol in the top archive.
      // When this method is called, a_name is still pointing to the output space.
      // Translate it to point to the target space, so that it can be compared with
      // Symbols in the base archive.
      a_name = (Symbol*)(address(a_name) + _buffer_to_target_delta);
    }
    if (!MetaspaceShared::is_in_shared_metaspace(b_name)) {
      b_name = (Symbol*)(address(b_name) + _buffer_to_target_delta);
    }

    return a_name->fast_compare(b_name);
  }

public:
  DynamicArchiveHeader *_header;
  address _alloc_bottom;
  address _last_verified_top;
  size_t _other_region_used_bytes;

  // Conservative estimate for number of bytes needed for:
  size_t _estimated_hashtable_bytes;     // symbol table and dictionaries
  size_t _estimated_trampoline_bytes;    // method entry trampolines

  size_t estimate_archive_size();
  size_t estimate_trampoline_size();
  size_t estimate_class_file_size();
  address reserve_space_and_init_buffer_to_target_delta();
  void init_header(address addr);
  void release_header();
  void make_trampolines();
  void make_klasses_shareable();
  void sort_methods(InstanceKlass* ik) const;
  void remark_pointers_for_instance_klass(InstanceKlass* k, bool should_mark) const;
  void relocate_buffer_to_target();
  void write_archive(char* serialized_data);

  void init_first_dump_space(address reserved_bottom) {
    DumpRegion* mc_space = MetaspaceShared::misc_code_dump_space();
    DumpRegion* rw_space = MetaspaceShared::read_write_dump_space();

    // Use the same MC->RW->RO ordering as in the base archive.
    MetaspaceShared::init_shared_dump_space(mc_space);
    _current_dump_space = mc_space;
    _last_verified_top = reserved_bottom;
    _num_dump_regions_used = 1;
  }

  void reserve_buffers_for_trampolines() {
    size_t n = _estimated_trampoline_bytes;
    assert(n >= SharedRuntime::trampoline_size(), "don't want to be empty");
    MetaspaceShared::misc_code_space_alloc(n);
  }

public:
  DynamicArchiveBuilder() : ArchiveBuilder(NULL, NULL) {
    _estimated_hashtable_bytes = 0;
    _estimated_trampoline_bytes = 0;

    _num_dump_regions_used = 0;
  }

  void start_dump_space(DumpRegion* next) {
    address bottom = _last_verified_top;
    address top = (address)(current_dump_space()->top());
    _other_region_used_bytes += size_t(top - bottom);

    MetaspaceShared::pack_dump_space(current_dump_space(), next, MetaspaceShared::shared_rs());
    _current_dump_space = next;
    _num_dump_regions_used ++;

    _last_verified_top = (address)(current_dump_space()->top());
  }

  void verify_estimate_size(size_t estimate, const char* which) {
    address bottom = _last_verified_top;
    address top = (address)(current_dump_space()->top());
    size_t used = size_t(top - bottom) + _other_region_used_bytes;
    int diff = int(estimate) - int(used);

    log_info(cds)("%s estimate = " SIZE_FORMAT " used = " SIZE_FORMAT "; diff = %d bytes", which, estimate, used, diff);
    assert(diff >= 0, "Estimate is too small");

    _last_verified_top = top;
    _other_region_used_bytes = 0;
  }

  // Do this before and after the archive dump to see if any corruption
  // is caused by dynamic dumping.
  void verify_universe(const char* info) {
    if (VerifyBeforeExit) {
      log_info(cds)("Verify %s", info);
      // Among other things, this ensures that Eden top is correct.
      Universe::heap()->prepare_for_verify();
      Universe::verify(info);
    }
  }

  void doit() {
    verify_universe("Before CDS dynamic dump");
    DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);
    SystemDictionaryShared::check_excluded_classes();

    gather_klasses_and_symbols();

    // mc space starts ...
    address reserved_bottom = reserve_space_and_init_buffer_to_target_delta();
    set_dump_regions(MetaspaceShared::read_write_dump_space(), MetaspaceShared::read_only_dump_space());
    init_header(reserved_bottom);

    CHeapBitMap ptrmap;
    ArchivePtrMarker::initialize(&ptrmap, (address*)reserved_bottom, (address*)current_dump_space()->top());

    reserve_buffers_for_trampolines();
    verify_estimate_size(_estimated_trampoline_bytes, "Trampolines");

    gather_source_objs();
    // rw space starts ...
    start_dump_space(MetaspaceShared::read_write_dump_space());

    log_info(cds, dynamic)("Copying %d klasses and %d symbols",
                           klasses()->length(), symbols()->length());

    dump_rw_region();

    // ro space starts ...
    DumpRegion* ro_space = MetaspaceShared::read_only_dump_space();
    start_dump_space(ro_space);
    dump_ro_region();
    relocate_pointers();

    verify_estimate_size(_estimated_metsapceobj_bytes, "MetaspaceObjs");

    char* serialized_data;
    {
      // Write the symbol table and system dictionaries to the RO space.
      // Note that these tables still point to the *original* objects, so
      // they would need to call DynamicArchive::original_to_target() to
      // get the correct addresses.
      assert(current_dump_space() == ro_space, "Must be RO space");
      SymbolTable::write_to_archive(false);
      SystemDictionaryShared::write_to_archive(false);

      serialized_data = ro_space->top();
      WriteClosure wc(ro_space);
      SymbolTable::serialize_shared_table_header(&wc, false);
      SystemDictionaryShared::serialize_dictionary_headers(&wc, false);
    }

    verify_estimate_size(_estimated_hashtable_bytes, "Hashtables");

    make_trampolines();

    log_info(cds)("Make classes shareable");
    make_klasses_shareable();

    log_info(cds)("Adjust lambda proxy class dictionary");
    SystemDictionaryShared::adjust_lambda_proxy_class_dictionary();

    log_info(cds)("Final relocation of pointers ... ");
    relocate_buffer_to_target();

    write_archive(serialized_data);
    release_header();

    assert(_num_dump_regions_used == _total_dump_regions, "must be");
    verify_universe("After CDS dynamic dump");
  }

  virtual void iterate_roots(MetaspaceClosure* it, bool is_relocating_pointers) {
    if (!is_relocating_pointers) {
      SystemDictionaryShared::dumptime_classes_do(it);
      SymbolTable::metaspace_pointers_do(it);
    }
    FileMapInfo::metaspace_pointers_do(it);
  }
};

intx DynamicArchiveBuilder::_buffer_to_target_delta;

size_t DynamicArchiveBuilder::estimate_archive_size() {
  // size of the symbol table and two dictionaries, plus the RunTimeSharedClassInfo's
  _estimated_hashtable_bytes = 0;
  _estimated_hashtable_bytes += SymbolTable::estimate_size_for_archive();
  _estimated_hashtable_bytes += SystemDictionaryShared::estimate_size_for_archive();

  _estimated_trampoline_bytes = estimate_trampoline_size();

  size_t total = 0;

  total += _estimated_metsapceobj_bytes;
  total += _estimated_hashtable_bytes;
  total += _estimated_trampoline_bytes;

  // allow fragmentation at the end of each dump region
  total += _total_dump_regions * reserve_alignment();

  return align_up(total, reserve_alignment());
}

address DynamicArchiveBuilder::reserve_space_and_init_buffer_to_target_delta() {
  size_t total = estimate_archive_size();
  ReservedSpace rs(total);
  if (!rs.is_reserved()) {
    log_error(cds, dynamic)("Failed to reserve %d bytes of output buffer.", (int)total);
    vm_direct_exit(0);
  }

  address buffer_base = (address)rs.base();
  log_info(cds, dynamic)("Reserved output buffer space at    : " PTR_FORMAT " [%d bytes]",
                         p2i(buffer_base), (int)total);
  MetaspaceShared::set_shared_rs(rs);

  // At run time, we will mmap the dynamic archive at target_space_bottom.
  // However, at dump time, we may not be able to write into the target_space,
  // as it's occupied by dynamically loaded Klasses. So we allocate a buffer
  // at an arbitrary location chosen by the OS. We will write all the dynamically
  // archived classes into this buffer. At the final stage of dumping, we relocate
  // all pointers that are inside the buffer_space to point to their (runtime)
  // target location inside the target_space.
  address target_space_bottom =
    (address)align_up(MetaspaceShared::shared_metaspace_top(), reserve_alignment());
  _buffer_to_target_delta = intx(target_space_bottom) - intx(buffer_base);
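  // Illustrative example (the addresses are made up): if the base archive ends at
  // 0x0000000801000000, then target_space_bottom == 0x0000000801000000 (after alignment),
  // and if the OS happened to reserve the buffer at 0x00007f3a10000000, then
  // _buffer_to_target_delta == 0x0000000801000000 - 0x00007f3a10000000 (a negative value).
  // to_target(p) == p + _buffer_to_target_delta then maps any buffer address to the
  // address it is expected to have once the archive is mapped at run time.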

  log_info(cds, dynamic)("Target archive space at            : " PTR_FORMAT, p2i(target_space_bottom));
  log_info(cds, dynamic)("Buffer-space to target-space delta : " PTR_FORMAT, p2i((address)_buffer_to_target_delta));

  return buffer_base;
}

void DynamicArchiveBuilder::init_header(address reserved_bottom) {
  _alloc_bottom = reserved_bottom;
  _last_verified_top = reserved_bottom;
  _other_region_used_bytes = 0;

  init_first_dump_space(reserved_bottom);

  FileMapInfo* mapinfo = new FileMapInfo(false);
  assert(FileMapInfo::dynamic_info() == mapinfo, "must be");
  _header = mapinfo->dynamic_header();

  Thread* THREAD = Thread::current();
  FileMapInfo* base_info = FileMapInfo::current_info();
  _header->set_base_header_crc(base_info->crc());
  for (int i = 0; i < MetaspaceShared::n_regions; i++) {
    _header->set_base_region_crc(i, base_info->space_crc(i));
  }
  _header->populate(base_info, os::vm_allocation_granularity());
}

void DynamicArchiveBuilder::release_header() {
  // We temporarily allocated a dynamic FileMapInfo for dumping, which makes it appear we
  // have mapped a dynamic archive, but we actually have not. We are in a safepoint now.
  // Let's free it so that if class loading happens after we leave the safepoint, nothing
  // bad will happen.
  assert(SafepointSynchronize::is_at_safepoint(), "must be");
  FileMapInfo *mapinfo = FileMapInfo::dynamic_info();
  assert(mapinfo != NULL && _header == mapinfo->dynamic_header(), "must be");
  delete mapinfo;
  assert(!DynamicArchive::is_mapped(), "must be");
  _header = NULL;
}

size_t DynamicArchiveBuilder::estimate_trampoline_size() {
  size_t total = 0;
  size_t each_method_bytes =
    align_up(SharedRuntime::trampoline_size(), BytesPerWord) +
    align_up(sizeof(AdapterHandlerEntry*), BytesPerWord);

  for (int i = 0; i < klasses()->length(); i++) {
    Klass* k = klasses()->at(i);
    if (k->is_instance_klass()) {
      Array<Method*>* methods = InstanceKlass::cast(k)->methods();
      total += each_method_bytes * methods->length();
    }
  }
  if (total == 0) {
    // We have nothing to archive, but let's avoid having an empty region.
    total = SharedRuntime::trampoline_size();
  }
  return total;
}

void DynamicArchiveBuilder::make_trampolines() {
  DumpRegion* mc_space = MetaspaceShared::misc_code_dump_space();
  char* p = mc_space->base();
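  // Lay out one record per archived method, back to back:
  //   [ c2i entry trampoline | AdapterHandlerEntry* slot ][ next method's record ] ...
  // Each Method in the buffer is then pointed at the target (runtime) address of its
  // record via to_target().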
  for (int i = 0; i < klasses()->length(); i++) {
    Klass* k = klasses()->at(i);
    if (!k->is_instance_klass()) {
      continue;
    }
    InstanceKlass* ik = InstanceKlass::cast(k);
    Array<Method*>* methods = ik->methods();
    for (int j = 0; j < methods->length(); j++) {
      Method* m = methods->at(j);
      address c2i_entry_trampoline = (address)p;
      p += SharedRuntime::trampoline_size();
      assert(p >= mc_space->base() && p <= mc_space->top(), "must be");
      m->set_from_compiled_entry(to_target(c2i_entry_trampoline));

      AdapterHandlerEntry** adapter_trampoline = (AdapterHandlerEntry**)p;
      p += sizeof(AdapterHandlerEntry*);
      assert(p >= mc_space->base() && p <= mc_space->top(), "must be");
      *adapter_trampoline = NULL;
      m->set_adapter_trampoline(to_target(adapter_trampoline));
    }
  }

  guarantee(p <= mc_space->top(), "Estimate of trampoline size is insufficient");
}

void DynamicArchiveBuilder::make_klasses_shareable() {
  int i, count = klasses()->length();

  InstanceKlass::disable_method_binary_search();
  for (i = 0; i < count; i++) {
    Klass* k = klasses()->at(i);
    if (k->is_instance_klass()) {
      sort_methods(InstanceKlass::cast(k));
    }
  }

  for (i = 0; i < count; i++) {
    Klass* k = klasses()->at(i);
    if (!k->is_instance_klass()) {
      continue;
    }
    InstanceKlass* ik = InstanceKlass::cast(k);
    ik->assign_class_loader_type();

    MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread::current(), ik);
    ik->remove_unshareable_info();

    assert(ik->array_klasses() == NULL, "sanity");

    if (log_is_enabled(Debug, cds, dynamic)) {
      ResourceMark rm;
      log_debug(cds, dynamic)("klasses[%4i] = " PTR_FORMAT " %s", i, p2i(to_target(ik)), ik->external_name());
    }
  }
}

// The address order of the copied Symbols may be different than when the original
// klasses were created. Re-sort all the tables. See Method::sort_methods().
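// For example, if two name Symbols were copied into the buffer in the opposite address
// order from the original Symbols, a runtime binary search by Symbol address would fail
// unless the method array is re-sorted here using the target (runtime) addresses of the
// Symbols (see dynamic_dump_method_comparator() above).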
void DynamicArchiveBuilder::sort_methods(InstanceKlass* ik) const {
  assert(ik != NULL, "DynamicArchiveBuilder currently doesn't support dumping the base archive");
  if (MetaspaceShared::is_in_shared_metaspace(ik)) {
    // We have reached a supertype that's already in the base archive
    return;
  }

  if (ik->java_mirror() == NULL) {
    // NULL mirror means this class has already been visited and methods are already sorted
    return;
  }
  ik->remove_java_mirror();

  if (log_is_enabled(Debug, cds, dynamic)) {
    ResourceMark rm;
    log_debug(cds, dynamic)("sorting methods for " PTR_FORMAT " %s", p2i(to_target(ik)), ik->external_name());
  }

  // Method sorting may re-layout the [iv]tables, which would change the offset(s)
  // of the locations in an InstanceKlass that would contain pointers. Let's clear
  // all the existing pointer marking bits, and re-mark the pointers after sorting.
  remark_pointers_for_instance_klass(ik, false);

  // Make sure all supertypes have been sorted
  sort_methods(ik->java_super());
  Array<InstanceKlass*>* interfaces = ik->local_interfaces();
  int len = interfaces->length();
  for (int i = 0; i < len; i++) {
    sort_methods(interfaces->at(i));
  }

#ifdef ASSERT
  if (ik->methods() != NULL) {
    for (int m = 0; m < ik->methods()->length(); m++) {
      Symbol* name = ik->methods()->at(m)->name();
      assert(MetaspaceShared::is_in_shared_metaspace(name) || is_in_buffer_space(name), "must be");
    }
  }
  if (ik->default_methods() != NULL) {
    for (int m = 0; m < ik->default_methods()->length(); m++) {
      Symbol* name = ik->default_methods()->at(m)->name();
      assert(MetaspaceShared::is_in_shared_metaspace(name) || is_in_buffer_space(name), "must be");
    }
  }
#endif

  Thread* THREAD = Thread::current();
  Method::sort_methods(ik->methods(), /*set_idnums=*/true, dynamic_dump_method_comparator);
  if (ik->default_methods() != NULL) {
    Method::sort_methods(ik->default_methods(), /*set_idnums=*/false, dynamic_dump_method_comparator);
  }
  ik->vtable().initialize_vtable(true, THREAD); assert(!HAS_PENDING_EXCEPTION, "cannot fail");
  ik->itable().initialize_itable(true, THREAD); assert(!HAS_PENDING_EXCEPTION, "cannot fail");

  // Set all the pointer marking bits after sorting.
  remark_pointers_for_instance_klass(ik, true);
}

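// Sets or clears the pointer-marking bits for the metaspace pointer fields of a single
// InstanceKlass, as visited by metaspace_pointers_do(); used by
// remark_pointers_for_instance_klass() below.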
template<bool should_mark>
class PointerRemarker: public MetaspaceClosure {
public:
  virtual bool do_ref(Ref* ref, bool read_only) {
    if (should_mark) {
      ArchivePtrMarker::mark_pointer(ref->addr());
    } else {
      ArchivePtrMarker::clear_pointer(ref->addr());
    }
    return false; // don't recurse
  }
};

void DynamicArchiveBuilder::remark_pointers_for_instance_klass(InstanceKlass* k, bool should_mark) const {
  if (should_mark) {
    PointerRemarker<true> marker;
    k->metaspace_pointers_do(&marker);
    marker.finish();
  } else {
    PointerRemarker<false> marker;
    k->metaspace_pointers_do(&marker);
    marker.finish();
  }
}

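// Performs the "final patch": for every marked pointer slot in the buffer, if the slot
// still holds a buffer address, shift it by _buffer_to_target_delta so that it refers to
// the archive's runtime (target) location.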
class RelocateBufferToTarget: public BitMapClosure {
  DynamicArchiveBuilder *_builder;
  address* _buffer_bottom;
  intx _buffer_to_target_delta;
 public:
  RelocateBufferToTarget(DynamicArchiveBuilder* builder, address* bottom, intx delta) :
    _builder(builder), _buffer_bottom(bottom), _buffer_to_target_delta(delta) {}

  bool do_bit(size_t offset) {
    address* p = _buffer_bottom + offset;
    assert(_builder->is_in_buffer_space(p), "pointer must live in buffer space");

    address old_ptr = *p;
    if (_builder->is_in_buffer_space(old_ptr)) {
      address new_ptr = old_ptr + _buffer_to_target_delta;
      log_trace(cds, dynamic)("Final patch: @%6d [" PTR_FORMAT " -> " PTR_FORMAT "] " PTR_FORMAT " => " PTR_FORMAT,
                              (int)offset, p2i(p), p2i(_builder->to_target(p)),
                              p2i(old_ptr), p2i(new_ptr));
      *p = new_ptr;
    }

    return true; // keep iterating
  }
};

void DynamicArchiveBuilder::relocate_buffer_to_target() {
  RelocateBufferToTarget patcher(this, (address*)_alloc_bottom, _buffer_to_target_delta);
  ArchivePtrMarker::ptrmap()->iterate(&patcher);

  Array<u8>* table = FileMapInfo::saved_shared_path_table().table();
  SharedPathTable runtime_table(to_target(table), FileMapInfo::shared_path_table().size());
  _header->set_shared_path_table(runtime_table);

  address relocatable_base = (address)SharedBaseAddress;
  address relocatable_end = (address)(current_dump_space()->top()) + _buffer_to_target_delta;

  intx addr_delta = MetaspaceShared::final_delta();
  if (addr_delta == 0) {
    ArchivePtrMarker::compact(relocatable_base, relocatable_end);
  } else {
    // The base archive is NOT mapped at MetaspaceShared::requested_base_address() (due to ASLR).
    // This means that the current content of the dynamic archive is based on a random
    // address. Let's relocate all the pointers, so that it can be mapped to
    // MetaspaceShared::requested_base_address() without runtime relocation.
    //
    // Note: both the base and dynamic archive are written with
    // FileMapHeader::_requested_base_address == MetaspaceShared::requested_base_address()

    // Patch all pointers that are marked by ptrmap within this region,
    // where we have just dumped all the metaspace data.
    address patch_base = (address)_alloc_bottom;
    address patch_end  = (address)current_dump_space()->top();

    // the current value of the pointers to be patched must be within this
    // range (i.e., must point to either the base archive (as currently mapped), or to the
    // (targeted address of) the top archive)
    address valid_old_base = relocatable_base;
    address valid_old_end  = relocatable_end;
    size_t base_plus_top_size = valid_old_end - valid_old_base;
    size_t top_size = patch_end - patch_base;
    size_t base_size = base_plus_top_size - top_size;
    assert(base_plus_top_size > base_size, "no overflow");
    assert(base_plus_top_size > top_size, "no overflow");

    // after patching, the pointers must point inside this range
    // (the requested location of the archive, as mapped at runtime).
    address valid_new_base = (address)MetaspaceShared::requested_base_address();
    address valid_new_end  = valid_new_base + base_plus_top_size;

    log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT "] to "
                   "[" INTPTR_FORMAT " - " INTPTR_FORMAT "], delta = " INTX_FORMAT " bytes",
                   p2i(patch_base + base_size), p2i(patch_end),
                   p2i(valid_new_base + base_size), p2i(valid_new_end), addr_delta);

    SharedDataRelocator<true> patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
                                      valid_new_base, valid_new_end, addr_delta, ArchivePtrMarker::ptrmap());
    ArchivePtrMarker::ptrmap()->iterate(&patcher);
    ArchivePtrMarker::compact(patcher.max_non_null_offset());
  }
}

void DynamicArchiveBuilder::write_archive(char* serialized_data) {
  int num_klasses = klasses()->length();
  int num_symbols = symbols()->length();

  _header->set_serialized_data(to_target(serialized_data));

  FileMapInfo* dynamic_info = FileMapInfo::dynamic_info();
  assert(dynamic_info != NULL, "Sanity");

  // Now write the archived data including the file offsets.
  const char* archive_name = Arguments::GetSharedDynamicArchivePath();
  dynamic_info->open_for_write(archive_name);
  MetaspaceShared::write_core_archive_regions(dynamic_info, NULL, NULL);
  dynamic_info->set_final_requested_base((char*)MetaspaceShared::requested_base_address());
  dynamic_info->set_header_crc(dynamic_info->compute_header_crc());
  dynamic_info->write_header();
  dynamic_info->close();

  address base = to_target(_alloc_bottom);
  address top  = address(current_dump_space()->top()) + _buffer_to_target_delta;
  size_t file_size = pointer_delta(top, base, sizeof(char));

  base += MetaspaceShared::final_delta();
  top += MetaspaceShared::final_delta();
  log_info(cds, dynamic)("Written dynamic archive " PTR_FORMAT " - " PTR_FORMAT
                         " [" SIZE_FORMAT " bytes header, " SIZE_FORMAT " bytes total]",
                         p2i(base), p2i(top), _header->header_size(), file_size);
  log_info(cds, dynamic)("%d klasses; %d symbols", num_klasses, num_symbols);
}

class VM_PopulateDynamicDumpSharedSpace: public VM_Operation {
  DynamicArchiveBuilder* _builder;
public:
  VM_PopulateDynamicDumpSharedSpace(DynamicArchiveBuilder* builder) : _builder(builder) {}
  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit() {
    ResourceMark rm;
    if (SystemDictionaryShared::empty_dumptime_table()) {
      log_warning(cds, dynamic)("There is no class to be included in the dynamic archive.");
      return;
    }
    if (AllowArchivingWithJavaAgent) {
      warning("This archive was created with AllowArchivingWithJavaAgent. It should be used "
              "for testing purposes only and should not be used in a production environment");
    }
    FileMapInfo::check_nonempty_dir_in_shared_path_table();

    _builder->doit();
  }
};


void DynamicArchive::dump() {
  if (Arguments::GetSharedDynamicArchivePath() == NULL) {
    log_warning(cds, dynamic)("SharedDynamicArchivePath is not specified");
    return;
  }

  DynamicArchiveBuilder builder;
  _builder = &builder;
  VM_PopulateDynamicDumpSharedSpace op(&builder);
  VMThread::execute(&op);
  _builder = NULL;
}

address DynamicArchive::original_to_buffer_impl(address orig_obj) {
  assert(DynamicDumpSharedSpaces, "must be");
  address buff_obj = _builder->get_dumped_addr(orig_obj);
  assert(buff_obj != NULL, "orig_obj must be used by the dynamic archive");
  assert(buff_obj != orig_obj, "call this only when you know orig_obj must be copied and not just referenced");
  assert(_builder->is_in_buffer_space(buff_obj), "must be");
  return buff_obj;
}

address DynamicArchive::buffer_to_target_impl(address buff_obj) {
  assert(DynamicDumpSharedSpaces, "must be");
  assert(_builder->is_in_buffer_space(buff_obj), "must be");
  return _builder->to_target(buff_obj);
}

address DynamicArchive::original_to_target_impl(address orig_obj) {
  assert(DynamicDumpSharedSpaces, "must be");
  if (MetaspaceShared::is_in_shared_metaspace(orig_obj)) {
    // This happens when the top archive points to a Symbol* in the base archive.
    return orig_obj;
  }
  address buff_obj = _builder->get_dumped_addr(orig_obj);
  assert(buff_obj != NULL, "orig_obj must be used by the dynamic archive");
  if (buff_obj == orig_obj) {
    // We are storing a pointer to an original object into the dynamic buffer. E.g.,
    // a Symbol* that is used by both the base and top archives.
    assert(MetaspaceShared::is_in_shared_metaspace(orig_obj), "must be");
    return orig_obj;
  } else {
    return _builder->to_target(buff_obj);
  }
}

uintx DynamicArchive::object_delta_uintx(void* buff_obj) {
  assert(DynamicDumpSharedSpaces, "must be");
  address target_obj = _builder->to_target_no_check(address(buff_obj));
  assert(uintx(target_obj) >= SharedBaseAddress, "must be");
  return uintx(target_obj) - SharedBaseAddress;
}

bool DynamicArchive::is_in_target_space(void *obj) {
  assert(DynamicDumpSharedSpaces, "must be");
  return _builder->is_in_target_space(obj);
}


DynamicArchiveBuilder* DynamicArchive::_builder = NULL;


bool DynamicArchive::validate(FileMapInfo* dynamic_info) {
  assert(!dynamic_info->is_static(), "must be");
  // Check if the recorded base archive matches with the current one
  FileMapInfo* base_info = FileMapInfo::current_info();
  DynamicArchiveHeader* dynamic_header = dynamic_info->dynamic_header();

  // Check the header crc
  if (dynamic_header->base_header_crc() != base_info->crc()) {
    FileMapInfo::fail_continue("Dynamic archive cannot be used: static archive header checksum verification failed.");
    return false;
  }

  // Check each space's crc
  for (int i = 0; i < MetaspaceShared::n_regions; i++) {
    if (dynamic_header->base_region_crc(i) != base_info->space_crc(i)) {
      FileMapInfo::fail_continue("Dynamic archive cannot be used: static archive region #%d checksum verification failed.", i);
      return false;
    }
  }

  return true;
}