/*
 * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/classListParser.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/loaderConstraints.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/placeholders.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodes.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "memory/archiveUtils.inline.hpp"
#include "memory/dynamicArchive.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/signature.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/ostream.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/hashtable.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

ReservedSpace MetaspaceShared::_shared_rs;
VirtualSpace MetaspaceShared::_shared_vs;
ReservedSpace MetaspaceShared::_symbol_rs;
VirtualSpace MetaspaceShared::_symbol_vs;
MetaspaceSharedStats MetaspaceShared::_stats;
bool MetaspaceShared::_has_error_classes;
bool MetaspaceShared::_archive_loading_failed = false;
bool MetaspaceShared::_remapped_readwrite = false;
address MetaspaceShared::_i2i_entry_code_buffers = NULL;
size_t MetaspaceShared::_i2i_entry_code_buffers_size = 0;
void* MetaspaceShared::_shared_metaspace_static_top = NULL;
intx MetaspaceShared::_relocation_delta;
char* MetaspaceShared::_requested_base_address;
bool MetaspaceShared::_use_optimized_module_handling = true;
bool MetaspaceShared::_use_full_module_graph = true;

// The CDS archive is divided into the following regions:
//     mc  - misc code (the method entry trampolines, c++ vtables)
//     rw  - read-write metadata
//     ro  - read-only metadata and read-only tables
//
//     ca0 - closed archive heap space #0
//     ca1 - closed archive heap space #1 (may be empty)
//     oa0 - open archive heap space #0
//     oa1 - open archive heap space #1 (may be empty)
//
// The mc, rw, and ro regions are linearly allocated, starting from
// SharedBaseAddress, in the order of mc->rw->ro. The sizes of these 3 regions
// are page-aligned, and there's no gap between any consecutive regions.
//
// These 3 regions are populated in the following steps:
// [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
//     temporarily allocated outside of the shared regions. Only the method entry
//     trampolines are written into the mc region.
// [2] C++ vtables are copied into the mc region.
// [3] ArchiveCompactor copies RW metadata into the rw region.
// [4] ArchiveCompactor copies RO metadata into the ro region.
// [5] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
//     are copied into the ro region as read-only tables.
//
// The ca0/ca1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
// Their layout is independent of the mc/rw/ro regions.

char* DumpRegion::expand_top_to(char* newtop) {
  assert(is_allocatable(), "must be initialized and not packed");
  assert(newtop >= _top, "must not grow backwards");
  if (newtop > _end) {
    MetaspaceShared::report_out_of_space(_name, newtop - _top);
    ShouldNotReachHere();
  }

  if (_rs == MetaspaceShared::shared_rs()) {
    uintx delta;
    if (DynamicDumpSharedSpaces) {
      delta = DynamicArchive::object_delta_uintx(newtop);
    } else {
      delta = MetaspaceShared::object_delta_uintx(newtop);
    }
    if (delta > MAX_SHARED_DELTA) {
      // This is just a sanity check and should not happen in any real-world usage:
      // it would be triggered only by allocating more than 2GB of shared objects,
      // which would require millions of shared classes.
      vm_exit_during_initialization("Out of memory in the CDS archive",
                                    "Please reduce the number of shared classes.");
    }
  }

  MetaspaceShared::commit_to(_rs, _vs, newtop);
  _top = newtop;
  return _top;
}

char* DumpRegion::allocate(size_t num_bytes, size_t alignment) {
  char* p = (char*)align_up(_top, alignment);
  char* newtop = p + align_up(num_bytes, alignment);
  expand_top_to(newtop);
  memset(p, 0, newtop - p);
  return p;
}
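
// For illustration: with _top == 0x1000, allocate(13, 8) returns p == 0x1000
// and leaves _top at 0x1010, since both the starting address and the
// requested size are rounded up to the 8-byte alignment; the returned bytes
// are zero-filled.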

void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
  assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
  intptr_t *p = (intptr_t*)_top;
  char* newtop = _top + sizeof(intptr_t);
  expand_top_to(newtop);
  *p = n;
  if (need_to_mark) {
    ArchivePtrMarker::mark_pointer(p);
  }
}

void DumpRegion::print(size_t total_bytes) const {
  log_debug(cds)("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
                 _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()),
                 p2i(_base + MetaspaceShared::final_delta()));
}

void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
  log_error(cds)("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
                 _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
  if (strcmp(_name, failing_region) == 0) {
    log_error(cds)(" required = %d", int(needed_bytes));
  }
}

void DumpRegion::init(ReservedSpace* rs, VirtualSpace* vs) {
  _rs = rs;
  _vs = vs;
  // Start with 0 committed bytes. The memory will be committed as needed by
  // MetaspaceShared::commit_to().
  if (!_vs->initialize(*_rs, 0)) {
    fatal("Unable to allocate memory for shared space");
  }
  _base = _top = _rs->base();
  _end = _rs->end();
}

void DumpRegion::pack(DumpRegion* next) {
  assert(!is_packed(), "sanity");
  _end = (char*)align_up(_top, MetaspaceShared::reserved_space_alignment());
  _is_packed = true;
  if (next != NULL) {
    next->_rs = _rs;
    next->_vs = _vs;
    next->_base = next->_top = this->_end;
    next->_end = _rs->end();
  }
}
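
// Usage sketch: regions are packed in sequence so that each region starts at
// the aligned end of the previous one, e.g.
//   _mc_region.pack(&_rw_region);  // rw begins where mc ends
//   _rw_region.pack(&_ro_region);  // ro begins where rw ends
// which yields the contiguous mc->rw->ro layout described at the top of this
// file.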

static DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _symbol_region("symbols");
static size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;

void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space) {
  first_space->init(&_shared_rs, &_shared_vs);
}

DumpRegion* MetaspaceShared::misc_code_dump_space() {
  return &_mc_region;
}

DumpRegion* MetaspaceShared::read_write_dump_space() {
  return &_rw_region;
}

DumpRegion* MetaspaceShared::read_only_dump_space() {
  return &_ro_region;
}

void MetaspaceShared::pack_dump_space(DumpRegion* current, DumpRegion* next,
                                      ReservedSpace* rs) {
  current->pack(next);
}

char* MetaspaceShared::symbol_space_alloc(size_t num_bytes) {
  return _symbol_region.allocate(num_bytes);
}

char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
  return _mc_region.allocate(num_bytes);
}

char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
  return _ro_region.allocate(num_bytes);
}

char* MetaspaceShared::read_write_space_alloc(size_t num_bytes) {
  return _rw_region.allocate(num_bytes);
}

size_t MetaspaceShared::reserved_space_alignment() { return os::vm_allocation_granularity(); }

static bool shared_base_valid(char* shared_base) {
#ifdef _LP64
  return CompressedKlassPointers::is_valid_base((address)shared_base);
#else
  return true;
#endif
}

static bool shared_base_too_high(char* shared_base, size_t cds_total) {
  if (SharedBaseAddress != 0 && shared_base < (char*)SharedBaseAddress) {
    // SharedBaseAddress is very high (e.g., 0xffffffffffffff00) so
    // align_up(SharedBaseAddress, MetaspaceShared::reserved_space_alignment()) has wrapped around.
    return true;
  }
  if (max_uintx - uintx(shared_base) < uintx(cds_total)) {
    // The end of the archive will wrap around
    return true;
  }

  return false;
}
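
// Worked example (64-bit, illustrative): with
// -XX:SharedBaseAddress=0xffffffffffffff00 and a 64KB allocation granularity,
// align_up() wraps around to 0, making shared_base compare below
// SharedBaseAddress; and even without wrapping, any base within cds_total
// bytes of the top of the address space would make the archive's end wrap.
// Both cases are rejected, and compute_shared_base() then falls back to the
// default base address.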

static char* compute_shared_base(size_t cds_total) {
  char* shared_base = (char*)align_up((char*)SharedBaseAddress, MetaspaceShared::reserved_space_alignment());
  const char* err = NULL;
  if (shared_base_too_high(shared_base, cds_total)) {
    err = "too high";
  } else if (!shared_base_valid(shared_base)) {
    err = "invalid for this platform";
  }
  if (err) {
    log_warning(cds)("SharedBaseAddress (" INTPTR_FORMAT ") is %s. Reverted to " INTPTR_FORMAT,
                     p2i((void*)SharedBaseAddress), err,
                     p2i((void*)Arguments::default_SharedBaseAddress()));
    SharedBaseAddress = Arguments::default_SharedBaseAddress();
    shared_base = (char*)align_up((char*)SharedBaseAddress, MetaspaceShared::reserved_space_alignment());
  }
  assert(!shared_base_too_high(shared_base, cds_total) && shared_base_valid(shared_base), "Sanity");
  return shared_base;
}

void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
  assert(DumpSharedSpaces, "should be called for dump time only");

  const size_t reserve_alignment = MetaspaceShared::reserved_space_alignment();

#ifdef _LP64
  // On 64-bit VM we reserve a 4G range and, if UseCompressedClassPointers=1,
  //  will use that to house both the archives and the ccs. See below for
  //  details.
  const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
  const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
#else
  // We don't support archives larger than 256MB on 32-bit due to limited
  //  virtual address space.
  size_t cds_total = align_down(256*M, reserve_alignment);
#endif

  char* shared_base = compute_shared_base(cds_total);
  _requested_base_address = shared_base;

  // Whether to use SharedBaseAddress as attach address.
  bool use_requested_base = true;

  if (shared_base == NULL) {
    use_requested_base = false;
  }

  if (ArchiveRelocationMode == 1) {
    log_info(cds)("ArchiveRelocationMode == 1: always allocate class space at an alternative address");
    use_requested_base = false;
  }

  // First try to reserve the space at the specified SharedBaseAddress.
  assert(!_shared_rs.is_reserved(), "must be");
  if (use_requested_base) {
    _shared_rs = ReservedSpace(cds_total, reserve_alignment,
                               false /* large */, (char*)shared_base);
    if (_shared_rs.is_reserved()) {
      assert(_shared_rs.base() == shared_base, "should match");
    } else {
      log_info(cds)("dumptime space reservation: failed to map at "
                    "SharedBaseAddress " PTR_FORMAT, p2i(shared_base));
    }
  }
  if (!_shared_rs.is_reserved()) {
    // Get a reserved space anywhere if attaching at the SharedBaseAddress
    //  fails:
    if (UseCompressedClassPointers) {
      // If we need to reserve class space as well, let the platform handle
      //  the reservation.
      LP64_ONLY(_shared_rs =
                Metaspace::reserve_address_space_for_compressed_classes(cds_total);)
      NOT_LP64(ShouldNotReachHere();)
    } else {
      // anywhere is fine.
      _shared_rs = ReservedSpace(cds_total, reserve_alignment,
                                 false /* large */, (char*)NULL);
    }
  }

  if (!_shared_rs.is_reserved()) {
    vm_exit_during_initialization("Unable to reserve memory for shared space",
                                  err_msg(SIZE_FORMAT " bytes.", cds_total));
  }

#ifdef _LP64

  if (UseCompressedClassPointers) {

    assert(CompressedKlassPointers::is_valid_base((address)_shared_rs.base()), "Sanity");

    // On 64-bit VM, if UseCompressedClassPointers=1, the compressed class space
    //  must be allocated near the cds such that the compressed Klass pointer
    //  encoding can be used to en/decode pointers from both cds and ccs. Since
    //  Metaspace cannot do this (it knows nothing about cds), we do it for
    //  Metaspace here and pass it the space to use for ccs.
    //
    // We do this by reserving space for the ccs behind the archives. Note
    //  however that ccs follows a different alignment
    //  (Metaspace::reserve_alignment), so there may be a gap between ccs and
    //  cds.
    // We use a similar layout at runtime, see reserve_address_space_for_archives().
    //
    //                              +-- SharedBaseAddress (default = 0x800000000)
    //                              v
    // +-..---------+---------+ ... +----+----+----+--------+-----------------+
    // |    Heap    | Archive |     | MC | RW | RO | [gap]  |    class space  |
    // +-..---------+---------+ ... +----+----+----+--------+-----------------+
    // |<--   MaxHeapSize  -->|     |<-- UnscaledClassSpaceMax = 4GB -->|
    //
    // Note: ccs must follow the archives, and the archives must start at the
    //  encoding base. However, the exact placement of ccs does not matter as
    //  long as it resides in the encoding range of CompressedKlassPointers
    //  and comes after the archive.
    //
    // We do this by splitting up the allocated 4G into 3G of archive space,
    //  followed by 1G for the ccs:
    // + The upper 1 GB is used as the "temporary compressed class space"
    //   -- preload_classes() will store Klasses into this space.
    // + The lower 3 GB is used for the archive -- when preload_classes()
    //   is done, ArchiveCompactor will copy the class metadata into this
    //   space, first the RW parts, then the RO parts.

    // Starting address of ccs must be aligned to Metaspace::reserve_alignment()...
    size_t class_space_size = align_down(_shared_rs.size() / 4, Metaspace::reserve_alignment());
    address class_space_start = (address)align_down(_shared_rs.end() - class_space_size, Metaspace::reserve_alignment());
    size_t archive_size = class_space_start - (address)_shared_rs.base();

    ReservedSpace tmp_class_space = _shared_rs.last_part(archive_size);
    _shared_rs = _shared_rs.first_part(archive_size);

    // ... as does the size of ccs.
    tmp_class_space = tmp_class_space.first_part(class_space_size);
    CompressedClassSpaceSize = class_space_size;

    // Let Metaspace initialize ccs
    Metaspace::initialize_class_space(tmp_class_space);

    // and set up CompressedKlassPointers encoding.
    CompressedKlassPointers::initialize((address)_shared_rs.base(), cds_total);

    log_info(cds)("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
                  p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());

    log_info(cds)("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                  CompressedClassSpaceSize, p2i(tmp_class_space.base()));

    assert(_shared_rs.end() == tmp_class_space.base() &&
           is_aligned(_shared_rs.base(), MetaspaceShared::reserved_space_alignment()) &&
           is_aligned(tmp_class_space.base(), Metaspace::reserve_alignment()) &&
           is_aligned(tmp_class_space.size(), Metaspace::reserve_alignment()), "Sanity");
  }

#endif

  init_shared_dump_space(&_mc_region);
  SharedBaseAddress = (size_t)_shared_rs.base();
  log_info(cds)("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                _shared_rs.size(), p2i(_shared_rs.base()));

  // We don't want any valid object to be at the very bottom of the archive.
  // See ArchivePtrMarker::mark_pointer().
  MetaspaceShared::misc_code_space_alloc(16);

  size_t symbol_rs_size = LP64_ONLY(3 * G) NOT_LP64(128 * M);
  _symbol_rs = ReservedSpace(symbol_rs_size);
  if (!_symbol_rs.is_reserved()) {
    vm_exit_during_initialization("Unable to reserve memory for symbols",
                                  err_msg(SIZE_FORMAT " bytes.", symbol_rs_size));
  }
  _symbol_region.init(&_symbol_rs, &_symbol_vs);
}

// Called by universe_post_init()
void MetaspaceShared::post_initialize(TRAPS) {
  if (UseSharedSpaces) {
    int size = FileMapInfo::get_number_of_shared_paths();
    if (size > 0) {
      SystemDictionaryShared::allocate_shared_data_arrays(size, THREAD);
      if (!DynamicDumpSharedSpaces) {
        FileMapInfo* info;
        if (FileMapInfo::dynamic_info() == NULL) {
          info = FileMapInfo::current_info();
        } else {
          info = FileMapInfo::dynamic_info();
        }
        ClassLoaderExt::init_paths_start_index(info->app_class_paths_start_index());
        ClassLoaderExt::init_app_module_paths_start_index(info->app_module_paths_start_index());
      }
    }
  }
}

static GrowableArrayCHeap<Handle, mtClassShared>* _extra_interned_strings = NULL;

void MetaspaceShared::read_extra_data(const char* filename, TRAPS) {
  _extra_interned_strings = new GrowableArrayCHeap<Handle, mtClassShared>(10000);

  HashtableTextDump reader(filename);
  reader.check_version("VERSION: 1.0");

  while (reader.remain() > 0) {
    int utf8_length;
    int prefix_type = reader.scan_prefix(&utf8_length);
    ResourceMark rm(THREAD);
    if (utf8_length == 0x7fffffff) {
      // buf_len would overflow a 32-bit value.
      vm_exit_during_initialization(err_msg("string length too large: %d", utf8_length));
    }
    int buf_len = utf8_length+1;
    char* utf8_buffer = NEW_RESOURCE_ARRAY(char, buf_len);
    reader.get_utf8(utf8_buffer, utf8_length);
    utf8_buffer[utf8_length] = '\0';

    if (prefix_type == HashtableTextDump::SymbolPrefix) {
      SymbolTable::new_permanent_symbol(utf8_buffer);
    } else {
      assert(prefix_type == HashtableTextDump::StringPrefix, "Sanity");
      oop s = StringTable::intern(utf8_buffer, THREAD);

      if (HAS_PENDING_EXCEPTION) {
        log_warning(cds, heap)("[line %d] extra interned string allocation failed; size too large: %d",
                               reader.last_line_no(), utf8_length);
        CLEAR_PENDING_EXCEPTION;
      } else {
#if INCLUDE_G1GC
        if (UseG1GC) {
          typeArrayOop body = java_lang_String::value(s);
          const HeapRegion* hr = G1CollectedHeap::heap()->heap_region_containing(body);
          if (hr->is_humongous()) {
            // Don't keep it alive, so it will be GC'ed before we dump the strings, in order
            // to maximize free heap space and minimize fragmentation.
            log_warning(cds, heap)("[line %d] extra interned string ignored; size too large: %d",
                                reader.last_line_no(), utf8_length);
            continue;
          }
        }
#endif
        // Interned strings are GC'ed if there are no references to them, so let's
        // add a reference to keep this string alive.
        assert(s != NULL, "must succeed");
        Handle h(THREAD, s);
        _extra_interned_strings->append(h);
      }
    }
  }
}
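
// A sketch of the input parsed here (the format used with
// -XX:SharedArchiveConfigFile; the exact entry syntax is defined by
// HashtableTextDump):
//
//   VERSION: 1.0
//   @SECTION: Symbol
//   10 -1: linkMethod
//   @SECTION: String
//   31: shared_string_in_config_file_0
//
// Each entry carries its utf8 length up front, so the parser can size the
// buffer before reading the payload.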

void MetaspaceShared::commit_to(ReservedSpace* rs, VirtualSpace* vs, char* newtop) {
  Arguments::assert_is_dumping_archive();
  char* base = rs->base();
  size_t need_committed_size = newtop - base;
  size_t has_committed_size = vs->committed_size();
  if (need_committed_size < has_committed_size) {
    return;
  }

  size_t min_bytes = need_committed_size - has_committed_size;
  size_t preferred_bytes = 1 * M;
  size_t uncommitted = vs->reserved_size() - has_committed_size;

  size_t commit = MAX2(min_bytes, preferred_bytes);
  commit = MIN2(commit, uncommitted);
  assert(commit <= uncommitted, "sanity");

  bool result = vs->expand_by(commit, false);
  if (rs == &_shared_rs) {
    ArchivePtrMarker::expand_ptr_end((address*)vs->high());
  }

  if (!result) {
    vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
                                          need_committed_size));
  }

  assert(rs == &_shared_rs || rs == &_symbol_rs, "must be");
  const char* which = (rs == &_shared_rs) ? "shared" : "symbol";
  log_debug(cds)("Expanding %s spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9)  " bytes ending at %p]",
                 which, commit, vs->actual_committed_size(), vs->high());
}
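
// Worked example (illustrative): if the region needs 4 KB beyond what is
// already committed, we still commit MAX2(4K, 1M) == 1 MB to amortize the
// cost of repeated commit calls, capped by whatever remains uncommitted in
// the reservation.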

void MetaspaceShared::initialize_ptr_marker(CHeapBitMap* ptrmap) {
  ArchivePtrMarker::initialize(ptrmap, (address*)_shared_vs.low(), (address*)_shared_vs.high());
}

// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.

void MetaspaceShared::serialize(SerializeClosure* soc) {
  int tag = 0;
  soc->do_tag(--tag);

  // Verify the sizes of various metadata in the system.
  soc->do_tag(sizeof(Method));
  soc->do_tag(sizeof(ConstMethod));
  soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(ConstantPool));
  soc->do_tag(sizeof(ConstantPoolCache));
  soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
  soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(Symbol));

  // Dump/restore miscellaneous metadata.
  JavaClasses::serialize_offsets(soc);
  Universe::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore references to commonly used names and signatures.
  vmSymbols::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore the symbol/string/subgraph_info tables
  SymbolTable::serialize_shared_table_header(soc);
  StringTable::serialize_shared_table_header(soc);
  HeapShared::serialize_subgraph_info_table_header(soc);
  SystemDictionaryShared::serialize_dictionary_headers(soc);

  InstanceMirrorKlass::serialize_offsets(soc);

  // Dump/restore well known classes (pointers)
  SystemDictionaryShared::serialize_well_known_klasses(soc);
  soc->do_tag(--tag);

  serialize_cloned_cpp_vtptrs(soc);
  soc->do_tag(--tag);

  CDS_JAVA_HEAP_ONLY(ClassLoaderDataShared::serialize(soc));

  soc->do_tag(666);
}

address MetaspaceShared::i2i_entry_code_buffers(size_t total_size) {
  if (DumpSharedSpaces) {
    if (_i2i_entry_code_buffers == NULL) {
      _i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
      _i2i_entry_code_buffers_size = total_size;
    }
  } else if (UseSharedSpaces) {
    assert(_i2i_entry_code_buffers != NULL, "must already be initialized");
  } else {
    return NULL;
  }

  assert(_i2i_entry_code_buffers_size == total_size, "must not change");
  return _i2i_entry_code_buffers;
}

uintx MetaspaceShared::object_delta_uintx(void* obj) {
  Arguments::assert_is_dumping_archive();
  if (DumpSharedSpaces) {
    assert(shared_rs()->contains(obj), "must be");
  } else {
    assert(is_in_shared_metaspace(obj) || DynamicArchive::is_in_target_space(obj), "must be");
  }
  address base_address = address(SharedBaseAddress);
  uintx deltax = address(obj) - base_address;
  return deltax;
}
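
// For illustration: with the default SharedBaseAddress == 0x800000000 and an
// object at 0x800001234, the returned delta is 0x1234. The archive works with
// such base-relative deltas rather than absolute addresses, which is also why
// DumpRegion::expand_top_to() rejects anything beyond MAX_SHARED_DELTA.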

// Global object for holding classes that have been loaded.  Since this
// is run at a safepoint just before exit, this is the entire set of classes.
static GrowableArray<Klass*>* _global_klass_objects;

static int global_klass_compare(Klass** a, Klass **b) {
  return a[0]->name()->fast_compare(b[0]->name());
}

GrowableArray<Klass*>* MetaspaceShared::collected_klasses() {
  return _global_klass_objects;
}

static void collect_array_classes(Klass* k) {
  _global_klass_objects->append_if_missing(k);
  if (k->is_array_klass()) {
    // Add in the array classes too
    ArrayKlass* ak = ArrayKlass::cast(k);
    Klass* h = ak->higher_dimension();
    if (h != NULL) {
      h->array_klasses_do(collect_array_classes);
    }
  }
}

class CollectClassesClosure : public KlassClosure {
  void do_klass(Klass* k) {
    if (k->is_instance_klass() &&
        SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(k))) {
      // Don't add to the _global_klass_objects
    } else {
      _global_klass_objects->append_if_missing(k);
    }
    if (k->is_array_klass()) {
      // Add in the array classes too
      ArrayKlass* ak = ArrayKlass::cast(k);
      Klass* h = ak->higher_dimension();
      if (h != NULL) {
        h->array_klasses_do(collect_array_classes);
      }
    }
  }
};

// Global object for holding symbols that are created during class loading. See SymbolTable::new_symbol
static GrowableArray<Symbol*>* _global_symbol_objects = NULL;

static int compare_symbols_by_address(Symbol** a, Symbol** b) {
  if (a[0] < b[0]) {
    return -1;
  } else if (a[0] == b[0]) {
    ResourceMark rm;
    log_warning(cds)("Duplicated symbol %s unexpected", (*a)->as_C_string());
    return 0;
  } else {
    return 1;
  }
}

void MetaspaceShared::add_symbol(Symbol* sym) {
  MutexLocker ml(CDSAddSymbol_lock, Mutex::_no_safepoint_check_flag);
  if (_global_symbol_objects == NULL) {
    _global_symbol_objects = new (ResourceObj::C_HEAP, mtSymbol) GrowableArray<Symbol*>(2048, mtSymbol);
  }
  _global_symbol_objects->append(sym);
}

GrowableArray<Symbol*>* MetaspaceShared::collected_symbols() {
  return _global_symbol_objects;
}

static void remove_unshareable_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
      // on their array classes.
      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
      k->remove_unshareable_info();
    }
  }
}

static void remove_java_mirror_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
      // on their array classes.
      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
      k->remove_java_mirror();
    }
  }
}

static void rewrite_nofast_bytecode(const methodHandle& method) {
  BytecodeStream bcs(method);
  while (!bcs.is_last_bytecode()) {
    Bytecodes::Code opcode = bcs.next();
    switch (opcode) {
    case Bytecodes::_getfield:      *bcs.bcp() = Bytecodes::_nofast_getfield;      break;
    case Bytecodes::_putfield:      *bcs.bcp() = Bytecodes::_nofast_putfield;      break;
    case Bytecodes::_aload_0:       *bcs.bcp() = Bytecodes::_nofast_aload_0;       break;
    case Bytecodes::_iload: {
      if (!bcs.is_wide()) {
        *bcs.bcp() = Bytecodes::_nofast_iload;
      }
      break;
    }
    default: break;
    }
  }
}
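
// For illustration, a method body such as
//   aload_0; getfield #2; ireturn
// is archived as
//   nofast_aload_0; nofast_getfield #2; ireturn
// The _nofast variants are never rewritten by the interpreter at run time, so
// the archived ConstMethod stays unmodified. Wide iload is left alone because
// only the one-byte form has a _nofast variant.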

// Walk all methods in the class list to ensure that they won't be modified at
// run time. This includes:
// [1] Rewrite all bytecodes as needed, so that the ConstMethod* will not be modified
//     at run time by RewriteBytecodes/RewriteFrequentPairs
// [2] Assign a fingerprint, so one doesn't need to be assigned at run-time.
static void rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread) {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(thread, ik);
    }
  }
}

void MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread, InstanceKlass* ik) {
  for (int i = 0; i < ik->methods()->length(); i++) {
    methodHandle m(thread, ik->methods()->at(i));
    rewrite_nofast_bytecode(m);
    Fingerprinter fp(m);
    // The side effect of this call sets method's fingerprint field.
    fp.fingerprint();
  }
}

// Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
// (In GCC this is the field <Type>::_vptr, i.e., first word in the object.)
//
// Addresses of the vtables and the methods may be different across JVM runs,
// if libjvm.so is dynamically loaded at a different base address.
//
// To ensure that the Metadata objects in the CDS archive always have the correct vtable:
//
// + at dump time:  we redirect the _vptr to point to our own vtables inside
//                  the CDS image
// + at run time:   we clone the actual contents of the vtables from libjvm.so
//                  into our own tables.

// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
#define CPP_VTABLE_PATCH_TYPES_DO(f) \
  f(ConstantPool) \
  f(InstanceKlass) \
  f(InstanceClassLoaderKlass) \
  f(InstanceMirrorKlass) \
  f(InstanceRefKlass) \
  f(Method) \
  f(ObjArrayKlass) \
  f(TypeArrayKlass)

class CppVtableInfo {
  intptr_t _vtable_size;
  intptr_t _cloned_vtable[1];
public:
  static int num_slots(int vtable_size) {
    return 1 + vtable_size; // Need to add the space occupied by _vtable_size
  }
  int vtable_size()           { return int(uintx(_vtable_size)); }
  void set_vtable_size(int n) { _vtable_size = intptr_t(n); }
  intptr_t* cloned_vtable()   { return &_cloned_vtable[0]; }
  void zero()                 { memset(_cloned_vtable, 0, sizeof(intptr_t) * vtable_size()); }
  // Returns the size in bytes of a CppVtableInfo with the given vtable_size, i.e.,
  // the offset at which the next CppVtableInfo can be placed immediately after this one.
  static size_t byte_size(int vtable_size) {
    CppVtableInfo i;
    return pointer_delta(&i._cloned_vtable[vtable_size], &i, sizeof(u1));
  }
};
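
// Memory layout of a CppVtableInfo with vtable_size == n, for illustration
// (each slot is an intptr_t):
//
//   +--------------+--------+--------+-----+----------+
//   | _vtable_size | slot 0 | slot 1 | ... | slot n-1 |
//   +--------------+--------+--------+-----+----------+
//   |<------- byte_size(n) == (1 + n) * 8 ----------->|  (on LP64)
//
// so byte_size(n) is also the offset at which the next CppVtableInfo starts.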

static inline intptr_t* vtable_of(Metadata* m) {
  return *((intptr_t**)m);
}

template <class T> class CppVtableCloner : public T {
  static CppVtableInfo* _info;

  static int get_vtable_length(const char* name);

public:
  // Allocate and initialize the C++ vtable, starting from top, but do not go past end.
  static intptr_t* allocate(const char* name);

  // Clone the vtable to ...
  static intptr_t* clone_vtable(const char* name, CppVtableInfo* info);

  static void zero_vtable_clone() {
    assert(DumpSharedSpaces, "dump-time only");
    _info->zero();
  }

  static bool is_valid_shared_object(const T* obj) {
    intptr_t* vptr = *(intptr_t**)obj;
    return vptr == _info->cloned_vtable();
  }

  static void init_orig_cpp_vtptr(int kind);
};

template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;

template <class T>
intptr_t* CppVtableCloner<T>::allocate(const char* name) {
  assert(is_aligned(_mc_region.top(), sizeof(intptr_t)), "bad alignment");
  int n = get_vtable_length(name);
  _info = (CppVtableInfo*)_mc_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
  _info->set_vtable_size(n);

  intptr_t* p = clone_vtable(name, _info);
  assert((char*)p == _mc_region.top(), "must be");

  return _info->cloned_vtable();
}

template <class T>
intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) {
  if (!DumpSharedSpaces) {
    assert(_info == 0, "_info is initialized only at dump time");
    _info = info; // Remember it -- it will be used by MetaspaceShared::is_valid_shared_method()
  }
  T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
  int n = info->vtable_size();
  intptr_t* srcvtable = vtable_of(&tmp);
  intptr_t* dstvtable = info->cloned_vtable();

  // We already checked (and, if necessary, adjusted n) when the vtables were allocated, so we are
  // safe to do memcpy.
  log_debug(cds, vtables)("Copying %3d vtable entries for %s", n, name);
  memcpy(dstvtable, srcvtable, sizeof(intptr_t) * n);
  return dstvtable + n;
}

// To determine the size of the vtable for each type, we use the following
// trick by declaring 2 subclasses:
//
//   class CppVtableTesterA: public InstanceKlass {virtual void* last_virtual_method() {return NULL;} };
//   class CppVtableTesterB: public InstanceKlass {virtual int   last_virtual_method() {return 1;}    };
//
// CppVtableTesterA and CppVtableTesterB's vtables have the following properties:
// - Their size (N+1) is exactly one more than the size of InstanceKlass's vtable (N)
// - The first N entries are exactly the same as in InstanceKlass's vtable.
// - Their last entry is different.
//
// So to determine the value of N, we just walk CppVtableTesterA and CppVtableTesterB's tables
// and find the first entry that's different.
//
// This works on all C++ compilers supported by Oracle, but you may need to tweak it for more
// esoteric compilers.

template <class T> class CppVtableTesterB: public T {
public:
  virtual int last_virtual_method() {return 1;}
};

template <class T> class CppVtableTesterA : public T {
public:
  virtual void* last_virtual_method() {
    // Make this different than CppVtableTesterB::last_virtual_method so the C++
    // compiler/linker won't alias the two functions.
    return NULL;
  }
};

template <class T>
int CppVtableCloner<T>::get_vtable_length(const char* name) {
  CppVtableTesterA<T> a;
  CppVtableTesterB<T> b;

  intptr_t* avtable = vtable_of(&a);
  intptr_t* bvtable = vtable_of(&b);

  // Start at slot 1, because slot 0 may be RTTI (on Solaris/Sparc)
  int vtable_len = 1;
  for (; ; vtable_len++) {
    if (avtable[vtable_len] != bvtable[vtable_len]) {
      break;
    }
  }
  log_debug(cds, vtables)("Found   %3d vtable entries for %s", vtable_len, name);

  return vtable_len;
}

#define ALLOC_CPP_VTABLE_CLONE(c) \
  _cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner<c>::allocate(#c); \
  ArchivePtrMarker::mark_pointer(&_cloned_cpp_vtptrs[c##_Kind]);

#define CLONE_CPP_VTABLE(c) \
  p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);

#define ZERO_CPP_VTABLE(c) \
 CppVtableCloner<c>::zero_vtable_clone();

#define INIT_ORIG_CPP_VTPTRS(c) \
  CppVtableCloner<c>::init_orig_cpp_vtptr(c##_Kind);

#define DECLARE_CLONED_VTABLE_KIND(c) c ## _Kind,

enum ClonedVtableKind {
  // E.g., ConstantPool_Kind == 0, InstanceKlass_Kind == 1, etc.
  CPP_VTABLE_PATCH_TYPES_DO(DECLARE_CLONED_VTABLE_KIND)
  _num_cloned_vtable_kinds
};

// This is a map of all the original vtptrs. E.g., for
//     ConstantPool *cp = new (...) ConstantPool(...) ; // a dynamically allocated constant pool
// the following holds true:
//     _orig_cpp_vtptrs[ConstantPool_Kind] ==  ((intptr_t**)cp)[0]
static intptr_t* _orig_cpp_vtptrs[_num_cloned_vtable_kinds];
static bool _orig_cpp_vtptrs_inited = false;

template <class T>
void CppVtableCloner<T>::init_orig_cpp_vtptr(int kind) {
  assert(kind < _num_cloned_vtable_kinds, "sanity");
  T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
  intptr_t* srcvtable = vtable_of(&tmp);
  _orig_cpp_vtptrs[kind] = srcvtable;
}

// This is the index of all the cloned vtables. E.g., for
//     ConstantPool* cp = ....; // an archived constant pool
//     InstanceKlass* ik = ....;// an archived class
// the following holds true:
//     _cloned_cpp_vtptrs[ConstantPool_Kind]  == ((intptr_t**)cp)[0]
//     _cloned_cpp_vtptrs[InstanceKlass_Kind] == ((intptr_t**)ik)[0]
static intptr_t** _cloned_cpp_vtptrs = NULL;

void MetaspaceShared::allocate_cloned_cpp_vtptrs() {
  assert(DumpSharedSpaces, "must");
  size_t vtptrs_bytes = _num_cloned_vtable_kinds * sizeof(intptr_t*);
  _cloned_cpp_vtptrs = (intptr_t**)_mc_region.allocate(vtptrs_bytes, sizeof(intptr_t*));
}

void MetaspaceShared::serialize_cloned_cpp_vtptrs(SerializeClosure* soc) {
  soc->do_ptr((void**)&_cloned_cpp_vtptrs);
}

intptr_t* MetaspaceShared::get_archived_cpp_vtable(MetaspaceObj::Type msotype, address obj) {
  if (!_orig_cpp_vtptrs_inited) {
    CPP_VTABLE_PATCH_TYPES_DO(INIT_ORIG_CPP_VTPTRS);
    _orig_cpp_vtptrs_inited = true;
  }

  Arguments::assert_is_dumping_archive();
  int kind = -1;
  switch (msotype) {
  case MetaspaceObj::SymbolType:
  case MetaspaceObj::TypeArrayU1Type:
  case MetaspaceObj::TypeArrayU2Type:
  case MetaspaceObj::TypeArrayU4Type:
  case MetaspaceObj::TypeArrayU8Type:
  case MetaspaceObj::TypeArrayOtherType:
  case MetaspaceObj::ConstMethodType:
  case MetaspaceObj::ConstantPoolCacheType:
  case MetaspaceObj::AnnotationsType:
  case MetaspaceObj::MethodCountersType:
  case MetaspaceObj::RecordComponentType:
    // These have no vtables.
    break;
  case MetaspaceObj::MethodDataType:
    // We don't archive MethodData <-- it should have been removed in remove_unshareable_info
    ShouldNotReachHere();
    break;
  default:
    for (kind = 0; kind < _num_cloned_vtable_kinds; kind ++) {
      if (vtable_of((Metadata*)obj) == _orig_cpp_vtptrs[kind]) {
        break;
      }
    }
    if (kind >= _num_cloned_vtable_kinds) {
      fatal("Cannot find C++ vtable for " INTPTR_FORMAT " -- you probably added"
            " a new subtype of Klass or MetaData without updating CPP_VTABLE_PATCH_TYPES_DO",
            p2i(obj));
    }
  }

  if (kind >= 0) {
    assert(kind < _num_cloned_vtable_kinds, "must be");
    return _cloned_cpp_vtptrs[kind];
  } else {
    return NULL;
  }
}

// This can be called at both dump time and run time:
// - clone the contents of the c++ vtables into the space
//   allocated by allocate_cpp_vtable_clones()
void MetaspaceShared::clone_cpp_vtables(intptr_t* p) {
  assert(DumpSharedSpaces || UseSharedSpaces, "sanity");
  CPP_VTABLE_PATCH_TYPES_DO(CLONE_CPP_VTABLE);
}

void MetaspaceShared::zero_cpp_vtable_clones_for_writing() {
  assert(DumpSharedSpaces, "dump-time only");
  CPP_VTABLE_PATCH_TYPES_DO(ZERO_CPP_VTABLE);
}

// Allocate and initialize the C++ vtables, starting from top, but do not go past end.
char* MetaspaceShared::allocate_cpp_vtable_clones() {
  char* cloned_vtables = _mc_region.top(); // This is the beginning of all the cloned vtables

  assert(DumpSharedSpaces, "dump-time only");
  // Layout (each slot is an intptr_t):
  //   [number of slots in the first vtable = n1]
  //   [ <n1> slots for the first vtable]
  //   [number of slots in the second vtable = n2]
  //   [ <n2> slots for the second vtable]
  //   ...
  // The order of the vtables is the same as the CPP_VTABLE_PATCH_TYPES_DO macro.
  CPP_VTABLE_PATCH_TYPES_DO(ALLOC_CPP_VTABLE_CLONE);

  return cloned_vtables;
}

bool MetaspaceShared::is_valid_shared_method(const Method* m) {
  assert(is_in_shared_metaspace(m), "must be");
  return CppVtableCloner<Method>::is_valid_shared_object(m);
}

void WriteClosure::do_oop(oop* o) {
  if (*o == NULL) {
    _dump_region->append_intptr_t(0);
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archiving heap object is not allowed");
    _dump_region->append_intptr_t(
      (intptr_t)CompressedOops::encode_not_null(*o));
  }
}

void WriteClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    _dump_region->append_intptr_t(*(intptr_t*)start, true);
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}
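
// Illustrative stream layout for do_region(start, 24): the size is written
// first as a tag word, followed by the three data words:
//   [24][word 0][word 1][word 2]
// The corresponding read side checks the tag before reading the words back,
// so a mismatched archive fails fast instead of reading garbage.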

// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public ResourceObj {
public:

  // Here's poor man's enum inheritance
#define SHAREDSPACE_OBJ_TYPES_DO(f) \
  METASPACE_OBJ_TYPES_DO(f) \
  f(SymbolHashentry) \
  f(SymbolBucket) \
  f(StringHashentry) \
  f(StringBucket) \
  f(ModulesNatives) \
  f(Other)

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  static const char * type_name(Type type) {
    switch(type) {
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
    default:
      ShouldNotReachHere();
      return NULL;
    }
  }

public:
  enum { RO = 0, RW = 1 };

  int _counts[2][_number_of_types];
  int _bytes [2][_number_of_types];

  DumpAllocStats() {
    memset(_counts, 0, sizeof(_counts));
    memset(_bytes,  0, sizeof(_bytes));
  };

  void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
    int which = (read_only) ? RO : RW;
    _counts[which][type] ++;
    _bytes [which][type] += byte_size;
  }

  void record_modules(int byte_size, bool read_only) {
    int which = (read_only) ? RO : RW;
    _bytes [which][ModulesNativesType] += byte_size;
  }

  void record_other_type(int byte_size, bool read_only) {
    int which = (read_only) ? RO : RW;
    _bytes [which][OtherType] += byte_size;
  }
  void print_stats(int ro_all, int rw_all, int mc_all);
};

void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all) {
  // Calculate size of data that was not allocated by Metaspace::allocate()
  MetaspaceSharedStats *stats = MetaspaceShared::stats();

  // symbols
  _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
  _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;

  _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
  _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;

  // strings
  _counts[RO][StringHashentryType] = stats->string.hashentry_count;
  _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;

  _counts[RO][StringBucketType] = stats->string.bucket_count;
  _bytes [RO][StringBucketType] = stats->string.bucket_bytes;

  // TODO: count things like dictionary, vtable, etc
  _bytes[RW][OtherType] += mc_all;
  rw_all += mc_all; // mc is mapped Read/Write

  // prevent divide-by-zero
  if (ro_all < 1) {
    ro_all = 1;
  }
  if (rw_all < 1) {
    rw_all = 1;
  }

  int all_ro_count = 0;
  int all_ro_bytes = 0;
  int all_rw_count = 0;
  int all_rw_bytes = 0;

// To make fmt_stats be a syntactic constant (for format warnings), use #define.
#define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
  const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
  const char *hdr = "                        ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";

  LogMessage(cds) msg;

  msg.debug("Detailed metadata info (excluding st regions; rw stats include mc regions):");
  msg.debug("%s", hdr);
  msg.debug("%s", sep);
  for (int type = 0; type < int(_number_of_types); type ++) {
    const char *name = type_name((Type)type);
    int ro_count = _counts[RO][type];
    int ro_bytes = _bytes [RO][type];
    int rw_count = _counts[RW][type];
    int rw_bytes = _bytes [RW][type];
    int count = ro_count + rw_count;
    int bytes = ro_bytes + rw_bytes;

    double ro_perc = percent_of(ro_bytes, ro_all);
    double rw_perc = percent_of(rw_bytes, rw_all);
    double perc    = percent_of(bytes, ro_all + rw_all);

    msg.debug(fmt_stats, name,
                         ro_count, ro_bytes, ro_perc,
                         rw_count, rw_bytes, rw_perc,
                         count, bytes, perc);

    all_ro_count += ro_count;
    all_ro_bytes += ro_bytes;
    all_rw_count += rw_count;
    all_rw_bytes += rw_bytes;
  }

  int all_count = all_ro_count + all_rw_count;
  int all_bytes = all_ro_bytes + all_rw_bytes;

  double all_ro_perc = percent_of(all_ro_bytes, ro_all);
  double all_rw_perc = percent_of(all_rw_bytes, rw_all);
  double all_perc    = percent_of(all_bytes, ro_all + rw_all);

  msg.debug("%s", sep);
  msg.debug(fmt_stats, "Total",
                       all_ro_count, all_ro_bytes, all_ro_perc,
                       all_rw_count, all_rw_bytes, all_rw_perc,
                       all_count, all_bytes, all_perc);

  assert(all_ro_bytes == ro_all, "everything should have been counted");
  assert(all_rw_bytes == rw_all, "everything should have been counted");

#undef fmt_stats
}

// Populate the shared space.

class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  GrowableArray<MemRegion> *_closed_archive_heap_regions;
  GrowableArray<MemRegion> *_open_archive_heap_regions;

  GrowableArray<ArchiveHeapOopmapInfo> *_closed_archive_heap_oopmaps;
  GrowableArray<ArchiveHeapOopmapInfo> *_open_archive_heap_oopmaps;

  void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                 GrowableArray<ArchiveHeapOopmapInfo>* oopmaps);
  void dump_symbols();
  char* dump_read_only_tables();
  void print_class_stats();
  void print_region_stats(FileMapInfo* map_info);
  void print_bitmap_region_stats(size_t size, size_t total_size);
  void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                               const char *name, size_t total_size);
  void relocate_to_requested_base_address(CHeapBitMap* ptrmap);

public:

  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit();   // outline because gdb sucks
  bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace

// ArchiveCompactor --
//
// This class is the central piece of shared archive compaction -- all metaspace data are
// initially allocated outside of the shared regions. ArchiveCompactor copies the
// metaspace data into their final location in the shared regions.

class ArchiveCompactor : AllStatic {
  static const int INITIAL_TABLE_SIZE = 8087;
  static const int MAX_TABLE_SIZE     = 1000000;

  static DumpAllocStats* _alloc_stats;

  typedef KVHashtable<address, address, mtInternal> RelocationTable;
  static RelocationTable* _new_loc_table;

public:
  static void initialize() {
    _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats;
    _new_loc_table = new RelocationTable(INITIAL_TABLE_SIZE);
  }
  static DumpAllocStats* alloc_stats() {
    return _alloc_stats;
  }

  // Use this when you allocate space with MetaspaceShared::read_only_space_alloc()
  // outside of ArchiveCompactor::allocate(). These are usually for misc tables
  // that are allocated in the RO space.
  class OtherROAllocMark {
    char* _oldtop;
  public:
    OtherROAllocMark() {
      _oldtop = _ro_region.top();
    }
    ~OtherROAllocMark() {
      char* newtop = _ro_region.top();
      ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
    }
  };
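
  // Usage sketch: OtherROAllocMark is a RAII helper, so RO-space allocations
  // made while one is in scope are attributed to OtherType when the scope
  // exits, e.g.
  //
  //   {
  //     ArchiveCompactor::OtherROAllocMark mark;
  //     char* table = MetaspaceShared::read_only_space_alloc(table_bytes);
  //     // ... fill in the misc table ...
  //   } // ~OtherROAllocMark records the bytes allocated above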
1298 
1299   static void allocate(MetaspaceClosure::Ref* ref, bool read_only) {
1300     address obj = ref->obj();
1301     int bytes = ref->size() * BytesPerWord;
1302     char* p;
1303     size_t alignment = BytesPerWord;
1304     char* oldtop;
1305     char* newtop;
1306 
1307     if (read_only) {
1308       oldtop = _ro_region.top();
1309       p = _ro_region.allocate(bytes, alignment);
1310       newtop = _ro_region.top();
1311     } else {
1312       oldtop = _rw_region.top();
1313       if (ref->msotype() == MetaspaceObj::ClassType) {
1314         // Save a pointer immediate in front of an InstanceKlass, so
1315         // we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo*
1316         // without building another hashtable. See RunTimeSharedClassInfo::get_for()
1317         // in systemDictionaryShared.cpp.
1318         Klass* klass = (Klass*)obj;
1319         if (klass->is_instance_klass()) {
1320           SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
1321           _rw_region.allocate(sizeof(address), BytesPerWord);
1322         }
1323       }
1324       p = _rw_region.allocate(bytes, alignment);
1325       newtop = _rw_region.top();
1326     }
1327     memcpy(p, obj, bytes);
1328 
1329     intptr_t* archived_vtable = MetaspaceShared::get_archived_cpp_vtable(ref->msotype(), (address)p);
1330     if (archived_vtable != NULL) {
1331       *(address*)p = (address)archived_vtable;
1332       ArchivePtrMarker::mark_pointer((address*)p);
1333     }
1334 
1335     assert(_new_loc_table->lookup(obj) == NULL, "each object can be relocated at most once");
1336     _new_loc_table->add(obj, (address)p);
1337     log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
1338     if (_new_loc_table->maybe_grow(MAX_TABLE_SIZE)) {
1339       log_info(cds, hashtables)("Expanded _new_loc_table to %d", _new_loc_table->table_size());
1340     }
1341     _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
1342   }
1343 
1344   static address get_new_loc(MetaspaceClosure::Ref* ref) {
1345     address* pp = _new_loc_table->lookup(ref->obj());
1346     assert(pp != NULL, "must be");
1347     return *pp;
1348   }
1349 
1350 private:
1351   // Makes a shallow copy of visited MetaspaceObj's
1352   class ShallowCopier: public UniqueMetaspaceClosure {
1353     bool _read_only;
1354   public:
1355     ShallowCopier(bool read_only) : _read_only(read_only) {}
1356 
1357     virtual bool do_unique_ref(Ref* ref, bool read_only) {
1358       if (read_only == _read_only) {
1359         allocate(ref, read_only);
1360       }
1361       return true; // recurse into ref.obj()
1362     }
1363   };
1364 
1365   // Relocate embedded pointers within a MetaspaceObj's shallow copy
1366   class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
1367   public:
1368     virtual bool do_unique_ref(Ref* ref, bool read_only) {
1369       address new_loc = get_new_loc(ref);
1370       RefRelocator refer;
1371       ref->metaspace_pointers_do_at(&refer, new_loc);
1372       return true; // recurse into ref.obj()
1373     }
1374     virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) {
1375       assert(type == _method_entry_ref, "only special type allowed for now");
1376       address obj = ref->obj();
1377       address new_obj = get_new_loc(ref);
1378       size_t offset = pointer_delta(p, obj, sizeof(u1));
1379       intptr_t* new_p = (intptr_t*)(new_obj + offset);
1380       assert(*p == *new_p, "must be a copy");
1381       ArchivePtrMarker::mark_pointer((address*)new_p);
1382     }
1383   };
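       // Offset math in push_special() above, with illustrative numbers: if
       // ref->obj() is a Method* at 0x1000 and p points at its method-entry
       // slot at 0x1030, then offset = 0x30 bytes and new_p = new_obj + 0x30;
       // only the bitmap mark is needed, since ShallowCopier already copied the
       // slot's value (hence the assert that *p == *new_p).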
1384 
1385   // Relocate a reference to point to its shallow copy
1386   class RefRelocator: public MetaspaceClosure {
1387   public:
1388     virtual bool do_ref(Ref* ref, bool read_only) {
1389       if (ref->not_null()) {
1390         ref->update(get_new_loc(ref));
1391         ArchivePtrMarker::mark_pointer(ref->addr());
1392       }
1393       return false; // Do not recurse.
1394     }
1395   };
1396 
1397 #ifdef ASSERT
1398   class IsRefInArchiveChecker: public MetaspaceClosure {
1399   public:
1400     virtual bool do_ref(Ref* ref, bool read_only) {
1401       if (ref->not_null()) {
1402         char* obj = (char*)ref->obj();
1403         assert(_ro_region.contains(obj) || _rw_region.contains(obj),
1404                "must be relocated to point to CDS archive");
1405       }
1406       return false; // Do not recurse.
1407     }
1408   };
1409 #endif
1410 
1411 public:
1412   static void copy_and_compact() {
1413     ResourceMark rm;
1414 
1415     log_info(cds)("Scanning all metaspace objects ... ");
1416     {
1417       // allocate and shallow-copy RW objects, immediately following the MC region
1418       log_info(cds)("Allocating RW objects ... ");
1419       _mc_region.pack(&_rw_region);
1420 
1421       ResourceMark rm;
1422       ShallowCopier rw_copier(false);
1423       iterate_roots(&rw_copier);
1424 
1425 #if INCLUDE_CDS_JAVA_HEAP
1426       // Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
1427       char* start = _rw_region.top();
1428       ClassLoaderDataShared::allocate_archived_tables();
1429       ArchiveCompactor::alloc_stats()->record_modules(_rw_region.top() - start, /*read_only*/false);
1430 #endif
1431     }
1432     {
1433       // allocate and shallow-copy RO objects, immediately following the RW region
1434       log_info(cds)("Allocating RO objects ... ");
1435       _rw_region.pack(&_ro_region);
1436 
1437       ResourceMark rm;
1438       ShallowCopier ro_copier(true);
1439       iterate_roots(&ro_copier);
1440 #if INCLUDE_CDS_JAVA_HEAP
1441       char* start = _ro_region.top();
1442       ClassLoaderDataShared::init_archived_tables();
1443       ArchiveCompactor::alloc_stats()->record_modules(_ro_region.top() - start, /*read_only*/true);
1444 #endif
1445     }
1446     {
1447       log_info(cds)("Relocating embedded pointers ... ");
1448       ResourceMark rm;
1449       ShallowCopyEmbeddedRefRelocator emb_reloc;
1450       iterate_roots(&emb_reloc);
1451     }
1452     {
1453       log_info(cds)("Relocating external roots ... ");
1454       ResourceMark rm;
1455       RefRelocator ext_reloc;
1456       iterate_roots(&ext_reloc);
1457     }
1458     {
1459       log_info(cds)("Fixing symbol identity hash ... ");
1460       os::init_random(0x12345678);
1461       GrowableArray<Symbol*>* all_symbols = MetaspaceShared::collected_symbols();
1462       all_symbols->sort(compare_symbols_by_address);
1463       for (int i = 0; i < all_symbols->length(); i++) {
1464         assert(all_symbols->at(i)->is_permanent(), "archived symbols must be permanent");
1465         all_symbols->at(i)->update_identity_hash();
1466       }
1467     }
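         // Determinism note (an inference from the code above): reseeding the
         // PRNG with a fixed value and visiting the Symbols in ascending
         // address order means update_identity_hash() assigns the same hash to
         // the same Symbol on every dump, keeping the archive byte-identical
         // for identical inputs.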
1468 #ifdef ASSERT
1469     {
1470       log_info(cds)("Verifying external roots ... ");
1471       ResourceMark rm;
1472       IsRefInArchiveChecker checker;
1473       iterate_roots(&checker);
1474     }
1475 #endif
1476   }
1477 
1478   // We must relocate SystemDictionary::_well_known_klasses only after we have copied
1479   // in the java objects during dump_java_heap_objects(): during the object copy, we
1480   // operate on old objects, which assert that their klass is the original klass.
1481   static void relocate_well_known_klasses() {
1482     {
1483       log_info(cds)("Relocating SystemDictionary::_well_known_klasses[] ... ");
1484       ResourceMark rm;
1485       RefRelocator ext_reloc;
1486       SystemDictionary::well_known_klasses_do(&ext_reloc);
1487     }
1488     // NOTE: after this point, we shouldn't have any globals that can reach the old
1489     // objects.
1490 
1491     // We cannot use any of the objects in the heap anymore (except for the
1492     // shared strings) because their headers no longer point to valid Klasses.
1493   }
1494 
1495   static void iterate_roots(MetaspaceClosure* it) {
1496     // To ensure deterministic contents in the archive, we just need to ensure that
1497     // we iterate the MetaspaceObjs in a deterministic order. It doesn't matter where
1498     // the MetaspaceObjs are located originally, as they are copied sequentially into
1499     // the archive during the iteration.
1500     //
1501     // The only issue here is that the symbol table and the system dictionaries may be
1502     // randomly ordered, so we copy the symbols and klasses into two arrays and sort
1503     // them deterministically.
1504     //
1505     // During -Xshare:dump, the order of Symbol creation is strictly determined by
1506     // the SharedClassListFile (class loading is done in a single thread and the JIT
1507     // is disabled). Also, Symbols are allocated in monotonically increasing addresses
1508     // (see Symbol::operator new(size_t, int)). So if we iterate the Symbols by
1509     // ascending address order, we ensure that all Symbols are copied into deterministic
1510     // locations in the archive.
1511     GrowableArray<Symbol*>* symbols = _global_symbol_objects;
1512     for (int i = 0; i < symbols->length(); i++) {
1513       it->push(symbols->adr_at(i));
1514     }
1515     if (_global_klass_objects != NULL) {
1516       // Need to fix up the pointers
1517       for (int i = 0; i < _global_klass_objects->length(); i++) {
1518         // NOTE -- this requires that the vtable is NOT yet patched, or else we are hosed.
1519         it->push(_global_klass_objects->adr_at(i));
1520       }
1521     }
1522     FileMapInfo::metaspace_pointers_do(it, false);
1523     SystemDictionaryShared::dumptime_classes_do(it);
1524     Universe::metaspace_pointers_do(it);
1525     SymbolTable::metaspace_pointers_do(it);
1526     vmSymbols::metaspace_pointers_do(it);
1527 
1528     it->finish();
1529   }
1530 
1531   static Klass* get_relocated_klass(Klass* orig_klass) {
1532     assert(DumpSharedSpaces, "dump time only");
1533     address* pp = _new_loc_table->lookup((address)orig_klass);
1534     assert(pp != NULL, "must be");
1535     Klass* klass = (Klass*)(*pp);
1536     assert(klass->is_klass(), "must be");
1537     return klass;
1538   }
1539 
1540   static Symbol* get_relocated_symbol(Symbol* orig_symbol) {
1541     assert(DumpSharedSpaces, "dump time only");
1542     address* pp = _new_loc_table->lookup((address)orig_symbol);
1543     assert(pp != NULL, "must be");
1544     return (Symbol*)(*pp);
1545   }
1546 };
1547 
1548 DumpAllocStats* ArchiveCompactor::_alloc_stats;
1549 ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table;
1550 
1551 void VM_PopulateDumpSharedSpace::dump_symbols() {
1552   log_info(cds)("Dumping symbol table ...");
1553 
1554   NOT_PRODUCT(SymbolTable::verify());
1555   SymbolTable::write_to_archive();
1556 }
1557 
1558 char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
1559   ArchiveCompactor::OtherROAllocMark mark;
1560 
1561   log_info(cds)("Removing java_mirror ... ");
1562   if (!HeapShared::is_heap_object_archiving_allowed()) {
1563     Universe::clear_basic_type_mirrors();
1564   }
1565   remove_java_mirror_in_classes();
1566   log_info(cds)("done. ");
1567 
1568   SystemDictionaryShared::write_to_archive();
1569 
1570   // Write the other data to the output array.
1571   char* start = _ro_region.top();
1572   WriteClosure wc(&_ro_region);
1573   MetaspaceShared::serialize(&wc);
1574 
1575   // Write the bitmaps for patching the archive heap regions
1576   _closed_archive_heap_oopmaps = NULL;
1577   _open_archive_heap_oopmaps = NULL;
1578   dump_archive_heap_oopmaps();
1579 
1580   return start;
1581 }
1582 
1583 void VM_PopulateDumpSharedSpace::print_class_stats() {
1584   log_info(cds)("Number of classes %d", _global_klass_objects->length());
1585   {
1586     int num_type_array = 0, num_obj_array = 0, num_inst = 0;
1587     for (int i = 0; i < _global_klass_objects->length(); i++) {
1588       Klass* k = _global_klass_objects->at(i);
1589       if (k->is_instance_klass()) {
1590         num_inst ++;
1591       } else if (k->is_objArray_klass()) {
1592         num_obj_array ++;
1593       } else {
1594         assert(k->is_typeArray_klass(), "sanity");
1595         num_type_array ++;
1596       }
1597     }
1598     log_info(cds)("    instance classes   = %5d", num_inst);
1599     log_info(cds)("    obj array classes  = %5d", num_obj_array);
1600     log_info(cds)("    type array classes = %5d", num_type_array);
1601   }
1602 }
1603 
1604 void VM_PopulateDumpSharedSpace::relocate_to_requested_base_address(CHeapBitMap* ptrmap) {
1605   intx addr_delta = MetaspaceShared::final_delta();
1606   if (addr_delta == 0) {
1607     ArchivePtrMarker::compact((address)SharedBaseAddress, (address)_ro_region.top());
1608   } else {
1609     // We are not able to reserve space at MetaspaceShared::requested_base_address() (due to ASLR).
1610     // This means that the current content of the archive is based on a random
1611     // address. Let's relocate all the pointers, so that it can be mapped to
1612     // MetaspaceShared::requested_base_address() without runtime relocation.
1613     //
1614     // Note: both the base and dynamic archive are written with
1615     // FileMapHeader::_requested_base_address == MetaspaceShared::requested_base_address()
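         // Worked example (illustrative numbers only): if this dump happened to
         // be based at 0x7f0000000000 but requested_base_address() is
         // 0x800000000, then addr_delta = 0x800000000 - 0x7f0000000000, and
         // every marked pointer whose value falls in [valid_old_base,
         // valid_old_end) is rewritten below to value + addr_delta.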
1616 
1617     // Patch all pointers that are marked by ptrmap within this region,
1618     // where we have just dumped all the metaspace data.
1619     address patch_base = (address)SharedBaseAddress;
1620     address patch_end  = (address)_ro_region.top();
1621     size_t size = patch_end - patch_base;
1622 
1623     // the current value of the pointers to be patched must be within this
1624     // range (i.e., must point to valid metaspace objects)
1625     address valid_old_base = patch_base;
1626     address valid_old_end  = patch_end;
1627 
1628     // after patching, the pointers must point inside this range
1629     // (the requested location of the archive, as mapped at runtime).
1630     address valid_new_base = (address)MetaspaceShared::requested_base_address();
1631     address valid_new_end  = valid_new_base + size;
1632 
1633     log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT " ] to "
1634                    "[" INTPTR_FORMAT " - " INTPTR_FORMAT " ]", p2i(patch_base), p2i(patch_end),
1635                    p2i(valid_new_base), p2i(valid_new_end));
1636 
1637     SharedDataRelocator<true> patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
1638                                       valid_new_base, valid_new_end, addr_delta, ptrmap);
1639     ptrmap->iterate(&patcher);
1640     ArchivePtrMarker::compact(patcher.max_non_null_offset());
1641   }
1642 }
1643 
1644 void VM_PopulateDumpSharedSpace::doit() {
1645   HeapShared::run_full_gc_in_vm_thread();
1646   CHeapBitMap ptrmap;
1647   MetaspaceShared::initialize_ptr_marker(&ptrmap);
1648 
1649   // We should no longer allocate anything from the metaspace, because:
1650   //
1651   // (1) Metaspace::allocate might trigger GC if we have run out of
1652   //     committed metaspace, but we can't GC because we're running
1653   //     in the VM thread.
1654   // (2) ArchiveCompactor needs to work with a stable set of MetaspaceObjs.
1655   Metaspace::freeze();
1656   DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);
1657 
1658   Thread* THREAD = VMThread::vm_thread();
1659 
1660   FileMapInfo::check_nonempty_dir_in_shared_path_table();
1661 
1662   NOT_PRODUCT(SystemDictionary::verify();)
1663   // The following guarantee is meant to ensure that no loader constraints
1664   // exist yet, since the constraints table is not shared.  This becomes
1665   // more important now that we don't re-initialize vtables/itables for
1666   // shared classes at runtime, where constraints were previously created.
1667   guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
1668             "loader constraints are not saved");
1669   guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
1670             "placeholders are not saved");
1671 
1672   // At this point, many classes have been loaded.
1673   // Gather SystemDictionary classes into a global array and operate on that array,
1674   // so we don't have to walk the SystemDictionary again.
1675   SystemDictionaryShared::check_excluded_classes();
1676   _global_klass_objects = new GrowableArray<Klass*>(1000);
1677   CollectClassesClosure collect_classes;
1678   ClassLoaderDataGraph::loaded_classes_do(&collect_classes);
1679   _global_klass_objects->sort(global_klass_compare);
1680 
1681   print_class_stats();
1682 
1683   // Ensure the ConstMethods won't be modified at run-time
1684   log_info(cds)("Updating ConstMethods ... ");
1685   rewrite_nofast_bytecodes_and_calculate_fingerprints(THREAD);
1686   log_info(cds)("done. ");
1687 
1688   // Remove all references outside the metadata
1689   log_info(cds)("Removing unshareable information ... ");
1690   remove_unshareable_in_classes();
1691   log_info(cds)("done. ");
1692 
1693   MetaspaceShared::allocate_cloned_cpp_vtptrs();
1694   char* cloned_vtables = _mc_region.top();
1695   MetaspaceShared::allocate_cpp_vtable_clones();
1696 
1697   ArchiveCompactor::initialize();
1698   ArchiveCompactor::copy_and_compact();
1699 
1700   dump_symbols();
1701 
1702   // Dump supported java heap objects
1703   _closed_archive_heap_regions = NULL;
1704   _open_archive_heap_regions = NULL;
1705   dump_java_heap_objects();
1706 
1707   ArchiveCompactor::relocate_well_known_klasses();
1708 
1709   char* serialized_data = dump_read_only_tables();
1710   _ro_region.pack();
1711 
1712   // The vtable clones contain addresses of the current process.
1713   // We don't want to write these addresses into the archive. Same for i2i buffer.
1714   MetaspaceShared::zero_cpp_vtable_clones_for_writing();
1715   memset(MetaspaceShared::i2i_entry_code_buffers(), 0,
1716          MetaspaceShared::i2i_entry_code_buffers_size());
1717 
1718   // relocate the data so that it can be mapped to MetaspaceShared::requested_base_address()
1719   // without runtime relocation.
1720   relocate_to_requested_base_address(&ptrmap);
1721 
1722   // Create and write the archive file that maps the shared spaces.
1723 
1724   FileMapInfo* mapinfo = new FileMapInfo(true);
1725   mapinfo->populate_header(os::vm_allocation_granularity());
1726   mapinfo->set_serialized_data(serialized_data);
1727   mapinfo->set_cloned_vtables(cloned_vtables);
1728   mapinfo->set_i2i_entry_code_buffers(MetaspaceShared::i2i_entry_code_buffers(),
1729                                       MetaspaceShared::i2i_entry_code_buffers_size());
1730   mapinfo->open_for_write();
1731   MetaspaceShared::write_core_archive_regions(mapinfo, _closed_archive_heap_oopmaps, _open_archive_heap_oopmaps);
1732   _total_closed_archive_region_size = mapinfo->write_archive_heap_regions(
1733                                         _closed_archive_heap_regions,
1734                                         _closed_archive_heap_oopmaps,
1735                                         MetaspaceShared::first_closed_archive_heap_region,
1736                                         MetaspaceShared::max_closed_archive_heap_region);
1737   _total_open_archive_region_size = mapinfo->write_archive_heap_regions(
1738                                         _open_archive_heap_regions,
1739                                         _open_archive_heap_oopmaps,
1740                                         MetaspaceShared::first_open_archive_heap_region,
1741                                         MetaspaceShared::max_open_archive_heap_region);
1742 
1743   mapinfo->set_final_requested_base((char*)MetaspaceShared::requested_base_address());
1744   mapinfo->set_header_crc(mapinfo->compute_header_crc());
1745   mapinfo->write_header();
1746   print_region_stats(mapinfo);
1747   mapinfo->close();
1748 
1749   if (log_is_enabled(Info, cds)) {
1750     ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
1751                                                  int(_mc_region.used()));
1752   }
1753 
1754   if (PrintSystemDictionaryAtExit) {
1755     SystemDictionary::print();
1756   }
1757 
1758   if (AllowArchivingWithJavaAgent) {
1759     warning("This archive was created with AllowArchivingWithJavaAgent. It should be used "
1760             "for testing purposes only and should not be used in a production environment");
1761   }
1762 
1763   // There may be other pending VM operations that operate on the InstanceKlasses,
1764   // which will fail because InstanceKlass::remove_unshareable_info()
1765   // has been called. Forget these operations and exit the VM directly.
1766   vm_direct_exit(0);
1767 }
1768 
1769 void VM_PopulateDumpSharedSpace::print_region_stats(FileMapInfo *map_info) {
1770   // Print statistics of all the regions
1771   const size_t bitmap_used = map_info->space_at(MetaspaceShared::bm)->used();
1772   const size_t bitmap_reserved = map_info->space_at(MetaspaceShared::bm)->used_aligned();
1773   const size_t total_reserved = _ro_region.reserved()  + _rw_region.reserved() +
1774                                 _mc_region.reserved()  +
1775                                 bitmap_reserved +
1776                                 _total_closed_archive_region_size +
1777                                 _total_open_archive_region_size;
1778   const size_t total_bytes = _ro_region.used()  + _rw_region.used() +
1779                              _mc_region.used()  +
1780                              bitmap_used +
1781                              _total_closed_archive_region_size +
1782                              _total_open_archive_region_size;
1783   const double total_u_perc = percent_of(total_bytes, total_reserved);
1784 
1785   _mc_region.print(total_reserved);
1786   _rw_region.print(total_reserved);
1787   _ro_region.print(total_reserved);
1788   print_bitmap_region_stats(bitmap_used, total_reserved);
1789   print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved);
1790   print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);
1791 
1792   log_debug(cds)("total    : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
1793                  total_bytes, total_reserved, total_u_perc);
1794 }
1795 
1796 void VM_PopulateDumpSharedSpace::print_bitmap_region_stats(size_t size, size_t total_size) {
1797   log_debug(cds)("bm  space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used]",
1798                  size, size/double(total_size)*100.0, size);
1799 }
1800 
1801 void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
1802                                                          const char *name, size_t total_size) {
1803   int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
1804   for (int i = 0; i < arr_len; i++) {
1805       char* start = (char*)heap_mem->at(i).start();
1806       size_t size = heap_mem->at(i).byte_size();
1807       char* top = start + size;
1808       log_debug(cds)("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
1809                      name, i, size, size/double(total_size)*100.0, size, p2i(start));
1810 
1811   }
1812 }
1813 
1814 void MetaspaceShared::write_core_archive_regions(FileMapInfo* mapinfo,
1815                                                  GrowableArray<ArchiveHeapOopmapInfo>* closed_oopmaps,
1816                                                  GrowableArray<ArchiveHeapOopmapInfo>* open_oopmaps) {
1817   // Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with
1818   // MetaspaceShared::n_regions (internal to hotspot).
1819   assert(NUM_CDS_REGIONS == MetaspaceShared::n_regions, "sanity");
1820 
1821   // mc contains the trampoline code for method entries, which are patched at run time,
1822   // so it needs to be read/write.
1823   write_region(mapinfo, mc, &_mc_region, /*read_only=*/false, /*allow_exec=*/true);
1824   write_region(mapinfo, rw, &_rw_region, /*read_only=*/false, /*allow_exec=*/false);
1825   write_region(mapinfo, ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
1826   mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), closed_oopmaps, open_oopmaps);
1827 }
1828 
1829 void MetaspaceShared::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only,  bool allow_exec) {
1830   mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
1831 }
1832 
1833 // Update a Java object to point its Klass* to the new location after
1834 // shared archive has been compacted.
1835 void MetaspaceShared::relocate_klass_ptr(oop o) {
1836   assert(DumpSharedSpaces, "sanity");
1837   Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
1838   o->set_klass(k);
1839 }
1840 
1841 Klass* MetaspaceShared::get_relocated_klass(Klass *k, bool is_final) {
1842   assert(DumpSharedSpaces, "sanity");
1843   k = ArchiveCompactor::get_relocated_klass(k);
1844   if (is_final) {
1845     k = (Klass*)(address(k) + final_delta());
1846   }
1847   return k;
1848 }
1849 
1850 Symbol* MetaspaceShared::get_relocated_symbol(Symbol* orig_symbol) {
1851   return ArchiveCompactor::get_relocated_symbol(orig_symbol);
1852 }
1853 
1854 class LinkSharedClassesClosure : public KlassClosure {
1855   Thread* THREAD;
1856   bool    _made_progress;
1857  public:
1858   LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {}
1859 
1860   void reset()               { _made_progress = false; }
1861   bool made_progress() const { return _made_progress; }
1862 
1863   void do_klass(Klass* k) {
1864     if (k->is_instance_klass()) {
1865       InstanceKlass* ik = InstanceKlass::cast(k);
1866       // For dynamic CDS dump, only link classes loaded by the builtin class loaders.
1867       bool do_linking = DumpSharedSpaces ? true : !ik->is_shared_unregistered_class();
1868       if (do_linking) {
1869         // Link the class to cause the bytecodes to be rewritten and the
1870         // cpcache to be created. Class verification is done according
1871         // to the -Xverify setting.
1872         _made_progress |= MetaspaceShared::try_link_class(ik, THREAD);
1873         guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
1874 
1875         if (DumpSharedSpaces) {
1876           // The following function is used to resolve all Strings in the statically
1877           // dumped classes to archive all the Strings. The archive heap is not supported
1878           // for the dynamic archive.
1879           ik->constants()->resolve_class_constants(THREAD);
1880         }
1881       }
1882     }
1883   }
1884 };
1885 
1886 void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) {
1887   // We need to iterate because verification may cause additional classes
1888   // to be loaded.
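       // Illustrative fixed-point trace (a sketch): pass 1 links class K, whose
       // verification loads interface I; I enters the loaded set unlinked, so
       // made_progress() returns true and a second pass links I; a pass that
       // links nothing ends the loop.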
1889   LinkSharedClassesClosure link_closure(THREAD);
1890   do {
1891     link_closure.reset();
1892     ClassLoaderDataGraph::unlocked_loaded_classes_do(&link_closure);
1893     guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
1894   } while (link_closure.made_progress());
1895 }
1896 
1897 void MetaspaceShared::prepare_for_dumping() {
1898   Arguments::check_unsupported_dumping_properties();
1899   ClassLoader::initialize_shared_path();
1900 }
1901 
1902 // Preload classes from a list, populate the shared spaces and dump to a
1903 // file.
1904 void MetaspaceShared::preload_and_dump(TRAPS) {
1905   { TraceTime timer("Dump Shared Spaces", TRACETIME_LOG(Info, startuptime));
1906     ResourceMark rm(THREAD);
1907     char class_list_path_str[JVM_MAXPATHLEN];
1908     // Preload classes to be shared.
1909     const char* class_list_path;
1910     if (SharedClassListFile == NULL) {
1911       // Construct the path to the class list (in jre/lib)
1912       // Strip the VM binary name and walk up two directories from the
1913       // location of the VM, then optionally tack on "lib" (depending on platform)
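           // Illustrative example (the path shape is an assumption; it varies
           // by platform): os::jvm_path() may return
           //   <java_home>/lib/server/libjvm.so
           // Stripping three components leaves <java_home>; the code below then
           // appends "lib" (unless the path already ends in it) and
           // "classlist", yielding <java_home>/lib/classlist.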
1914       os::jvm_path(class_list_path_str, sizeof(class_list_path_str));
1915       for (int i = 0; i < 3; i++) {
1916         char *end = strrchr(class_list_path_str, *os::file_separator());
1917         if (end != NULL) *end = '\0';
1918       }
1919       int class_list_path_len = (int)strlen(class_list_path_str);
1920       if (class_list_path_len >= 3) {
1921         if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
1922           if (class_list_path_len < JVM_MAXPATHLEN - 4) {
1923             jio_snprintf(class_list_path_str + class_list_path_len,
1924                          sizeof(class_list_path_str) - class_list_path_len,
1925                          "%slib", os::file_separator());
1926             class_list_path_len += 4;
1927           }
1928         }
1929       }
1930       if (class_list_path_len < JVM_MAXPATHLEN - 10) {
1931         jio_snprintf(class_list_path_str + class_list_path_len,
1932                      sizeof(class_list_path_str) - class_list_path_len,
1933                      "%sclasslist", os::file_separator());
1934       }
1935       class_list_path = class_list_path_str;
1936     } else {
1937       class_list_path = SharedClassListFile;
1938     }
1939 
1940     log_info(cds)("Loading classes to share ...");
1941     _has_error_classes = false;
1942     int class_count = preload_classes(class_list_path, THREAD);
1943     if (ExtraSharedClassListFile) {
1944       class_count += preload_classes(ExtraSharedClassListFile, THREAD);
1945     }
1946     log_info(cds)("Loading classes to share: done.");
1947 
1948     log_info(cds)("Shared spaces: preloaded %d classes", class_count);
1949 
1950     if (SharedArchiveConfigFile) {
1951       log_info(cds)("Reading extra data from %s ...", SharedArchiveConfigFile);
1952       read_extra_data(SharedArchiveConfigFile, THREAD);
1953     }
1954     log_info(cds)("Reading extra data: done.");
1955 
1956     HeapShared::init_subgraph_entry_fields(THREAD);
1957 
1958     // Rewrite and link classes
1959     log_info(cds)("Rewriting and linking classes ...");
1960 
1961     // Link any classes which got missed. This would happen if we have loaded classes that
1962     // were not explicitly specified in the classlist. E.g., if an interface implemented by class K
1963     // fails verification, all other interfaces that were not specified in the classlist but
1964     // are implemented by K are not verified.
1965     link_and_cleanup_shared_classes(CATCH);
1966     log_info(cds)("Rewriting and linking classes: done");
1967 
1968 #if INCLUDE_CDS_JAVA_HEAP
1969     if (use_full_module_graph()) {
1970       HeapShared::reset_archived_object_states(THREAD);
1971     }
1972 #endif
1973 
1974     VM_PopulateDumpSharedSpace op;
1975     MutexLocker ml(THREAD, HeapShared::is_heap_object_archiving_allowed() ?
1976                    Heap_lock : NULL);     // needed by HeapShared::run_gc()
1977     VMThread::execute(&op);
1978   }
1979 }
1980 
1981 
1982 int MetaspaceShared::preload_classes(const char* class_list_path, TRAPS) {
1983   ClassListParser parser(class_list_path);
1984   int class_count = 0;
1985 
1986   while (parser.parse_one_line()) {
1987     Klass* klass = parser.load_current_class(THREAD);
1988     if (HAS_PENDING_EXCEPTION) {
1989       if (klass == NULL &&
1990           (PENDING_EXCEPTION->klass()->name() == vmSymbols::java_lang_ClassNotFoundException())) {
1991         // print a warning only when the pending exception is class not found
1992         log_warning(cds)("Preload Warning: Cannot find %s", parser.current_class_name());
1993       }
1994       CLEAR_PENDING_EXCEPTION;
1995     }
1996     if (klass != NULL) {
1997       if (log_is_enabled(Trace, cds)) {
1998         ResourceMark rm(THREAD);
1999         log_trace(cds)("Shared spaces preloaded: %s", klass->external_name());
2000       }
2001 
2002       if (klass->is_instance_klass()) {
2003         InstanceKlass* ik = InstanceKlass::cast(klass);
2004 
2005         // Link the class to cause the bytecodes to be rewritten and the
2006         // cpcache to be created. The linking is done as soon as classes
2007         // are loaded, so that the related data structures (klass and
2008         // cpCache) are located together.
2009         try_link_class(ik, THREAD);
2010         guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
2011       }
2012 
2013       class_count++;
2014     }
2015   }
2016 
2017   return class_count;
2018 }
2019 
2020 // Returns true if the class's status has changed
2021 bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
2022   Arguments::assert_is_dumping_archive();
2023   if (ik->init_state() < InstanceKlass::linked &&
2024       !SystemDictionaryShared::has_class_failed_verification(ik)) {
2025     bool saved = BytecodeVerificationLocal;
2026     if (ik->is_shared_unregistered_class() && ik->class_loader() == NULL) {
2027       // The verification decision is based on BytecodeVerificationRemote
2028       // for non-system classes. Since we are using the NULL classloader
2029       // to load non-system classes for customized class loaders during dumping,
2030       // we need to temporarily change BytecodeVerificationLocal to be the same as
2031       // BytecodeVerificationRemote. Note this can cause the parent system
2032       // classes to be verified as well. The extra overhead is acceptable during
2033       // dumping.
2034       BytecodeVerificationLocal = BytecodeVerificationRemote;
2035     }
2036     ik->link_class(THREAD);
2037     if (HAS_PENDING_EXCEPTION) {
2038       ResourceMark rm(THREAD);
2039       log_warning(cds)("Preload Warning: Verification failed for %s",
2040                     ik->external_name());
2041       CLEAR_PENDING_EXCEPTION;
2042       SystemDictionaryShared::set_class_has_failed_verification(ik);
2043       _has_error_classes = true;
2044     }
2045     BytecodeVerificationLocal = saved;
2046     return true;
2047   } else {
2048     return false;
2049   }
2050 }
2051 
2052 #if INCLUDE_CDS_JAVA_HEAP
2053 void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
2054   // The closed and open archive heap spaces have at most two regions each.
2055   // See FileMapInfo::write_archive_heap_regions() for details.
2056   _closed_archive_heap_regions = new GrowableArray<MemRegion>(2);
2057   _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
2058   HeapShared::archive_java_heap_objects(_closed_archive_heap_regions,
2059                                         _open_archive_heap_regions);
2060   ArchiveCompactor::OtherROAllocMark mark;
2061   HeapShared::write_subgraph_info_table();
2062 }
2063 
2064 void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps() {
2065   if (HeapShared::is_heap_object_archiving_allowed()) {
2066     _closed_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
2067     dump_archive_heap_oopmaps(_closed_archive_heap_regions, _closed_archive_heap_oopmaps);
2068 
2069     _open_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
2070     dump_archive_heap_oopmaps(_open_archive_heap_regions, _open_archive_heap_oopmaps);
2071   }
2072 }
2073 
2074 void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
2075                                                            GrowableArray<ArchiveHeapOopmapInfo>* oopmaps) {
2076   for (int i=0; i<regions->length(); i++) {
2077     ResourceBitMap oopmap = HeapShared::calculate_oopmap(regions->at(i));
2078     size_t size_in_bits = oopmap.size();
2079     size_t size_in_bytes = oopmap.size_in_bytes();
2080     uintptr_t* buffer = (uintptr_t*)NEW_C_HEAP_ARRAY(char, size_in_bytes, mtInternal);
2081     oopmap.write_to(buffer, size_in_bytes);
2082     log_info(cds, heap)("Oopmap = " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for heap region "
2083                         INTPTR_FORMAT " (" SIZE_FORMAT_W(8) " bytes)",
2084                         p2i(buffer), size_in_bytes,
2085                         p2i(regions->at(i).start()), regions->at(i).byte_size());
2086 
2087     ArchiveHeapOopmapInfo info;
2088     info._oopmap = (address)buffer;
2089     info._oopmap_size_in_bits = size_in_bits;
2090     info._oopmap_size_in_bytes = size_in_bytes;
2091     oopmaps->append(info);
2092   }
2093 }
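     // Illustrative reading of each bitmap produced above (a sketch of the
     // assumed encoding): bit i covers the i-th oop-sized slot of its region,
     // and a set bit marks a slot that holds an archived oop needing a patch
     // if the region is mapped at a different address at runtime.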
2094 #endif // INCLUDE_CDS_JAVA_HEAP
2095 
2096 void ReadClosure::do_ptr(void** p) {
2097   assert(*p == NULL, "initializing previously initialized pointer.");
2098   intptr_t obj = nextPtr();
2099   assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
2100          "hit tag while initializing ptrs.");
2101   *p = (void*)obj;
2102 }
2103 
2104 void ReadClosure::do_u4(u4* p) {
2105   intptr_t obj = nextPtr();
2106   *p = (u4)(uintx(obj));
2107 }
2108 
2109 void ReadClosure::do_bool(bool* p) {
2110   intptr_t obj = nextPtr();
2111   *p = (bool)(uintx(obj));
2112 }
2113 
2114 void ReadClosure::do_tag(int tag) {
2115   int old_tag = (int)(intptr_t)nextPtr();
2118   assert(tag == old_tag, "old tag doesn't match");
2119   FileMapInfo::assert_mark(tag == old_tag);
2120 }
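     // Illustrative stream shape consumed by this closure (a sketch; the real
     // layout is defined by the matching WriteClosure used at dump time):
     // a tag word precedes each section, e.g.
     //   [tag][ptr][ptr]...[tag][u4][tag][region words...]
     // so do_tag() fails fast on a dump/runtime mismatch instead of letting the
     // remaining reads deserialize garbage.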
2121 
2122 void ReadClosure::do_oop(oop *p) {
2123   narrowOop o = (narrowOop)nextPtr();
2124   if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) {
2125     *p = NULL;
2126   } else {
2127     assert(HeapShared::is_heap_object_archiving_allowed(),
2128            "Archived heap object is not allowed");
2129     assert(HeapShared::open_archive_heap_region_mapped(),
2130            "Open archive heap region is not mapped");
2131     *p = HeapShared::decode_from_archive(o);
2132   }
2133 }
2134 
2135 void ReadClosure::do_region(u_char* start, size_t size) {
2136   assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
2137   assert(size % sizeof(intptr_t) == 0, "bad size");
2138   do_tag((int)size);
2139   while (size > 0) {
2140     *(intptr_t*)start = nextPtr();
2141     start += sizeof(intptr_t);
2142     size -= sizeof(intptr_t);
2143   }
2144 }
2145 
2146 void MetaspaceShared::set_shared_metaspace_range(void* base, void *static_top, void* top) {
2147   assert(base <= static_top && static_top <= top, "must be");
2148   _shared_metaspace_static_top = static_top;
2149   MetaspaceObj::set_shared_metaspace_range(base, top);
2150 }
2151 
2152 // Return true if the given address is within the shared region identified by idx.
2153 bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {
2154   return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx);
2155 }
2156 
2157 bool MetaspaceShared::is_in_trampoline_frame(address addr) {
2158   if (UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc)) {
2159     return true;
2160   }
2161   return false;
2162 }
2163 
2164 bool MetaspaceShared::is_shared_dynamic(void* p) {
2165   if ((p < MetaspaceObj::shared_metaspace_top()) &&
2166       (p >= _shared_metaspace_static_top)) {
2167     return true;
2168   } else {
2169     return false;
2170   }
2171 }
2172 
2173 void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
2174   assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");
2175   MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
2176 
2177   FileMapInfo* static_mapinfo = open_static_archive();
2178   FileMapInfo* dynamic_mapinfo = NULL;
2179 
2180   if (static_mapinfo != NULL) {
2181     dynamic_mapinfo = open_dynamic_archive();
2182 
2183     // First try to map at the requested address
2184     result = map_archives(static_mapinfo, dynamic_mapinfo, true);
2185     if (result == MAP_ARCHIVE_MMAP_FAILURE) {
2186       // Mapping has failed (probably due to ASLR). Let's map at an address chosen
2187       // by the OS.
2188       log_info(cds)("Try to map archive(s) at an alternative address");
2189       result = map_archives(static_mapinfo, dynamic_mapinfo, false);
2190     }
2191   }
2192 
2193   if (result == MAP_ARCHIVE_SUCCESS) {
2194     bool dynamic_mapped = (dynamic_mapinfo != NULL && dynamic_mapinfo->is_mapped());
2195     char* cds_base = static_mapinfo->mapped_base();
2196     char* cds_end =  dynamic_mapped ? dynamic_mapinfo->mapped_end() : static_mapinfo->mapped_end();
2197     set_shared_metaspace_range(cds_base, static_mapinfo->mapped_end(), cds_end);
2198     _relocation_delta = static_mapinfo->relocation_delta();
2199     if (dynamic_mapped) {
2200       FileMapInfo::set_shared_path_table(dynamic_mapinfo);
2201     } else {
2202       FileMapInfo::set_shared_path_table(static_mapinfo);
2203     }
2204     _requested_base_address = static_mapinfo->requested_base_address();
2205   } else {
2206     set_shared_metaspace_range(NULL, NULL, NULL);
2207     UseSharedSpaces = false;
2208     FileMapInfo::fail_continue("Unable to map shared spaces");
2209     if (PrintSharedArchiveAndExit) {
2210       vm_exit_during_initialization("Unable to use shared archive.");
2211     }
2212   }
2213 
2214   if (static_mapinfo != NULL && !static_mapinfo->is_mapped()) {
2215     delete static_mapinfo;
2216   }
2217   if (dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped()) {
2218     delete dynamic_mapinfo;
2219   }
2220 }
2221 
2222 FileMapInfo* MetaspaceShared::open_static_archive() {
2223   FileMapInfo* mapinfo = new FileMapInfo(true);
2224   if (!mapinfo->initialize()) {
2225     delete(mapinfo);
2226     return NULL;
2227   }
2228   return mapinfo;
2229 }
2230 
2231 FileMapInfo* MetaspaceShared::open_dynamic_archive() {
2232   if (DynamicDumpSharedSpaces) {
2233     return NULL;
2234   }
2235   if (Arguments::GetSharedDynamicArchivePath() == NULL) {
2236     return NULL;
2237   }
2238 
2239   FileMapInfo* mapinfo = new FileMapInfo(false);
2240   if (!mapinfo->initialize()) {
2241     delete(mapinfo);
2242     return NULL;
2243   }
2244   return mapinfo;
2245 }
2246 
2247 // use_requested_addr:
2248 //  true  = map at FileMapHeader::_requested_base_address
2249 //  false = map at an alternative address picked by the OS.
2250 MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, FileMapInfo* dynamic_mapinfo,
2251                                                bool use_requested_addr) {
2252   if (use_requested_addr && static_mapinfo->requested_base_address() == NULL) {
2253     log_info(cds)("Archive(s) were created with -XX:SharedBaseAddress=0. Always map at os-selected address.");
2254     return MAP_ARCHIVE_MMAP_FAILURE;
2255   }
2256 
2257   PRODUCT_ONLY(if (ArchiveRelocationMode == 1 && use_requested_addr) {
2258       // For product build only -- this is for benchmarking the cost of doing relocation.
2259       // For debug builds, the check is done below, after reserving the space, for better test coverage
2260       // (see comment below).
2261       log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address");
2262       return MAP_ARCHIVE_MMAP_FAILURE;
2263     });
2264 
2265   if (ArchiveRelocationMode == 2 && !use_requested_addr) {
2266     log_info(cds)("ArchiveRelocationMode == 2: never map archive(s) at an alternative address");
2267     return MAP_ARCHIVE_MMAP_FAILURE;
2268   }
2269 
2270   if (dynamic_mapinfo != NULL) {
2271     // Ensure that the OS won't be able to allocate new memory spaces between the two
2272     // archives, or else it would mess up the simple comparison in MetaspaceObj::is_shared().
2273     assert(static_mapinfo->mapping_end_offset() == dynamic_mapinfo->mapping_base_offset(), "no gap");
2274   }
2275 
2276   ReservedSpace archive_space_rs, class_space_rs;
2277   MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
2278   char* mapped_base_address = reserve_address_space_for_archives(static_mapinfo, dynamic_mapinfo,
2279                                                                  use_requested_addr, archive_space_rs,
2280                                                                  class_space_rs);
2281   if (mapped_base_address == NULL) {
2282     result = MAP_ARCHIVE_MMAP_FAILURE;
2283     log_debug(cds)("Failed to reserve spaces (use_requested_addr=%u)", (unsigned)use_requested_addr);
2284   } else {
2285 
2286 #ifdef ASSERT
2287     // Some sanity checks after reserving address spaces for archives
2288     //  and class space.
2289     assert(archive_space_rs.is_reserved(), "Sanity");
2290     if (Metaspace::using_class_space()) {
2291       // Class space must closely follow the archive space. Both spaces
2292       //  must be aligned correctly.
2293       assert(class_space_rs.is_reserved(),
2294              "A class space should have been reserved");
2295       assert(class_space_rs.base() >= archive_space_rs.end(),
2296              "class space should follow the cds archive space");
2297       assert(is_aligned(archive_space_rs.base(),
2298                         MetaspaceShared::reserved_space_alignment()),
2299              "Archive space misaligned");
2300       assert(is_aligned(class_space_rs.base(),
2301                         Metaspace::reserve_alignment()),
2302              "class space misaligned");
2303     }
2304 #endif // ASSERT
2305 
2306     log_debug(cds)("Reserved archive_space_rs     [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
2307                    p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size());
2308     log_debug(cds)("Reserved class_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
2309                    p2i(class_space_rs.base()), p2i(class_space_rs.end()), class_space_rs.size());
2310 
2311     if (MetaspaceShared::use_windows_memory_mapping()) {
2312       // We have now reserved address space for the archives, and will map in
2313       //  the archive files into this space.
2314       //
2315       // Special handling for Windows: on Windows we cannot map a file view
2316       //  into an existing memory mapping. So, we unmap the address range we
2317       //  just reserved again, which will make it available for mapping the
2318       //  archives.
2319       // Reserving this range has not been for naught however since it makes
2320       //  us reasonably sure the address range is available.
2321       //
2322       // But still it may fail, since between unmapping the range and mapping
2323       //  in the archive someone else may grab the address space. Therefore
2324       //  there is a fallback in FileMap::map_region() where we just read in
2325       //  the archive files sequentially instead of mapping it in. We couple
2326       //  this with use_requested_addr, since we're going to patch all the
2327       //  pointers anyway so there's no benefit to mmap.
2328       if (use_requested_addr) {
2329         log_info(cds)("Windows mmap workaround: releasing archive space.");
2330         archive_space_rs.release();
2331       }
2332     }
2333     MapArchiveResult static_result = map_archive(static_mapinfo, mapped_base_address, archive_space_rs);
2334     MapArchiveResult dynamic_result = (static_result == MAP_ARCHIVE_SUCCESS) ?
2335                                      map_archive(dynamic_mapinfo, mapped_base_address, archive_space_rs) : MAP_ARCHIVE_OTHER_FAILURE;
2336 
2337     DEBUG_ONLY(if (ArchiveRelocationMode == 1 && use_requested_addr) {
2338       // This is for simulating mmap failures at the requested address. In
2339       //  debug builds, we do it here (after all archives have possibly been
2340       //  mapped), so we can thoroughly test the code for failure handling
2341       //  (releasing all allocated resource, etc).
2342       log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address");
2343       if (static_result == MAP_ARCHIVE_SUCCESS) {
2344         static_result = MAP_ARCHIVE_MMAP_FAILURE;
2345       }
2346       if (dynamic_result == MAP_ARCHIVE_SUCCESS) {
2347         dynamic_result = MAP_ARCHIVE_MMAP_FAILURE;
2348       }
2349     });
2350 
2351     if (static_result == MAP_ARCHIVE_SUCCESS) {
2352       if (dynamic_result == MAP_ARCHIVE_SUCCESS) {
2353         result = MAP_ARCHIVE_SUCCESS;
2354       } else if (dynamic_result == MAP_ARCHIVE_OTHER_FAILURE) {
2355         assert(dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped(), "must have failed");
2356         // No need to retry mapping the dynamic archive again, as it will never succeed
2357         // (bad file, etc) -- just keep the base archive.
2358         log_warning(cds, dynamic)("Unable to use shared archive. The top archive failed to load: %s",
2359                                   dynamic_mapinfo->full_path());
2360         result = MAP_ARCHIVE_SUCCESS;
2361         // TODO, we can give the unused space for the dynamic archive to class_space_rs, but there's no
2362         // easy API to do that right now.
2363       } else {
2364         result = MAP_ARCHIVE_MMAP_FAILURE;
2365       }
2366     } else if (static_result == MAP_ARCHIVE_OTHER_FAILURE) {
2367       result = MAP_ARCHIVE_OTHER_FAILURE;
2368     } else {
2369       result = MAP_ARCHIVE_MMAP_FAILURE;
2370     }
2371   }
2372 
2373   if (result == MAP_ARCHIVE_SUCCESS) {
2374     SharedBaseAddress = (size_t)mapped_base_address;
2375     LP64_ONLY({
2376         if (Metaspace::using_class_space()) {
2377           // Set up ccs in metaspace.
2378           Metaspace::initialize_class_space(class_space_rs);
2379 
2380           // Set up compressed Klass pointer encoding: the encoding range must
2381           //  cover both archive and class space.
2382           address cds_base = (address)static_mapinfo->mapped_base();
2383           address ccs_end = (address)class_space_rs.end();
2384           CompressedKlassPointers::initialize(cds_base, ccs_end - cds_base);
2385 
2386           // map_heap_regions() compares the current narrow oop and klass encodings
2387           // with the archived ones, so it must be done after all encodings are determined.
2388           static_mapinfo->map_heap_regions();
2389         }
2390       });
2391     log_info(cds)("optimized module handling: %s", MetaspaceShared::use_optimized_module_handling() ? "enabled" : "disabled");
2392     log_info(cds)("full module graph: %s", MetaspaceShared::use_full_module_graph() ? "enabled" : "disabled");
2393   } else {
2394     unmap_archive(static_mapinfo);
2395     unmap_archive(dynamic_mapinfo);
2396     release_reserved_spaces(archive_space_rs, class_space_rs);
2397   }
2398 
2399   return result;
2400 }
2401 
2402 
2403 // This will reserve two address spaces suitable to house Klass structures, one
2404 //  for the cds archives (static archive and optionally dynamic archive) and
2405 //  optionally one more for ccs.
2406 //
2407 // Since both spaces must fall within the compressed class pointer encoding
2408 //  range, they are allocated close to each other.
2409 //
2410 // Space for archives will be reserved first, followed by a potential gap,
2411 //  followed by the space for ccs:
2412 //
2413 // +-- Base address             A        B                     End
2414 // |                            |        |                      |
2415 // v                            v        v                      v
2416 // +-------------+--------------+        +----------------------+
2417 // | static arc  | [dyn. arch]  | [gap]  | compr. class space   |
2418 // +-------------+--------------+        +----------------------+
2419 //
2420 // (The gap may result from different alignment requirements between metaspace
2421 //  and CDS)
2422 //
2423 // If UseCompressedClassPointers is disabled, only one address space will be
2424 //  reserved:
2425 //
2426 // +-- Base address             End
2427 // |                            |
2428 // v                            v
2429 // +-------------+--------------+
2430 // | static arc  | [dyn. arch]  |
2431 // +-------------+--------------+
2432 //
2433 // Base address: If use_archive_base_addr is true, the base address is
2434 //  determined by the address stored in the static archive. If
2435 //  use_archive_base_addr is false, this base address is determined
2436 //  by the platform.
2437 //
2438 // If UseCompressedClassPointers=1, the range encompassing both spaces will be
2439 //  suitable to en/decode narrow Klass pointers: the base will be valid for
2440 //  encoding, and the range [Base, End) will not surpass KlassEncodingMetaspaceMax.
2441 //
2442 // Return:
2443 //
2444 // - On success:
2445 //    - archive_space_rs will be reserved and large enough to host the static and,
2446 //      if needed, the dynamic archive: [Base, A).
2447 //      archive_space_rs.base and size will be aligned to CDS reserve
2448 //      granularity.
2449 //    - class_space_rs: If UseCompressedClassPointers=1, class_space_rs will
2450 //      be reserved. Its start address will be aligned to metaspace reserve
2451 //      alignment, which may differ from CDS alignment. It will follow the cds
2452 //      archive space, close enough such that narrow class pointer encoding
2453 //      covers both spaces.
2454 //      If UseCompressedClassPointers=0, class_space_rs remains unreserved.
2455 // - On error: NULL is returned and the spaces remain unreserved.
2456 char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_mapinfo,
2457                                                           FileMapInfo* dynamic_mapinfo,
2458                                                           bool use_archive_base_addr,
2459                                                           ReservedSpace& archive_space_rs,
2460                                                           ReservedSpace& class_space_rs) {
2461 
2462   address const base_address = (address) (use_archive_base_addr ? static_mapinfo->requested_base_address() : NULL);
2463   const size_t archive_space_alignment = MetaspaceShared::reserved_space_alignment();
2464 
2465   // Size and requested location of the archive_space_rs (for both static and dynamic archives)
2466   assert(static_mapinfo->mapping_base_offset() == 0, "Must be");
2467   size_t archive_end_offset  = (dynamic_mapinfo == NULL) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset();
2468   size_t archive_space_size = align_up(archive_end_offset, archive_space_alignment);
2469 
2470   // If a base address is given, it must have valid alignment and be suitable as encoding base.
2471   if (base_address != NULL) {
2472     assert(is_aligned(base_address, archive_space_alignment),
2473            "Archive base address invalid: " PTR_FORMAT ".", p2i(base_address));
2474     if (Metaspace::using_class_space()) {
2475       assert(CompressedKlassPointers::is_valid_base(base_address),
2476              "Archive base address invalid: " PTR_FORMAT ".", p2i(base_address));
2477     }
2478   }
2479 
2480   if (!Metaspace::using_class_space()) {
2481     // Get the simple case out of the way first:
2482     // no compressed class space, simple allocation.
2483     archive_space_rs = ReservedSpace(archive_space_size, archive_space_alignment,
2484                                      false /* bool large */, (char*)base_address);
2485     if (archive_space_rs.is_reserved()) {
2486       assert(base_address == NULL ||
2487              (address)archive_space_rs.base() == base_address, "Sanity");
2488       // Register archive space with NMT.
2489       MemTracker::record_virtual_memory_type(archive_space_rs.base(), mtClassShared);
2490       return archive_space_rs.base();
2491     }
2492     return NULL;
2493   }
2494 
2495 #ifdef _LP64
2496 
2497   // Complex case: two spaces adjacent to each other, both to be addressable
2498   //  with narrow class pointers.
2499   // We reserve the whole range spanning both spaces, then split that range up.
2500 
2501   const size_t class_space_alignment = Metaspace::reserve_alignment();
2502 
2503   // To simplify matters, let's assume that metaspace alignment will always be
2504   //  equal to or a multiple of the archive alignment.
2505   assert(is_power_of_2(class_space_alignment) &&
2506                        is_power_of_2(archive_space_alignment) &&
2507                        class_space_alignment >= archive_space_alignment,
2508                        "Sanity");
2509 
2510   const size_t class_space_size = CompressedClassSpaceSize;
2511   assert(CompressedClassSpaceSize > 0 &&
2512          is_aligned(CompressedClassSpaceSize, class_space_alignment),
2513          "CompressedClassSpaceSize malformed: "
2514          SIZE_FORMAT, CompressedClassSpaceSize);
2515 
2516   const size_t ccs_begin_offset = align_up(base_address + archive_space_size,
2517                                            class_space_alignment) - base_address;
2518   const size_t gap_size = ccs_begin_offset - archive_space_size;
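       // Worked example (illustrative numbers): with base_address = 0x800000000
       // (16 MB aligned), archive_space_size = 12 MB and class_space_alignment
       // = 16 MB, ccs_begin_offset = align_up(base + 12 MB, 16 MB) - base
       // = 16 MB and gap_size = 16 MB - 12 MB = 4 MB.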
2519 
2520   const size_t total_range_size =
2521       align_up(archive_space_size + gap_size + class_space_size,
2522                os::vm_allocation_granularity());
2523 
2524   ReservedSpace total_rs;
2525   if (base_address != NULL) {
2526     // Reserve at the given archive base address, or not at all.
2527     total_rs = ReservedSpace(total_range_size, archive_space_alignment,
2528                              false /* bool large */, (char*) base_address);
2529   } else {
2530     // Reserve at any address, but leave it up to the platform to choose a good one.
2531     total_rs = Metaspace::reserve_address_space_for_compressed_classes(total_range_size);
2532   }
2533 
2534   if (!total_rs.is_reserved()) {
2535     return NULL;
2536   }
2537 
2538   // Paranoid checks:
2539   assert(base_address == NULL || (address)total_rs.base() == base_address,
2540          "Sanity (" PTR_FORMAT " vs " PTR_FORMAT ")", p2i(base_address), p2i(total_rs.base()));
2541   assert(is_aligned(total_rs.base(), archive_space_alignment), "Sanity");
2542   assert(total_rs.size() == total_range_size, "Sanity");
2543   assert(CompressedKlassPointers::is_valid_base((address)total_rs.base()), "Sanity");
2544 
2545   // Now split up the space into ccs and cds archive. For simplicity, just leave
2546   //  the gap reserved at the end of the archive space.
2547   archive_space_rs = total_rs.first_part(ccs_begin_offset,
2548                                          (size_t)os::vm_allocation_granularity(),
2549                                          /*split=*/true);
2550   class_space_rs = total_rs.last_part(ccs_begin_offset);
2551 
2552   assert(is_aligned(archive_space_rs.base(), archive_space_alignment), "Sanity");
2553   assert(is_aligned(archive_space_rs.size(), archive_space_alignment), "Sanity");
2554   assert(is_aligned(class_space_rs.base(), class_space_alignment), "Sanity");
2555   assert(is_aligned(class_space_rs.size(), class_space_alignment), "Sanity");
2556 
2557   // NMT: fix up the space tags
2558   MemTracker::record_virtual_memory_type(archive_space_rs.base(), mtClassShared);
2559   MemTracker::record_virtual_memory_type(class_space_rs.base(), mtClass);
2560 
2561   return archive_space_rs.base();
2562 
2563 #else
2564   ShouldNotReachHere();
2565   return NULL;
2566 #endif
2567 
2568 }
2569 
2570 void MetaspaceShared::release_reserved_spaces(ReservedSpace& archive_space_rs,
2571                                               ReservedSpace& class_space_rs) {
2572   if (archive_space_rs.is_reserved()) {
2573     log_debug(cds)("Released shared space (archive) " INTPTR_FORMAT, p2i(archive_space_rs.base()));
2574     archive_space_rs.release();
2575   }
2576   if (class_space_rs.is_reserved()) {
2577     log_debug(cds)("Released shared space (classes) " INTPTR_FORMAT, p2i(class_space_rs.base()));
2578     class_space_rs.release();
2579   }
2580 }
2581 
2582 static int archive_regions[]  = {MetaspaceShared::mc,
2583                                  MetaspaceShared::rw,
2584                                  MetaspaceShared::ro};
2585 static int archive_regions_count  = 3;
2586 
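     // Map the mc/rw/ro regions of the given archive at mapped_base_address within
     //  the reserved space rs. A NULL mapinfo (no dynamic archive specified)
     //  trivially succeeds. On mapping or validation failure, any partially mapped
     //  regions are unmapped again and a failure code is returned.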
2587 MapArchiveResult MetaspaceShared::map_archive(FileMapInfo* mapinfo, char* mapped_base_address, ReservedSpace rs) {
2588   assert(UseSharedSpaces, "must be runtime");
2589   if (mapinfo == NULL) {
2590     return MAP_ARCHIVE_SUCCESS; // The dynamic archive has not been specified. No error has happened -- trivially succeeded.
2591   }
2592 
2593   mapinfo->set_is_mapped(false);
2594 
2595   if (mapinfo->alignment() != (size_t)os::vm_allocation_granularity()) {
2596     log_error(cds)("Unable to map CDS archive -- os::vm_allocation_granularity() expected: " SIZE_FORMAT
2597                    " actual: %d", mapinfo->alignment(), os::vm_allocation_granularity());
2598     return MAP_ARCHIVE_OTHER_FAILURE;
2599   }
2600 
2601   MapArchiveResult result =
2602     mapinfo->map_regions(archive_regions, archive_regions_count, mapped_base_address, rs);
2603 
2604   if (result != MAP_ARCHIVE_SUCCESS) {
2605     unmap_archive(mapinfo);
2606     return result;
2607   }
2608 
2609   if (!mapinfo->validate_shared_path_table()) {
2610     unmap_archive(mapinfo);
2611     return MAP_ARCHIVE_OTHER_FAILURE;
2612   }
2613 
2614   mapinfo->set_is_mapped(true);
2615   return MAP_ARCHIVE_SUCCESS;
2616 }
2617 
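     // Undo a (possibly partial) mapping done by map_archive().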
2618 void MetaspaceShared::unmap_archive(FileMapInfo* mapinfo) {
2619   assert(UseSharedSpaces, "must be runtime");
2620   if (mapinfo != NULL) {
2621     mapinfo->unmap_regions(archive_regions, archive_regions_count);
2622     mapinfo->set_is_mapped(false);
2623   }
2624 }
2625 
2626 // Read the miscellaneous data from the shared file and
2627 // deserialize it into its various destinations.
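     // This runs after the archive regions have been mapped in: clone the archived
     // C++ vtables, deserialize the shared tables, patch archived heap objects,
     // then release the archive file(s) and the bitmap region, which are no
     // longer needed.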
2628 
2629 void MetaspaceShared::initialize_shared_spaces() {
2630   FileMapInfo *static_mapinfo = FileMapInfo::current_info();
2631   _i2i_entry_code_buffers = static_mapinfo->i2i_entry_code_buffers();
2632   _i2i_entry_code_buffers_size = static_mapinfo->i2i_entry_code_buffers_size();
2633   char* buffer = static_mapinfo->cloned_vtables();
2634   clone_cpp_vtables((intptr_t*)buffer);
2635 
2636   // Verify various attributes of the archive, plus initialize the
2637   // shared string/symbol tables
2638   buffer = static_mapinfo->serialized_data();
2639   intptr_t* array = (intptr_t*)buffer;
2640   ReadClosure rc(&array);
2641   serialize(&rc);
2642 
2643   // Initialize the run-time symbol table.
2644   SymbolTable::create_table();
2645 
2646   static_mapinfo->patch_archived_heap_embedded_pointers();
2647 
2648   // Close the mapinfo file
2649   static_mapinfo->close();
2650 
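       // The bitmap region is only used for relocating/patching pointers while
       //  mapping; once that is done it can be unmapped.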
2651   static_mapinfo->unmap_region(MetaspaceShared::bm);
2652 
2653   FileMapInfo *dynamic_mapinfo = FileMapInfo::dynamic_info();
2654   if (dynamic_mapinfo != NULL) {
2655     intptr_t* buffer = (intptr_t*)dynamic_mapinfo->serialized_data();
2656     ReadClosure rc(&buffer);
2657     SymbolTable::serialize_shared_table_header(&rc, false);
2658     SystemDictionaryShared::serialize_dictionary_headers(&rc, false);
2659     dynamic_mapinfo->close();
2660   }
2661 
2662   if (PrintSharedArchiveAndExit) {
2663     if (PrintSharedDictionary) {
2664       tty->print_cr("\nShared classes:\n");
2665       SystemDictionaryShared::print_on(tty);
2666     }
2667     if (FileMapInfo::current_info() == NULL || _archive_loading_failed) {
2668       tty->print_cr("archive is invalid");
2669       vm_exit(1);
2670     } else {
2671       tty->print_cr("archive is valid");
2672       vm_exit(0);
2673     }
2674   }
2675 }
2676 
2677 // JVM/TI RedefineClasses() support:
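     // RedefineClasses() may need to modify archived metadata, so the read-only
     //  regions of the mapped archive(s) are remapped read-write. The mapping is
     //  private, so modifications are not written back to the archive file.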
2678 bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
2679   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2680 
2681   if (UseSharedSpaces) {
2682     // remap the shared readonly space to shared readwrite, private
2683     FileMapInfo* mapinfo = FileMapInfo::current_info();
2684     if (!mapinfo->remap_shared_readonly_as_readwrite()) {
2685       return false;
2686     }
2687     if (FileMapInfo::dynamic_info() != NULL) {
2688       mapinfo = FileMapInfo::dynamic_info();
2689       if (!mapinfo->remap_shared_readonly_as_readwrite()) {
2690         return false;
2691       }
2692     }
2693     _remapped_readwrite = true;
2694   }
2695   return true;
2696 }
2697 
2698 void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
2699   // This is highly unlikely to happen on 64-bit because we have reserved a 4GB space.
2700   // On 32-bit we reserve only 256MB, so it is possible to run out of space with
2701   // 100,000 classes or so.
2702   _mc_region.print_out_of_space_msg(name, needed_bytes);
2703   _rw_region.print_out_of_space_msg(name, needed_bytes);
2704   _ro_region.print_out_of_space_msg(name, needed_bytes);
2705 
2706   vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
2707                                 "Please reduce the number of shared classes.");
2708 }
2709 
2710 // This is used to relocate the pointers so that the base archive can be mapped at
2711 // MetaspaceShared::requested_base_address() without runtime relocation.
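     // For example (illustrative addresses): if the archive is dumped with
     //  SharedBaseAddress == 0x800000000 but should map at 0x810000000 at runtime,
     //  final_delta() is 0x10000000, and archived pointers are adjusted by that
     //  amount when the archive is written.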
2712 intx MetaspaceShared::final_delta() {
2713   return intx(MetaspaceShared::requested_base_address())  // We want the base archive mapped here at runtime
2714        - intx(SharedBaseAddress);                         // .. but this is where it is mapped at dump time
2715 }
2716 
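     // The archived full module graph is only usable if heap object archiving is
     //  possible and optimized module handling has not been disabled.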
2717 bool MetaspaceShared::use_full_module_graph() {
2718   return _use_optimized_module_handling && _use_full_module_graph &&
2719     (UseSharedSpaces || DumpSharedSpaces) && HeapShared::is_heap_object_archiving_allowed();
2720 }
2721 
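     // Example output at runtime (illustrative values; the middle address is the
     //  boundary between the static and the dynamic archive):
     //   CDS archive(s) mapped at: [0x0000000800000000-0x0000000800b00000-0x0000000800c00000), size 12582912, SharedBaseAddress: 0x0000000800000000, ArchiveRelocationMode: 0.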
2722 void MetaspaceShared::print_on(outputStream* st) {
2723   if (UseSharedSpaces || DumpSharedSpaces) {
2724     st->print("CDS archive(s) mapped at: ");
2725     address base;
2726     address top;
2727     if (UseSharedSpaces) { // Runtime
2728       base = (address)MetaspaceObj::shared_metaspace_base();
2729       address static_top = (address)_shared_metaspace_static_top;
2730       top = (address)MetaspaceObj::shared_metaspace_top();
2731       st->print("[" PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "), ", p2i(base), p2i(static_top), p2i(top));
2732     } else if (DumpSharedSpaces) { // Dump Time
2733       base = (address)_shared_rs.base();
2734       top = (address)_shared_rs.end();
2735       st->print("[" PTR_FORMAT "-" PTR_FORMAT "), ", p2i(base), p2i(top));
2736     }
2737     st->print("size " SIZE_FORMAT ", ", top - base);
2738     st->print("SharedBaseAddress: " PTR_FORMAT ", ArchiveRelocationMode: %d.", SharedBaseAddress, (int)ArchiveRelocationMode);
2739   } else {
2740     st->print("CDS disabled.");
2741   }
2742   st->cr();
2743 }