/*
 * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/classListParser.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/loaderConstraints.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/placeholders.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "code/codeCache.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodes.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "memory/archiveBuilder.hpp"
#include "memory/archiveUtils.inline.hpp"
#include "memory/cppVtables.hpp"
#include "memory/dumpAllocStats.hpp"
#include "memory/dynamicArchive.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/ostream.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/hashtable.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

ReservedSpace MetaspaceShared::_shared_rs;
VirtualSpace MetaspaceShared::_shared_vs;
ReservedSpace MetaspaceShared::_symbol_rs;
VirtualSpace MetaspaceShared::_symbol_vs;
MetaspaceSharedStats MetaspaceShared::_stats;
bool MetaspaceShared::_has_error_classes;
bool MetaspaceShared::_archive_loading_failed = false;
bool MetaspaceShared::_remapped_readwrite = false;
address MetaspaceShared::_i2i_entry_code_buffers = NULL;
size_t MetaspaceShared::_i2i_entry_code_buffers_size = 0;
void* MetaspaceShared::_shared_metaspace_static_top = NULL;
intx MetaspaceShared::_relocation_delta;
char* MetaspaceShared::_requested_base_address;
bool MetaspaceShared::_use_optimized_module_handling = true;
bool MetaspaceShared::_use_full_module_graph = true;

// The CDS archive is divided into the following regions:
//     mc  - misc code (the method entry trampolines, c++ vtables)
//     rw  - read-write metadata
//     ro  - read-only metadata and read-only tables
//
//     ca0 - closed archive heap space #0
//     ca1 - closed archive heap space #1 (may be empty)
//     oa0 - open archive heap space #0
//     oa1 - open archive heap space #1 (may be empty)
//
// The mc, rw, and ro regions are linearly allocated, starting from
// SharedBaseAddress, in the order of mc->rw->ro. The sizes of these 3 regions
// are page-aligned, and there's no gap between any consecutive regions.
//
// These 3 regions are populated in the following steps:
// [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
//     temporarily allocated outside of the shared regions. Only the method entry
//     trampolines are written into the mc region.
// [2] C++ vtables are copied into the mc region.
// [3] ArchiveBuilder copies RW metadata into the rw region.
// [4] ArchiveBuilder copies RO metadata into the ro region.
// [5] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
//     are copied into the ro region as read-only tables.
//
// The ca0/ca1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
// Their layout is independent of the other 3 regions.
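//
// A worked example of the linear layout (illustrative only; the real values
// depend on SharedBaseAddress, page size, and region contents): with
// SharedBaseAddress = 0x800000000,
//     mc starts at 0x800000000
//     rw starts at 0x800000000 + align_up(mc_size, page_size)
//     ro starts right after rw, again at the next page boundary,
// so the end of ro is SharedBaseAddress plus the sum of the three
// page-aligned region sizes, with no gaps in between.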

static DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _symbol_region("symbols");
static size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;

void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space) {
  first_space->init(&_shared_rs, &_shared_vs);
}

DumpRegion* MetaspaceShared::misc_code_dump_space() {
  return &_mc_region;
}

DumpRegion* MetaspaceShared::read_write_dump_space() {
  return &_rw_region;
}

DumpRegion* MetaspaceShared::read_only_dump_space() {
  return &_ro_region;
}

void MetaspaceShared::pack_dump_space(DumpRegion* current, DumpRegion* next,
                                      ReservedSpace* rs) {
  current->pack(next);
}

char* MetaspaceShared::symbol_space_alloc(size_t num_bytes) {
  return _symbol_region.allocate(num_bytes);
}

char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
  return _mc_region.allocate(num_bytes);
}

char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
  return _ro_region.allocate(num_bytes);
}

char* MetaspaceShared::read_write_space_alloc(size_t num_bytes) {
  return _rw_region.allocate(num_bytes);
}

size_t MetaspaceShared::reserved_space_alignment() { return os::vm_allocation_granularity(); }

static bool shared_base_valid(char* shared_base) {
#ifdef _LP64
  return CompressedKlassPointers::is_valid_base((address)shared_base);
#else
  return true;
#endif
}

static bool shared_base_too_high(char* shared_base, size_t cds_total) {
  if (SharedBaseAddress != 0 && shared_base < (char*)SharedBaseAddress) {
    // SharedBaseAddress is very high (e.g., 0xffffffffffffff00) so
    // align_up(SharedBaseAddress, MetaspaceShared::reserved_space_alignment()) has wrapped around.
    return true;
  }
  if (max_uintx - uintx(shared_base) < uintx(cds_total)) {
    // The end of the archive will wrap around
    return true;
  }

  return false;
}
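
// A worked example of the wrap-around check above (hypothetical values):
// with shared_base = 0xffffffff00000000 and cds_total = 4G,
// max_uintx - uintx(shared_base) == 0xffffffff, which is less than 4G, so
// shared_base + cds_total would wrap past the top of the address space and
// this base is rejected.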

static char* compute_shared_base(size_t cds_total) {
  char* shared_base = (char*)align_up((char*)SharedBaseAddress, MetaspaceShared::reserved_space_alignment());
  const char* err = NULL;
  if (shared_base_too_high(shared_base, cds_total)) {
    err = "too high";
  } else if (!shared_base_valid(shared_base)) {
    err = "invalid for this platform";
  }
  if (err) {
    log_warning(cds)("SharedBaseAddress (" INTPTR_FORMAT ") is %s. Reverted to " INTPTR_FORMAT,
                     p2i((void*)SharedBaseAddress), err,
                     p2i((void*)Arguments::default_SharedBaseAddress()));
    SharedBaseAddress = Arguments::default_SharedBaseAddress();
    shared_base = (char*)align_up((char*)SharedBaseAddress, MetaspaceShared::reserved_space_alignment());
  }
  assert(!shared_base_too_high(shared_base, cds_total) && shared_base_valid(shared_base), "Sanity");
  return shared_base;
}

void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
  assert(DumpSharedSpaces, "should be called for dump time only");

  const size_t reserve_alignment = MetaspaceShared::reserved_space_alignment();

#ifdef _LP64
  // On 64-bit VM we reserve a 4G range and, if UseCompressedClassPointers=1,
  //  will use that to house both the archives and the ccs. See below for
  //  details.
  const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
  const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
#else
  // We don't support archives larger than 256MB on 32-bit due to limited
  //  virtual address space.
  size_t cds_total = align_down(256*M, reserve_alignment);
#endif

  char* shared_base = compute_shared_base(cds_total);
  _requested_base_address = shared_base;

  // Whether to use SharedBaseAddress as attach address.
  bool use_requested_base = true;

  if (shared_base == NULL) {
    use_requested_base = false;
  }

  if (ArchiveRelocationMode == 1) {
    log_info(cds)("ArchiveRelocationMode == 1: always allocate class space at an alternative address");
    use_requested_base = false;
  }

  // First try to reserve the space at the specified SharedBaseAddress.
  assert(!_shared_rs.is_reserved(), "must be");
  if (use_requested_base) {
    _shared_rs = ReservedSpace(cds_total, reserve_alignment,
                               false /* large */, (char*)shared_base);
    if (_shared_rs.is_reserved()) {
      assert(_shared_rs.base() == shared_base, "should match");
    } else {
      log_info(cds)("dumptime space reservation: failed to map at "
                    "SharedBaseAddress " PTR_FORMAT, p2i(shared_base));
    }
  }
  if (!_shared_rs.is_reserved()) {
    // Get a reserved space anywhere if attaching at the SharedBaseAddress
    //  fails:
    if (UseCompressedClassPointers) {
      // If we need to reserve class space as well, let the platform handle
      //  the reservation.
      LP64_ONLY(_shared_rs =
                Metaspace::reserve_address_space_for_compressed_classes(cds_total);)
      NOT_LP64(ShouldNotReachHere();)
    } else {
      // anywhere is fine.
      _shared_rs = ReservedSpace(cds_total, reserve_alignment,
                                 false /* large */, (char*)NULL);
    }
  }

  if (!_shared_rs.is_reserved()) {
    vm_exit_during_initialization("Unable to reserve memory for shared space",
                                  err_msg(SIZE_FORMAT " bytes.", cds_total));
  }

#ifdef _LP64

  if (UseCompressedClassPointers) {

    assert(CompressedKlassPointers::is_valid_base((address)_shared_rs.base()), "Sanity");

    // On 64-bit VM, if UseCompressedClassPointers=1, the compressed class space
    //  must be allocated near the cds such that the compressed Klass pointer
    //  encoding can be used to en/decode pointers from both cds and ccs. Since
    //  Metaspace cannot do this (it knows nothing about cds), we do it for
    //  Metaspace here and pass it the space to use for ccs.
    //
    // We do this by reserving space for the ccs behind the archives. Note
    //  however that ccs follows a different alignment
    //  (Metaspace::reserve_alignment), so there may be a gap between ccs and
    //  cds.
    // We use a similar layout at runtime, see reserve_address_space_for_archives().
    //
    //                              +-- SharedBaseAddress (default = 0x800000000)
    //                              v
    // +-..---------+---------+ ... +----+----+----+--------+-----------------+
    // |    Heap    | Archive |     | MC | RW | RO | [gap]  |    class space  |
    // +-..---------+---------+ ... +----+----+----+--------+-----------------+
    // |<--   MaxHeapSize  -->|     |<-- UnscaledClassSpaceMax = 4GB -->|
    //
    // Note: ccs must follow the archives, and the archives must start at the
    //  encoding base. However, the exact placement of ccs does not matter as
    //  long as it resides in the encoding range of CompressedKlassPointers
    //  and comes after the archive.
    //
    // We do this by splitting up the allocated 4G into 3G of archive space,
    //  followed by 1G for the ccs:
    // + The upper 1 GB is used as the "temporary compressed class space"
    //   -- preload_classes() will store Klasses into this space.
    // + The lower 3 GB is used for the archive -- when preload_classes()
    //   is done, ArchiveBuilder will copy the class metadata into this
    //   space, first the RW parts, then the RO parts.

    // Starting address of ccs must be aligned to Metaspace::reserve_alignment()...
    size_t class_space_size = align_down(_shared_rs.size() / 4, Metaspace::reserve_alignment());
    address class_space_start = (address)align_down(_shared_rs.end() - class_space_size, Metaspace::reserve_alignment());
    size_t archive_size = class_space_start - (address)_shared_rs.base();
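
    // Worked example (assuming the full 4G reservation and an alignment
    // that divides 1G evenly):
    //   class_space_size  = align_down(4G / 4, alignment) = 1G
    //   class_space_start = _shared_rs.end() - 1G
    //   archive_size      = 4G - 1G = 3G
    // which yields the 3G archive / 1G ccs split described above.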

    ReservedSpace tmp_class_space = _shared_rs.last_part(archive_size);
    _shared_rs = _shared_rs.first_part(archive_size);

    // ... as does the size of ccs.
    tmp_class_space = tmp_class_space.first_part(class_space_size);
    CompressedClassSpaceSize = class_space_size;

    // Let Metaspace initialize ccs
    Metaspace::initialize_class_space(tmp_class_space);

    // and set up CompressedKlassPointers encoding.
    CompressedKlassPointers::initialize((address)_shared_rs.base(), cds_total);

    log_info(cds)("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
                  p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());

    log_info(cds)("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                  CompressedClassSpaceSize, p2i(tmp_class_space.base()));

    assert(_shared_rs.end() == tmp_class_space.base() &&
           is_aligned(_shared_rs.base(), MetaspaceShared::reserved_space_alignment()) &&
           is_aligned(tmp_class_space.base(), Metaspace::reserve_alignment()) &&
           is_aligned(tmp_class_space.size(), Metaspace::reserve_alignment()), "Sanity");
  }

#endif

  init_shared_dump_space(&_mc_region);
  SharedBaseAddress = (size_t)_shared_rs.base();
  log_info(cds)("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                _shared_rs.size(), p2i(_shared_rs.base()));

  // We don't want any valid object to be at the very bottom of the archive.
  // See ArchivePtrMarker::mark_pointer().
  MetaspaceShared::misc_code_space_alloc(16);

  size_t symbol_rs_size = LP64_ONLY(3 * G) NOT_LP64(128 * M);
  _symbol_rs = ReservedSpace(symbol_rs_size);
  if (!_symbol_rs.is_reserved()) {
    vm_exit_during_initialization("Unable to reserve memory for symbols",
                                  err_msg(SIZE_FORMAT " bytes.", symbol_rs_size));
  }
  _symbol_region.init(&_symbol_rs, &_symbol_vs);
}

// Called by universe_post_init()
void MetaspaceShared::post_initialize(TRAPS) {
  if (UseSharedSpaces) {
    int size = FileMapInfo::get_number_of_shared_paths();
    if (size > 0) {
      SystemDictionaryShared::allocate_shared_data_arrays(size, THREAD);
      if (!DynamicDumpSharedSpaces) {
        FileMapInfo* info;
        if (FileMapInfo::dynamic_info() == NULL) {
          info = FileMapInfo::current_info();
        } else {
          info = FileMapInfo::dynamic_info();
        }
        ClassLoaderExt::init_paths_start_index(info->app_class_paths_start_index());
        ClassLoaderExt::init_app_module_paths_start_index(info->app_module_paths_start_index());
      }
    }
  }
}

static GrowableArrayCHeap<OopHandle, mtClassShared>* _extra_interned_strings = NULL;
static GrowableArrayCHeap<Symbol*, mtClassShared>* _extra_symbols = NULL;

void MetaspaceShared::read_extra_data(const char* filename, TRAPS) {
  _extra_interned_strings = new GrowableArrayCHeap<OopHandle, mtClassShared>(10000);
  _extra_symbols = new GrowableArrayCHeap<Symbol*, mtClassShared>(1000);

  HashtableTextDump reader(filename);
  reader.check_version("VERSION: 1.0");

  while (reader.remain() > 0) {
    int utf8_length;
    int prefix_type = reader.scan_prefix(&utf8_length);
    ResourceMark rm(THREAD);
    if (utf8_length == 0x7fffffff) {
      // buf_len would overflow the 32-bit int range.
      vm_exit_during_initialization(err_msg("string length too large: %d", utf8_length));
    }
    int buf_len = utf8_length+1;
    char* utf8_buffer = NEW_RESOURCE_ARRAY(char, buf_len);
    reader.get_utf8(utf8_buffer, utf8_length);
    utf8_buffer[utf8_length] = '\0';

    if (prefix_type == HashtableTextDump::SymbolPrefix) {
      _extra_symbols->append(SymbolTable::new_permanent_symbol(utf8_buffer));
    } else {
      assert(prefix_type == HashtableTextDump::StringPrefix, "Sanity");
      oop str = StringTable::intern(utf8_buffer, THREAD);

      if (HAS_PENDING_EXCEPTION) {
        log_warning(cds, heap)("[line %d] extra interned string allocation failed; size too large: %d",
                               reader.last_line_no(), utf8_length);
        CLEAR_PENDING_EXCEPTION;
      } else {
#if INCLUDE_G1GC
        if (UseG1GC) {
          typeArrayOop body = java_lang_String::value(str);
          const HeapRegion* hr = G1CollectedHeap::heap()->heap_region_containing(body);
          if (hr->is_humongous()) {
            // Don't keep it alive, so it will be GC'ed before we dump the strings, in order
            // to maximize free heap space and minimize fragmentation.
            log_warning(cds, heap)("[line %d] extra interned string ignored; size too large: %d",
                                   reader.last_line_no(), utf8_length);
            continue;
          }
        }
#endif
        // Make sure this string is included in the dumped interned string table.
        assert(str != NULL, "must succeed");
        _extra_interned_strings->append(OopHandle(Universe::vm_global(), str));
      }
    }
  }
}
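
// Input sketch (inferred from the parsing code above, not a format spec):
// the file named by -XX:SharedArchiveConfigFile starts with a "VERSION: 1.0"
// line, followed by entries tagged as symbols or strings
// (HashtableTextDump::SymbolPrefix / StringPrefix); each entry carries the
// UTF-8 length of its payload, which scan_prefix()/get_utf8() consume.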

void MetaspaceShared::commit_to(ReservedSpace* rs, VirtualSpace* vs, char* newtop) {
  Arguments::assert_is_dumping_archive();
  char* base = rs->base();
  size_t need_committed_size = newtop - base;
  size_t has_committed_size = vs->committed_size();
  if (need_committed_size < has_committed_size) {
    return;
  }

  size_t min_bytes = need_committed_size - has_committed_size;
  size_t preferred_bytes = 1 * M;
  size_t uncommitted = vs->reserved_size() - has_committed_size;

  size_t commit = MAX2(min_bytes, preferred_bytes);
  commit = MIN2(commit, uncommitted);
  assert(commit <= uncommitted, "sanity");
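
  // Sizing example (illustrative numbers): if only 4K more needs to be
  // committed but plenty of the reservation is still uncommitted, we commit
  // the preferred 1M in one step to amortize future calls; if only 512K
  // remains uncommitted, the request is clamped down to that 512K.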

  bool result = vs->expand_by(commit, false);
  if (rs == &_shared_rs) {
    ArchivePtrMarker::expand_ptr_end((address*)vs->high());
  }

  if (!result) {
    vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
                                          need_committed_size));
  }

  assert(rs == &_shared_rs || rs == &_symbol_rs, "must be");
  const char* which = (rs == &_shared_rs) ? "shared" : "symbol";
  log_debug(cds)("Expanding %s spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
                 which, commit, vs->actual_committed_size(), vs->high());
}

void MetaspaceShared::initialize_ptr_marker(CHeapBitMap* ptrmap) {
  ArchivePtrMarker::initialize(ptrmap, (address*)_shared_vs.low(), (address*)_shared_vs.high());
}

// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.

void MetaspaceShared::serialize(SerializeClosure* soc) {
  int tag = 0;
  soc->do_tag(--tag);

  // Verify the sizes of various metadata in the system.
  soc->do_tag(sizeof(Method));
  soc->do_tag(sizeof(ConstMethod));
  soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(ConstantPool));
  soc->do_tag(sizeof(ConstantPoolCache));
  soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
  soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(Symbol));

  // Dump/restore miscellaneous metadata.
  JavaClasses::serialize_offsets(soc);
  Universe::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore references to commonly used names and signatures.
  vmSymbols::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore the symbol/string/subgraph_info tables
  SymbolTable::serialize_shared_table_header(soc);
  StringTable::serialize_shared_table_header(soc);
  HeapShared::serialize_subgraph_info_table_header(soc);
  SystemDictionaryShared::serialize_dictionary_headers(soc);

  InstanceMirrorKlass::serialize_offsets(soc);

  // Dump/restore well known classes (pointers)
  SystemDictionaryShared::serialize_well_known_klasses(soc);
  soc->do_tag(--tag);

  CppVtables::serialize_cloned_cpp_vtptrs(soc);
  soc->do_tag(--tag);

  CDS_JAVA_HEAP_ONLY(ClassLoaderDataShared::serialize(soc));

  soc->do_tag(666);
}
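
// The do_tag() calls above act as cheap consistency checks: at dump time
// WriteClosure writes each tag value into the archive, and at run time the
// matching ReadClosure::do_tag() verifies that the same value (e.g.
// sizeof(Method)) is read back, catching layout drift between the dumping
// and loading VMs before any serialized data is used.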

address MetaspaceShared::i2i_entry_code_buffers(size_t total_size) {
  if (DumpSharedSpaces) {
    if (_i2i_entry_code_buffers == NULL) {
      _i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
      _i2i_entry_code_buffers_size = total_size;
    }
  } else if (UseSharedSpaces) {
    assert(_i2i_entry_code_buffers != NULL, "must have already been initialized");
  } else {
    return NULL;
  }

  assert(_i2i_entry_code_buffers_size == total_size, "must not change");
  return _i2i_entry_code_buffers;
}

uintx MetaspaceShared::object_delta_uintx(void* obj) {
  Arguments::assert_is_dumping_archive();
  if (DumpSharedSpaces) {
    assert(shared_rs()->contains(obj), "must be");
  } else {
    assert(is_in_shared_metaspace(obj) || DynamicArchive::is_in_target_space(obj), "must be");
  }
  address base_address = address(SharedBaseAddress);
  uintx deltax = address(obj) - base_address;
  return deltax;
}
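
// Worked example (hypothetical addresses): with SharedBaseAddress =
// 0x800000000 and obj at 0x800001230, the returned delta is 0x1230.
// Callers that store a narrowed delta rely on the archive spanning less
// than 4G above SharedBaseAddress.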

// Global object for holding classes that have been loaded.  Since this
// is run at a safepoint just before exit, this is the entire set of classes.
static GrowableArray<Klass*>* _global_klass_objects;

GrowableArray<Klass*>* MetaspaceShared::collected_klasses() {
  return _global_klass_objects;
}

static void rewrite_nofast_bytecode(const methodHandle& method) {
  BytecodeStream bcs(method);
  while (!bcs.is_last_bytecode()) {
    Bytecodes::Code opcode = bcs.next();
    switch (opcode) {
    case Bytecodes::_getfield:      *bcs.bcp() = Bytecodes::_nofast_getfield;      break;
    case Bytecodes::_putfield:      *bcs.bcp() = Bytecodes::_nofast_putfield;      break;
    case Bytecodes::_aload_0:       *bcs.bcp() = Bytecodes::_nofast_aload_0;       break;
    case Bytecodes::_iload: {
      if (!bcs.is_wide()) {
        *bcs.bcp() = Bytecodes::_nofast_iload;
      }
      break;
    }
    default: break;
    }
  }
}

// [1] Rewrite all bytecodes as needed, so that the ConstMethod* will not be modified
//     at run time by RewriteBytecodes/RewriteFrequentPairs
// [2] Assign a fingerprint, so one doesn't need to be assigned at run-time.
void MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread, InstanceKlass* ik) {
  for (int i = 0; i < ik->methods()->length(); i++) {
    methodHandle m(thread, ik->methods()->at(i));
    rewrite_nofast_bytecode(m);
    Fingerprinter fp(m);
    // The side effect of this call sets method's fingerprint field.
    fp.fingerprint();
  }
}
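
// For instance (illustrative): a method body containing
//     aload_0; getfield #2; areturn
// is rewritten in place (opcode bytes only, operands untouched) to
//     _nofast_aload_0; _nofast_getfield #2; areturn
// so that RewriteBytecodes/RewriteFrequentPairs will leave the archived
// ConstMethod unmodified at run time.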

class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  GrowableArray<MemRegion> *_closed_archive_heap_regions;
  GrowableArray<MemRegion> *_open_archive_heap_regions;

  GrowableArray<ArchiveHeapOopmapInfo> *_closed_archive_heap_oopmaps;
  GrowableArray<ArchiveHeapOopmapInfo> *_open_archive_heap_oopmaps;

  void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                 GrowableArray<ArchiveHeapOopmapInfo>* oopmaps);
  void dump_shared_symbol_table(GrowableArray<Symbol*>* symbols) {
    log_info(cds)("Dumping symbol table ...");
    SymbolTable::write_to_archive(symbols);
  }
  char* dump_read_only_tables();
  void print_region_stats(FileMapInfo* map_info);
  void print_bitmap_region_stats(size_t size, size_t total_size);
  void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                               const char *name, size_t total_size);
  void relocate_to_requested_base_address(CHeapBitMap* ptrmap);

public:

  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit();   // outline because gdb sucks
  bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace

class StaticArchiveBuilder : public ArchiveBuilder {
public:
  StaticArchiveBuilder(DumpRegion* rw_region, DumpRegion* ro_region)
    : ArchiveBuilder(rw_region, ro_region) {
    _alloc_bottom = address(SharedBaseAddress);
    _buffer_to_target_delta = 0;
  }

  virtual void iterate_roots(MetaspaceClosure* it, bool is_relocating_pointers) {
    FileMapInfo::metaspace_pointers_do(it, false);
    SystemDictionaryShared::dumptime_classes_do(it);
    Universe::metaspace_pointers_do(it);
    vmSymbols::metaspace_pointers_do(it);

    // The above code should find all the symbols that are referenced by the
    // archived classes. We just need to add the extra symbols which
    // may not be used by any of the archived classes -- these are usually
    // symbols that we anticipate to be used at run time, so we can store
    // them in the RO region, to be shared across multiple processes.
    if (_extra_symbols != NULL) {
      for (int i = 0; i < _extra_symbols->length(); i++) {
        it->push(_extra_symbols->adr_at(i));
      }
    }
  }
};

char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
  ArchiveBuilder::OtherROAllocMark mark;

  SystemDictionaryShared::write_to_archive();

  // Write the other data to the output array.
  char* start = _ro_region.top();
  WriteClosure wc(&_ro_region);
  MetaspaceShared::serialize(&wc);

  // Write the bitmaps for patching the archive heap regions
  _closed_archive_heap_oopmaps = NULL;
  _open_archive_heap_oopmaps = NULL;
  dump_archive_heap_oopmaps();

  return start;
}

void VM_PopulateDumpSharedSpace::relocate_to_requested_base_address(CHeapBitMap* ptrmap) {
  intx addr_delta = MetaspaceShared::final_delta();
  if (addr_delta == 0) {
    ArchivePtrMarker::compact((address)SharedBaseAddress, (address)_ro_region.top());
  } else {
    // We are not able to reserve space at MetaspaceShared::requested_base_address() (due to ASLR).
    // This means that the current content of the archive is based on a random
    // address. Let's relocate all the pointers, so that it can be mapped to
    // MetaspaceShared::requested_base_address() without runtime relocation.
    //
    // Note: both the base and dynamic archive are written with
    // FileMapHeader::_requested_base_address == MetaspaceShared::requested_base_address()

    // Patch all pointers that are marked by ptrmap within this region,
    // where we have just dumped all the metaspace data.
    address patch_base = (address)SharedBaseAddress;
    address patch_end  = (address)_ro_region.top();
    size_t size = patch_end - patch_base;

    // the current value of the pointers to be patched must be within this
    // range (i.e., must point to valid metaspace objects)
    address valid_old_base = patch_base;
    address valid_old_end  = patch_end;

    // after patching, the pointers must point inside this range
    // (the requested location of the archive, as mapped at runtime).
    address valid_new_base = (address)MetaspaceShared::requested_base_address();
    address valid_new_end  = valid_new_base + size;

    log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT " ] to "
                   "[" INTPTR_FORMAT " - " INTPTR_FORMAT " ]", p2i(patch_base), p2i(patch_end),
                   p2i(valid_new_base), p2i(valid_new_end));

    SharedDataRelocator<true> patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
                                      valid_new_base, valid_new_end, addr_delta, ptrmap);
    ptrmap->iterate(&patcher);
    ArchivePtrMarker::compact(patcher.max_non_null_offset());
  }
}
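
// Worked example (hypothetical addresses): if ASLR placed the dump-time
// reservation at 0x7f0000000000 while requested_base_address() is
// 0x800000000, then addr_delta = 0x800000000 - 0x7f0000000000 (negative
// here), and every pointer p marked in ptrmap is rewritten to p + addr_delta,
// so the archive can later be mapped at 0x800000000 and used without any
// runtime relocation.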

void VM_PopulateDumpSharedSpace::doit() {
  HeapShared::run_full_gc_in_vm_thread();
  CHeapBitMap ptrmap;
  MetaspaceShared::initialize_ptr_marker(&ptrmap);

  // We should no longer allocate anything from the metaspace, because:
  //
  // (1) Metaspace::allocate might trigger GC if we have run out of
  //     committed metaspace, but we can't GC because we're running
  //     in the VM thread.
  // (2) ArchiveBuilder needs to work with a stable set of MetaspaceObjs.
  Metaspace::freeze();
  DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);

  Thread* THREAD = VMThread::vm_thread();

  FileMapInfo::check_nonempty_dir_in_shared_path_table();

  NOT_PRODUCT(SystemDictionary::verify();)
  // The following guarantee is meant to ensure that no loader constraints
  // exist yet, since the constraints table is not shared.  This becomes
  // more important now that we don't re-initialize vtables/itables for
  // shared classes at runtime, where constraints were previously created.
  guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
            "loader constraints are not saved");
  guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
            "placeholders are not saved");

  // At this point, many classes have been loaded.
  // Gather the SystemDictionary classes into a global array so we don't have
  // to walk the SystemDictionary again.
  SystemDictionaryShared::check_excluded_classes();

  StaticArchiveBuilder builder(&_rw_region, &_ro_region);
  builder.set_current_dump_space(&_mc_region);
  builder.gather_klasses_and_symbols();
  _global_klass_objects = builder.klasses();

  builder.gather_source_objs();

  CppVtables::allocate_cloned_cpp_vtptrs();
  char* cloned_vtables = _mc_region.top();
  CppVtables::allocate_cpp_vtable_clones();

  {
    _mc_region.pack(&_rw_region);
    builder.set_current_dump_space(&_rw_region);
    builder.dump_rw_region();
#if INCLUDE_CDS_JAVA_HEAP
    if (MetaspaceShared::use_full_module_graph()) {
      // Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
      char* start = _rw_region.top();
      ClassLoaderDataShared::allocate_archived_tables();
      ArchiveBuilder::alloc_stats()->record_modules(_rw_region.top() - start, /*read_only*/false);
    }
#endif
  }
  {
    _rw_region.pack(&_ro_region);
    builder.set_current_dump_space(&_ro_region);
    builder.dump_ro_region();
#if INCLUDE_CDS_JAVA_HEAP
    if (MetaspaceShared::use_full_module_graph()) {
      char* start = _ro_region.top();
      ClassLoaderDataShared::init_archived_tables();
      ArchiveBuilder::alloc_stats()->record_modules(_ro_region.top() - start, /*read_only*/true);
    }
#endif
  }
  builder.relocate_pointers();

  dump_shared_symbol_table(builder.symbols());

  // Dump supported java heap objects
  _closed_archive_heap_regions = NULL;
  _open_archive_heap_regions = NULL;
  dump_java_heap_objects();

  builder.relocate_well_known_klasses();

  log_info(cds)("Make classes shareable");
  builder.make_klasses_shareable();

  char* serialized_data = dump_read_only_tables();
  _ro_region.pack();

  // The vtable clones contain addresses of the current process.
  // We don't want to write these addresses into the archive. Same for i2i buffer.
  CppVtables::zero_cpp_vtable_clones_for_writing();
  memset(MetaspaceShared::i2i_entry_code_buffers(), 0,
         MetaspaceShared::i2i_entry_code_buffers_size());

  // relocate the data so that it can be mapped to MetaspaceShared::requested_base_address()
  // without runtime relocation.
  relocate_to_requested_base_address(&ptrmap);

  // Create and write the archive file that maps the shared spaces.

  FileMapInfo* mapinfo = new FileMapInfo(true);
  mapinfo->populate_header(os::vm_allocation_granularity());
  mapinfo->set_serialized_data(serialized_data);
  mapinfo->set_cloned_vtables(cloned_vtables);
  mapinfo->set_i2i_entry_code_buffers(MetaspaceShared::i2i_entry_code_buffers(),
                                      MetaspaceShared::i2i_entry_code_buffers_size());
  mapinfo->open_for_write();
  MetaspaceShared::write_core_archive_regions(mapinfo, _closed_archive_heap_oopmaps, _open_archive_heap_oopmaps);
  _total_closed_archive_region_size = mapinfo->write_archive_heap_regions(
                                        _closed_archive_heap_regions,
                                        _closed_archive_heap_oopmaps,
                                        MetaspaceShared::first_closed_archive_heap_region,
                                        MetaspaceShared::max_closed_archive_heap_region);
  _total_open_archive_region_size = mapinfo->write_archive_heap_regions(
                                        _open_archive_heap_regions,
                                        _open_archive_heap_oopmaps,
                                        MetaspaceShared::first_open_archive_heap_region,
                                        MetaspaceShared::max_open_archive_heap_region);

  mapinfo->set_final_requested_base((char*)MetaspaceShared::requested_base_address());
  mapinfo->set_header_crc(mapinfo->compute_header_crc());
  mapinfo->write_header();
  print_region_stats(mapinfo);
  mapinfo->close();

  if (log_is_enabled(Info, cds)) {
    builder.print_stats(int(_ro_region.used()), int(_rw_region.used()), int(_mc_region.used()));
  }

  if (PrintSystemDictionaryAtExit) {
    SystemDictionary::print();
  }

  if (AllowArchivingWithJavaAgent) {
    warning("This archive was created with AllowArchivingWithJavaAgent. It should be used "
            "for testing purposes only and should not be used in a production environment");
  }

  // There may be pending VM operations. We have changed some global states
  // (such as SystemDictionary::_well_known_klasses) that may cause these VM operations
  // to fail. For safety, forget these operations and exit the VM directly.
  vm_direct_exit(0);
}

void VM_PopulateDumpSharedSpace::print_region_stats(FileMapInfo *map_info) {
  // Print statistics of all the regions
  const size_t bitmap_used = map_info->space_at(MetaspaceShared::bm)->used();
  const size_t bitmap_reserved = map_info->space_at(MetaspaceShared::bm)->used_aligned();
  const size_t total_reserved = _ro_region.reserved()  + _rw_region.reserved() +
                                _mc_region.reserved()  +
                                bitmap_reserved +
                                _total_closed_archive_region_size +
                                _total_open_archive_region_size;
  const size_t total_bytes = _ro_region.used()  + _rw_region.used() +
                             _mc_region.used()  +
                             bitmap_used +
                             _total_closed_archive_region_size +
                             _total_open_archive_region_size;
  const double total_u_perc = percent_of(total_bytes, total_reserved);

  _mc_region.print(total_reserved);
  _rw_region.print(total_reserved);
  _ro_region.print(total_reserved);
  print_bitmap_region_stats(bitmap_used, total_reserved);
  print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved);
  print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);

  log_debug(cds)("total    : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
                 total_bytes, total_reserved, total_u_perc);
}

void VM_PopulateDumpSharedSpace::print_bitmap_region_stats(size_t size, size_t total_size) {
  log_debug(cds)("bm  space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used]",
                 size, size/double(total_size)*100.0, size);
}

void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                                                         const char *name, size_t total_size) {
  int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
  for (int i = 0; i < arr_len; i++) {
    char* start = (char*)heap_mem->at(i).start();
    size_t size = heap_mem->at(i).byte_size();
    char* top = start + size;
    log_debug(cds)("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
                   name, i, size, size/double(total_size)*100.0, size, p2i(start));
  }
}

void MetaspaceShared::write_core_archive_regions(FileMapInfo* mapinfo,
                                                 GrowableArray<ArchiveHeapOopmapInfo>* closed_oopmaps,
                                                 GrowableArray<ArchiveHeapOopmapInfo>* open_oopmaps) {
  // Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with
  // MetaspaceShared::n_regions (internal to hotspot).
  assert(NUM_CDS_REGIONS == MetaspaceShared::n_regions, "sanity");

  // mc contains the trampoline code for method entries, which are patched at run time,
  // so it needs to be read/write.
  write_region(mapinfo, mc, &_mc_region, /*read_only=*/false, /*allow_exec=*/true);
  write_region(mapinfo, rw, &_rw_region, /*read_only=*/false, /*allow_exec=*/false);
  write_region(mapinfo, ro, &_ro_region, /*read_only=*/true,  /*allow_exec=*/false);
  mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), closed_oopmaps, open_oopmaps);
}

void MetaspaceShared::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec) {
  mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
}

// Update a Java object to point its Klass* to the new location after
// shared archive has been compacted.
void MetaspaceShared::relocate_klass_ptr(oop o) {
  assert(DumpSharedSpaces, "sanity");
  Klass* k = ArchiveBuilder::get_relocated_klass(o->klass());
  o->set_klass(k);
}

Klass* MetaspaceShared::get_relocated_klass(Klass *k, bool is_final) {
  assert(DumpSharedSpaces, "sanity");
  k = ArchiveBuilder::get_relocated_klass(k);
  if (is_final) {
    k = (Klass*)(address(k) + final_delta());
  }
  return k;
}
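
// Worked example (hypothetical numbers): if ArchiveBuilder's copy of k sits
// at 0x800000100000 and final_delta() == 0x10000000, then
// get_relocated_klass(k, false) returns the buffered address 0x800000100000,
// while get_relocated_klass(k, true) returns 0x800010100000, the address the
// Klass will have once the archive is mapped at the requested base.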

static GrowableArray<ClassLoaderData*>* _loaded_cld = NULL;

class CollectCLDClosure : public CLDClosure {
  void do_cld(ClassLoaderData* cld) {
    if (_loaded_cld == NULL) {
      _loaded_cld = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<ClassLoaderData*>(10, mtClassShared);
    }
    if (!cld->is_unloading()) {
      cld->inc_keep_alive();
      _loaded_cld->append(cld);
    }
  }
};

bool MetaspaceShared::linking_required(InstanceKlass* ik) {
  // For dynamic CDS dump, only link classes loaded by the builtin class loaders.
  return DumpSharedSpaces ? true : !ik->is_shared_unregistered_class();
}

bool MetaspaceShared::link_class_for_cds(InstanceKlass* ik, TRAPS) {
  // Link the class to cause the bytecodes to be rewritten and the
  // cpcache to be created. Class verification is done according
  // to -Xverify setting.
  bool res = MetaspaceShared::try_link_class(ik, THREAD);
  guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");

  if (DumpSharedSpaces) {
    // The following function is used to resolve all Strings in the statically
    // dumped classes to archive all the Strings. The archive heap is not supported
    // for the dynamic archive.
    ik->constants()->resolve_class_constants(THREAD);
  }
  return res;
}

void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) {
  // Collect all loaded ClassLoaderData.
  CollectCLDClosure collect_cld;
  {
    MutexLocker lock(ClassLoaderDataGraph_lock);
    ClassLoaderDataGraph::loaded_cld_do(&collect_cld);
  }

  while (true) {
    bool has_linked = false;
    for (int i = 0; i < _loaded_cld->length(); i++) {
      ClassLoaderData* cld = _loaded_cld->at(i);
      for (Klass* klass = cld->klasses(); klass != NULL; klass = klass->next_link()) {
        if (klass->is_instance_klass()) {
          InstanceKlass* ik = InstanceKlass::cast(klass);
          if (linking_required(ik)) {
            has_linked |= link_class_for_cds(ik, THREAD);
          }
        }
      }
    }

    if (!has_linked) {
      break;
    }
    // Class linking includes verification which may load more classes.
    // Keep scanning until we have linked no more classes.
  }

  for (int i = 0; i < _loaded_cld->length(); i++) {
    ClassLoaderData* cld = _loaded_cld->at(i);
    cld->dec_keep_alive();
  }
}

void MetaspaceShared::prepare_for_dumping() {
  Arguments::check_unsupported_dumping_properties();
  ClassLoader::initialize_shared_path();
}

// Preload classes from a list, populate the shared spaces and dump to a
// file.
void MetaspaceShared::preload_and_dump(TRAPS) {
  { TraceTime timer("Dump Shared Spaces", TRACETIME_LOG(Info, startuptime));
    ResourceMark rm(THREAD);
    char class_list_path_str[JVM_MAXPATHLEN];
    // Preload classes to be shared.
    const char* class_list_path;
    if (SharedClassListFile == NULL) {
      // Construct the path to the class list (in jre/lib)
      // Walk up two directories from the location of the VM and
      // optionally tack on "lib" (depending on platform)
      os::jvm_path(class_list_path_str, sizeof(class_list_path_str));
      for (int i = 0; i < 3; i++) {
        char *end = strrchr(class_list_path_str, *os::file_separator());
        if (end != NULL) *end = '\0';
      }
      int class_list_path_len = (int)strlen(class_list_path_str);
      if (class_list_path_len >= 3) {
        if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
          if (class_list_path_len < JVM_MAXPATHLEN - 4) {
            jio_snprintf(class_list_path_str + class_list_path_len,
                         sizeof(class_list_path_str) - class_list_path_len,
                         "%slib", os::file_separator());
            class_list_path_len += 4;
          }
        }
      }
      if (class_list_path_len < JVM_MAXPATHLEN - 10) {
        jio_snprintf(class_list_path_str + class_list_path_len,
                     sizeof(class_list_path_str) - class_list_path_len,
                     "%sclasslist", os::file_separator());
      }
      class_list_path = class_list_path_str;
    } else {
      class_list_path = SharedClassListFile;
    }

    log_info(cds)("Loading classes to share ...");
    _has_error_classes = false;
    int class_count = preload_classes(class_list_path, THREAD);
    if (ExtraSharedClassListFile) {
      class_count += preload_classes(ExtraSharedClassListFile, THREAD);
    }
    log_info(cds)("Loading classes to share: done.");

    log_info(cds)("Shared spaces: preloaded %d classes", class_count);

    if (SharedArchiveConfigFile) {
      log_info(cds)("Reading extra data from %s ...", SharedArchiveConfigFile);
      read_extra_data(SharedArchiveConfigFile, THREAD);
    }
    log_info(cds)("Reading extra data: done.");

    HeapShared::init_for_dumping(THREAD);

    // exercise the manifest processing code to ensure classes used by CDS are always archived
    SystemDictionaryShared::create_jar_manifest("Manifest-Version: 1.0\n", strlen("Manifest-Version: 1.0\n"), THREAD);
    // Rewrite and link classes
    log_info(cds)("Rewriting and linking classes ...");

    // Link any classes which got missed. This would happen if we have loaded classes that
    // were not explicitly specified in the classlist. E.g., if an interface implemented by class K
    // fails verification, all other interfaces that were not specified in the classlist but
    // are implemented by K are not verified.
    link_and_cleanup_shared_classes(CATCH);
    log_info(cds)("Rewriting and linking classes: done");

#if INCLUDE_CDS_JAVA_HEAP
    if (use_full_module_graph()) {
      HeapShared::reset_archived_object_states(THREAD);
    }
#endif

    VM_PopulateDumpSharedSpace op;
    MutexLocker ml(THREAD, HeapShared::is_heap_object_archiving_allowed() ?
                   Heap_lock : NULL);     // needed by HeapShared::run_gc()
    VMThread::execute(&op);
  }
}


int MetaspaceShared::preload_classes(const char* class_list_path, TRAPS) {
  ClassListParser parser(class_list_path);
  int class_count = 0;

  while (parser.parse_one_line()) {
    Klass* klass = parser.load_current_class(THREAD);
    if (HAS_PENDING_EXCEPTION) {
      if (klass == NULL &&
          (PENDING_EXCEPTION->klass()->name() == vmSymbols::java_lang_ClassNotFoundException())) {
        // print a warning only when the pending exception is class not found
        log_warning(cds)("Preload Warning: Cannot find %s", parser.current_class_name());
      }
      CLEAR_PENDING_EXCEPTION;
    }
    if (klass != NULL) {
      if (log_is_enabled(Trace, cds)) {
        ResourceMark rm(THREAD);
        log_trace(cds)("Shared spaces preloaded: %s", klass->external_name());
      }

      if (klass->is_instance_klass()) {
        InstanceKlass* ik = InstanceKlass::cast(klass);

        // Link the class to cause the bytecodes to be rewritten and the
        // cpcache to be created. The linking is done as soon as classes
        // are loaded in order that the related data structures (klass and
        // cpCache) are located together.
        try_link_class(ik, THREAD);
        guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
      }

      class_count++;
    }
  }

  return class_count;
}
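
// In its simplest form, a classlist (-XX:SharedClassListFile) is plain text
// with one class name per line in internal (slash-separated) form, e.g.:
//     java/lang/Object
//     java/util/ArrayList
// Each line is consumed by ClassListParser::parse_one_line() above.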

// Returns true if the class's status has changed
bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
  Arguments::assert_is_dumping_archive();
  if (ik->is_loaded() && !ik->is_linked() &&
      !SystemDictionaryShared::has_class_failed_verification(ik)) {
    bool saved = BytecodeVerificationLocal;
    if (ik->is_shared_unregistered_class() && ik->class_loader() == NULL) {
      // The verification decision is based on BytecodeVerificationRemote
      // for non-system classes. Since we are using the NULL classloader
      // to load non-system classes for customized class loaders during dumping,
      // we need to temporarily change BytecodeVerificationLocal to be the same as
      // BytecodeVerificationRemote. Note this can cause the parent system
      // classes to also be verified. The extra overhead is acceptable during
      // dumping.
      BytecodeVerificationLocal = BytecodeVerificationRemote;
    }
    ik->link_class(THREAD);
    if (HAS_PENDING_EXCEPTION) {
      ResourceMark rm(THREAD);
      log_warning(cds)("Preload Warning: Verification failed for %s",
                       ik->external_name());
      CLEAR_PENDING_EXCEPTION;
      SystemDictionaryShared::set_class_has_failed_verification(ik);
      _has_error_classes = true;
    }
    BytecodeVerificationLocal = saved;
    return true;
  } else {
    return false;
  }
}

#if INCLUDE_CDS_JAVA_HEAP
void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
  // Find all the interned strings that should be dumped.
  int i;
  for (i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->add_dumped_interned_strings();
    }
  }
  if (_extra_interned_strings != NULL) {
    for (i = 0; i < _extra_interned_strings->length(); i++) {
      OopHandle string = _extra_interned_strings->at(i);
      HeapShared::add_to_dumped_interned_strings(string.resolve());
    }
  }

  // Each of the closed and open archive heap spaces has at most two regions.
  // See FileMapInfo::write_archive_heap_regions() for details.
  _closed_archive_heap_regions = new GrowableArray<MemRegion>(2);
  _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
  HeapShared::archive_java_heap_objects(_closed_archive_heap_regions,
                                        _open_archive_heap_regions);
  ArchiveBuilder::OtherROAllocMark mark;
  HeapShared::write_subgraph_info_table();
}

void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps() {
  if (HeapShared::is_heap_object_archiving_allowed()) {
    _closed_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
    dump_archive_heap_oopmaps(_closed_archive_heap_regions, _closed_archive_heap_oopmaps);

    _open_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
    dump_archive_heap_oopmaps(_open_archive_heap_regions, _open_archive_heap_oopmaps);
  }
}

void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                                           GrowableArray<ArchiveHeapOopmapInfo>* oopmaps) {
  for (int i = 0; i < regions->length(); i++) {
    ResourceBitMap oopmap = HeapShared::calculate_oopmap(regions->at(i));
    size_t size_in_bits = oopmap.size();
    size_t size_in_bytes = oopmap.size_in_bytes();
    uintptr_t* buffer = (uintptr_t*)NEW_C_HEAP_ARRAY(char, size_in_bytes, mtInternal);
    oopmap.write_to(buffer, size_in_bytes);
    log_info(cds, heap)("Oopmap = " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for heap region "
                        INTPTR_FORMAT " (" SIZE_FORMAT_W(8) " bytes)",
                        p2i(buffer), size_in_bytes,
                        p2i(regions->at(i).start()), regions->at(i).byte_size());

    ArchiveHeapOopmapInfo info;
    info._oopmap = (address)buffer;
    info._oopmap_size_in_bits = size_in_bits;
    info._oopmap_size_in_bytes = size_in_bytes;
    oopmaps->append(info);
  }
}
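
// Sizing sketch for the oopmaps above (illustrative, assuming compressed
// oops, i.e. one bit per 4-byte slot): a 1M heap region has 256K potential
// oop positions, so its oopmap is 256K bits = 32K bytes; each set bit marks
// a location that must be patched if the region is mapped at an address
// other than the one it was dumped at.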
1202 #endif // INCLUDE_CDS_JAVA_HEAP
1203 
1204 void MetaspaceShared::set_shared_metaspace_range(void* base, void *static_top, void* top) {
1205   assert(base <= static_top && static_top <= top, "must be");
1206   _shared_metaspace_static_top = static_top;
1207   MetaspaceObj::set_shared_metaspace_range(base, top);
1208 }
1209 
1210 // Return true if given address is in the misc data region
1211 bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {
1212   return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx);
1213 }
1214 
1215 bool MetaspaceShared::is_in_trampoline_frame(address addr) {
1216   if (UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc)) {
1217     return true;
1218   }
1219   return false;
1220 }
1221 
1222 bool MetaspaceShared::is_shared_dynamic(void* p) {
1223   if ((p < MetaspaceObj::shared_metaspace_top()) &&
1224       (p >= _shared_metaspace_static_top)) {
1225     return true;
1226   } else {
1227     return false;
1228   }
1229 }
1230 
1231 void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
1232   assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");
1233   MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
1234 
1235   FileMapInfo* static_mapinfo = open_static_archive();
1236   FileMapInfo* dynamic_mapinfo = NULL;
1237 
1238   if (static_mapinfo != NULL) {
1239     dynamic_mapinfo = open_dynamic_archive();
1240 
1241     // First try to map at the requested address
1242     result = map_archives(static_mapinfo, dynamic_mapinfo, true);
1243     if (result == MAP_ARCHIVE_MMAP_FAILURE) {
1244       // Mapping has failed (probably due to ASLR). Let's map at an address chosen
1245       // by the OS.
1246       log_info(cds)("Try to map archive(s) at an alternative address");
1247       result = map_archives(static_mapinfo, dynamic_mapinfo, false);
1248     }
1249   }
1250 
1251   if (result == MAP_ARCHIVE_SUCCESS) {
1252     bool dynamic_mapped = (dynamic_mapinfo != NULL && dynamic_mapinfo->is_mapped());
1253     char* cds_base = static_mapinfo->mapped_base();
1254     char* cds_end =  dynamic_mapped ? dynamic_mapinfo->mapped_end() : static_mapinfo->mapped_end();
1255     set_shared_metaspace_range(cds_base, static_mapinfo->mapped_end(), cds_end);
1256     _relocation_delta = static_mapinfo->relocation_delta();
1257     if (dynamic_mapped) {
1258       FileMapInfo::set_shared_path_table(dynamic_mapinfo);
1259     } else {
1260       FileMapInfo::set_shared_path_table(static_mapinfo);
1261     }
1262     _requested_base_address = static_mapinfo->requested_base_address();
1263   } else {
1264     set_shared_metaspace_range(NULL, NULL, NULL);
1265     UseSharedSpaces = false;
1266     FileMapInfo::fail_continue("Unable to map shared spaces");
1267     if (PrintSharedArchiveAndExit) {
1268       vm_exit_during_initialization("Unable to use shared archive.");
1269     }
1270   }
1271 
1272   if (static_mapinfo != NULL && !static_mapinfo->is_mapped()) {
1273     delete static_mapinfo;
1274   }
1275   if (dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped()) {
1276     delete dynamic_mapinfo;
1277   }
1278 }
1279 
1280 FileMapInfo* MetaspaceShared::open_static_archive() {
1281   FileMapInfo* mapinfo = new FileMapInfo(true);
1282   if (!mapinfo->initialize()) {
1283     delete(mapinfo);
1284     return NULL;
1285   }
1286   return mapinfo;
1287 }
1288 
1289 FileMapInfo* MetaspaceShared::open_dynamic_archive() {
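  // When this JVM process is itself dumping a dynamic archive
  // (-XX:ArchiveClassesAtExit), only the static archive is mapped; there is
  // no existing dynamic archive to load on top of it.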
1290   if (DynamicDumpSharedSpaces) {
1291     return NULL;
1292   }
1293   if (Arguments::GetSharedDynamicArchivePath() == NULL) {
1294     return NULL;
1295   }
1296 
1297   FileMapInfo* mapinfo = new FileMapInfo(false);
1298   if (!mapinfo->initialize()) {
1299     delete(mapinfo);
1300     return NULL;
1301   }
1302   return mapinfo;
1303 }
1304 
1305 // use_requested_addr:
1306 //  true  = map at FileMapHeader::_requested_base_address
//  false = map at an alternative address picked by the OS.
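//
// ArchiveRelocationMode (diagnostic flag; a sketch of the assumed semantics,
//  matching the checks below):
//   0 - map at the requested address; fall back to an OS-chosen one (default)
//   1 - always map at an OS-chosen alternative address (forces relocation)
//   2 - only map at the requested address; never relocate
//  e.g. java -XX:+UnlockDiagnosticVMOptions -XX:ArchiveRelocationMode=1 ...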
1308 MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, FileMapInfo* dynamic_mapinfo,
1309                                                bool use_requested_addr) {
1310   if (use_requested_addr && static_mapinfo->requested_base_address() == NULL) {
1311     log_info(cds)("Archive(s) were created with -XX:SharedBaseAddress=0. Always map at os-selected address.");
1312     return MAP_ARCHIVE_MMAP_FAILURE;
1313   }
1314 
1315   PRODUCT_ONLY(if (ArchiveRelocationMode == 1 && use_requested_addr) {
1316       // For product build only -- this is for benchmarking the cost of doing relocation.
1317       // For debug builds, the check is done below, after reserving the space, for better test coverage
1318       // (see comment below).
1319       log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address");
1320       return MAP_ARCHIVE_MMAP_FAILURE;
1321     });
1322 
1323   if (ArchiveRelocationMode == 2 && !use_requested_addr) {
1324     log_info(cds)("ArchiveRelocationMode == 2: never map archive(s) at an alternative address");
1325     return MAP_ARCHIVE_MMAP_FAILURE;
  }
1327 
1328   if (dynamic_mapinfo != NULL) {
1329     // Ensure that the OS won't be able to allocate new memory spaces between the two
    // archives, or else it would mess up the simple comparison in MetaspaceObj::is_shared().
1331     assert(static_mapinfo->mapping_end_offset() == dynamic_mapinfo->mapping_base_offset(), "no gap");
1332   }
1333 
1334   ReservedSpace archive_space_rs, class_space_rs;
1335   MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
1336   char* mapped_base_address = reserve_address_space_for_archives(static_mapinfo, dynamic_mapinfo,
1337                                                                  use_requested_addr, archive_space_rs,
1338                                                                  class_space_rs);
1339   if (mapped_base_address == NULL) {
1340     result = MAP_ARCHIVE_MMAP_FAILURE;
1341     log_debug(cds)("Failed to reserve spaces (use_requested_addr=%u)", (unsigned)use_requested_addr);
1342   } else {
1343 
1344 #ifdef ASSERT
1345     // Some sanity checks after reserving address spaces for archives
1346     //  and class space.
1347     assert(archive_space_rs.is_reserved(), "Sanity");
1348     if (Metaspace::using_class_space()) {
1349       // Class space must closely follow the archive space. Both spaces
1350       //  must be aligned correctly.
1351       assert(class_space_rs.is_reserved(),
1352              "A class space should have been reserved");
1353       assert(class_space_rs.base() >= archive_space_rs.end(),
1354              "class space should follow the cds archive space");
1355       assert(is_aligned(archive_space_rs.base(),
1356                         MetaspaceShared::reserved_space_alignment()),
1357              "Archive space misaligned");
1358       assert(is_aligned(class_space_rs.base(),
1359                         Metaspace::reserve_alignment()),
1360              "class space misaligned");
1361     }
1362 #endif // ASSERT
1363 
1364     log_debug(cds)("Reserved archive_space_rs     [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
1365                    p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size());
1366     log_debug(cds)("Reserved class_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
1367                    p2i(class_space_rs.base()), p2i(class_space_rs.end()), class_space_rs.size());
1368 
1369     if (MetaspaceShared::use_windows_memory_mapping()) {
1370       // We have now reserved address space for the archives, and will map in
1371       //  the archive files into this space.
1372       //
1373       // Special handling for Windows: on Windows we cannot map a file view
1374       //  into an existing memory mapping. So, we unmap the address range we
1375       //  just reserved again, which will make it available for mapping the
1376       //  archives.
      // Reserving this range was not for naught, however, since it makes
1378       //  us reasonably sure the address range is available.
1379       //
1380       // But still it may fail, since between unmapping the range and mapping
1381       //  in the archive someone else may grab the address space. Therefore
1382       //  there is a fallback in FileMap::map_region() where we just read in
      //  the archive files sequentially instead of mapping them in. We couple
1384       //  this with use_requested_addr, since we're going to patch all the
1385       //  pointers anyway so there's no benefit to mmap.
1386       if (use_requested_addr) {
1387         log_info(cds)("Windows mmap workaround: releasing archive space.");
1388         archive_space_rs.release();
1389       }
1390     }
1391     MapArchiveResult static_result = map_archive(static_mapinfo, mapped_base_address, archive_space_rs);
1392     MapArchiveResult dynamic_result = (static_result == MAP_ARCHIVE_SUCCESS) ?
1393                                      map_archive(dynamic_mapinfo, mapped_base_address, archive_space_rs) : MAP_ARCHIVE_OTHER_FAILURE;
1394 
1395     DEBUG_ONLY(if (ArchiveRelocationMode == 1 && use_requested_addr) {
1396       // This is for simulating mmap failures at the requested address. In
1397       //  debug builds, we do it here (after all archives have possibly been
1398       //  mapped), so we can thoroughly test the code for failure handling
1399       //  (releasing all allocated resource, etc).
1400       log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address");
1401       if (static_result == MAP_ARCHIVE_SUCCESS) {
1402         static_result = MAP_ARCHIVE_MMAP_FAILURE;
1403       }
1404       if (dynamic_result == MAP_ARCHIVE_SUCCESS) {
1405         dynamic_result = MAP_ARCHIVE_MMAP_FAILURE;
1406       }
1407     });
1408 
1409     if (static_result == MAP_ARCHIVE_SUCCESS) {
1410       if (dynamic_result == MAP_ARCHIVE_SUCCESS) {
1411         result = MAP_ARCHIVE_SUCCESS;
1412       } else if (dynamic_result == MAP_ARCHIVE_OTHER_FAILURE) {
1413         assert(dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped(), "must have failed");
1414         // No need to retry mapping the dynamic archive again, as it will never succeed
1415         // (bad file, etc) -- just keep the base archive.
1416         log_warning(cds, dynamic)("Unable to use shared archive. The top archive failed to load: %s",
1417                                   dynamic_mapinfo->full_path());
1418         result = MAP_ARCHIVE_SUCCESS;
        // TODO: we could give the unused space for the dynamic archive to class_space_rs, but there's no
1420         // easy API to do that right now.
1421       } else {
1422         result = MAP_ARCHIVE_MMAP_FAILURE;
1423       }
1424     } else if (static_result == MAP_ARCHIVE_OTHER_FAILURE) {
1425       result = MAP_ARCHIVE_OTHER_FAILURE;
1426     } else {
1427       result = MAP_ARCHIVE_MMAP_FAILURE;
1428     }
1429   }
1430 
1431   if (result == MAP_ARCHIVE_SUCCESS) {
1432     SharedBaseAddress = (size_t)mapped_base_address;
1433     LP64_ONLY({
1434         if (Metaspace::using_class_space()) {
1435           // Set up ccs in metaspace.
1436           Metaspace::initialize_class_space(class_space_rs);
1437 
1438           // Set up compressed Klass pointer encoding: the encoding range must
1439           //  cover both archive and class space.
1440           address cds_base = (address)static_mapinfo->mapped_base();
1441           address ccs_end = (address)class_space_rs.end();
1442           CompressedKlassPointers::initialize(cds_base, ccs_end - cds_base);
1443 
1444           // map_heap_regions() compares the current narrow oop and klass encodings
1445           // with the archived ones, so it must be done after all encodings are determined.
1446           static_mapinfo->map_heap_regions();
1447 
1448           // disable_full_module_graph(); // Disabled temporarily for JDK-8253081
1449         }
1450       });
1451     log_info(cds)("optimized module handling: %s", MetaspaceShared::use_optimized_module_handling() ? "enabled" : "disabled");
1452     log_info(cds)("full module graph: %s", MetaspaceShared::use_full_module_graph() ? "enabled" : "disabled");
1453   } else {
1454     unmap_archive(static_mapinfo);
1455     unmap_archive(dynamic_mapinfo);
1456     release_reserved_spaces(archive_space_rs, class_space_rs);
1457   }
1458 
1459   return result;
1460 }
1461 
1462 
1463 // This will reserve two address spaces suitable to house Klass structures, one
1464 //  for the cds archives (static archive and optionally dynamic archive) and
//  optionally one more for ccs.
1466 //
1467 // Since both spaces must fall within the compressed class pointer encoding
1468 //  range, they are allocated close to each other.
1469 //
1470 // Space for archives will be reserved first, followed by a potential gap,
1471 //  followed by the space for ccs:
1472 //
1473 // +-- Base address             A        B                     End
1474 // |                            |        |                      |
1475 // v                            v        v                      v
1476 // +-------------+--------------+        +----------------------+
1477 // | static arc  | [dyn. arch]  | [gap]  | compr. class space   |
1478 // +-------------+--------------+        +----------------------+
1479 //
1480 // (The gap may result from different alignment requirements between metaspace
1481 //  and CDS)
1482 //
1483 // If UseCompressedClassPointers is disabled, only one address space will be
1484 //  reserved:
1485 //
1486 // +-- Base address             End
1487 // |                            |
1488 // v                            v
1489 // +-------------+--------------+
1490 // | static arc  | [dyn. arch]  |
1491 // +-------------+--------------+
1492 //
// Base address: If use_archive_base_addr is true, the base address is
//  determined by the address stored in the static archive. If
//  use_archive_base_addr is false, the base address is determined by the
//  platform.
1497 //
1498 // If UseCompressedClassPointers=1, the range encompassing both spaces will be
1499 //  suitable to en/decode narrow Klass pointers: the base will be valid for
//  encoding, and the range [Base, End) will not surpass KlassEncodingMetaspaceMax.
1501 //
1502 // Return:
1503 //
1504 // - On success:
//    - archive_space_rs will be reserved and large enough to host the static
//      and, if needed, the dynamic archive: [Base, A).
1507 //      archive_space_rs.base and size will be aligned to CDS reserve
1508 //      granularity.
1509 //    - class_space_rs: If UseCompressedClassPointers=1, class_space_rs will
1510 //      be reserved. Its start address will be aligned to metaspace reserve
1511 //      alignment, which may differ from CDS alignment. It will follow the cds
1512 //      archive space, close enough such that narrow class pointer encoding
1513 //      covers both spaces.
1514 //      If UseCompressedClassPointers=0, class_space_rs remains unreserved.
1515 // - On error: NULL is returned and the spaces remain unreserved.
1516 char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_mapinfo,
1517                                                           FileMapInfo* dynamic_mapinfo,
1518                                                           bool use_archive_base_addr,
1519                                                           ReservedSpace& archive_space_rs,
1520                                                           ReservedSpace& class_space_rs) {
1521 
1522   address const base_address = (address) (use_archive_base_addr ? static_mapinfo->requested_base_address() : NULL);
1523   const size_t archive_space_alignment = MetaspaceShared::reserved_space_alignment();
1524 
1525   // Size and requested location of the archive_space_rs (for both static and dynamic archives)
1526   assert(static_mapinfo->mapping_base_offset() == 0, "Must be");
1527   size_t archive_end_offset  = (dynamic_mapinfo == NULL) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset();
1528   size_t archive_space_size = align_up(archive_end_offset, archive_space_alignment);
1529 
1530   // If a base address is given, it must have valid alignment and be suitable as encoding base.
1531   if (base_address != NULL) {
1532     assert(is_aligned(base_address, archive_space_alignment),
1533            "Archive base address invalid: " PTR_FORMAT ".", p2i(base_address));
1534     if (Metaspace::using_class_space()) {
1535       assert(CompressedKlassPointers::is_valid_base(base_address),
1536              "Archive base address invalid: " PTR_FORMAT ".", p2i(base_address));
1537     }
1538   }
1539 
1540   if (!Metaspace::using_class_space()) {
1541     // Get the simple case out of the way first:
1542     // no compressed class space, simple allocation.
1543     archive_space_rs = ReservedSpace(archive_space_size, archive_space_alignment,
1544                                      false /* bool large */, (char*)base_address);
1545     if (archive_space_rs.is_reserved()) {
1546       assert(base_address == NULL ||
1547              (address)archive_space_rs.base() == base_address, "Sanity");
1548       // Register archive space with NMT.
1549       MemTracker::record_virtual_memory_type(archive_space_rs.base(), mtClassShared);
1550       return archive_space_rs.base();
1551     }
1552     return NULL;
1553   }
1554 
1555 #ifdef _LP64
1556 
1557   // Complex case: two spaces adjacent to each other, both to be addressable
1558   //  with narrow class pointers.
1559   // We reserve the whole range spanning both spaces, then split that range up.
1560 
1561   const size_t class_space_alignment = Metaspace::reserve_alignment();
1562 
  // To simplify matters, let's assume that the metaspace alignment will always
  //  be equal to or a multiple of the archive alignment.
1565   assert(is_power_of_2(class_space_alignment) &&
1566                        is_power_of_2(archive_space_alignment) &&
1567                        class_space_alignment >= archive_space_alignment,
1568                        "Sanity");
1569 
1570   const size_t class_space_size = CompressedClassSpaceSize;
1571   assert(CompressedClassSpaceSize > 0 &&
1572          is_aligned(CompressedClassSpaceSize, class_space_alignment),
1573          "CompressedClassSpaceSize malformed: "
1574          SIZE_FORMAT, CompressedClassSpaceSize);
1575 
1576   const size_t ccs_begin_offset = align_up(base_address + archive_space_size,
1577                                            class_space_alignment) - base_address;
1578   const size_t gap_size = ccs_begin_offset - archive_space_size;
1579 
1580   const size_t total_range_size =
1581       align_up(archive_space_size + gap_size + class_space_size,
1582                os::vm_allocation_granularity());
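
  // Worked example (all numbers hypothetical): with
  //   base_address          = 0x0000000800000000
  //   archive_space_size    = 0x1230000 (aligned to a 64 KB archive alignment)
  //   class_space_alignment = 4 MB (0x400000)
  // then
  //   ccs_begin_offset = align_up(0x800000000 + 0x1230000, 0x400000) - 0x800000000
  //                    = 0x1400000
  //   gap_size         = 0x1400000 - 0x1230000 = 0x1d0000
  // so the class space starts 0x1d0000 bytes above the end of the archives,
  // and total_range_size covers both spaces plus the gap.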
1583 
1584   ReservedSpace total_rs;
1585   if (base_address != NULL) {
1586     // Reserve at the given archive base address, or not at all.
1587     total_rs = ReservedSpace(total_range_size, archive_space_alignment,
1588                              false /* bool large */, (char*) base_address);
1589   } else {
1590     // Reserve at any address, but leave it up to the platform to choose a good one.
1591     total_rs = Metaspace::reserve_address_space_for_compressed_classes(total_range_size);
1592   }
1593 
1594   if (!total_rs.is_reserved()) {
1595     return NULL;
1596   }
1597 
1598   // Paranoid checks:
1599   assert(base_address == NULL || (address)total_rs.base() == base_address,
1600          "Sanity (" PTR_FORMAT " vs " PTR_FORMAT ")", p2i(base_address), p2i(total_rs.base()));
1601   assert(is_aligned(total_rs.base(), archive_space_alignment), "Sanity");
1602   assert(total_rs.size() == total_range_size, "Sanity");
1603   assert(CompressedKlassPointers::is_valid_base((address)total_rs.base()), "Sanity");
1604 
1605   // Now split up the space into ccs and cds archive. For simplicity, just leave
1606   //  the gap reserved at the end of the archive space.
1607   archive_space_rs = total_rs.first_part(ccs_begin_offset,
1608                                          (size_t)os::vm_allocation_granularity(),
1609                                          /*split=*/true);
1610   class_space_rs = total_rs.last_part(ccs_begin_offset);
1611 
1612   assert(is_aligned(archive_space_rs.base(), archive_space_alignment), "Sanity");
1613   assert(is_aligned(archive_space_rs.size(), archive_space_alignment), "Sanity");
1614   assert(is_aligned(class_space_rs.base(), class_space_alignment), "Sanity");
1615   assert(is_aligned(class_space_rs.size(), class_space_alignment), "Sanity");
1616 
1617   // NMT: fix up the space tags
1618   MemTracker::record_virtual_memory_type(archive_space_rs.base(), mtClassShared);
1619   MemTracker::record_virtual_memory_type(class_space_rs.base(), mtClass);
1620 
1621   return archive_space_rs.base();
1622 
1623 #else
1624   ShouldNotReachHere();
1625   return NULL;
1626 #endif
1627 
1628 }
1629 
1630 void MetaspaceShared::release_reserved_spaces(ReservedSpace& archive_space_rs,
1631                                               ReservedSpace& class_space_rs) {
1632   if (archive_space_rs.is_reserved()) {
1633     log_debug(cds)("Released shared space (archive) " INTPTR_FORMAT, p2i(archive_space_rs.base()));
1634     archive_space_rs.release();
1635   }
1636   if (class_space_rs.is_reserved()) {
1637     log_debug(cds)("Released shared space (classes) " INTPTR_FORMAT, p2i(class_space_rs.base()));
1638     class_space_rs.release();
1639   }
1640 }
1641 
1642 static int archive_regions[]  = {MetaspaceShared::mc,
1643                                  MetaspaceShared::rw,
1644                                  MetaspaceShared::ro};
static int archive_regions_count = sizeof(archive_regions) / sizeof(archive_regions[0]);
1646 
1647 MapArchiveResult MetaspaceShared::map_archive(FileMapInfo* mapinfo, char* mapped_base_address, ReservedSpace rs) {
1648   assert(UseSharedSpaces, "must be runtime");
1649   if (mapinfo == NULL) {
1650     return MAP_ARCHIVE_SUCCESS; // The dynamic archive has not been specified. No error has happened -- trivially succeeded.
1651   }
1652 
1653   mapinfo->set_is_mapped(false);
1654 
1655   if (mapinfo->alignment() != (size_t)os::vm_allocation_granularity()) {
1656     log_error(cds)("Unable to map CDS archive -- os::vm_allocation_granularity() expected: " SIZE_FORMAT
1657                    " actual: %d", mapinfo->alignment(), os::vm_allocation_granularity());
1658     return MAP_ARCHIVE_OTHER_FAILURE;
1659   }
1660 
1661   MapArchiveResult result =
1662     mapinfo->map_regions(archive_regions, archive_regions_count, mapped_base_address, rs);
1663 
1664   if (result != MAP_ARCHIVE_SUCCESS) {
1665     unmap_archive(mapinfo);
1666     return result;
1667   }
1668 
1669   if (!mapinfo->validate_shared_path_table()) {
1670     unmap_archive(mapinfo);
1671     return MAP_ARCHIVE_OTHER_FAILURE;
1672   }
1673 
1674   mapinfo->set_is_mapped(true);
1675   return MAP_ARCHIVE_SUCCESS;
1676 }
1677 
1678 void MetaspaceShared::unmap_archive(FileMapInfo* mapinfo) {
1679   assert(UseSharedSpaces, "must be runtime");
1680   if (mapinfo != NULL) {
1681     mapinfo->unmap_regions(archive_regions, archive_regions_count);
1682     mapinfo->set_is_mapped(false);
1683   }
1684 }
1685 
// Read the miscellaneous data from the shared file, and
// deserialize it into its various destinations.
1688 
1689 void MetaspaceShared::initialize_shared_spaces() {
1690   FileMapInfo *static_mapinfo = FileMapInfo::current_info();
1691   _i2i_entry_code_buffers = static_mapinfo->i2i_entry_code_buffers();
1692   _i2i_entry_code_buffers_size = static_mapinfo->i2i_entry_code_buffers_size();
1693   char* buffer = static_mapinfo->cloned_vtables();
1694   CppVtables::clone_cpp_vtables((intptr_t*)buffer);
1695 
1696   // Verify various attributes of the archive, plus initialize the
1697   // shared string/symbol tables
1698   buffer = static_mapinfo->serialized_data();
1699   intptr_t* array = (intptr_t*)buffer;
1700   ReadClosure rc(&array);
1701   serialize(&rc);
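  // Note: serialize() is direction-agnostic. At dump time the same function is
  // driven by a WriteClosure to emit this data; here the ReadClosure restores it.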
1702 
1703   // Initialize the run-time symbol table.
1704   SymbolTable::create_table();
1705 
1706   static_mapinfo->patch_archived_heap_embedded_pointers();
1707 
1708   // Close the mapinfo file
1709   static_mapinfo->close();
1710 
1711   static_mapinfo->unmap_region(MetaspaceShared::bm);
1712 
1713   FileMapInfo *dynamic_mapinfo = FileMapInfo::dynamic_info();
1714   if (dynamic_mapinfo != NULL) {
1715     intptr_t* buffer = (intptr_t*)dynamic_mapinfo->serialized_data();
1716     ReadClosure rc(&buffer);
1717     SymbolTable::serialize_shared_table_header(&rc, false);
1718     SystemDictionaryShared::serialize_dictionary_headers(&rc, false);
1719     dynamic_mapinfo->close();
1720   }
1721 
1722   if (PrintSharedArchiveAndExit) {
1723     if (PrintSharedDictionary) {
1724       tty->print_cr("\nShared classes:\n");
1725       SystemDictionaryShared::print_on(tty);
1726     }
1727     if (FileMapInfo::current_info() == NULL || _archive_loading_failed) {
1728       tty->print_cr("archive is invalid");
1729       vm_exit(1);
1730     } else {
1731       tty->print_cr("archive is valid");
1732       vm_exit(0);
1733     }
1734   }
1735 }
1736 
1737 // JVM/TI RedefineClasses() support:
1738 bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
1739   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1740 
1741   if (UseSharedSpaces) {
1742     // remap the shared readonly space to shared readwrite, private
1743     FileMapInfo* mapinfo = FileMapInfo::current_info();
1744     if (!mapinfo->remap_shared_readonly_as_readwrite()) {
1745       return false;
1746     }
1747     if (FileMapInfo::dynamic_info() != NULL) {
1748       mapinfo = FileMapInfo::dynamic_info();
1749       if (!mapinfo->remap_shared_readonly_as_readwrite()) {
1750         return false;
1751       }
1752     }
1753     _remapped_readwrite = true;
1754   }
1755   return true;
1756 }
1757 
1758 void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
  // This is highly unlikely to happen on 64-bit because we have reserved a 4GB space.
1760   // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes
1761   // or so.
1762   _mc_region.print_out_of_space_msg(name, needed_bytes);
1763   _rw_region.print_out_of_space_msg(name, needed_bytes);
1764   _ro_region.print_out_of_space_msg(name, needed_bytes);
1765 
1766   vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
1767                                 "Please reduce the number of shared classes.");
1768 }
1769 
1770 // This is used to relocate the pointers so that the base archive can be mapped at
1771 // MetaspaceShared::requested_base_address() without runtime relocation.
1772 intx MetaspaceShared::final_delta() {
1773   return intx(MetaspaceShared::requested_base_address())  // We want the base archive to be mapped to here at runtime
1774        - intx(SharedBaseAddress);                         // .. but the base archive is mapped at here at dump time
1775 }
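
// Example with hypothetical addresses: if the archive data was dumped while
// mapped at SharedBaseAddress = 0x0000000810000000, but is requested to map at
// 0x0000000800000000 at runtime, final_delta() returns -0x10000000; archived
// pointers are shifted by that delta before being written out.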
1776 
1777 bool MetaspaceShared::use_full_module_graph() {
1778   bool result = _use_optimized_module_handling && _use_full_module_graph &&
1779     (UseSharedSpaces || DumpSharedSpaces) && HeapShared::is_heap_object_archiving_allowed();
1780   if (result && UseSharedSpaces) {
1781     // Classes used by the archived full module graph are loaded in JVMTI early phase.
1782     assert(!(JvmtiExport::should_post_class_file_load_hook() && JvmtiExport::has_early_class_hook_env()),
1783            "CDS should be disabled if early class hooks are enabled");
1784   }
1785   return result;
1786 }
1787 
1788 void MetaspaceShared::print_on(outputStream* st) {
1789   if (UseSharedSpaces || DumpSharedSpaces) {
1790     st->print("CDS archive(s) mapped at: ");
1791     address base;
1792     address top;
1793     if (UseSharedSpaces) { // Runtime
1794       base = (address)MetaspaceObj::shared_metaspace_base();
1795       address static_top = (address)_shared_metaspace_static_top;
1796       top = (address)MetaspaceObj::shared_metaspace_top();
1797       st->print("[" PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "), ", p2i(base), p2i(static_top), p2i(top));
1798     } else if (DumpSharedSpaces) { // Dump Time
1799       base = (address)_shared_rs.base();
1800       top = (address)_shared_rs.end();
1801       st->print("[" PTR_FORMAT "-" PTR_FORMAT "), ", p2i(base), p2i(top));
1802     }
1803     st->print("size " SIZE_FORMAT ", ", top - base);
1804     st->print("SharedBaseAddress: " PTR_FORMAT ", ArchiveRelocationMode: %d.", SharedBaseAddress, (int)ArchiveRelocationMode);
1805   } else {
1806     st->print("CDS disabled.");
1807   }
1808   st->cr();
1809 }