1 /*
   2  * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classListParser.hpp"
  27 #include "classfile/classLoaderExt.hpp"
  28 #include "classfile/dictionary.hpp"
  29 #include "classfile/loaderConstraints.hpp"
  30 #include "classfile/placeholders.hpp"
  31 #include "classfile/sharedClassUtil.hpp"
  32 #include "classfile/symbolTable.hpp"
  33 #include "classfile/systemDictionary.hpp"
  34 #include "classfile/systemDictionaryShared.hpp"
  35 #include "code/codeCache.hpp"
  36 #include "gc/shared/gcLocker.hpp"
  37 #include "interpreter/bytecodeStream.hpp"
  38 #include "interpreter/bytecodes.hpp"
  39 #include "logging/log.hpp"
  40 #include "logging/logMessage.hpp"
  41 #include "memory/filemap.hpp"
  42 #include "memory/metaspace.hpp"
  43 #include "memory/metaspaceShared.hpp"
  44 #include "memory/resourceArea.hpp"
  45 #include "oops/instanceClassLoaderKlass.hpp"
  46 #include "oops/instanceMirrorKlass.hpp"
  47 #include "oops/instanceRefKlass.hpp"
  48 #include "oops/objArrayKlass.hpp"
  49 #include "oops/objArrayOop.hpp"
  50 #include "oops/oop.inline.hpp"
  51 #include "oops/typeArrayKlass.hpp"
  52 #include "prims/jvm.h"
  53 #include "prims/jvmtiRedefineClasses.hpp"
  54 #include "runtime/timerTrace.hpp"
  55 #include "runtime/os.hpp"
  56 #include "runtime/signature.hpp"
  57 #include "runtime/vmThread.hpp"
  58 #include "runtime/vm_operations.hpp"
  59 #include "utilities/align.hpp"
  60 #include "utilities/defaultStream.hpp"
  61 #include "utilities/hashtable.inline.hpp"
  62 #include "memory/metaspaceClosure.hpp"
  63 
  64 ReservedSpace MetaspaceShared::_shared_rs;
  65 VirtualSpace MetaspaceShared::_shared_vs;
  66 MetaspaceSharedStats MetaspaceShared::_stats;
  67 bool MetaspaceShared::_has_error_classes;
  68 bool MetaspaceShared::_archive_loading_failed = false;
  69 bool MetaspaceShared::_remapped_readwrite = false;
  70 address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
  71 size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
  72 size_t MetaspaceShared::_core_spaces_size = 0;
  73 
  74 // The CDS archive is divided into the following regions:
  75 //     mc - misc code (the method entry trampolines)
  76 //     rw - read-write metadata
  77 //     ro - read-only metadata and read-only tables
  78 //     md - misc data (the c++ vtables)
  79 //     od - optional data (original class files)
  80 //
  81 //     s0 - shared strings #0
  82 //     s1 - shared strings #1 (may be empty)
  83 //
  84 // Except for the s0/s1 regions, the other 5 regions are linearly allocated, starting from
  85 // SharedBaseAddress, in the order of mc->rw->ro->md->od. The sizes of these 5 regions
  86 // are page-aligned, and there's no gap between any consecutive regions.
  87 //
  88 // These 5 regions are populated in the following steps:
  89 // [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
  90 //     temporarily allocated outside of the shared regions. Only the method entry
  91 //     trampolines are written into the mc region.
  92 // [2] ArchiveCompactor copies RW metadata into the rw region.
  93 // [3] ArchiveCompactor copies RO metadata into the ro region.
  94 // [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
  95 //     are copied into the ro region as read-only tables.
  96 // [5] C++ vtables are copied into the md region.
  97 // [6] Original class files are copied into the od region.
  98 //
  99 // The s0/s1 regions are populated inside VM_PopulateDumpSharedSpace::dump_string_and_symbols. Their
 100 // layout is independent of the other 5 regions.
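//
// An illustrative (not to scale) sketch of the dump-time layout described above:
//
//   SharedBaseAddress
//   v
//   +----+----+----+----+----+          +----+----+
//   | mc | rw | ro | md | od |   ....   | s0 | s1 |
//   +----+----+----+----+----+          +----+----+
//   contiguous, page-aligned             string regions, laid out independently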
 101 
 102 class DumpRegion {
 103 private:
 104   const char* _name;
 105   char* _base;
 106   char* _top;
 107   char* _end;
 108   bool _is_packed;
 109 
 110   char* expand_top_to(char* newtop) {
 111     assert(is_allocatable(), "must be initialized and not packed");
 112     assert(newtop >= _top, "must not grow backwards");
 113     if (newtop > _end) {
 114       MetaspaceShared::report_out_of_space(_name, newtop - _top);
 115       ShouldNotReachHere();
 116     }
 117     MetaspaceShared::commit_shared_space_to(newtop);
 118     _top = newtop;
 119     return _top;
 120   }
 121 
 122 public:
 123   DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}
 124 
 125   char* allocate(size_t num_bytes, size_t alignment=BytesPerWord) {
 126     char* p = (char*)align_up(_top, alignment);
 127     char* newtop = p + align_up(num_bytes, alignment);
 128     expand_top_to(newtop);
 129     memset(p, 0, newtop - p);
 130     return p;
 131   }
 132 
 133   void append_intptr_t(intptr_t n) {
 134     assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
 135     intptr_t *p = (intptr_t*)_top;
 136     char* newtop = _top + sizeof(intptr_t);
 137     expand_top_to(newtop);
 138     *p = n;
 139   }
 140 
 141   char* base()      const { return _base;        }
 142   char* top()       const { return _top;         }
 143   char* end()       const { return _end;         }
 144   size_t reserved() const { return _end - _base; }
 145   size_t used()     const { return _top - _base; }
 146   bool is_packed()  const { return _is_packed;   }
 147   bool is_allocatable() const {
 148     return !is_packed() && _base != NULL;
 149   }
 150 
 151   double perc(size_t used, size_t total) const {
 152     if (total == 0) {
 153       total = 1;
 154     }
 155     return used / double(total) * 100.0;
 156   }
 157 
 158   void print(size_t total_bytes) const {
 159     tty->print_cr("%s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
 160                   _name, used(), perc(used(), total_bytes), reserved(), perc(used(), reserved()), p2i(_base));
 161   }
 162   void print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
 163     tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
 164                _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
 165     if (strcmp(_name, failing_region) == 0) {
 166       tty->print_cr(" required = %d", int(needed_bytes));
 167     } else {
 168       tty->cr();
 169     }
 170   }
 171 
 172   void init(const ReservedSpace* rs) {
 173     _base = _top = rs->base();
 174     _end = rs->end();
 175   }
 176   void init(char* b, char* t, char* e) {
 177     _base = b;
 178     _top = t;
 179     _end = e;
 180   }
 181 
 182   void pack(DumpRegion* next = NULL) {
 183     assert(!is_packed(), "sanity");
 184     _end = (char*)align_up(_top, Metaspace::reserve_alignment());
 185     _is_packed = true;
 186     if (next != NULL) {
 187       next->_base = next->_top = this->_end;
 188       next->_end = MetaspaceShared::shared_rs()->end();
 189     }
 190   }
 191   bool contains(char* p) {
 192     return base() <= p && p < top();
 193   }
 194 };
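// A minimal sketch of how the dump-time code drives a DumpRegion (illustrative only; the real
// call sites are in ArchiveCompactor::allocate() and VM_PopulateDumpSharedSpace::doit() below):
//
//   char* p = _rw_region.allocate(byte_size);   // bump-allocates; commits pages as needed
//   memcpy(p, obj, byte_size);                  // copy a metadata object into the region
//   ...
//   _rw_region.pack(&_ro_region);               // seal rw; ro starts at rw's aligned top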
 195 
 196 DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
 197 DumpRegion _s0_region("s0"), _s1_region("s1");
 198 
 199 char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
 200   return _mc_region.allocate(num_bytes);
 201 }
 202 
 203 char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
 204   return _ro_region.allocate(num_bytes);
 205 }
 206 
 207 void MetaspaceShared::initialize_shared_rs() {
 208   const size_t reserve_alignment = Metaspace::reserve_alignment();
 209   bool large_pages = false; // No large pages when dumping the CDS archive.
 210   char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
 211 
 212 #ifdef _LP64
 213   const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
 214   const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
 215 #else
 216   // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
 217   size_t cds_total = align_down(256*M, reserve_alignment);
 218 #endif
 219 
 220   // First try to reserve the space at the specified SharedBaseAddress.
 221   _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
 222   if (_shared_rs.is_reserved()) {
 223     assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
 224   } else {
 225     // Get a mmap region anywhere if reserving at SharedBaseAddress fails.
 226     _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages);
 227   }
 228   if (!_shared_rs.is_reserved()) {
 229     vm_exit_during_initialization("Unable to reserve memory for shared space",
 230                                   err_msg(SIZE_FORMAT " bytes.", cds_total));
 231   }
 232 
 233 #ifdef _LP64
 234   // During dump time, we allocate 4GB (UnscaledClassSpaceMax) of space and split it up:
 235   // + The upper 1 GB is used as the "temporary compressed class space" -- preload_classes()
 236   //   will store Klasses into this space.
 237   // + The lower 3 GB is used for the archive -- when preload_classes() is done,
 238   //   ArchiveCompactor will copy the class metadata into this space, first the RW parts,
 239   //   then the RO parts.
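  //
  // For example, with the default 4 GB UnscaledClassSpaceMax (illustrative numbers only):
  //   max_archive_size = 4 GB * 3 / 4 = 3 GB  --> _shared_rs (first_part, lower 3 GB)
  //   tmp_class_space  = 4 GB - 3 GB  = 1 GB  --> temporary compressed class space (last_part, upper 1 GB)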
 240 
 241   assert(UseCompressedOops && UseCompressedClassPointers,
 242       "UseCompressedOops and UseCompressedClassPointers must be set");
 243 
 244   size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment);
 245   ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
 246   CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment);
 247   _shared_rs = _shared_rs.first_part(max_archive_size);
 248 
 249   // Set up compressed class pointers.
 250   Universe::set_narrow_klass_base((address)_shared_rs.base());
 251   if (UseAOT || cds_total > UnscaledClassSpaceMax) {
 252     // AOT forces narrow_klass_shift=LogKlassAlignmentInBytes
 253     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
 254   } else {
 255     Universe::set_narrow_klass_shift(0);
 256   }
 257 
 258   Metaspace::initialize_class_space(tmp_class_space);
 259   tty->print_cr("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
 260                 p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
 261 
 262   tty->print_cr("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
 263                 CompressedClassSpaceSize, p2i(tmp_class_space.base()));
 264 #endif
 265 
 266   // Start with 0 committed bytes. The memory will be committed as needed by
 267   // MetaspaceShared::commit_shared_space_to().
 268   if (!_shared_vs.initialize(_shared_rs, 0)) {
 269     vm_exit_during_initialization("Unable to allocate memory for shared space");
 270   }
 271 
 272   _mc_region.init(&_shared_rs);
 273   tty->print_cr("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
 274                 _shared_rs.size(), p2i(_shared_rs.base()));
 275 }
 276 
 277 void MetaspaceShared::commit_shared_space_to(char* newtop) {
 278   assert(DumpSharedSpaces, "dump-time only");
 279   char* base = _shared_rs.base();
 280   size_t need_committed_size = newtop - base;
 281   size_t has_committed_size = _shared_vs.committed_size();
 282   if (need_committed_size < has_committed_size) {
 283     return;
 284   }
 285 
 286   size_t min_bytes = need_committed_size - has_committed_size;
 287   size_t preferred_bytes = 1 * M;
 288   size_t uncommitted = _shared_vs.reserved_size() - has_committed_size;
 289 
 290   size_t commit = MAX2(min_bytes, preferred_bytes);
       commit = MIN2(commit, uncommitted); // don't try to commit more than what is still uncommitted
 291   assert(commit <= uncommitted, "sanity");
 292 
 293   bool result = _shared_vs.expand_by(commit, false);
 294   if (!result) {
 295     vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
 296                                           need_committed_size));
 297   }
 298 
 299   log_info(cds)("Expanding shared spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9)  " bytes ending at %p]",
 300                 commit, _shared_vs.actual_committed_size(), _shared_vs.high());
 301 }
 302 
 303 // Read/write a data stream for restoring/preserving metadata pointers and
 304 // miscellaneous data from/to the shared archive file.
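//
// The do_tag() calls below serve as cheap consistency checks: at dump time the WriteClosure
// stores each tag value into the archive, and the matching read-side SerializeClosure is
// expected to verify that it reads the same value back, so a mismatch in layout or ordering
// is caught early rather than causing silent corruption.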
 305 
 306 void MetaspaceShared::serialize(SerializeClosure* soc) {
 307   int tag = 0;
 308   soc->do_tag(--tag);
 309 
 310   // Verify the sizes of various metadata in the system.
 311   soc->do_tag(sizeof(Method));
 312   soc->do_tag(sizeof(ConstMethod));
 313   soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
 314   soc->do_tag(sizeof(ConstantPool));
 315   soc->do_tag(sizeof(ConstantPoolCache));
 316   soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
 317   soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
 318   soc->do_tag(sizeof(Symbol));
 319 
 320   // Dump/restore miscellaneous metadata.
 321   Universe::serialize(soc, true);
 322   soc->do_tag(--tag);
 323 
 324   // Dump/restore references to commonly used names and signatures.
 325   vmSymbols::serialize(soc);
 326   soc->do_tag(--tag);
 327 
 328   // Dump/restore the symbol and string tables
 329   SymbolTable::serialize(soc);
 330   StringTable::serialize(soc);
 331   soc->do_tag(--tag);
 332 
 333   soc->do_tag(666);
 334 }
 335 
 336 address MetaspaceShared::cds_i2i_entry_code_buffers(size_t total_size) {
 337   if (DumpSharedSpaces) {
 338     if (_cds_i2i_entry_code_buffers == NULL) {
 339       _cds_i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
 340       _cds_i2i_entry_code_buffers_size = total_size;
 341     }
 342   } else if (UseSharedSpaces) {
 343     assert(_cds_i2i_entry_code_buffers != NULL, "must already be initialized");
 344   } else {
 345     return NULL;
 346   }
 347 
 348   assert(_cds_i2i_entry_code_buffers_size == total_size, "must not change");
 349   return _cds_i2i_entry_code_buffers;
 350 }
 351 
 352 // CDS code for dumping shared archive.
 353 
 354 // Global object for holding classes that have been loaded.  Since this
 355 // is run at a safepoint just before exit, this is the entire set of classes.
 356 static GrowableArray<Klass*>* _global_klass_objects;
 357 class CollectClassesClosure : public KlassClosure {
 358   void do_klass(Klass* k) {
 359     if (!(k->is_instance_klass() && InstanceKlass::cast(k)->is_in_error_state())) {
 360       _global_klass_objects->append_if_missing(k);
 361     }
 362   }
 363 };
 364 
 365 static void remove_unshareable_in_classes() {
 366   for (int i = 0; i < _global_klass_objects->length(); i++) {
 367     Klass* k = _global_klass_objects->at(i);
 368     k->remove_unshareable_info();
 369   }
 370 }
 371 
 372 static void rewrite_nofast_bytecode(Method* method) {
 373   RawBytecodeStream bcs(method);
 374   while (!bcs.is_last_bytecode()) {
 375     Bytecodes::Code opcode = bcs.raw_next();
 376     switch (opcode) {
 377     case Bytecodes::_getfield:      *bcs.bcp() = Bytecodes::_nofast_getfield;      break;
 378     case Bytecodes::_putfield:      *bcs.bcp() = Bytecodes::_nofast_putfield;      break;
 379     case Bytecodes::_aload_0:       *bcs.bcp() = Bytecodes::_nofast_aload_0;       break;
 380     case Bytecodes::_iload: {
 381       if (!bcs.is_wide()) {
 382         *bcs.bcp() = Bytecodes::_nofast_iload;
 383       }
 384       break;
 385     }
 386     default: break;
 387     }
 388   }
 389 }
 390 
 391 // Walk all methods in the class list to ensure that they won't be modified at
 392 // run time. This includes:
 393 // [1] Rewrite all bytecodes as needed, so that the ConstMethod* will not be modified
 394 //     at run time by RewriteBytecodes/RewriteFrequentPairs
 395 // [2] Assign a fingerprint, so one doesn't need to be assigned at run-time.
 396 static void rewrite_nofast_bytecodes_and_calculate_fingerprints() {
 397   for (int i = 0; i < _global_klass_objects->length(); i++) {
 398     Klass* k = _global_klass_objects->at(i);
 399     if (k->is_instance_klass()) {
 400       InstanceKlass* ik = InstanceKlass::cast(k);
 401       for (int i = 0; i < ik->methods()->length(); i++) {
 402         Method* m = ik->methods()->at(i);
 403         rewrite_nofast_bytecode(m);
 404         Fingerprinter fp(m);
 405         // The side effect of this call sets the method's fingerprint field.
 406         fp.fingerprint();
 407       }
 408     }
 409   }
 410 }
 411 
 412 static void relocate_cached_class_file() {
 413   for (int i = 0; i < _global_klass_objects->length(); i++) {
 414     Klass* k = _global_klass_objects->at(i);
 415     if (k->is_instance_klass()) {
 416       InstanceKlass* ik = InstanceKlass::cast(k);
 417       JvmtiCachedClassFileData* p = ik->get_archived_class_data();
 418       if (p != NULL) {
 419         int size = offset_of(JvmtiCachedClassFileData, data) + p->length;
 420         JvmtiCachedClassFileData* q = (JvmtiCachedClassFileData*)_od_region.allocate(size);
 421         q->length = p->length;
 422         memcpy(q->data, p->data, p->length);
 423         ik->set_archived_class_data(q);
 424       }
 425     }
 426   }
 427 }
 428 
 429 // Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
 430 // (In GCC this is the field <Type>::_vptr, i.e., first word in the object.)
 431 //
 432 // Addresses of the vtables and the methods may be different across JVM runs,
 433 // if libjvm.so is dynamically loaded at a different base address.
 434 //
 435 // To ensure that the Metadata objects in the CDS archive always have the correct vtable:
 436 //
 437 // + at dump time:  we redirect the _vptr to point to our own vtables inside
 438 //                  the CDS image
 439 // + at run time:   we clone the actual contents of the vtables from libjvm.so
 440 //                  into our own tables.
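//
// In effect, the first word of every archived Metadata object ends up pointing into the md
// region rather than into libjvm.so. A rough, not-to-scale sketch:
//
//   [archived InstanceKlass in rw/ro]          [md region, one CppVtableInfo per type]
//   +------------------+                       +-----------------------------+
//   | _vptr  ----------+---------------------> | _cloned_vtable[0..n-1]      |
//   | ...              |                       | (preceded by _vtable_size)  |
//   +------------------+                       +-----------------------------+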
 441 
 442 // Currently, the archive contains ONLY the following types of objects that have C++ vtables.
 443 #define CPP_VTABLE_PATCH_TYPES_DO(f) \
 444   f(ConstantPool) \
 445   f(InstanceKlass) \
 446   f(InstanceClassLoaderKlass) \
 447   f(InstanceMirrorKlass) \
 448   f(InstanceRefKlass) \
 449   f(Method) \
 450   f(ObjArrayKlass) \
 451   f(TypeArrayKlass)
 452 
 453 class CppVtableInfo {
 454   intptr_t _vtable_size;
 455   intptr_t _cloned_vtable[1];
 456 public:
 457   static int num_slots(int vtable_size) {
 458     return 1 + vtable_size; // Need to add the space occupied by _vtable_size;
 459   }
 460   int vtable_size()           { return int(uintx(_vtable_size)); }
 461   void set_vtable_size(int n) { _vtable_size = intptr_t(n); }
 462   intptr_t* cloned_vtable()   { return &_cloned_vtable[0]; }
 463   void zero()                 { memset(_cloned_vtable, 0, sizeof(intptr_t) * vtable_size()); }
 464   // Returns the size in bytes of a CppVtableInfo with the given vtable_size; the next
       // CppVtableInfo can be placed immediately after this one.
 465   static size_t byte_size(int vtable_size) {
 466     CppVtableInfo i;
 467     return pointer_delta(&i._cloned_vtable[vtable_size], &i, sizeof(u1));
 468   }
 469 };
 470 
 471 template <class T> class CppVtableCloner : public T {
 472   static intptr_t* vtable_of(Metadata& m) {
 473     return *((intptr_t**)&m);
 474   }
 475   static CppVtableInfo* _info;
 476 
 477   static int get_vtable_length(const char* name);
 478 
 479 public:
 480   // Allocate and initialize the cloned C++ vtable in the md region.
 481   static intptr_t* allocate(const char* name);
 482 
 483   // Clone the vtable to ...
 484   static intptr_t* clone_vtable(const char* name, CppVtableInfo* info);
 485 
 486   static void zero_vtable_clone() {
 487     assert(DumpSharedSpaces, "dump-time only");
 488     _info->zero();
 489   }
 490 
 491   // Switch the vtable pointer to point to the cloned vtable.
 492   static void patch(Metadata* obj) {
 493     assert(DumpSharedSpaces, "dump-time only");
 494     *(void**)obj = (void*)(_info->cloned_vtable());
 495   }
 496 
 497   static bool is_valid_shared_object(const T* obj) {
 498     intptr_t* vptr = *(intptr_t**)obj;
 499     return vptr == _info->cloned_vtable();
 500   }
 501 };
 502 
 503 template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;
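// How the cloner is used (summary of the code below):
//   dump time: allocate() reserves a CppVtableInfo in the md region and copies the current
//              vtable contents into it; patch() then points each archived object at the clone;
//              zero_vtable_clone() blanks the clone just before the archive is written to disk.
//   run time:  clone_cpp_vtables() calls clone_vtable() to re-fill the mapped clones with this
//              process's vtable contents, so archived objects get working virtual dispatch
//              without any per-object relocation.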
 504 
 505 template <class T>
 506 intptr_t* CppVtableCloner<T>::allocate(const char* name) {
 507   assert(is_aligned(_md_region.top(), sizeof(intptr_t)), "bad alignment");
 508   int n = get_vtable_length(name);
 509   _info = (CppVtableInfo*)_md_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
 510   _info->set_vtable_size(n);
 511 
 512   intptr_t* p = clone_vtable(name, _info);
 513   assert((char*)p == _md_region.top(), "must be");
 514 
 515   return p;
 516 }
 517 
 518 template <class T>
 519 intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) {
 520   if (!DumpSharedSpaces) {
 521     assert(_info == 0, "_info is initialized only at dump time");
 522     _info = info; // Remember it -- it will be used by MetaspaceShared::is_valid_shared_method()
 523   }
 524   T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
 525   int n = info->vtable_size();
 526   intptr_t* srcvtable = vtable_of(tmp);
 527   intptr_t* dstvtable = info->cloned_vtable();
 528 
 529   // We already checked (and, if necessary, adjusted n) when the vtables were allocated, so we are
 530   // safe to do memcpy.
 531   log_debug(cds, vtables)("Copying %3d vtable entries for %s", n, name);
 532   memcpy(dstvtable, srcvtable, sizeof(intptr_t) * n);
 533   return dstvtable + n;
 534 }
 535 
 536 // To determine the size of the vtable for each type, we use the following
 537 // trick by declaring 2 subclasses:
 538 //
 539 //   class CppVtableTesterA: public InstanceKlass {virtual void* last_virtual_method() {return NULL;} };
 540 //   class CppVtableTesterB: public InstanceKlass {virtual int   last_virtual_method() {return 1;}    };
 541 //
 542 // CppVtableTesterA and CppVtableTesterB's vtables have the following properties:
 543 // - Their size (N+1) is exactly one more than the size of InstanceKlass's vtable (N)
 544 // - The first N entries are exactly the same as in InstanceKlass's vtable.
 545 // - Their last entry is different.
 546 //
 547 // So to determine the value of N, we just walk CppVtableTesterA and CppVtableTesterB's tables
 548 // and find the first entry that's different.
 549 //
 550 // This works on all C++ compilers supported by Oracle, but you may need to tweak it for more
 551 // esoteric compilers.
 552 
 553 template <class T> class CppVtableTesterB: public T {
 554 public:
 555   virtual int last_virtual_method() {return 1;}
 556 };
 557 
 558 template <class T> class CppVtableTesterA : public T {
 559 public:
 560   virtual void* last_virtual_method() {
 561     // Make this different than CppVtableTesterB::last_virtual_method so the C++
 562     // compiler/linker won't alias the two functions.
 563     return NULL;
 564   }
 565 };
 566 
 567 template <class T>
 568 int CppVtableCloner<T>::get_vtable_length(const char* name) {
 569   CppVtableTesterA<T> a;
 570   CppVtableTesterB<T> b;
 571 
 572   intptr_t* avtable = vtable_of(a);
 573   intptr_t* bvtable = vtable_of(b);
 574 
 575   // Start at slot 1, because slot 0 may be RTTI (on Solaris/Sparc)
 576   int vtable_len = 1;
 577   for (; ; vtable_len++) {
 578     if (avtable[vtable_len] != bvtable[vtable_len]) {
 579       break;
 580     }
 581   }
 582   log_debug(cds, vtables)("Found   %3d vtable entries for %s", vtable_len, name);
 583 
 584   return vtable_len;
 585 }
 586 
 587 #define ALLOC_CPP_VTABLE_CLONE(c) \
 588   CppVtableCloner<c>::allocate(#c);
 589 
 590 #define CLONE_CPP_VTABLE(c) \
 591   p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);
 592 
 593 #define ZERO_CPP_VTABLE(c) \
 594  CppVtableCloner<c>::zero_vtable_clone();
 595 
 596 // This can be called at both dump time and run time.
 597 intptr_t* MetaspaceShared::clone_cpp_vtables(intptr_t* p) {
 598   assert(DumpSharedSpaces || UseSharedSpaces, "sanity");
 599   CPP_VTABLE_PATCH_TYPES_DO(CLONE_CPP_VTABLE);
 600   return p;
 601 }
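// At dump time this is used to re-populate the clones after zero_cpp_vtable_clones_for_writing()
// has blanked them (see VM_PopulateDumpSharedSpace::doit(), which calls it again right after the
// archive has been written); at run time it fills the mapped md region with the current
// process's vtable contents.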
 602 
 603 void MetaspaceShared::zero_cpp_vtable_clones_for_writing() {
 604   assert(DumpSharedSpaces, "dump-time only");
 605   CPP_VTABLE_PATCH_TYPES_DO(ZERO_CPP_VTABLE);
 606 }
 607 
 608 // Allocate and initialize the C++ vtable clones in the md region.
 609 void MetaspaceShared::allocate_cpp_vtable_clones() {
 610   assert(DumpSharedSpaces, "dump-time only");
 611   // Layout (each slot is a intptr_t):
 612   //   [number of slots in the first vtable = n1]
 613   //   [ <n1> slots for the first vtable]
 614   //   [number of slots in the second vtable = n2]
 615   //   [ <n2> slots for the second vtable]
 616   //   ...
 617   // The order of the vtables is the same as the CPP_VTABLE_PATCH_TYPES_DO macro.
 618   CPP_VTABLE_PATCH_TYPES_DO(ALLOC_CPP_VTABLE_CLONE);
 619 }
 620 
 621 // Switch the vtable pointer to point to the cloned vtable. We assume the
 622 // vtable pointer is in the first slot of the object.
 623 void MetaspaceShared::patch_cpp_vtable_pointers() {
 624   int n = _global_klass_objects->length();
 625   for (int i = 0; i < n; i++) {
 626     Klass* obj = _global_klass_objects->at(i);
 627     if (obj->is_instance_klass()) {
 628       InstanceKlass* ik = InstanceKlass::cast(obj);
 629       if (ik->is_class_loader_instance_klass()) {
 630         CppVtableCloner<InstanceClassLoaderKlass>::patch(ik);
 631       } else if (ik->is_reference_instance_klass()) {
 632         CppVtableCloner<InstanceRefKlass>::patch(ik);
 633       } else if (ik->is_mirror_instance_klass()) {
 634         CppVtableCloner<InstanceMirrorKlass>::patch(ik);
 635       } else {
 636         CppVtableCloner<InstanceKlass>::patch(ik);
 637       }
 638       ConstantPool* cp = ik->constants();
 639       CppVtableCloner<ConstantPool>::patch(cp);
 640       for (int j = 0; j < ik->methods()->length(); j++) {
 641         Method* m = ik->methods()->at(j);
 642         CppVtableCloner<Method>::patch(m);
 643         assert(CppVtableCloner<Method>::is_valid_shared_object(m), "must be");
 644       }
 645     } else if (obj->is_objArray_klass()) {
 646       CppVtableCloner<ObjArrayKlass>::patch(obj);
 647     } else {
 648       assert(obj->is_typeArray_klass(), "sanity");
 649       CppVtableCloner<TypeArrayKlass>::patch(obj);
 650     }
 651   }
 652 }
 653 
 654 bool MetaspaceShared::is_valid_shared_method(const Method* m) {
 655   assert(is_in_shared_space(m), "must be");
 656   return CppVtableCloner<Method>::is_valid_shared_object(m);
 657 }
 658 
 659 // Closure for serializing initialization data out to a data area to be
 660 // written to the shared file.
 661 
 662 class WriteClosure : public SerializeClosure {
 663 private:
 664   DumpRegion* _dump_region;
 665 
 666 public:
 667   WriteClosure(DumpRegion* r) {
 668     _dump_region = r;
 669   }
 670 
 671   void do_ptr(void** p) {
 672     _dump_region->append_intptr_t((intptr_t)*p);
 673   }
 674 
 675   void do_u4(u4* p) {
 676     void* ptr = (void*)(uintx(*p));
 677     do_ptr(&ptr);
 678   }
 679 
 680   void do_tag(int tag) {
 681     _dump_region->append_intptr_t((intptr_t)tag);
 682   }
 683 
 684   void do_region(u_char* start, size_t size) {
 685     assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
 686     assert(size % sizeof(intptr_t) == 0, "bad size");
 687     do_tag((int)size);
 688     while (size > 0) {
 689       _dump_region->append_intptr_t(*(intptr_t*)start);
 690       start += sizeof(intptr_t);
 691       size -= sizeof(intptr_t);
 692     }
 693   }
 694 
 695   bool reading() const { return false; }
 696 };
 697 
 698 // This is for dumping detailed statistics for the allocations
 699 // in the shared spaces.
 700 class DumpAllocStats : public ResourceObj {
 701 public:
 702 
 703   // Here's poor man's enum inheritance
 704 #define SHAREDSPACE_OBJ_TYPES_DO(f) \
 705   METASPACE_OBJ_TYPES_DO(f) \
 706   f(SymbolHashentry) \
 707   f(SymbolBucket) \
 708   f(StringHashentry) \
 709   f(StringBucket) \
 710   f(Other)
 711 
 712   enum Type {
 713     // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
 714     SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
 715     _number_of_types
 716   };
 717 
 718   static const char * type_name(Type type) {
 719     switch(type) {
 720     SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
 721     default:
 722       ShouldNotReachHere();
 723       return NULL;
 724     }
 725   }
 726 
 727 public:
 728   enum { RO = 0, RW = 1 };
 729 
 730   int _counts[2][_number_of_types];
 731   int _bytes [2][_number_of_types];
 732 
 733   DumpAllocStats() {
 734     memset(_counts, 0, sizeof(_counts));
 735     memset(_bytes,  0, sizeof(_bytes));
 736   };
 737 
 738   void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
 739     assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
 740     int which = (read_only) ? RO : RW;
 741     _counts[which][type] ++;
 742     _bytes [which][type] += byte_size;
 743   }
 744 
 745   void record_other_type(int byte_size, bool read_only) {
 746     int which = (read_only) ? RO : RW;
 747     _bytes [which][OtherType] += byte_size;
 748   }
 749   void print_stats(int ro_all, int rw_all, int mc_all, int md_all);
 750 };
 751 
 752 void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all, int md_all) {
 753   // Calculate size of data that was not allocated by Metaspace::allocate()
 754   MetaspaceSharedStats *stats = MetaspaceShared::stats();
 755 
 756   // symbols
 757   _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
 758   _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;
 759 
 760   _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
 761   _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;
 762 
 763   // strings
 764   _counts[RO][StringHashentryType] = stats->string.hashentry_count;
 765   _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;
 766 
 767   _counts[RO][StringBucketType] = stats->string.bucket_count;
 768   _bytes [RO][StringBucketType] = stats->string.bucket_bytes;
 769 
 770   // TODO: count things like dictionary, vtable, etc
 771   _bytes[RW][OtherType] += mc_all + md_all;
 772   rw_all += mc_all + md_all; // mc/md are mapped Read/Write
 773 
 774   // prevent divide-by-zero
 775   if (ro_all < 1) {
 776     ro_all = 1;
 777   }
 778   if (rw_all < 1) {
 779     rw_all = 1;
 780   }
 781 
 782   int all_ro_count = 0;
 783   int all_ro_bytes = 0;
 784   int all_rw_count = 0;
 785   int all_rw_bytes = 0;
 786 
 787 // To make fmt_stats be a syntactic constant (for format warnings), use #define.
 788 #define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
 789   const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
 790   const char *hdr = "                        ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";
 791 
 792   ResourceMark rm;
 793   LogMessage(cds) msg;
 794   stringStream info_stream;
 795 
 796   info_stream.print_cr("Detailed metadata info (excluding od/st regions; rw stats include md/mc regions):");
 797   info_stream.print_cr("%s", hdr);
 798   info_stream.print_cr("%s", sep);
 799   for (int type = 0; type < int(_number_of_types); type ++) {
 800     const char *name = type_name((Type)type);
 801     int ro_count = _counts[RO][type];
 802     int ro_bytes = _bytes [RO][type];
 803     int rw_count = _counts[RW][type];
 804     int rw_bytes = _bytes [RW][type];
 805     int count = ro_count + rw_count;
 806     int bytes = ro_bytes + rw_bytes;
 807 
 808     double ro_perc = 100.0 * double(ro_bytes) / double(ro_all);
 809     double rw_perc = 100.0 * double(rw_bytes) / double(rw_all);
 810     double perc    = 100.0 * double(bytes)    / double(ro_all + rw_all);
 811 
 812     info_stream.print_cr(fmt_stats, name,
 813                          ro_count, ro_bytes, ro_perc,
 814                          rw_count, rw_bytes, rw_perc,
 815                          count, bytes, perc);
 816 
 817     all_ro_count += ro_count;
 818     all_ro_bytes += ro_bytes;
 819     all_rw_count += rw_count;
 820     all_rw_bytes += rw_bytes;
 821   }
 822 
 823   int all_count = all_ro_count + all_rw_count;
 824   int all_bytes = all_ro_bytes + all_rw_bytes;
 825 
 826   double all_ro_perc = 100.0 * double(all_ro_bytes) / double(ro_all);
 827   double all_rw_perc = 100.0 * double(all_rw_bytes) / double(rw_all);
 828   double all_perc    = 100.0 * double(all_bytes)    / double(ro_all + rw_all);
 829 
 830   info_stream.print_cr("%s", sep);
 831   info_stream.print_cr(fmt_stats, "Total",
 832                        all_ro_count, all_ro_bytes, all_ro_perc,
 833                        all_rw_count, all_rw_bytes, all_rw_perc,
 834                        all_count, all_bytes, all_perc);
 835 
 836   assert(all_ro_bytes == ro_all, "everything should have been counted");
 837   assert(all_rw_bytes == rw_all, "everything should have been counted");
 838 
 839   msg.info("%s", info_stream.as_string());
 840 #undef fmt_stats
 841 }
 842 
 843 // Populate the shared space.
 844 
 845 class VM_PopulateDumpSharedSpace: public VM_Operation {
 846 private:
 847   GrowableArray<MemRegion> *_string_regions;
 848 
 849   void dump_string_and_symbols();
 850   char* dump_read_only_tables();
 851   void print_region_stats();
 852 public:
 853 
 854   VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
 855   void doit();   // outline because gdb sucks
 856   static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only,  bool allow_exec);
 857 }; // class VM_PopulateDumpSharedSpace
 858 
 859 class SortedSymbolClosure: public SymbolClosure {
 860   GrowableArray<Symbol*> _symbols;
 861   virtual void do_symbol(Symbol** sym) {
 862     assert((*sym)->is_permanent(), "archived symbols must be permanent");
 863     _symbols.append(*sym);
 864   }
 865   static int compare_symbols_by_address(Symbol** a, Symbol** b) {
 866     if (a[0] < b[0]) {
 867       return -1;
 868     } else if (a[0] == b[0]) {
 869       return 0;
 870     } else {
 871       return 1;
 872     }
 873   }
 874 
 875 public:
 876   SortedSymbolClosure() {
 877     SymbolTable::symbols_do(this);
 878     _symbols.sort(compare_symbols_by_address);
 879   }
 880   GrowableArray<Symbol*>* get_sorted_symbols() {
 881     return &_symbols;
 882   }
 883 };
 884 
 885 // ArchiveCompactor --
 886 //
 887 // This class is the central piece of shared archive compaction -- all metaspace data are
 888 // initially allocated outside of the shared regions. ArchiveCompactor copies the
 889 // metaspace data into their final location in the shared regions.
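//
// The compaction proceeds in phases over the same set of objects (see copy_and_compact()):
// [1] ShallowCopier memcpy's each unique MetaspaceObj into the rw or ro region and records
//     its old->new address mapping in _new_loc_table.
// [2] ShallowCopyEmbeddedRefRelocator rewrites the pointers embedded inside each copy.
// [3] RefRelocator updates external roots (symbols, klasses, tables) to point at the copies.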
 890 
 891 class ArchiveCompactor : AllStatic {
 892   static DumpAllocStats* _alloc_stats;
 893   static SortedSymbolClosure* _ssc;
 894 
 895   static unsigned my_hash(const address& a) {
 896     return primitive_hash<address>(a);
 897   }
 898   static bool my_equals(const address& a0, const address& a1) {
 899     return primitive_equals<address>(a0, a1);
 900   }
 901   typedef ResourceHashtable<
 902       address, address,
 903       ArchiveCompactor::my_hash,   // solaris compiler doesn't like: primitive_hash<address>
 904       ArchiveCompactor::my_equals, // solaris compiler doesn't like: primitive_equals<address>
 905       16384, ResourceObj::C_HEAP> RelocationTable;
 906   static RelocationTable* _new_loc_table;
 907 
 908 public:
 909   static void initialize() {
 910     _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats;
 911     _new_loc_table = new(ResourceObj::C_HEAP, mtInternal)RelocationTable;
 912   }
 913   static DumpAllocStats* alloc_stats() {
 914     return _alloc_stats;
 915   }
 916 
 917   static void allocate(MetaspaceClosure::Ref* ref, bool read_only) {
 918     address obj = ref->obj();
 919     int bytes = ref->size() * BytesPerWord;
 920     char* p;
 921     size_t alignment = BytesPerWord;
 922     char* oldtop;
 923     char* newtop;
 924 
 925     if (read_only) {
 926       oldtop = _ro_region.top();
 927       p = _ro_region.allocate(bytes, alignment);
 928       newtop = _ro_region.top();
 929     } else {
 930       oldtop = _rw_region.top();
 931       p = _rw_region.allocate(bytes, alignment);
 932       newtop = _rw_region.top();
 933     }
 934     memcpy(p, obj, bytes);
 935     bool isnew = _new_loc_table->put(obj, (address)p);
 936     assert(isnew, "must be");
 937     log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
 938 
 939     _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
 940     if (ref->msotype() == MetaspaceObj::SymbolType) {
 941       uintx delta = MetaspaceShared::object_delta(p);
 942       if (delta > MAX_SHARED_DELTA) {
 943         // This is just a sanity check and should not appear in any real world usage. This
 944         // happens only if you allocate more than 2GB of Symbols, which would require
 945         // millions of shared classes.
 946         vm_exit_during_initialization("Too many Symbols in the CDS archive",
 947                                       "Please reduce the number of shared classes.");
 948       }
 949     }
 950   }
 951 
 952   static address get_new_loc(MetaspaceClosure::Ref* ref) {
 953     address* pp = _new_loc_table->get(ref->obj());
 954     assert(pp != NULL, "must be");
 955     return *pp;
 956   }
 957 
 958 private:
 959   // Makes a shallow copy of visited MetaspaceObj's
 960   class ShallowCopier: public UniqueMetaspaceClosure {
 961     bool _read_only;
 962   public:
 963     ShallowCopier(bool read_only) : _read_only(read_only) {}
 964 
 965     virtual void do_unique_ref(Ref* ref, bool read_only) {
 966       if (read_only == _read_only) {
 967         allocate(ref, read_only);
 968       }
 969     }
 970   };
 971 
 972   // Relocate embedded pointers within a MetaspaceObj's shallow copy
 973   class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
 974   public:
 975     virtual void do_unique_ref(Ref* ref, bool read_only) {
 976       address new_loc = get_new_loc(ref);
 977       RefRelocator refer;
 978       ref->metaspace_pointers_do_at(&refer, new_loc);
 979     }
 980   };
 981 
 982   // Relocate a reference to point to its shallow copy
 983   class RefRelocator: public MetaspaceClosure {
 984   public:
 985     virtual bool do_ref(Ref* ref, bool read_only) {
 986       if (ref->not_null()) {
 987         ref->update(get_new_loc(ref));
 988       }
 989       return false; // Do not recurse.
 990     }
 991   };
 992 
 993 #ifdef ASSERT
 994   class IsRefInArchiveChecker: public MetaspaceClosure {
 995   public:
 996     virtual bool do_ref(Ref* ref, bool read_only) {
 997       if (ref->not_null()) {
 998         char* obj = (char*)ref->obj();
 999         assert(_ro_region.contains(obj) || _rw_region.contains(obj),
1000                "must be relocated to point to CDS archive");
1001       }
1002       return false; // Do not recurse.
1003     }
1004   };
1005 #endif
1006 
1007 public:
1008   static void copy_and_compact() {
1009     // We should no longer allocate anything from the metaspace, so that
1010     // we can have a stable set of MetaspaceObjs to work with.
1011     Metaspace::freeze();
1012 
1013     ResourceMark rm;
1014     SortedSymbolClosure the_ssc; // StackObj
1015     _ssc = &the_ssc;
1016 
1017     tty->print_cr("Scanning all metaspace objects ... ");
1018     {
1019       // allocate and shallow-copy RW objects, immediately following the MC region
1020       tty->print_cr("Allocating RW objects ... ");
1021       _mc_region.pack(&_rw_region);
1022 
1023       ResourceMark rm;
1024       ShallowCopier rw_copier(false);
1025       iterate_roots(&rw_copier);
1026     }
1027     {
1028       // allocate and shallow-copy RO objects, immediately following the RW region
1029       tty->print_cr("Allocating RO objects ... ");
1030       _rw_region.pack(&_ro_region);
1031 
1032       ResourceMark rm;
1033       ShallowCopier ro_copier(true);
1034       iterate_roots(&ro_copier);
1035     }
1036     {
1037       tty->print_cr("Relocating embedded pointers ... ");
1038       ResourceMark rm;
1039       ShallowCopyEmbeddedRefRelocator emb_reloc;
1040       iterate_roots(&emb_reloc);
1041     }
1042     {
1043       tty->print_cr("Relocating external roots ... ");
1044       ResourceMark rm;
1045       RefRelocator ext_reloc;
1046       iterate_roots(&ext_reloc);
1047     }
1048 
1049 #ifdef ASSERT
1050     {
1051       tty->print_cr("Verifying external roots ... ");
1052       ResourceMark rm;
1053       IsRefInArchiveChecker checker;
1054       iterate_roots(&checker);
1055     }
1056 #endif
1057 
1058 
1059     // cleanup
1060     _ssc = NULL;
1061   }
1062 
1063   // We must relocate SystemDictionary::_well_known_klasses only after we have copied the
1064   // strings during dump_string_and_symbols(): during the string copy, we operate on old
1065   // String objects which assert that their klass is the old
1066   // SystemDictionary::String_klass().
1067   static void relocate_well_known_klasses() {
1068     {
1069       tty->print_cr("Relocating SystemDictionary::_well_known_klasses[] ... ");
1070       ResourceMark rm;
1071       RefRelocator ext_reloc;
1072       SystemDictionary::well_known_klasses_do(&ext_reloc);
1073     }
1074     // NOTE: after this point, we shouldn't have any globals that can reach the old
1075     // objects.
1076 
1077     // We cannot use any of the objects in the heap anymore (except for the objects
1078     // in the CDS shared string regions) because their headers no longer point to
1079     // valid Klasses.
1080   }
1081 
1082   static void iterate_roots(MetaspaceClosure* it) {
1083     GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols();
1084     for (int i=0; i<symbols->length(); i++) {
1085       it->push(symbols->adr_at(i));
1086     }
1087     if (_global_klass_objects != NULL) {
1088       // Need to fix up the pointers
1089       for (int i = 0; i < _global_klass_objects->length(); i++) {
1090         // NOTE -- this requires that the vtable is NOT yet patched, or else we are hosed.
1091         it->push(_global_klass_objects->adr_at(i));
1092       }
1093     }
1094     FileMapInfo::metaspace_pointers_do(it);
1095     SystemDictionary::classes_do(it);
1096     Universe::metaspace_pointers_do(it);
1097     SymbolTable::metaspace_pointers_do(it);
1098     vmSymbols::metaspace_pointers_do(it);
1099   }
1100 
1101   static Klass* get_relocated_klass(Klass* orig_klass) {
1102     address* pp = _new_loc_table->get((address)orig_klass);
1103     assert(pp != NULL, "must be");
1104     Klass* klass = (Klass*)(*pp);
1105     assert(klass->is_klass(), "must be");
1106     return klass;
1107   }
1108 };
1109 
1110 DumpAllocStats* ArchiveCompactor::_alloc_stats;
1111 SortedSymbolClosure* ArchiveCompactor::_ssc;
1112 ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table;
1113 
1114 void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx,
1115                                               DumpRegion* dump_region, bool read_only,  bool allow_exec) {
1116   mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
1117 }
1118 
1119 void VM_PopulateDumpSharedSpace::dump_string_and_symbols() {
1120   tty->print_cr("Dumping string and symbol tables ...");
1121 
1122   NOT_PRODUCT(SymbolTable::verify());
1123   NOT_PRODUCT(StringTable::verify());
1124   SymbolTable::write_to_archive();
1125 
1126   // The string space has at most two regions. See FileMapInfo::write_string_regions() for details.
1127   _string_regions = new GrowableArray<MemRegion>(2);
1128   StringTable::write_to_archive(_string_regions);
1129 }
1130 
1131 char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
1132   char* oldtop = _ro_region.top();
1133   // Reorder the system dictionary. Moving the symbols affects
1134   // how the hash table indices are calculated.
1135   SystemDictionary::reorder_dictionary_for_sharing();
1136   NOT_PRODUCT(SystemDictionary::verify();)
1137 
1138   size_t buckets_bytes = SystemDictionary::count_bytes_for_buckets();
1139   char* buckets_top = _ro_region.allocate(buckets_bytes, sizeof(intptr_t));
1140   SystemDictionary::copy_buckets(buckets_top, _ro_region.top());
1141 
1142   size_t table_bytes = SystemDictionary::count_bytes_for_table();
1143   char* table_top = _ro_region.allocate(table_bytes, sizeof(intptr_t));
1144   SystemDictionary::copy_table(table_top, _ro_region.top());
1145 
1146   // Write the other data to the output array.
1147   WriteClosure wc(&_ro_region);
1148   MetaspaceShared::serialize(&wc);
1149 
1150   char* newtop = _ro_region.top();
1151   ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - oldtop), true);
1152   return buckets_top;
1153 }
1154 
1155 void VM_PopulateDumpSharedSpace::doit() {
1156   Thread* THREAD = VMThread::vm_thread();
1157 
1158   NOT_PRODUCT(SystemDictionary::verify();)
1159   // The following guarantee is meant to ensure that no loader constraints
1160   // exist yet, since the constraints table is not shared.  This becomes
1161   // more important now that we don't re-initialize vtables/itables for
1162   // shared classes at runtime, where constraints were previously created.
1163   guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
1164             "loader constraints are not saved");
1165   guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
1166           "placeholders are not saved");
1167   // Revisit and implement this if we prelink method handle call sites:
1168   guarantee(SystemDictionary::invoke_method_table() == NULL ||
1169             SystemDictionary::invoke_method_table()->number_of_entries() == 0,
1170             "invoke method table is not saved");
1171 
1172   // At this point, many classes have been loaded.
1173   // Gather the SystemDictionary classes in a global array and operate on that, so we
1174   // don't have to walk the SystemDictionary again.
1175   _global_klass_objects = new GrowableArray<Klass*>(1000);
1176   CollectClassesClosure collect_classes;
1177   ClassLoaderDataGraph::loaded_classes_do(&collect_classes);
1178 
1179   tty->print_cr("Number of classes %d", _global_klass_objects->length());
1180   {
1181     int num_type_array = 0, num_obj_array = 0, num_inst = 0;
1182     for (int i = 0; i < _global_klass_objects->length(); i++) {
1183       Klass* k = _global_klass_objects->at(i);
1184       if (k->is_instance_klass()) {
1185         num_inst ++;
1186       } else if (k->is_objArray_klass()) {
1187         num_obj_array ++;
1188       } else {
1189         assert(k->is_typeArray_klass(), "sanity");
1190         num_type_array ++;
1191       }
1192     }
1193     tty->print_cr("    instance classes   = %5d", num_inst);
1194     tty->print_cr("    obj array classes  = %5d", num_obj_array);
1195     tty->print_cr("    type array classes = %5d", num_type_array);
1196   }
1197 
1198 
1199   // Ensure the ConstMethods won't be modified at run-time
1200   tty->print("Updating ConstMethods ... ");
1201   rewrite_nofast_bytecodes_and_calculate_fingerprints();
1202   tty->print_cr("done. ");
1203 
1204   // Remove all references outside the metadata
1205   tty->print("Removing unshareable information ... ");
1206   remove_unshareable_in_classes();
1207   tty->print_cr("done. ");
1208 
1209   ArchiveCompactor::initialize();
1210   ArchiveCompactor::copy_and_compact();
1211 
1212   dump_string_and_symbols();
1213   ArchiveCompactor::relocate_well_known_klasses();
1214 
1215   char* read_only_tables_start = dump_read_only_tables();
1216   _ro_region.pack(&_md_region);
1217 
1218   char* vtbl_list = _md_region.top();
1219   MetaspaceShared::allocate_cpp_vtable_clones();
1220   _md_region.pack(&_od_region);
1221 
1222   // Relocate the archived class file data into the od region
1223   relocate_cached_class_file();
1224   _od_region.pack();
1225 
1226   // The 5 core spaces are allocated consecutively mc->rw->ro->md->od, so their total size
1227   // is just the span between the two ends.
1228   size_t core_spaces_size = _od_region.end() - _mc_region.base();
1229   assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
1230          "should already be aligned");
1231 
1232   // During patching, some virtual methods may be called, so at this point
1233   // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
1234   MetaspaceShared::patch_cpp_vtable_pointers();
1235 
1236   // The vtable clones contain addresses of the current process.
1237   // We don't want to write these addresses into the archive.
1238   MetaspaceShared::zero_cpp_vtable_clones_for_writing();
1239 
1240   // Create and write the archive file that maps the shared spaces.
1241 
1242   FileMapInfo* mapinfo = new FileMapInfo();
1243   mapinfo->populate_header(os::vm_allocation_granularity());
1244   mapinfo->set_read_only_tables_start(read_only_tables_start);
1245   mapinfo->set_misc_data_patching_start(vtbl_list);
1246   mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers());
1247   mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size());
1248   mapinfo->set_core_spaces_size(core_spaces_size);
1249 
1250   char* s0_start, *s0_top, *s0_end;
1251   char* s1_start, *s1_top, *s1_end;
1252 
1253   for (int pass=1; pass<=2; pass++) {
1254     if (pass == 1) {
1255       // The first pass doesn't actually write the data to disk. All it
1256       // does is update the fields in mapinfo->_header.
1257     } else {
1258       // After the first pass, the contents of mapinfo->_header are finalized,
1259       // so we can compute the header's CRC, and write the contents of the header
1260       // and the regions to disk.
1261       mapinfo->open_for_write();
1262       mapinfo->set_header_crc(mapinfo->compute_header_crc());
1263     }
1264     mapinfo->write_header();
1265 
1266     // NOTE: mc contains the trampoline code for method entries, which are patched at run time,
1267     // so it needs to be read/write.
1268     write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
1269     write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
1270     write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
1271     write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
1272     write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);
1273 
1274     mapinfo->write_string_regions(_string_regions,
1275                                   &s0_start, &s0_top, &s0_end,
1276                                   &s1_start, &s1_top, &s1_end);
1277   }
1278 
1279   mapinfo->close();
1280 
1281   // Restore the vtable in case we invoke any virtual methods.
1282   MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);
1283 
1284   _s0_region.init(s0_start, s0_top, s0_end);
1285   _s1_region.init(s1_start, s1_top, s1_end);
1286   print_region_stats();
1287 
1288   if (log_is_enabled(Info, cds)) {
1289     ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
1290                                                  int(_mc_region.used()), int(_md_region.used()));
1291   }
1292 }
1293 
1294 void VM_PopulateDumpSharedSpace::print_region_stats() {
1295   // Print statistics of all the regions
1296   const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
1297                                 _mc_region.reserved() + _md_region.reserved() +
1298                                 _od_region.reserved() +
1299                                 _s0_region.reserved() + _s1_region.reserved();
1300   const size_t total_bytes = _ro_region.used() + _rw_region.used() +
1301                              _mc_region.used() + _md_region.used() +
1302                              _od_region.used() +
1303                              _s0_region.used() + _s1_region.used();
1304   const double total_u_perc = total_bytes / double(total_reserved) * 100.0;
1305 
1306   _mc_region.print(total_reserved);
1307   _rw_region.print(total_reserved);
1308   _ro_region.print(total_reserved);
1309   _md_region.print(total_reserved);
1310   _od_region.print(total_reserved);
1311   _s0_region.print(total_reserved);
1312   _s1_region.print(total_reserved);
1313 
1314   tty->print_cr("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
1315                  total_bytes, total_reserved, total_u_perc);
1316 }
1317 
1318 
1319 // Update a Java object to point its Klass* to the new location after
1320 // shared archive has been compacted.
1321 void MetaspaceShared::relocate_klass_ptr(oop o) {
1322   assert(DumpSharedSpaces, "sanity");
1323   Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
1324   o->set_klass(k);
1325 }
1326 
1327 class LinkSharedClassesClosure : public KlassClosure {
1328   Thread* THREAD;
1329   bool    _made_progress;
1330  public:
1331   LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {}
1332 
1333   void reset()               { _made_progress = false; }
1334   bool made_progress() const { return _made_progress; }
1335 
1336   void do_klass(Klass* k) {
1337     if (k->is_instance_klass()) {
1338       InstanceKlass* ik = InstanceKlass::cast(k);
1339       // Link the class to cause the bytecodes to be rewritten and the
1340       // cpcache to be created. Class verification is done according
1341       // to -Xverify setting.
1342       _made_progress |= MetaspaceShared::try_link_class(ik, THREAD);
1343       guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
1344     }
1345   }
1346 };
1347 
1348 class CheckSharedClassesClosure : public KlassClosure {
1349   bool    _made_progress;
1350  public:
1351   CheckSharedClassesClosure() : _made_progress(false) {}
1352 
1353   void reset()               { _made_progress = false; }
1354   bool made_progress() const { return _made_progress; }
1355   void do_klass(Klass* k) {
1356     if (k->is_instance_klass() && InstanceKlass::cast(k)->check_sharing_error_state()) {
1357       _made_progress = true;
1358     }
1359   }
1360 };
1361 
1362 void MetaspaceShared::check_shared_class_loader_type(Klass* k) {
1363   if (k->is_instance_klass()) {
1364     InstanceKlass* ik = InstanceKlass::cast(k);
1365     u2 loader_type = ik->loader_type();
1366     ResourceMark rm;
1367     guarantee(loader_type != 0,
1368               "Class loader type is not set for this class %s", ik->name()->as_C_string());
1369   }
1370 }
1371 
1372 void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) {
1373   // We need to iterate because verification may cause additional classes
1374   // to be loaded.
1375   LinkSharedClassesClosure link_closure(THREAD);
1376   do {
1377     link_closure.reset();
1378     ClassLoaderDataGraph::loaded_classes_do(&link_closure);
1379     guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
1380   } while (link_closure.made_progress());
1381 
1382   if (_has_error_classes) {
1383     // Mark all classes whose super class or interfaces failed verification.
1384     CheckSharedClassesClosure check_closure;
1385     do {
1386       // It is not clear whether a single pass is sufficient, but we only get
1387       // here if there are unverifiable classes, which should not happen in
1388       // normal cases, so iterate until no further progress is made, to be safe.
1389       check_closure.reset();
1390       ClassLoaderDataGraph::loaded_classes_do(&check_closure);
1391     } while (check_closure.made_progress());
1392 
1393     if (IgnoreUnverifiableClassesDuringDump) {
1394       // This is useful when running JCK or SQE tests. You should not
1395       // enable this when running real apps.
1396       SystemDictionary::remove_classes_in_error_state();
1397     } else {
1398       tty->print_cr("Please remove the unverifiable classes from your class list and try again");
1399       exit(1);
1400     }
1401   }
1402 
1403   // Copy the verification constraints from C_HEAP-allocated GrowableArrays to
1404   // RO-allocated Arrays.
1405   SystemDictionaryShared::finalize_verification_constraints();
1406 }
1407 
1408 void MetaspaceShared::prepare_for_dumping() {
1409   Arguments::check_unsupported_dumping_properties();
1410   ClassLoader::initialize_shared_path();
1411   FileMapInfo::allocate_classpath_entry_table();
1412 }
1413 
1414 // Preload classes from a list, populate the shared spaces and dump to a
1415 // file.
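     //
     // For example, a dump is typically triggered with something like:
     //   java -Xshare:dump -XX:SharedClassListFile=<list> -XX:SharedArchiveFile=<archive>
     // If -XX:SharedClassListFile is not specified, the default classlist shipped
     // with the JDK image is used (see the path construction below).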
1416 void MetaspaceShared::preload_and_dump(TRAPS) {
1417   { TraceTime timer("Dump Shared Spaces", TRACETIME_LOG(Info, startuptime));
1418     ResourceMark rm;
1419     char class_list_path_str[JVM_MAXPATHLEN];
1420     // Preload classes to be shared.
1421     // TODO: should use an os:: method rather than fopen() here.
1422     const char* class_list_path;
1423     if (SharedClassListFile == NULL) {
1424       // Construct the path to the class list (in jre/lib)
1425       // Walk up two directories from the location of the VM and
1426       // optionally tack on "lib" (depending on platform)
1427       os::jvm_path(class_list_path_str, sizeof(class_list_path_str));
1428       for (int i = 0; i < 3; i++) {
1429         char *end = strrchr(class_list_path_str, *os::file_separator());
1430         if (end != NULL) *end = '\0';
1431       }
1432       int class_list_path_len = (int)strlen(class_list_path_str);
1433       if (class_list_path_len >= 3) {
1434         if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
1435           if (class_list_path_len < JVM_MAXPATHLEN - 4) {
1436             jio_snprintf(class_list_path_str + class_list_path_len,
1437                          sizeof(class_list_path_str) - class_list_path_len,
1438                          "%slib", os::file_separator());
1439             class_list_path_len += 4;
1440           }
1441         }
1442       }
1443       if (class_list_path_len < JVM_MAXPATHLEN - 10) {
1444         jio_snprintf(class_list_path_str + class_list_path_len,
1445                      sizeof(class_list_path_str) - class_list_path_len,
1446                      "%sclasslist", os::file_separator());
1447       }
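           // The resulting default path is typically <java_home>/lib/classlist.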
1448       class_list_path = class_list_path_str;
1449     } else {
1450       class_list_path = SharedClassListFile;
1451     }
1452 
1453     tty->print_cr("Loading classes to share ...");
1454     _has_error_classes = false;
1455     int class_count = preload_classes(class_list_path, THREAD);
1456     if (ExtraSharedClassListFile) {
1457       class_count += preload_classes(ExtraSharedClassListFile, THREAD);
1458     }
1459     tty->print_cr("Loading classes to share: done.");
1460 
1461     log_info(cds)("Shared spaces: preloaded %d classes", class_count);
1462 
1463     // Rewrite and link classes
1464     tty->print_cr("Rewriting and linking classes ...");
1465 
1466     // Link any classes which got missed. This would happen if we have loaded classes that
1467     // were not explicitly specified in the classlist. E.g., if an interface implemented by class K
1468     // fails verification, all other interfaces that were not specified in the classlist but
1469     // are implemented by K are not verified.
1470     link_and_cleanup_shared_classes(CATCH);
1471     tty->print_cr("Rewriting and linking classes: done");
1472 
1473     VM_PopulateDumpSharedSpace op;
1474     VMThread::execute(&op);
1475   }
1476 
1477   if (PrintSystemDictionaryAtExit) {
1478     SystemDictionary::print();
1479   }
1480 
1481   // Since various initialization steps have been undone by this process,
1482   // it is not safe to continue running a normal Java process.
1483   exit(0);
1484 }
1485 
1486 
1487 int MetaspaceShared::preload_classes(const char* class_list_path, TRAPS) {
1488   ClassListParser parser(class_list_path);
1489   int class_count = 0;
1490 
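       // Each line of the class list names one class to load; see ClassListParser
       // for the accepted syntax. A minimal list might simply contain internal
       // class names, one per line, e.g.:
       //   java/lang/Object
       //   java/lang/String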
1491   while (parser.parse_one_line()) {
1492     Klass* klass = ClassLoaderExt::load_one_class(&parser, THREAD);
1493 
1494     CLEAR_PENDING_EXCEPTION;
1495     if (klass != NULL) {
1496       if (log_is_enabled(Trace, cds)) {
1497         ResourceMark rm;
1498         log_trace(cds)("Shared spaces preloaded: %s", klass->external_name());
1499       }
1500 
1501       InstanceKlass* ik = InstanceKlass::cast(klass);
1502 
1503       // Link the class to cause the bytecodes to be rewritten and the
1504       // cpcache to be created. The linking is done as soon as classes
1505       // are loaded so that the related data structures (klass and
1506       // cpCache) are located close together.
1507       try_link_class(ik, THREAD);
1508       guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
1509 
1510       class_count++;
1511     }
1512   }
1513 
1514   return class_count;
1515 }
1516 
1517 // Returns true if the class's status has changed
1518 bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
1519   assert(DumpSharedSpaces, "should only be called during dumping");
1520   if (ik->init_state() < InstanceKlass::linked) {
1521     bool saved = BytecodeVerificationLocal;
1522     if (!(ik->is_shared_boot_class())) {
1523       // The verification decision is based on BytecodeVerificationRemote
1524       // for non-system classes. Since we are using the NULL classloader
1525       // to load non-system classes during dumping, we need to temporarily
1526       // change BytecodeVerificationLocal to be the same as
1527       // BytecodeVerificationRemote. Note that this can also cause the parent
1528       // system classes to be verified. The extra overhead is acceptable during
1529       // dumping.
1530       BytecodeVerificationLocal = BytecodeVerificationRemote;
1531     }
1532     ik->link_class(THREAD);
1533     if (HAS_PENDING_EXCEPTION) {
1534       ResourceMark rm;
1535       tty->print_cr("Preload Warning: Verification failed for %s",
1536                     ik->external_name());
1537       CLEAR_PENDING_EXCEPTION;
1538       ik->set_in_error_state();
1539       _has_error_classes = true;
1540     }
1541     BytecodeVerificationLocal = saved;
1542     return true;
1543   } else {
1544     return false;
1545   }
1546 }
1547 
1548 // Closure for reading (deserializing) initialization data from a data area
1549 // (ptr_array) read from the shared file.
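     // The data is a flat stream of intptr_t-sized slots; small negative values
     // written as tags (see do_tag) act as consistency checks between the dump-time
     // writer and this run-time reader.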
1550 
1551 class ReadClosure : public SerializeClosure {
1552 private:
1553   intptr_t** _ptr_array;
1554 
1555   inline intptr_t nextPtr() {
1556     return *(*_ptr_array)++;
1557   }
1558 
1559 public:
1560   ReadClosure(intptr_t** ptr_array) { _ptr_array = ptr_array; }
1561 
1562   void do_ptr(void** p) {
1563     assert(*p == NULL, "initializing a previously initialized pointer.");
1564     intptr_t obj = nextPtr();
1565     assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
1566            "hit tag while initializing ptrs.");
1567     *p = (void*)obj;
1568   }
1569 
1570   void do_u4(u4* p) {
1571     intptr_t obj = nextPtr();
1572     *p = (u4)(uintx(obj));
1573   }
1574 
1575   void do_tag(int tag) {
1576     int old_tag = (int)(intptr_t)nextPtr();
1579     assert(tag == old_tag, "old tag doesn't match");
1580     FileMapInfo::assert_mark(tag == old_tag);
1581   }
1582 
1583   void do_region(u_char* start, size_t size) {
1584     assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
1585     assert(size % sizeof(intptr_t) == 0, "bad size");
1586     do_tag((int)size);
1587     while (size > 0) {
1588       *(intptr_t*)start = nextPtr();
1589       start += sizeof(intptr_t);
1590       size -= sizeof(intptr_t);
1591     }
1592   }
1593 
1594   bool reading() const { return true; }
1595 };
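     //
     // The serialize() call below replays, at run time, the same sequence of
     // do_tag()/do_ptr()/do_region() calls that was made when the archive was
     // written; a hypothetical fragment of such a routine might look like:
     //   soc->do_tag(--tag);                  // consistency marker
     //   soc->do_ptr((void**)&_some_table);   // read or write one pointer slot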
1596 
1597 // Return true if the given address is in the mapped shared space.
1598 bool MetaspaceShared::is_in_shared_space(const void* p) {
1599   return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_space(p);
1600 }
1601 
1602 // Return true if the given address is in the shared region identified by idx.
1603 bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {
1604   return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx);
1605 }
1606 
1607 bool MetaspaceShared::is_in_trampoline_frame(address addr) {
1608   if (UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc)) {
1609     return true;
1610   }
1611   return false;
1612 }
1613 
1614 void MetaspaceShared::print_shared_spaces() {
1615   if (UseSharedSpaces) {
1616     FileMapInfo::current_info()->print_shared_spaces();
1617   }
1618 }
1619 
1620 
1621 // Map shared spaces at requested addresses and return if succeeded.
1622 bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
1623   size_t image_alignment = mapinfo->alignment();
1624 
1625 #ifndef _WINDOWS
1626   // Map in the shared memory and then map the regions on top of it.
1627   // On Windows, don't map the memory here because it will cause the
1628   // mappings of the regions to fail.
1629   ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
1630   if (!shared_rs.is_reserved()) return false;
1631 #endif
1632 
1633   assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
1634 
1635   char* _ro_base = NULL;
1636   char* _rw_base = NULL;
1637   char* _mc_base = NULL;
1638   char* _md_base = NULL;
1639   char* _od_base = NULL;
1640 
1641   // Map each shared region
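       // The regions are mapped at the addresses recorded in the archive header; if
       // any mapping or checksum verification fails, fall through to the cleanup
       // path below, which either disables CDS or exits when -Xshare:on was given.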
1642   if ((_mc_base = mapinfo->map_region(mc)) != NULL &&
1643       mapinfo->verify_region_checksum(mc) &&
1644       (_rw_base = mapinfo->map_region(rw)) != NULL &&
1645       mapinfo->verify_region_checksum(rw) &&
1646       (_ro_base = mapinfo->map_region(ro)) != NULL &&
1647       mapinfo->verify_region_checksum(ro) &&
1648       (_md_base = mapinfo->map_region(md)) != NULL &&
1649       mapinfo->verify_region_checksum(md) &&
1650       (_od_base = mapinfo->map_region(od)) != NULL &&
1651       mapinfo->verify_region_checksum(od) &&
1652       (image_alignment == (size_t)os::vm_allocation_granularity()) &&
1653       mapinfo->validate_classpath_entry_table()) {
1654     // Success (no need to do anything)
1655     return true;
1656   } else {
1657     // If there was a failure in mapping any of the spaces, unmap the ones
1658     // that succeeded
1659     if (_ro_base != NULL) mapinfo->unmap_region(ro);
1660     if (_rw_base != NULL) mapinfo->unmap_region(rw);
1661     if (_mc_base != NULL) mapinfo->unmap_region(mc);
1662     if (_md_base != NULL) mapinfo->unmap_region(md);
1663     if (_od_base != NULL) mapinfo->unmap_region(od);
1664 #ifndef _WINDOWS
1665     // Release the entire mapped region
1666     shared_rs.release();
1667 #endif
1668     // If -Xshare:on is specified, print the error message and exit the VM;
1669     // otherwise, set UseSharedSpaces to false and continue.
1670     if (RequireSharedSpaces || PrintSharedArchiveAndExit) {
1671       vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
1672     } else {
1673       FLAG_SET_DEFAULT(UseSharedSpaces, false);
1674     }
1675     return false;
1676   }
1677 }
1678 
1679 // Read the miscellaneous data from the shared file, and
1680 // deserialize it into its various destinations.
1681 
1682 void MetaspaceShared::initialize_shared_spaces() {
1683   FileMapInfo *mapinfo = FileMapInfo::current_info();
1684   _cds_i2i_entry_code_buffers = mapinfo->cds_i2i_entry_code_buffers();
1685   _cds_i2i_entry_code_buffers_size = mapinfo->cds_i2i_entry_code_buffers_size();
1686   _core_spaces_size = mapinfo->core_spaces_size();
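       // clone_cpp_vtables() fills in the C++ vtable clones in the misc data area,
       // so that archived metadata objects, whose vptrs were redirected into that
       // area at dump time, dispatch virtual calls correctly in this process.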
1687   char* buffer = mapinfo->misc_data_patching_start();
1688   clone_cpp_vtables((intptr_t*)buffer);
1689 
1690   // The rest of the data is stored in the RO region (read-only tables)
1691   buffer = mapinfo->read_only_tables_start();
1692   int sharedDictionaryLen = *(intptr_t*)buffer;
1693   buffer += sizeof(intptr_t);
1694   int number_of_entries = *(intptr_t*)buffer;
1695   buffer += sizeof(intptr_t);
1696   SystemDictionary::set_shared_dictionary((HashtableBucket<mtClass>*)buffer,
1697                                           sharedDictionaryLen,
1698                                           number_of_entries);
1699   buffer += sharedDictionaryLen;
1700 
1701   // The following data are the linked list elements
1702   // (HashtableEntry objects) for the shared dictionary table.
1703 
1704   int len = *(intptr_t*)buffer;     // skip over shared dictionary entries
1705   buffer += sizeof(intptr_t);
1706   buffer += len;
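       // 'buffer' has now been advanced past the shared dictionary's bucket table
       // and entry block, and points at the serialized misc data consumed by the
       // ReadClosure below.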
1707 
1708   // Verify various attributes of the archive, plus initialize the
1709   // shared string/symbol tables
1710   intptr_t* array = (intptr_t*)buffer;
1711   ReadClosure rc(&array);
1712   serialize(&rc);
1713 
1714   // Initialize the run-time symbol table.
1715   SymbolTable::create_table();
1716 
1717   // Close the mapinfo file
1718   mapinfo->close();
1719 
1720   if (PrintSharedArchiveAndExit) {
1721     if (PrintSharedDictionary) {
1722       tty->print_cr("\nShared classes:\n");
1723       SystemDictionary::print_shared(false);
1724     }
1725     if (_archive_loading_failed) {
1726       tty->print_cr("archive is invalid");
1727       vm_exit(1);
1728     } else {
1729       tty->print_cr("archive is valid");
1730       vm_exit(0);
1731     }
1732   }
1733 }
1734 
1735 void MetaspaceShared::fixup_shared_string_regions() {
1736   FileMapInfo *mapinfo = FileMapInfo::current_info();
1737   mapinfo->fixup_string_regions();
1738 }
1739 
1740 // JVM/TI RedefineClasses() support:
1741 bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
1742   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1743 
1744   if (UseSharedSpaces) {
1745     // remap the shared readonly space to shared readwrite, private
1746     FileMapInfo* mapinfo = FileMapInfo::current_info();
1747     if (!mapinfo->remap_shared_readonly_as_readwrite()) {
1748       return false;
1749     }
1750     _remapped_readwrite = true;
1751   }
1752   return true;
1753 }
1754 
1755 void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
1756   // This is highly unlikely to happen on 64-bit platforms because we have reserved 4GB of space.
1757   // On 32-bit platforms we reserve only 256MB, so you could run out of space with roughly
1758   // 100,000 classes.
1759   _mc_region.print_out_of_space_msg(name, needed_bytes);
1760   _rw_region.print_out_of_space_msg(name, needed_bytes);
1761   _ro_region.print_out_of_space_msg(name, needed_bytes);
1762   _md_region.print_out_of_space_msg(name, needed_bytes);
1763   _od_region.print_out_of_space_msg(name, needed_bytes);
1764 
1765   vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
1766                                 "Please reduce the number of shared classes.");
1767 }