/*
 * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classListParser.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/loaderConstraints.hpp"
#include "classfile/placeholders.hpp"
#include "classfile/sharedClassUtil.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/gcLocker.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodes.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "memory/filemap.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/os.hpp"
#include "runtime/signature.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/hashtable.inline.hpp"

ReservedSpace MetaspaceShared::_shared_rs;
VirtualSpace MetaspaceShared::_shared_vs;
MetaspaceSharedStats MetaspaceShared::_stats;
bool MetaspaceShared::_has_error_classes;
bool MetaspaceShared::_archive_loading_failed = false;
bool MetaspaceShared::_remapped_readwrite = false;
address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
size_t MetaspaceShared::_core_spaces_size = 0;

// The CDS archive is divided into 6 regions:
//     mc - misc code (the method entry trampolines)
//     rw - read-write metadata
//     ro - read-only metadata and read-only tables
//     md - misc data (the c++ vtables)
//     od - other data (original class files)
//     st - shared strings
//
// Except for the st region, the other 5 regions are linearly allocated, starting from
// SharedBaseAddress, in the order of mc->rw->ro->md->od. The sizes of these 5 regions
// are page-aligned, and there's no gap between any consecutive regions.
//
// These 5 regions are populated in the following steps:
// [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
//     temporarily allocated outside of the shared regions. Only the method entry
//     trampolines are written into the mc region.
// [2] ArchiveCompactor copies RW metadata into the rw region.
// [3] ArchiveCompactor copies RO metadata into the ro region.
// [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
//     are copied into the ro region as read-only tables.
// [5] C++ vtables are copied into the md region.
// [6] Original class files are copied into the od region.
//
// The st region is populated inside VM_PopulateDumpSharedSpace::dump_string_and_symbols().
// Its layout is independent of the other 5 regions.
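//
// For illustration, the dump-time address layout looks roughly like this (addresses grow
// to the right; each region's end is aligned up to Metaspace::reserve_alignment() by
// DumpRegion::pack(), so consecutive regions abut exactly):
//
//   SharedBaseAddress
//   |
//   v
//   +----+----+----+----+----+
//   | mc | rw | ro | md | od |      (st is laid out independently)
//   +----+----+----+----+----+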

class DumpRegion {
private:
  const char* _name;
  char* _base;
  char* _top;
  char* _end;
  bool _is_packed;

  char* expand_top_to(char* newtop) {
    assert(is_allocatable(), "must be initialized and not packed");
    assert(newtop >= _top, "must not grow backwards");
    if (newtop > _end) {
      MetaspaceShared::report_out_of_space(_name, newtop - _top);
      ShouldNotReachHere();
    }
    MetaspaceShared::commit_shared_space_to(newtop);
    _top = newtop;
    return _top;
  }

public:
  DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}

  char* allocate(size_t num_bytes, size_t alignment=BytesPerWord) {
    char* p = (char*)align_ptr_up(_top, alignment);
    char* newtop = p + align_size_up(num_bytes, alignment);
    expand_top_to(newtop);
    memset(p, 0, newtop - p);
    return p;
  }

  void append_intptr_t(intptr_t n) {
    assert(is_ptr_aligned(_top, sizeof(intptr_t)), "bad alignment");
    intptr_t *p = (intptr_t*)_top;
    char* newtop = _top + sizeof(intptr_t);
    expand_top_to(newtop);
    *p = n;
  }

  char* base()      const { return _base;        }
  char* top()       const { return _top;         }
  char* end()       const { return _end;         }
  size_t reserved() const { return _end - _base; }
  size_t used()     const { return _top - _base; }
  bool is_packed()  const { return _is_packed;   }
  bool is_allocatable() const {
    return !is_packed() && _base != NULL;
  }

  double perc(size_t used, size_t total) const {
    if (total == 0) {total = 1;}
    return used / double(total) * 100.0;
  }

  void print(size_t total_bytes) const {
    tty->print_cr("%s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
                  _name, used(), perc(used(), total_bytes), reserved(), perc(used(), reserved()), p2i(_base));
  }
  void print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
    tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
               _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
    if (strcmp(_name, failing_region) == 0) {
      tty->print_cr(" required = %d", int(needed_bytes));
    } else {
      tty->cr();
    }
  }

  void init(const ReservedSpace* rs) {
    _base = _top = rs->base();
    _end = rs->end();
  }
  void init(char* b, char* t, char* e) {
    _base = b;
    _top = t;
    _end = e;
  }

  void pack(DumpRegion* next = NULL) {
    assert(!is_packed(), "sanity");
    _end = (char*)align_ptr_up(_top, Metaspace::reserve_alignment());
    _is_packed = true;
    if (next != NULL) {
      next->_base = next->_top = this->_end;
      next->_end = MetaspaceShared::shared_rs()->end();
    }
  }
  bool contains(char* p) {
    return base() <= p && p < top();
  }
};
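
// A sketch of how these regions are used at dump time (see ArchiveCompactor::copy_and_compact()
// and VM_PopulateDumpSharedSpace::doit() below):
//
//   _mc_region.init(&_shared_rs);   // mc starts at the bottom of the shared space
//   ... write method entry trampolines into _mc_region ...
//   _mc_region.pack(&_rw_region);   // close mc; rw starts where mc ends
//   ... copy RW metadata into _rw_region ...
//   _rw_region.pack(&_ro_region);   // and so on for ro, md and od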

DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md");
DumpRegion _st_region("st"), _od_region("od");

char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
  return _mc_region.allocate(num_bytes);
}

char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
  return _ro_region.allocate(num_bytes);
}

void MetaspaceShared::initialize_shared_rs() {
  const size_t reserve_alignment = Metaspace::reserve_alignment();
  bool large_pages = false; // No large pages when dumping the CDS archive.
  char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, reserve_alignment);

#ifdef _LP64
  const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
  const size_t cds_total = align_size_down(UnscaledClassSpaceMax, reserve_alignment);
#else
  // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
  size_t cds_total = align_size_down(256*M, reserve_alignment);
#endif

  // First try to reserve the space at the specified SharedBaseAddress.
  _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
  if (_shared_rs.is_reserved()) {
    assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
  } else {
    // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
    _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages);
  }
  if (!_shared_rs.is_reserved()) {
    vm_exit_during_initialization("Unable to reserve memory for shared space",
                                  err_msg(SIZE_FORMAT " bytes.", cds_total));
  }

#ifdef _LP64
  // During dump time, we allocate 4GB (UnscaledClassSpaceMax) of space and split it up:
  // + The upper 1 GB is used as the "temporary compressed class space" -- preload_classes()
  //   will store Klasses into this space.
  // + The lower 3 GB is used for the archive -- when preload_classes() is done,
  //   ArchiveCompactor will copy the class metadata into this space, first the RW parts,
  //   then the RO parts.

  assert(UseCompressedOops && UseCompressedClassPointers,
      "UseCompressedOops and UseCompressedClassPointers must be set");

  size_t max_archive_size = align_size_down(cds_total * 3 / 4, reserve_alignment);
  ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
  CompressedClassSpaceSize = align_size_down(tmp_class_space.size(), reserve_alignment);
  _shared_rs = _shared_rs.first_part(max_archive_size);

  // Set up compressed class pointers.
  Universe::set_narrow_klass_base((address)_shared_rs.base());
  if (UseAOT || cds_total > UnscaledClassSpaceMax) {
    // AOT forces narrow_klass_shift=LogKlassAlignmentInBytes
    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
  } else {
    Universe::set_narrow_klass_shift(0);
  }

  Metaspace::initialize_class_space(tmp_class_space);
  tty->print_cr("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());

  tty->print_cr("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                CompressedClassSpaceSize, p2i(tmp_class_space.base()));
#endif

  // Start with 0 committed bytes. The memory will be committed as needed by
  // MetaspaceShared::commit_shared_space_to().
  if (!_shared_vs.initialize(_shared_rs, 0)) {
    vm_exit_during_initialization("Unable to allocate memory for shared space");
  }

  _mc_region.init(&_shared_rs);
  tty->print_cr("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                _shared_rs.size(), p2i(_shared_rs.base()));
}

void MetaspaceShared::commit_shared_space_to(char* newtop) {
  assert(DumpSharedSpaces, "dump-time only");
  char* base = _shared_rs.base();
  size_t need_committed_size = newtop - base;
  size_t has_committed_size = _shared_vs.committed_size();
  if (need_committed_size < has_committed_size) {
    return;
  }

  size_t min_bytes = need_committed_size - has_committed_size;
  size_t preferred_bytes = 1 * M;
  size_t uncommitted = _shared_vs.reserved_size() - has_committed_size;

  size_t commit = MAX2(min_bytes, preferred_bytes);
  commit = MIN2(commit, uncommitted);
  assert(commit <= uncommitted, "sanity");

  bool result = _shared_vs.expand_by(commit, false);
  assert(result, "Failed to commit memory");

  log_info(cds)("Expanding shared spaces by %7d bytes [total %8d bytes ending at %p]",
                int(commit), int(_shared_vs.actual_committed_size()), _shared_vs.high());
}
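
// For example: if 3 MB is already committed and expand_top_to() asks for 3 MB + 100 KB,
// then min_bytes is 100 KB and a full 1 MB (preferred_bytes) is committed, unless less
// than 1 MB of reserved space remains, in which case only the remainder is committed.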

// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.

void MetaspaceShared::serialize(SerializeClosure* soc) {
  int tag = 0;
  soc->do_tag(--tag);

  // Verify the sizes of various metadata in the system.
  soc->do_tag(sizeof(Method));
  soc->do_tag(sizeof(ConstMethod));
  soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(ConstantPool));
  soc->do_tag(sizeof(ConstantPoolCache));
  soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
  soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(Symbol));

  // Dump/restore miscellaneous metadata.
  Universe::serialize(soc, true);
  soc->do_tag(--tag);

  // Dump/restore references to commonly used names and signatures.
  vmSymbols::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore the symbol and string tables
  SymbolTable::serialize(soc);
  StringTable::serialize(soc);
  soc->do_tag(--tag);

  soc->do_tag(666);
}
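
// The same serialize() sequence is replayed at run time by a reading SerializeClosure
// (one whose reading() returns true): each do_tag() call then verifies that the value
// stored in the archive matches the value computed by the current VM (e.g., sizeof(Method)),
// so an archive built by an incompatible VM is rejected instead of being misinterpreted.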

address MetaspaceShared::cds_i2i_entry_code_buffers(size_t total_size) {
  if (DumpSharedSpaces) {
    if (_cds_i2i_entry_code_buffers == NULL) {
      _cds_i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
      _cds_i2i_entry_code_buffers_size = total_size;
    }
  } else if (UseSharedSpaces) {
    assert(_cds_i2i_entry_code_buffers != NULL, "must already be initialized");
  } else {
    return NULL;
  }

  assert(_cds_i2i_entry_code_buffers_size == total_size, "must not change");
  return _cds_i2i_entry_code_buffers;
}

// CDS code for dumping shared archive.

// Global object for holding classes that have been loaded.  Since this
// is run at a safepoint just before exit, this is the entire set of classes.
static GrowableArray<Klass*>* _global_klass_objects;
class CollectClassesClosure : public KlassClosure {
  void do_klass(Klass* k) {
    if (!(k->is_instance_klass() && InstanceKlass::cast(k)->is_in_error_state())) {
      _global_klass_objects->append_if_missing(k);
    }
  }
};

static void remove_unshareable_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    k->remove_unshareable_info();
  }
}

static void rewrite_nofast_bytecode(Method* method) {
  RawBytecodeStream bcs(method);
  while (!bcs.is_last_bytecode()) {
    Bytecodes::Code opcode = bcs.raw_next();
    switch (opcode) {
    case Bytecodes::_getfield:      *bcs.bcp() = Bytecodes::_nofast_getfield;      break;
    case Bytecodes::_putfield:      *bcs.bcp() = Bytecodes::_nofast_putfield;      break;
    case Bytecodes::_aload_0:       *bcs.bcp() = Bytecodes::_nofast_aload_0;       break;
    case Bytecodes::_iload: {
      if (!bcs.is_wide()) {
        *bcs.bcp() = Bytecodes::_nofast_iload;
      }
      break;
    }
    default: break;
    }
  }
}

// Walk all methods in the class list to ensure that they won't be modified at
// run time. This includes:
// [1] Rewrite all bytecodes as needed, so that the ConstMethod* will not be modified
//     at run time by RewriteBytecodes/RewriteFrequentPairs
// [2] Assign a fingerprint, so one doesn't need to be assigned at run-time.
static void rewrite_nofast_bytecodes_and_calculate_fingerprints() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      for (int i = 0; i < ik->methods()->length(); i++) {
        Method* m = ik->methods()->at(i);
        rewrite_nofast_bytecode(m);
        Fingerprinter fp(m);
        // The side effect of this call sets method's fingerprint field.
        fp.fingerprint();
      }
    }
  }
}

static void relocate_cached_class_file() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      JvmtiCachedClassFileData* p = ik->get_archived_class_data();
      if (p != NULL) {
        int size = offset_of(JvmtiCachedClassFileData, data) + p->length;
        JvmtiCachedClassFileData* q = (JvmtiCachedClassFileData*)_od_region.allocate(size);
        q->length = p->length;
        memcpy(q->data, p->data, p->length);
        ik->set_archived_class_data(q);
      }
    }
  }
}

// Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
// (In GCC this is the field <Type>::_vptr, i.e., first word in the object.)
//
// Addresses of the vtables and the methods may be different across JVM runs,
// if libjvm.so is dynamically loaded at a different base address.
//
// To ensure that the Metadata objects in the CDS archive always have the correct vtable:
//
// + at dump time:  we redirect the _vptr to point to our own vtables inside
//                  the CDS image
// + at run time:   we clone the actual contents of the vtables from libjvm.so
//                  into our own tables.
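//
// For example, patch() below performs, in effect (a simplified sketch):
//
//   InstanceKlass* ik = ...;              // a metadata object about to be archived
//   *(void**)ik = _info->cloned_vtable(); // _vptr now points into the md region
//
// and at run time clone_cpp_vtables() refills those md-region slots by copying from a
// live temporary object of the same type, making the archived _vptr values valid again.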

// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
#define CPP_VTABLE_PATCH_TYPES_DO(f) \
  f(ConstantPool) \
  f(InstanceKlass) \
  f(InstanceClassLoaderKlass) \
  f(InstanceMirrorKlass) \
  f(InstanceRefKlass) \
  f(Method) \
  f(ObjArrayKlass) \
  f(TypeArrayKlass)

class CppVtableInfo {
  intptr_t _vtable_size;
  intptr_t _cloned_vtable[1];
public:
  static int num_slots(int vtable_size) {
    return 1 + vtable_size; // Need to add the space occupied by _vtable_size.
  }
  int vtable_size()           { return int(uintx(_vtable_size)); }
  void set_vtable_size(int n) { _vtable_size = intptr_t(n); }
  intptr_t* cloned_vtable()   { return &_cloned_vtable[0]; }
  void zero()                 { memset(_cloned_vtable, 0, sizeof(intptr_t) * vtable_size()); }
  // Returns the size, in bytes, of a CppVtableInfo with the given number of vtable entries,
  // i.e., the offset at which the next CppVtableInfo can be placed immediately after this one.
  static size_t byte_size(int vtable_size) {
    CppVtableInfo i;
    return pointer_delta(&i._cloned_vtable[vtable_size], &i, sizeof(u1));
  }
};
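
// The resulting layout of one CppVtableInfo in the md region is (each cell is one
// intptr_t; n == vtable_size()):
//
//   +--------------+---------------+---------------+-----+-------------------+
//   | _vtable_size | cloned slot 0 | cloned slot 1 | ... | cloned slot n - 1 |
//   +--------------+---------------+---------------+-----+-------------------+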

template <class T> class CppVtableCloner : public T {
  static intptr_t* vtable_of(Metadata& m) {
    return *((intptr_t**)&m);
  }
  static CppVtableInfo* _info;

  static int get_vtable_length(const char* name);

public:
  // Allocate a CppVtableInfo for type T in the md region and initialize its cloned vtable.
  static intptr_t* allocate(const char* name);

  // Clone T's vtable into info; returns the first slot after the cloned vtable.
  static intptr_t* clone_vtable(const char* name, CppVtableInfo* info);

  static void zero_vtable_clone() {
    assert(DumpSharedSpaces, "dump-time only");
    _info->zero();
  }

  // Switch the vtable pointer to point to the cloned vtable.
  static void patch(Metadata* obj) {
    assert(DumpSharedSpaces, "dump-time only");
    *(void**)obj = (void*)(_info->cloned_vtable());
  }

  static bool is_valid_shared_object(const T* obj) {
    intptr_t* vptr = *(intptr_t**)obj;
    return vptr == _info->cloned_vtable();
  }
};

template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;

template <class T>
intptr_t* CppVtableCloner<T>::allocate(const char* name) {
  assert(is_ptr_aligned(_md_region.top(), sizeof(intptr_t)), "bad alignment");
  int n = get_vtable_length(name);
  _info = (CppVtableInfo*)_md_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
  _info->set_vtable_size(n);

  intptr_t* p = clone_vtable(name, _info);
  assert((char*)p == _md_region.top(), "must be");

  return p;
}

template <class T>
intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) {
  if (!DumpSharedSpaces) {
    assert(_info == 0, "_info is initialized only at dump time");
    _info = info; // Remember it -- it will be used by MetaspaceShared::is_valid_shared_method()
  }
  T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
  int n = info->vtable_size();
  intptr_t* srcvtable = vtable_of(tmp);
  intptr_t* dstvtable = info->cloned_vtable();

  // We already checked (and, if necessary, adjusted n) when the vtables were allocated, so we are
  // safe to do memcpy.
  log_debug(cds, vtables)("Copying %3d vtable entries for %s", n, name);
  memcpy(dstvtable, srcvtable, sizeof(intptr_t) * n);
  return dstvtable + n;
}

// To determine the size of the vtable for each type, we use the following
// trick by declaring 2 subclasses:
//
//   class CppVtableTesterA: public InstanceKlass {virtual void* last_virtual_method() {return NULL;} };
//   class CppVtableTesterB: public InstanceKlass {virtual int   last_virtual_method() {return 1;}    };
//
// CppVtableTesterA and CppVtableTesterB's vtables have the following properties:
// - Their size (N+1) is exactly one more than the size of InstanceKlass's vtable (N)
// - The first N entries are exactly the same as in InstanceKlass's vtable.
// - Their last entry is different.
//
// So to determine the value of N, we just walk CppVtableTesterA and CppVtableTesterB's tables
// and find the first entry that's different.
//
// This works on all C++ compilers supported by Oracle, but you may need to tweak it for more
// esoteric compilers.

template <class T> class CppVtableTesterB: public T {
public:
  virtual int last_virtual_method() {return 1;}
};

template <class T> class CppVtableTesterA : public T {
public:
  virtual void* last_virtual_method() {
    // Make this different than CppVtableTesterB::last_virtual_method so the C++
    // compiler/linker won't alias the two functions.
    return NULL;
  }
};

template <class T>
int CppVtableCloner<T>::get_vtable_length(const char* name) {
  CppVtableTesterA<T> a;
  CppVtableTesterB<T> b;

  intptr_t* avtable = vtable_of(a);
  intptr_t* bvtable = vtable_of(b);

  // Start at slot 1, because slot 0 may be RTTI (on Solaris/Sparc)
  int vtable_len = 1;
  for (; ; vtable_len++) {
    if (avtable[vtable_len] != bvtable[vtable_len]) {
      break;
    }
  }
  log_debug(cds, vtables)("Found   %3d vtable entries for %s", vtable_len, name);

  return vtable_len;
}

#define ALLOC_CPP_VTABLE_CLONE(c) \
  CppVtableCloner<c>::allocate(#c);

#define CLONE_CPP_VTABLE(c) \
  p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);

#define ZERO_CPP_VTABLE(c) \
  CppVtableCloner<c>::zero_vtable_clone();

// This can be called at both dump time and run time.
intptr_t* MetaspaceShared::clone_cpp_vtables(intptr_t* p) {
  assert(DumpSharedSpaces || UseSharedSpaces, "sanity");
  CPP_VTABLE_PATCH_TYPES_DO(CLONE_CPP_VTABLE);
  return p;
}

void MetaspaceShared::zero_cpp_vtable_clones_for_writing() {
  assert(DumpSharedSpaces, "dump-time only");
  CPP_VTABLE_PATCH_TYPES_DO(ZERO_CPP_VTABLE);
}

// Allocate and initialize the C++ vtable clones in the md region.
void MetaspaceShared::allocate_cpp_vtable_clones() {
  assert(DumpSharedSpaces, "dump-time only");
  // Layout (each slot is an intptr_t):
  //   [number of slots in the first vtable = n1]
  //   [ <n1> slots for the first vtable]
  //   [number of slots in the second vtable = n2]
  //   [ <n2> slots for the second vtable]
  //   ...
  // The order of the vtables is the same as the CPP_VTABLE_PATCH_TYPES_DO macro.
  CPP_VTABLE_PATCH_TYPES_DO(ALLOC_CPP_VTABLE_CLONE);
}

// Switch the vtable pointer to point to the cloned vtable. We assume the
// vtable pointer is in the first slot of each object.
void MetaspaceShared::patch_cpp_vtable_pointers() {
  int n = _global_klass_objects->length();
  for (int i = 0; i < n; i++) {
    Klass* obj = _global_klass_objects->at(i);
    if (obj->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(obj);
      if (ik->is_class_loader_instance_klass()) {
        CppVtableCloner<InstanceClassLoaderKlass>::patch(ik);
      } else if (ik->is_reference_instance_klass()) {
        CppVtableCloner<InstanceRefKlass>::patch(ik);
      } else if (ik->is_mirror_instance_klass()) {
        CppVtableCloner<InstanceMirrorKlass>::patch(ik);
      } else {
        CppVtableCloner<InstanceKlass>::patch(ik);
      }
      ConstantPool* cp = ik->constants();
      CppVtableCloner<ConstantPool>::patch(cp);
      for (int j = 0; j < ik->methods()->length(); j++) {
        Method* m = ik->methods()->at(j);
        CppVtableCloner<Method>::patch(m);
        assert(CppVtableCloner<Method>::is_valid_shared_object(m), "must be");
      }
    } else if (obj->is_objArray_klass()) {
      CppVtableCloner<ObjArrayKlass>::patch(obj);
    } else {
      assert(obj->is_typeArray_klass(), "sanity");
      CppVtableCloner<TypeArrayKlass>::patch(obj);
    }
  }
}

bool MetaspaceShared::is_valid_shared_method(const Method* m) {
  assert(is_in_shared_space(m), "must be");
  return CppVtableCloner<Method>::is_valid_shared_object(m);
}

// Closure for serializing initialization data out to a data area to be
// written to the shared file.

class WriteClosure : public SerializeClosure {
private:
  DumpRegion* _dump_region;

public:
  WriteClosure(DumpRegion* r) {
    _dump_region = r;
  }

  void do_ptr(void** p) {
    _dump_region->append_intptr_t((intptr_t)*p);
  }

  void do_u4(u4* p) {
    void* ptr = (void*)(uintx(*p));
    do_ptr(&ptr);
  }

  void do_tag(int tag) {
    _dump_region->append_intptr_t((intptr_t)tag);
  }

  void do_region(u_char* start, size_t size) {
    assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
    assert(size % sizeof(intptr_t) == 0, "bad size");
    do_tag((int)size);
    while (size > 0) {
      _dump_region->append_intptr_t(*(intptr_t*)start);
      start += sizeof(intptr_t);
      size -= sizeof(intptr_t);
    }
  }

  bool reading() const { return false; }
};
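
// Note that do_u4() widens the 32-bit value into a full pointer-sized slot, so every item
// in the stream occupies exactly one intptr_t; the reading closure used at run time must
// narrow it back the same way. Similarly, do_region() emits the region size as a tag first,
// which lets the reader check the length before copying the raw words back.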

// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public ResourceObj {
public:

  // Here's poor man's enum inheritance
#define SHAREDSPACE_OBJ_TYPES_DO(f) \
  METASPACE_OBJ_TYPES_DO(f) \
  f(SymbolHashentry) \
  f(SymbolBucket) \
  f(StringHashentry) \
  f(StringBucket) \
  f(Other)

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  static const char * type_name(Type type) {
    switch(type) {
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
    default:
      ShouldNotReachHere();
      return NULL;
    }
  }

public:
  enum { RO = 0, RW = 1 };

  int _counts[2][_number_of_types];
  int _bytes [2][_number_of_types];

  DumpAllocStats() {
    memset(_counts, 0, sizeof(_counts));
    memset(_bytes,  0, sizeof(_bytes));
  }

  void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
    int which = (read_only) ? RO : RW;
    _counts[which][type] ++;
    _bytes [which][type] += byte_size;
  }

  void print_stats(int ro_all, int rw_all, int mc_all, int md_all);
};

void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all, int md_all) {
  // Calculate size of data that was not allocated by Metaspace::allocate()
  MetaspaceSharedStats *stats = MetaspaceShared::stats();

  // symbols
  _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
  _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;

  _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
  _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;

  // strings
  _counts[RO][StringHashentryType] = stats->string.hashentry_count;
  _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;

  _counts[RO][StringBucketType] = stats->string.bucket_count;
  _bytes [RO][StringBucketType] = stats->string.bucket_bytes;

  // TODO: count things like dictionary, vtable, etc
  _bytes[RW][OtherType] += mc_all + md_all;
  rw_all += mc_all + md_all; // mc/md are mapped Read/Write

  // prevent divide-by-zero
  if (ro_all < 1) {
    ro_all = 1;
  }
  if (rw_all < 1) {
    rw_all = 1;
  }

  int all_ro_count = 0;
  int all_ro_bytes = 0;
  int all_rw_count = 0;
  int all_rw_bytes = 0;

// To make fmt_stats be a syntactic constant (for format warnings), use #define.
#define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
  const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
  const char *hdr = "                        ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";

  ResourceMark rm;
  LogMessage(cds) msg;
  stringStream info_stream;

  info_stream.print_cr("Detailed metadata info (rw includes md and mc):");
  info_stream.print_cr("%s", hdr);
  info_stream.print_cr("%s", sep);
  for (int type = 0; type < int(_number_of_types); type ++) {
    const char *name = type_name((Type)type);
    int ro_count = _counts[RO][type];
    int ro_bytes = _bytes [RO][type];
    int rw_count = _counts[RW][type];
    int rw_bytes = _bytes [RW][type];
    int count = ro_count + rw_count;
    int bytes = ro_bytes + rw_bytes;

    double ro_perc = 100.0 * double(ro_bytes) / double(ro_all);
    double rw_perc = 100.0 * double(rw_bytes) / double(rw_all);
    double perc    = 100.0 * double(bytes)    / double(ro_all + rw_all);

    info_stream.print_cr(fmt_stats, name,
                         ro_count, ro_bytes, ro_perc,
                         rw_count, rw_bytes, rw_perc,
                         count, bytes, perc);

    all_ro_count += ro_count;
    all_ro_bytes += ro_bytes;
    all_rw_count += rw_count;
    all_rw_bytes += rw_bytes;
  }

  int all_count = all_ro_count + all_rw_count;
  int all_bytes = all_ro_bytes + all_rw_bytes;

  double all_ro_perc = 100.0 * double(all_ro_bytes) / double(ro_all);
  double all_rw_perc = 100.0 * double(all_rw_bytes) / double(rw_all);
  double all_perc    = 100.0 * double(all_bytes)    / double(ro_all + rw_all);

  info_stream.print_cr("%s", sep);
  info_stream.print_cr(fmt_stats, "Total",
                       all_ro_count, all_ro_bytes, all_ro_perc,
                       all_rw_count, all_rw_bytes, all_rw_perc,
                       all_count, all_bytes, all_perc);

//assert(all_ro_bytes == ro_all, "everything should have been counted");
  assert(all_rw_bytes == rw_all, "everything should have been counted");

  msg.info("%s", info_stream.as_string());
#undef fmt_stats
}

// Populate the shared space.

class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  GrowableArray<MemRegion> *_string_regions;

  void dump_string_and_symbols();
  char* dump_read_only_tables();
public:

  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit();   // outline because gdb sucks
  static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only, bool allow_exec);
}; // class VM_PopulateDumpSharedSpace

class SortedSymbolClosure: public SymbolClosure {
  GrowableArray<Symbol*> _symbols;
  virtual void do_symbol(Symbol** sym) {
    assert((*sym)->is_permanent(), "archived symbols must be permanent");
    _symbols.append(*sym);
  }
  static int compare_symbols_by_address(Symbol** a, Symbol** b) {
    if (a[0] < b[0]) {
      return -1;
    } else if (a[0] == b[0]) {
      return 0;
    } else {
      return 1;
    }
  }

public:
  SortedSymbolClosure() {
    SymbolTable::symbols_do(this);
    _symbols.sort(compare_symbols_by_address);
  }
  GrowableArray<Symbol*>* get_sorted_symbols() {
    return &_symbols;
  }
};

// ArchiveCompactor --
//
// This class is the central piece of shared archive compaction -- all metaspace data are
// initially allocated outside of the shared regions. ArchiveCompactor copies the
// metaspace data into their final location in the shared regions.
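
// The overall flow, as driven by copy_and_compact() below:
// [1] Shallow-copy all RW MetaspaceObjs into the rw region, recording each old->new
//     address pair in _new_loc_table.
// [2] Shallow-copy all RO MetaspaceObjs into the ro region in the same way.
// [3] Relocate the metadata pointers embedded in every copy so they refer to the
//     copies (ShallowCopyEmbeddedRefRelocator).
// [4] Relocate the external roots, e.g., _global_klass_objects and the symbol table,
//     so they point into the archive as well (RefRelocator).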

class ArchiveCompactor : AllStatic {
  static DumpAllocStats* _alloc_stats;
  static SortedSymbolClosure* _ssc;

  static unsigned my_hash(const address& a) {
    return primitive_hash<address>(a);
  }
  static bool my_equals(const address& a0, const address& a1) {
    return primitive_equals<address>(a0, a1);
  }
  typedef ResourceHashtable<
      address, address,
      ArchiveCompactor::my_hash,   // solaris compiler doesn't like: primitive_hash<address>
      ArchiveCompactor::my_equals, // solaris compiler doesn't like: primitive_equals<address>
      16384, ResourceObj::C_HEAP> MyTable;
  static MyTable* _new_loc_table;

public:
  static void initialize() {
    _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats;
    _new_loc_table = new(ResourceObj::C_HEAP, mtInternal)MyTable;
  }
  static DumpAllocStats* alloc_stats() {
    return _alloc_stats;
  }

  static void allocate(MetaspaceClosure::Ref* ref, bool read_only) {
    address obj = ref->obj();
    int bytes = ref->size() * BytesPerWord;
    char* p;
    size_t alignment = BytesPerWord;
    if (read_only) {
      p = _ro_region.allocate(bytes, alignment);
    } else {
      p = _rw_region.allocate(bytes, alignment);
    }
    memcpy(p, obj, bytes);
    bool isnew = _new_loc_table->put(obj, (address)p);
    assert(isnew, "must be");
    log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);

    _alloc_stats->record(ref->msotype(), bytes, read_only);
    if (ref->msotype() == MetaspaceObj::SymbolType) {
      uintx delta = MetaspaceShared::object_delta(p);
      if (delta > MAX_SHARED_DELTA) {
        // This is just a sanity check and should not appear in any real world usage. This
        // happens only if you allocate more than 2GB of Symbols and would require
        // millions of shared classes.
        vm_exit_during_initialization("Too many Symbols in the CDS archive",
                                      "Please reduce the number of shared classes.");
      }
    }
  }

  static address get_new_loc(MetaspaceClosure::Ref* ref) {
    address* pp = _new_loc_table->get(ref->obj());
    assert(pp != NULL, "must be");
    return *pp;
  }

private:
  // Makes a shallow copy of visited MetaspaceObj's
  class ShallowCopier: public UniqueMetaspaceClosure {
    bool _read_only;
  public:
    ShallowCopier(bool read_only) : _read_only(read_only) {}

    virtual void do_unique_ref(Ref* ref, bool read_only) {
      if (read_only == _read_only) {
        allocate(ref, read_only);
      }
    }
  };

  // Relocate embedded pointers within a MetaspaceObj's shallow copy
  class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
  public:
    virtual void do_unique_ref(Ref* ref, bool read_only) {
      address new_loc = get_new_loc(ref);
      RefRelocator refer;
      ref->metaspace_pointers_do_at(&refer, new_loc);
    }
  };

  // Relocate a reference to point to its shallow copy
  class RefRelocator: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        ref->update(get_new_loc(ref));
      }
      return false; // Do not recurse.
    }
  };

#ifdef ASSERT
  class IsRefInArchiveChecker: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        char* obj = (char*)ref->obj();
        assert(_ro_region.contains(obj) || _rw_region.contains(obj),
               "must be relocated to point to CDS archive");
      }
      return false; // Do not recurse.
    }
  };
#endif

public:
  static void copy_and_compact() {
    // We should no longer allocate anything from the metaspace, so that
    // we can have a stable set of MetaspaceObjs to work with.
    Metaspace::freeze();

    ResourceMark rm;
    SortedSymbolClosure the_ssc; // StackObj
    _ssc = &the_ssc;

    tty->print_cr("Scanning all metaspace objects ... ");
    {
      // allocate and shallow-copy RW objects, immediately following the MC region
      tty->print_cr("Allocating RW objects ... ");
      _mc_region.pack(&_rw_region);

      ResourceMark rm;
      ShallowCopier rw_copier(false);
      iterate_roots(&rw_copier);
    }
    {
      // allocate and shallow-copy RO objects, immediately following the RW region
      tty->print_cr("Allocating RO objects ... ");
      _rw_region.pack(&_ro_region);

      ResourceMark rm;
      ShallowCopier ro_copier(true);
      iterate_roots(&ro_copier);
    }
    {
      tty->print_cr("Relocating embedded pointers ... ");
      ResourceMark rm;
      ShallowCopyEmbeddedRefRelocator emb_reloc;
      iterate_roots(&emb_reloc);
    }
    {
      tty->print_cr("Relocating external roots ... ");
      ResourceMark rm;
      RefRelocator ext_reloc;
      iterate_roots(&ext_reloc);
    }

#ifdef ASSERT
    {
      tty->print_cr("Verifying external roots ... ");
      ResourceMark rm;
      IsRefInArchiveChecker checker;
      iterate_roots(&checker);
    }
#endif

    // cleanup
    _ssc = NULL;
  }

  // We must relocate SystemDictionary::_well_known_klasses only after we have copied in
  // the strings during dump_string_and_symbols(): during the copy, we operate on old
  // String objects which assert that their klass is the old
  // SystemDictionary::String_klass().
  static void relocate_well_known_klasses() {
    {
      tty->print_cr("Relocating _well_known_klasses[] ... ");
      ResourceMark rm;
      RefRelocator ext_reloc;
      SystemDictionary::well_known_klasses_do(&ext_reloc);
    }
    // NOTE: after this point, we shouldn't have any globals that can reach the old
    // objects.

    // We cannot use any of the objects in the heap anymore (except for the objects
    // in the CDS shared string regions) because their headers no longer point to
    // valid Klasses.
  }

  static void iterate_roots(MetaspaceClosure* it) {
    GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols();
    for (int i=0; i<symbols->length(); i++) {
      it->push(symbols->adr_at(i));
    }
    if (_global_klass_objects != NULL) {
      // Need to fix up the pointers
      for (int i = 0; i < _global_klass_objects->length(); i++) {
        // NOTE -- this requires that the vtable is NOT yet patched, or else we are hosed.
        it->push(_global_klass_objects->adr_at(i));
      }
    }
    FileMapInfo::metaspace_pointers_do(it);
    SystemDictionary::classes_do(it);
    Universe::metaspace_pointers_do(it);
    SymbolTable::metaspace_pointers_do(it);
    vmSymbols::metaspace_pointers_do(it);
  }

  static Klass* get_relocated_klass(Klass* orig_klass) {
    address* pp = _new_loc_table->get((address)orig_klass);
    assert(pp != NULL, "must be");
    Klass* klass = (Klass*)(*pp);
    assert(klass->is_klass(), "must be");
    return klass;
  }
};

DumpAllocStats* ArchiveCompactor::_alloc_stats;
SortedSymbolClosure* ArchiveCompactor::_ssc;
ArchiveCompactor::MyTable* ArchiveCompactor::_new_loc_table;

void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx,
                                              DumpRegion* dump_region, bool read_only, bool allow_exec) {
  mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
}

void VM_PopulateDumpSharedSpace::dump_string_and_symbols() {
  tty->print_cr("Dumping string and symbol tables ...");

  NOT_PRODUCT(SymbolTable::verify());
  NOT_PRODUCT(StringTable::verify());
  SymbolTable::write_to_archive();

  // The string space has at most two regions. See FileMapInfo::write_string_regions() for details.
  _string_regions = new GrowableArray<MemRegion>(2);
  size_t shared_string_bytes = 0;
  StringTable::write_to_archive(_string_regions, &shared_string_bytes);
  char* st_base = _string_regions->is_empty() ? NULL : (char*)_string_regions->first().start();
  char* st_top = st_base + shared_string_bytes;
  _st_region.init(st_base, st_top, st_top);
  _st_region.pack();
}

char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
  // Reorder the system dictionary. Moving the symbols affects
  // how the hash table indices are calculated.
  SystemDictionary::reorder_dictionary_for_sharing();
  NOT_PRODUCT(SystemDictionary::verify();)

  size_t buckets_bytes = SystemDictionary::count_bytes_for_buckets();
  char* buckets_top = _ro_region.allocate(buckets_bytes, sizeof(intptr_t));
  SystemDictionary::copy_buckets(buckets_top, _ro_region.top());

  size_t table_bytes = SystemDictionary::count_bytes_for_table();
  char* table_top = _ro_region.allocate(table_bytes, sizeof(intptr_t));
  SystemDictionary::copy_table(table_top, _ro_region.top());

  // Write the other data to the output array.
  WriteClosure wc(&_ro_region);
  MetaspaceShared::serialize(&wc);

  return buckets_top;
}

void VM_PopulateDumpSharedSpace::doit() {
  Thread* THREAD = VMThread::vm_thread();

  NOT_PRODUCT(SystemDictionary::verify();)
  // The following guarantee is meant to ensure that no loader constraints
  // exist yet, since the constraints table is not shared.  This becomes
  // more important now that we don't re-initialize vtables/itables for
  // shared classes at runtime, where constraints were previously created.
  guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
            "loader constraints are not saved");
  guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
            "placeholders are not saved");
  // Revisit and implement this if we prelink method handle call sites:
  guarantee(SystemDictionary::invoke_method_table() == NULL ||
            SystemDictionary::invoke_method_table()->number_of_entries() == 0,
            "invoke method table is not saved");

  // At this point, many classes have been loaded.
  // Gather the SystemDictionary classes in a global array and operate on that,
  // so we don't have to walk the SystemDictionary again.
  _global_klass_objects = new GrowableArray<Klass*>(1000);
  CollectClassesClosure collect_classes;
  ClassLoaderDataGraph::loaded_classes_do(&collect_classes);

  tty->print_cr("Number of classes %d", _global_klass_objects->length());
  {
    int num_type_array = 0, num_obj_array = 0, num_inst = 0;
    for (int i = 0; i < _global_klass_objects->length(); i++) {
      Klass* k = _global_klass_objects->at(i);
      if (k->is_instance_klass()) {
        num_inst ++;
      } else if (k->is_objArray_klass()) {
        num_obj_array ++;
      } else {
        assert(k->is_typeArray_klass(), "sanity");
        num_type_array ++;
      }
    }
    tty->print_cr("    instance classes   = %5d", num_inst);
    tty->print_cr("    obj array classes  = %5d", num_obj_array);
    tty->print_cr("    type array classes = %5d", num_type_array);
  }

  // Ensure the ConstMethods won't be modified at run-time
  tty->print("Updating ConstMethods ... ");
  rewrite_nofast_bytecodes_and_calculate_fingerprints();
  tty->print_cr("done. ");

  // Remove all references outside the metadata
  tty->print("Removing unshareable information ... ");
  remove_unshareable_in_classes();
  tty->print_cr("done. ");

  ArchiveCompactor::initialize();
  ArchiveCompactor::copy_and_compact();

  dump_string_and_symbols();
  ArchiveCompactor::relocate_well_known_klasses();

  char* read_only_tables_start = dump_read_only_tables();
  _ro_region.pack(&_md_region);

  char* vtbl_list = _md_region.top();
  MetaspaceShared::allocate_cpp_vtable_clones();
  _md_region.pack(&_od_region);

  // Relocate the archived class file data into the od region
  relocate_cached_class_file();
  _od_region.pack();

  // The 5 core spaces are allocated consecutively mc->rw->ro->md->od, so their total size
  // is just the space between the two ends.
  size_t core_spaces_size = _od_region.end() - _mc_region.base();
  assert(core_spaces_size == (size_t)align_size_up(core_spaces_size, Metaspace::reserve_alignment()),
         "should already be aligned");

  // Print statistics of all the regions
  const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
                                _mc_region.reserved() + _md_region.reserved() +
                                _st_region.reserved() + _od_region.reserved();
  const size_t total_bytes = _ro_region.used() + _rw_region.used() +
                             _mc_region.used() + _md_region.used() +
                             _st_region.used() + _od_region.used();
  const double total_u_perc = total_bytes / double(total_reserved) * 100.0;

  _mc_region.print(total_reserved);
  _rw_region.print(total_reserved);
  _ro_region.print(total_reserved);
  _md_region.print(total_reserved);
  _st_region.print(total_reserved);
  _od_region.print(total_reserved);

  tty->print_cr("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
                 total_bytes, total_reserved, total_u_perc);

  // During patching, some virtual methods may be called, so at this point
  // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
  MetaspaceShared::patch_cpp_vtable_pointers();

  // The vtable clones contain addresses of the current process.
  // We don't want to write these addresses into the archive.
  MetaspaceShared::zero_cpp_vtable_clones_for_writing();

  // Create and write the archive file that maps the shared spaces.

  FileMapInfo* mapinfo = new FileMapInfo();
  mapinfo->populate_header(os::vm_allocation_granularity());
  mapinfo->set_read_only_tables_start(read_only_tables_start);
  mapinfo->set_misc_data_patching_start(vtbl_list);
  mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers());
  mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size());
  mapinfo->set_core_spaces_size(core_spaces_size);

  for (int pass=1; pass<=2; pass++) {
    if (pass == 1) {
      // The first pass doesn't actually write the data to disk. All it
      // does is to update the fields in the mapinfo->_header.
    } else {
      // After the first pass, the contents of mapinfo->_header are finalized,
      // so we can compute the header's CRC, and write the contents of the header
      // and the regions into disk.
      mapinfo->open_for_write();
      mapinfo->set_header_crc(mapinfo->compute_header_crc());
    }
    mapinfo->write_header();

    // NOTE: the mc region contains the trampoline code for method entries, which is
    // patched at run time, so it needs to be mapped read/write (and executable).
    write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
    write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
    write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
    write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
    write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);
    mapinfo->write_string_regions(_string_regions);
  }

  mapinfo->close();

  // Restore the vtable in case we invoke any virtual methods.
  MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);

  if (log_is_enabled(Info, cds)) {
    ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
                                                 int(_mc_region.used()), int(_md_region.used()));
  }
}

// Update a Java object to point its Klass* to the new location after
// shared archive has been compacted.
void MetaspaceShared::relocate_klass_ptr(oop o) {
  assert(DumpSharedSpaces, "sanity");
  Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
  o->set_klass(k);
}

class LinkSharedClassesClosure : public KlassClosure {
  Thread* THREAD;
  bool    _made_progress;
 public:
  LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {}

  void reset()               { _made_progress = false; }
  bool made_progress() const { return _made_progress; }

  void do_klass(Klass* k) {
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      // Link the class to cause the bytecodes to be rewritten and the
      // cpcache to be created. Class verification is done according
      // to -Xverify setting.
      _made_progress |= MetaspaceShared::try_link_class(ik, THREAD);
      guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
    }
  }
};

class CheckSharedClassesClosure : public KlassClosure {
  bool    _made_progress;
 public:
  CheckSharedClassesClosure() : _made_progress(false) {}

  void reset()               { _made_progress = false; }
  bool made_progress() const { return _made_progress; }
  void do_klass(Klass* k) {
    if (k->is_instance_klass() && InstanceKlass::cast(k)->check_sharing_error_state()) {
      _made_progress = true;
    }
  }
};

void MetaspaceShared::check_shared_class_loader_type(Klass* k) {
  if (k->is_instance_klass()) {
    InstanceKlass* ik = InstanceKlass::cast(k);
    u2 loader_type = ik->loader_type();
    ResourceMark rm;
    guarantee(loader_type != 0,
              "Class loader type is not set for this class %s", ik->name()->as_C_string());
  }
}

void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) {
  // We need to iterate because verification may cause additional classes
  // to be loaded.
  LinkSharedClassesClosure link_closure(THREAD);
  do {
    link_closure.reset();
    ClassLoaderDataGraph::loaded_classes_do(&link_closure);
    guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
  } while (link_closure.made_progress());

  if (_has_error_classes) {
    // Mark all classes whose super class or interfaces failed verification.
    CheckSharedClassesClosure check_closure;
    do {
      // Not completely sure if we need to do this iteratively. Anyway,
      // we should come here only if there are unverifiable classes, which
      // shouldn't happen in normal cases. So better safe than sorry.
      check_closure.reset();
      ClassLoaderDataGraph::loaded_classes_do(&check_closure);
    } while (check_closure.made_progress());

    if (IgnoreUnverifiableClassesDuringDump) {
      // This is useful when running JCK or SQE tests. You should not
      // enable this when running real apps.
      SystemDictionary::remove_classes_in_error_state();
    } else {
      tty->print_cr("Please remove the unverifiable classes from your class list and try again");
      exit(1);
    }
  }

  // Copy the verification constraints from C_HEAP-alloced GrowableArrays to RO-alloced
  // Arrays
  SystemDictionaryShared::finalize_verification_constraints();
}
1372 
1373 void MetaspaceShared::prepare_for_dumping() {
1374   Arguments::check_unsupported_dumping_properties();
1375   ClassLoader::initialize_shared_path();
1376   FileMapInfo::allocate_classpath_entry_table();
1377 }
1378 
1379 // Preload classes from a list, populate the shared spaces and dump to a
1380 // file.
1381 void MetaspaceShared::preload_and_dump(TRAPS) {
1382   { TraceTime timer("Dump Shared Spaces", TRACETIME_LOG(Info, startuptime));
1383     ResourceMark rm;
1384     char class_list_path_str[JVM_MAXPATHLEN];
1385     // Preload classes to be shared.
1386     // TODO: use an os:: method rather than fopen() here.
1387     const char* class_list_path;
1388     if (SharedClassListFile == NULL) {
1389       // Construct the default path to the class list (<java_home>/lib/classlist):
1390       // strip the file name and walk up two directories from the location of the
1391       // JVM library, then tack on "lib" (if not already present) and "classlist".
1392       os::jvm_path(class_list_path_str, sizeof(class_list_path_str));
1393       for (int i = 0; i < 3; i++) {
1394         char *end = strrchr(class_list_path_str, *os::file_separator());
1395         if (end != NULL) *end = '\0';
1396       }
1397       int class_list_path_len = (int)strlen(class_list_path_str);
1398       if (class_list_path_len >= 3) {
1399         if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
1400           if (class_list_path_len < JVM_MAXPATHLEN - 4) {
1401             jio_snprintf(class_list_path_str + class_list_path_len,
1402                          sizeof(class_list_path_str) - class_list_path_len,
1403                          "%slib", os::file_separator());
1404             class_list_path_len += 4;
1405           }
1406         }
1407       }
1408       if (class_list_path_len < JVM_MAXPATHLEN - 10) {
1409         jio_snprintf(class_list_path_str + class_list_path_len,
1410                      sizeof(class_list_path_str) - class_list_path_len,
1411                      "%sclasslist", os::file_separator());
1412       }
1413       class_list_path = class_list_path_str;
1414     } else {
1415       class_list_path = SharedClassListFile;
1416     }
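
    // Worked example of the default-path construction above (paths are
    // illustrative; the exact JVM library location varies by platform):
    //   os::jvm_path()              -> /opt/jdk/lib/server/libjvm.so
    //   strip three path components -> /opt/jdk
    //   does not already end in "lib", so append "lib"
    //                               -> /opt/jdk/lib
    //   append "classlist"          -> /opt/jdk/lib/classlist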
1417 
1418     tty->print_cr("Loading classes to share ...");
1419     _has_error_classes = false;
1420     int class_count = preload_classes(class_list_path, THREAD);
1421     if (ExtraSharedClassListFile != NULL) {
1422       class_count += preload_classes(ExtraSharedClassListFile, THREAD);
1423     }
1424     tty->print_cr("Loading classes to share: done.");
1425 
1426     log_info(cds)("Shared spaces: preloaded %d classes", class_count);
1427 
1428     // Rewrite and link classes
1429     tty->print_cr("Rewriting and linking classes ...");
1430 
1431     // Link any classes which got missed. This would happen if we have loaded classes that
1432     // were not explicitly specified in the classlist. E.g., if an interface implemented by class K
1433     // fails verification, all other interfaces that were not specified in the classlist but
1434     // are implemented by K are not verified.
1435     link_and_cleanup_shared_classes(CATCH);
1436     tty->print_cr("Rewriting and linking classes: done");
1437 
1438     VM_PopulateDumpSharedSpace op;
1439     VMThread::execute(&op);
1440   }
1441 
1442   if (PrintSystemDictionaryAtExit) {
1443     SystemDictionary::print();
1444   }
1445 
1446   // Since various initialization steps have been undone by this process,
1447   // it is not reasonable to continue running a Java process.
1448   exit(0);
1449 }
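
// Usage sketch (illustrative): a dump run is normally started with
//
//   java -Xshare:dump -XX:SharedClassListFile=<file> \
//        -XX:SharedArchiveFile=<archive> ...
//
// where each non-comment line of <file> names one class in internal form
// (e.g. "java/lang/Object"); preload_classes() below feeds those lines to
// ClassListParser. The process always terminates via exit(0) above, so no
// application code runs after a dump.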
1450 
1451 
1452 int MetaspaceShared::preload_classes(const char* class_list_path, TRAPS) {
1453   ClassListParser parser(class_list_path);
1454   int class_count = 0;
1455 
1456   while (parser.parse_one_line()) {
1457     Klass* klass = ClassLoaderExt::load_one_class(&parser, THREAD);
1458 
1459     CLEAR_PENDING_EXCEPTION;
1460     if (klass != NULL) {
1461       if (log_is_enabled(Trace, cds)) {
1462         ResourceMark rm;
1463         log_trace(cds)("Shared spaces preloaded: %s", klass->external_name());
1464       }
1465 
1466       InstanceKlass* ik = InstanceKlass::cast(klass);
1467 
1468       // Link the class to cause the bytecodes to be rewritten and the
1469       // cpcache to be created. The linking is done as soon as classes
1470       // are loaded in order that the related data structures (klass and
1471       // cpCache) are located together.
1472       try_link_class(ik, THREAD);
1473       guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
1474 
1475       class_count++;
1476     }
1477   }
1478 
1479   return class_count;
1480 }
1481 
1482 // Returns true if the class's status has changed
1483 bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
1484   assert(DumpSharedSpaces, "should only be called during dumping");
1485   if (ik->init_state() < InstanceKlass::linked) {
1486     bool saved = BytecodeVerificationLocal;
1487     if (!(ik->is_shared_boot_class())) {
1488       // The verification decision is based on BytecodeVerificationRemote
1489       // for non-system classes. Since we are using the NULL classloader
1490       // to load non-system classes during dumping, we need to temporarily
1491       // change BytecodeVerificationLocal to be the same as
1492       // BytecodeVerificationRemote. Note that this can cause the parent system
1493       // classes to be verified as well. The extra overhead is acceptable during
1494       // dumping.
1495       BytecodeVerificationLocal = BytecodeVerificationRemote;
1496     }
1497     ik->link_class(THREAD);
1498     if (HAS_PENDING_EXCEPTION) {
1499       ResourceMark rm;
1500       tty->print_cr("Preload Warning: Verification failed for %s",
1501                     ik->external_name());
1502       CLEAR_PENDING_EXCEPTION;
1503       ik->set_in_error_state();
1504       _has_error_classes = true;
1505     }
1506     BytecodeVerificationLocal = saved;
1507     return true;
1508   } else {
1509     return false;
1510   }
1511 }
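
// Design note (sketch only, not VM code): the save/override/restore of
// BytecodeVerificationLocal in try_link_class() above could equivalently be
// expressed with a small RAII guard, which would restore the flag even on an
// early return. A hypothetical helper:
//
//   class FlagRestorer {
//     bool& _flag;
//     bool  _saved;
//    public:
//     FlagRestorer(bool& flag) : _flag(flag), _saved(flag) {}
//     ~FlagRestorer()          { _flag = _saved; }
//   };
//
//   // usage:
//   //   FlagRestorer fr(BytecodeVerificationLocal);
//   //   BytecodeVerificationLocal = BytecodeVerificationRemote;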
1512 
1513 // Closure for reading (deserializing) initialization data from a data area
1514 // (ptr_array) mapped in from the shared archive file.
1515 
1516 class ReadClosure : public SerializeClosure {
1517 private:
1518   intptr_t** _ptr_array;
1519 
1520   inline intptr_t nextPtr() {
1521     return *(*_ptr_array)++;
1522   }
1523 
1524 public:
1525   ReadClosure(intptr_t** ptr_array) { _ptr_array = ptr_array; }
1526 
1527   void do_ptr(void** p) {
1528     assert(*p == NULL, "initializing previously initialized pointer.");
1529     intptr_t obj = nextPtr();
1530     assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
1531            "hit tag while initializing ptrs.");
1532     *p = (void*)obj;
1533   }
1534 
1535   void do_u4(u4* p) {
1536     intptr_t obj = nextPtr();
1537     *p = (u4)(uintx(obj));
1538   }
1539 
1540   void do_tag(int tag) {
1541     int old_tag = (int)(intptr_t)nextPtr();
1544     assert(tag == old_tag, "old tag doesn't match");
1545     FileMapInfo::assert_mark(tag == old_tag);
1546   }
1547 
1548   void do_region(u_char* start, size_t size) {
1549     assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
1550     assert(size % sizeof(intptr_t) == 0, "bad size");
1551     do_tag((int)size);
1552     while (size > 0) {
1553       *(intptr_t*)start = nextPtr();
1554       start += sizeof(intptr_t);
1555       size -= sizeof(intptr_t);
1556     }
1557   }
1558 
1559   bool reading() const { return true; }
1560 };
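
// Illustrative model (not VM code): the misc data consumed by ReadClosure is a
// flat sequence of intptr_t words that was produced in the same order at dump
// time, with extra "tag" words interleaved as sanity markers. Because both
// sides walk the identical sequence, a tag mismatch in do_tag() immediately
// exposes a layout or version skew. Minimal sketch:
//
//   #include <cassert>
//   #include <cstdint>
//
//   int main() {
//     int value = 17;
//     intptr_t buf[3];
//     intptr_t* w = buf;
//     *w++ = (intptr_t)&value;                 // like do_ptr() on the write side
//     *w++ = 42;                               // like do_tag(42)
//     *w++ = 7;                                // like do_u4()
//
//     intptr_t* r = buf;
//     void* p = (void*)*r++;                   // do_ptr() on the read side
//     assert(*r++ == 42);                      // do_tag(42) checks stream position
//     uint32_t v = (uint32_t)(uintptr_t)*r++;  // do_u4()
//     return (p != nullptr && v == 7) ? 0 : 1;
//   }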
1561 
1562 // Return true if given address is in the mapped shared space.
1563 bool MetaspaceShared::is_in_shared_space(const void* p) {
1564   return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_space(p);
1565 }
1566 
1567 // Return true if given address is in the shared region with the given index (idx)
1568 bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {
1569   return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx);
1570 }
1571 
1572 bool MetaspaceShared::is_in_trampoline_frame(address addr) {
1573   if (UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc)) {
1574     return true;
1575   }
1576   return false;
1577 }
1578 
1579 bool MetaspaceShared::is_string_region(int idx) {
1580   return (idx >= MetaspaceShared::first_string &&
1581           idx < MetaspaceShared::first_string + MetaspaceShared::max_strings);
1582 }
1583 
1584 void MetaspaceShared::print_shared_spaces() {
1585   if (UseSharedSpaces) {
1586     FileMapInfo::current_info()->print_shared_spaces();
1587   }
1588 }
1589 
1590 
1591 // Map shared spaces at the requested addresses and return true if this succeeded.
1592 bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
1593   size_t image_alignment = mapinfo->alignment();
1594 
1595 #ifndef _WINDOWS
1596   // Map in the shared memory and then map the regions on top of it.
1597   // On Windows, don't map the memory here because it will cause the
1598   // mappings of the regions to fail.
1599   ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
1600   if (!shared_rs.is_reserved()) return false;
1601 #endif
1602 
1603   assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
1604 
1605   char* _ro_base = NULL;
1606   char* _rw_base = NULL;
1607   char* _mc_base = NULL;
1608   char* _md_base = NULL;
1609   char* _od_base = NULL;
1610 
1611   // Map each shared region
1612   if ((_mc_base = mapinfo->map_region(mc)) != NULL &&
1613       mapinfo->verify_region_checksum(mc) &&
1614       (_rw_base = mapinfo->map_region(rw)) != NULL &&
1615       mapinfo->verify_region_checksum(rw) &&
1616       (_ro_base = mapinfo->map_region(ro)) != NULL &&
1617       mapinfo->verify_region_checksum(ro) &&
1618       (_md_base = mapinfo->map_region(md)) != NULL &&
1619       mapinfo->verify_region_checksum(md) &&
1620       (_od_base = mapinfo->map_region(od)) != NULL &&
1621       mapinfo->verify_region_checksum(od) &&
1622       (image_alignment == (size_t)os::vm_allocation_granularity()) &&
1623       mapinfo->validate_classpath_entry_table()) {
1624     // Success (no need to do anything)
1625     return true;
1626   } else {
1627     // If there was a failure in mapping any of the spaces, unmap the ones
1628     // that succeeded
1629     if (_ro_base != NULL) mapinfo->unmap_region(ro);
1630     if (_rw_base != NULL) mapinfo->unmap_region(rw);
1631     if (_mc_base != NULL) mapinfo->unmap_region(mc);
1632     if (_md_base != NULL) mapinfo->unmap_region(md);
1633     if (_od_base != NULL) mapinfo->unmap_region(od);
1634 #ifndef _WINDOWS
1635     // Release the entire mapped region
1636     shared_rs.release();
1637 #endif
1638     // If -Xshare:on is specified, print out the error message and exit VM,
1639     // otherwise, set UseSharedSpaces to false and continue.
1640     if (RequireSharedSpaces || PrintSharedArchiveAndExit) {
1641       vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
1642     } else {
1643       FLAG_SET_DEFAULT(UseSharedSpaces, false);
1644     }
1645     return false;
1646   }
1647 }
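
// Sketch (illustrative, hypothetical names): map_shared_spaces() above relies
// on one short-circuiting condition chain, so the first region that fails to
// map or verify skips the remaining work and falls through to the cleanup
// branch. The same all-or-nothing pattern in stand-alone form, omitting the
// checksum step:
//
//   struct Region { bool ok = true; bool mapped = false; };
//   static bool map_one(Region& r) { r.mapped = r.ok; return r.ok; }
//   static void unmap(Region& r)   { r.mapped = false; }
//
//   bool map_all(Region* regions, int n) {
//     for (int i = 0; i < n; i++) {
//       if (!map_one(regions[i])) {                       // first failure wins
//         for (int j = 0; j < i; j++) unmap(regions[j]);  // roll back earlier maps
//         return false;
//       }
//     }
//     return true;                                        // all regions usable
//   }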
1648 
1649 // Read the miscellaneous data from the shared archive file, and
1650 // deserialize it to its various destinations.
1651 
1652 void MetaspaceShared::initialize_shared_spaces() {
1653   FileMapInfo *mapinfo = FileMapInfo::current_info();
1654   _cds_i2i_entry_code_buffers = mapinfo->cds_i2i_entry_code_buffers();
1655   _cds_i2i_entry_code_buffers_size = mapinfo->cds_i2i_entry_code_buffers_size();
1656   _core_spaces_size = mapinfo->core_spaces_size();
1657   char* buffer = mapinfo->misc_data_patching_start();
1658   clone_cpp_vtables((intptr_t*)buffer);
1659 
1660   // The rest of the data is stored in the read-only tables (RO region)
1661   buffer = mapinfo->read_only_tables_start();
1662   int sharedDictionaryLen = *(intptr_t*)buffer;
1663   buffer += sizeof(intptr_t);
1664   int number_of_entries = *(intptr_t*)buffer;
1665   buffer += sizeof(intptr_t);
1666   SystemDictionary::set_shared_dictionary((HashtableBucket<mtClass>*)buffer,
1667                                           sharedDictionaryLen,
1668                                           number_of_entries);
1669   buffer += sharedDictionaryLen;
1670 
1671   // The following data are the linked list elements
1672   // (HashtableEntry objects) for the shared dictionary table.
1673 
1674   int len = *(intptr_t*)buffer;     // skip over shared dictionary entries
1675   buffer += sizeof(intptr_t);
1676   buffer += len;
1677 
1678   // Verify various attributes of the archive, plus initialize the
1679   // shared string/symbol tables
1680   intptr_t* array = (intptr_t*)buffer;
1681   ReadClosure rc(&array);
1682   serialize(&rc);
1683 
1684   // Initialize the run-time symbol table.
1685   SymbolTable::create_table();
1686 
1687   // Close the mapinfo file
1688   mapinfo->close();
1689 
1690   if (PrintSharedArchiveAndExit) {
1691     if (PrintSharedDictionary) {
1692       tty->print_cr("\nShared classes:\n");
1693       SystemDictionary::print_shared(false);
1694     }
1695     if (_archive_loading_failed) {
1696       tty->print_cr("archive is invalid");
1697       vm_exit(1);
1698     } else {
1699       tty->print_cr("archive is valid");
1700       vm_exit(0);
1701     }
1702   }
1703 }
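
// Layout of the read-only tables area parsed above (order only; word sizes are
// platform-dependent, and the length values are read from the archive itself):
//
//   +----------------------------+  <- read_only_tables_start()
//   | sharedDictionaryLen        |  one intptr_t
//   | number_of_entries          |  one intptr_t
//   +----------------------------+
//   | shared dictionary buckets  |  sharedDictionaryLen bytes
//   +----------------------------+
//   | len                        |  one intptr_t
//   | HashtableEntry block       |  len bytes (skipped above)
//   +----------------------------+
//   | serialized misc tables     |  consumed by ReadClosure / serialize()
//   +----------------------------+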
1704 
1705 void MetaspaceShared::fixup_shared_string_regions() {
1706   FileMapInfo *mapinfo = FileMapInfo::current_info();
1707   mapinfo->fixup_string_regions();
1708 }
1709 
1710 // JVM/TI RedefineClasses() support:
1711 bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
1712   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1713 
1714   if (UseSharedSpaces) {
1715     // remap the shared readonly space to shared readwrite, private
1716     FileMapInfo* mapinfo = FileMapInfo::current_info();
1717     if (!mapinfo->remap_shared_readonly_as_readwrite()) {
1718       return false;
1719     }
1720     _remapped_readwrite = true;
1721   }
1722   return true;
1723 }
1724 
1725 void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
1726   // This is highly unlikely to happen on 64-bit platforms, because we have reserved a 4GB space.
1727   // On 32-bit platforms we reserve only 256MB, so you could run out of space with 100,000 classes
1728   // or so.
1729   _mc_region.print_out_of_space_msg(name, needed_bytes);
1730   _rw_region.print_out_of_space_msg(name, needed_bytes);
1731   _ro_region.print_out_of_space_msg(name, needed_bytes);
1732   _md_region.print_out_of_space_msg(name, needed_bytes);
1733   _st_region.print_out_of_space_msg(name, needed_bytes);
1734   _od_region.print_out_of_space_msg(name, needed_bytes);
1735 
1736   vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
1737                                 "Please reduce the number of shared classes.");
1738 }