/*
 * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/classListParser.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/loaderConstraints.hpp"
#include "classfile/placeholders.hpp"
#include "classfile/sharedClassUtil.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "code/codeCache.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
#include "gc/shared/gcLocker.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodes.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "memory/filemap.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/os.hpp"
#include "runtime/signature.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "utilities/align.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/hashtable.inline.hpp"

ReservedSpace MetaspaceShared::_shared_rs;
VirtualSpace MetaspaceShared::_shared_vs;
MetaspaceSharedStats MetaspaceShared::_stats;
bool MetaspaceShared::_has_error_classes;
bool MetaspaceShared::_archive_loading_failed = false;
bool MetaspaceShared::_remapped_readwrite = false;
bool MetaspaceShared::_open_archive_heap_region_mapped = false;
address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
size_t MetaspaceShared::_core_spaces_size = 0;

// The CDS archive is divided into the following regions:
//     mc  - misc code (the method entry trampolines)
//     rw  - read-write metadata
//     ro  - read-only metadata and read-only tables
//     md  - misc data (the c++ vtables)
//     od  - optional data (original class files)
//
//     s0  - shared strings (closed archive heap space) #0
//     s1  - shared strings (closed archive heap space) #1 (may be empty)
//     oa0 - open archive heap space #0
//     oa1 - open archive heap space #1 (may be empty)
//
// The mc, rw, ro, md and od regions are linearly allocated, starting from
// SharedBaseAddress, in the order of mc->rw->ro->md->od. The sizes of these
// 5 regions are page-aligned, and there's no gap between any consecutive regions.
//
// These 5 regions are populated in the following steps:
// [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
//     temporarily allocated outside of the shared regions. Only the method entry
//     trampolines are written into the mc region.
// [2] ArchiveCompactor copies RW metadata into the rw region.
// [3] ArchiveCompactor copies RO metadata into the ro region.
// [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
//     are copied into the ro region as read-only tables.
// [5] C++ vtables are copied into the md region.
// [6] Original class files are copied into the od region.
//
// The s0/s1 and oa0/oa1 regions are populated inside
// VM_PopulateDumpSharedSpace::dump_java_heap_objects.
// Their layout is independent of the other 5 regions.

class DumpRegion {
private:
  const char* _name;
  char* _base;
  char* _top;
  char* _end;
  bool _is_packed;

  char* expand_top_to(char* newtop) {
    assert(is_allocatable(), "must be initialized and not packed");
    assert(newtop >= _top, "must not grow backwards");
    if (newtop > _end) {
      MetaspaceShared::report_out_of_space(_name, newtop - _top);
      ShouldNotReachHere();
    }
    MetaspaceShared::commit_shared_space_to(newtop);
    _top = newtop;
    return _top;
  }

public:
  DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}

  char* allocate(size_t num_bytes, size_t alignment=BytesPerWord) {
    char* p = (char*)align_up(_top, alignment);
    char* newtop = p + align_up(num_bytes, alignment);
    expand_top_to(newtop);
    memset(p, 0, newtop - p);
    return p;
  }

  void append_intptr_t(intptr_t n) {
    assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
    intptr_t *p = (intptr_t*)_top;
    char* newtop = _top + sizeof(intptr_t);
    expand_top_to(newtop);
    *p = n;
  }

  char* base()      const { return _base;        }
  char* top()       const { return _top;         }
  char* end()       const { return _end;         }
  size_t reserved() const { return _end - _base; }
  size_t used()     const { return _top - _base; }
  bool is_packed()  const { return _is_packed;   }
  bool is_allocatable() const {
    return !is_packed() && _base != NULL;
  }

  void print(size_t total_bytes) const {
    tty->print_cr("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
                  _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()), p2i(_base));
  }
  void print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
    tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
               _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
    if (strcmp(_name, failing_region) == 0) {
      tty->print_cr(" required = %d", int(needed_bytes));
    } else {
      tty->cr();
    }
  }
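
  // Illustrative dump-time usage (a sketch based on the calls made elsewhere in
  // this file, e.g. initialize_dumptime_shared_and_meta_spaces() and
  // ArchiveCompactor::copy_and_compact()):
  //
  //   _mc_region.init(&_shared_rs);   // first region starts at the bottom of the archive
  //   ... allocate() into _mc_region ...
  //   _mc_region.pack(&_rw_region);   // close mc (align up its top) and start rw right after it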
  void init(const ReservedSpace* rs) {
    _base = _top = rs->base();
    _end = rs->end();
  }
  void init(char* b, char* t, char* e) {
    _base = b;
    _top = t;
    _end = e;
  }

  void pack(DumpRegion* next = NULL) {
    assert(!is_packed(), "sanity");
    _end = (char*)align_up(_top, Metaspace::reserve_alignment());
    _is_packed = true;
    if (next != NULL) {
      next->_base = next->_top = this->_end;
      next->_end = MetaspaceShared::shared_rs()->end();
    }
  }
  bool contains(char* p) {
    return base() <= p && p < top();
  }
};


DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
size_t _total_string_region_size = 0, _total_open_archive_region_size = 0;

char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
  return _mc_region.allocate(num_bytes);
}

char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
  return _ro_region.allocate(num_bytes);
}

void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
  assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");

  // If using shared space, open the file that contains the shared space
  // and map in the memory before initializing the rest of metaspace (so
  // the addresses don't conflict).
  address cds_address = NULL;
  FileMapInfo* mapinfo = new FileMapInfo();

  // Open the shared archive file, read and validate the header. If
  // initialization fails, shared spaces are disabled (UseSharedSpaces is
  // turned off) and the file is closed. The spaces are also mapped in here.
  if (mapinfo->initialize() && map_shared_spaces(mapinfo)) {
    size_t cds_total = core_spaces_size();
    cds_address = (address)mapinfo->header()->region_addr(0);
#ifdef _LP64
    if (Metaspace::using_class_space()) {
      char* cds_end = (char*)(cds_address + cds_total);
      cds_end = (char *)align_up(cds_end, Metaspace::reserve_alignment());
      // If UseCompressedClassPointers is set then allocate the metaspace area
      // above the heap and above the CDS area (if it exists).
      Metaspace::allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
      // map_heap_regions() compares the current narrow oop and klass encodings
      // with the archived ones, so it must be done after all encodings are determined.
      mapinfo->map_heap_regions();
    }
#endif // _LP64
  } else {
    assert(!mapinfo->is_open() && !UseSharedSpaces,
           "archive file not closed or shared spaces not disabled.");
  }
}

void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
  assert(DumpSharedSpaces, "should be called for dump time only");
  const size_t reserve_alignment = Metaspace::reserve_alignment();
  bool large_pages = false; // No large pages when dumping the CDS archive.
  char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);

#ifdef _LP64
  // On 64-bit VM, the heap and class space layout will be the same as if
  // you're running in -Xshare:on mode:
  //
  //                         +-- SharedBaseAddress (default = 0x800000000)
  //                         v
  // +-..---------+---------+ ... +----+----+----+----+----+---------------+
  // |    Heap    | Archive |     | MC | RW | RO | MD | OD | class space   |
  // +-..---------+---------+ ... +----+----+----+----+----+---------------+
  // |<--   MaxHeapSize  -->|     |<-- UnscaledClassSpaceMax = 4GB ------->|
  //
  const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
  const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
#else
  // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
  size_t cds_total = align_down(256*M, reserve_alignment);
#endif

  // First try to reserve the space at the specified SharedBaseAddress.
  _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
  if (_shared_rs.is_reserved()) {
    assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
  } else {
    // Get a mmap region anywhere if the SharedBaseAddress fails.
    _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages);
  }
  if (!_shared_rs.is_reserved()) {
    vm_exit_during_initialization("Unable to reserve memory for shared space",
                                  err_msg(SIZE_FORMAT " bytes.", cds_total));
  }

#ifdef _LP64
  // During dump time, we allocate 4GB (UnscaledClassSpaceMax) of space and split it up:
  // + The upper 1 GB is used as the "temporary compressed class space" -- preload_classes()
  //   will store Klasses into this space.
  // + The lower 3 GB is used for the archive -- when preload_classes() is done,
  //   ArchiveCompactor will copy the class metadata into this space, first the RW parts,
  //   then the RO parts.

  assert(UseCompressedOops && UseCompressedClassPointers,
         "UseCompressedOops and UseCompressedClassPointers must be set");

  size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment);
  ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
  CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment);
  _shared_rs = _shared_rs.first_part(max_archive_size);

  // Set up compressed class pointers.
  Universe::set_narrow_klass_base((address)_shared_rs.base());
  // Set narrow_klass_shift to be LogKlassAlignmentInBytes. This is consistent
  // with AOT.
  Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);

  Metaspace::initialize_class_space(tmp_class_space);
  tty->print_cr("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());

  tty->print_cr("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                CompressedClassSpaceSize, p2i(tmp_class_space.base()));
#endif

  // Start with 0 committed bytes. The memory will be committed as needed by
  // MetaspaceShared::commit_shared_space_to().
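  // (commit_shared_space_to() below grows the committed portion in steps of at
  // least 1 MB, so most allocate() calls do not pay for a commit.)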
  if (!_shared_vs.initialize(_shared_rs, 0)) {
    vm_exit_during_initialization("Unable to allocate memory for shared space");
  }

  _mc_region.init(&_shared_rs);
  tty->print_cr("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                _shared_rs.size(), p2i(_shared_rs.base()));
}

void MetaspaceShared::commit_shared_space_to(char* newtop) {
  assert(DumpSharedSpaces, "dump-time only");
  char* base = _shared_rs.base();
  size_t need_committed_size = newtop - base;
  size_t has_committed_size = _shared_vs.committed_size();
  if (need_committed_size <= has_committed_size) {
    return;
  }

  size_t min_bytes = need_committed_size - has_committed_size;
  size_t preferred_bytes = 1 * M;
  size_t uncommitted = _shared_vs.reserved_size() - has_committed_size;

  size_t commit = MAX2(min_bytes, preferred_bytes);
  commit = MIN2(commit, uncommitted); // never commit beyond the reserved size
  assert(commit <= uncommitted, "sanity");

  bool result = _shared_vs.expand_by(commit, false);
  if (!result) {
    vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
                                          need_committed_size));
  }

  log_info(cds)("Expanding shared spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
                commit, _shared_vs.actual_committed_size(), _shared_vs.high());
}

// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.

void MetaspaceShared::serialize(SerializeClosure* soc) {
  int tag = 0;
  soc->do_tag(--tag);

  // Verify the sizes of various metadata in the system.
  soc->do_tag(sizeof(Method));
  soc->do_tag(sizeof(ConstMethod));
  soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(ConstantPool));
  soc->do_tag(sizeof(ConstantPoolCache));
  soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
  soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(Symbol));

  // Dump/restore miscellaneous metadata.
  Universe::serialize(soc, true);
  soc->do_tag(--tag);

  // Dump/restore references to commonly used names and signatures.
  vmSymbols::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore the symbol and string tables.
  SymbolTable::serialize(soc);
  StringTable::serialize(soc);
  soc->do_tag(--tag);

  soc->do_tag(666);
}

address MetaspaceShared::cds_i2i_entry_code_buffers(size_t total_size) {
  if (DumpSharedSpaces) {
    if (_cds_i2i_entry_code_buffers == NULL) {
      _cds_i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
      _cds_i2i_entry_code_buffers_size = total_size;
    }
  } else if (UseSharedSpaces) {
    assert(_cds_i2i_entry_code_buffers != NULL, "must already be initialized");
  } else {
    return NULL;
  }

  assert(_cds_i2i_entry_code_buffers_size == total_size, "must not change");
  return _cds_i2i_entry_code_buffers;
}

// CDS code for dumping shared archive.

// Global object for holding classes that have been loaded. Since this
// is run at a safepoint just before exit, this is the entire set of classes.
static GrowableArray<Klass*>* _global_klass_objects;

static void collect_array_classes(Klass* k) {
  _global_klass_objects->append_if_missing(k);
  if (k->is_array_klass()) {
    // Add in the array classes too.
    ArrayKlass* ak = ArrayKlass::cast(k);
    Klass* h = ak->higher_dimension();
    if (h != NULL) {
      h->array_klasses_do(collect_array_classes);
    }
  }
}

class CollectClassesClosure : public KlassClosure {
  void do_klass(Klass* k) {
    if (!UseAppCDS && !k->class_loader_data()->is_the_null_class_loader_data()) {
      // AppCDS is not enabled. Let's omit non-boot classes.
      return;
    }

    if (!(k->is_instance_klass() && InstanceKlass::cast(k)->is_in_error_state())) {
      _global_klass_objects->append_if_missing(k);
    }
    if (k->is_array_klass()) {
      // Add in the array classes too.
      ArrayKlass* ak = ArrayKlass::cast(k);
      Klass* h = ak->higher_dimension();
      if (h != NULL) {
        h->array_klasses_do(collect_array_classes);
      }
    }
  }
};

static void remove_unshareable_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
      // on their array classes.
      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
      k->remove_unshareable_info();
    }
  }
}

static void remove_java_mirror_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_java_mirror
      // on their array classes.
      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
      k->remove_java_mirror();
    }
  }
}

static void rewrite_nofast_bytecode(Method* method) {
  BytecodeStream bcs(method);
  while (!bcs.is_last_bytecode()) {
    Bytecodes::Code opcode = bcs.next();
    switch (opcode) {
    case Bytecodes::_getfield: *bcs.bcp() = Bytecodes::_nofast_getfield; break;
    case Bytecodes::_putfield: *bcs.bcp() = Bytecodes::_nofast_putfield; break;
    case Bytecodes::_aload_0:  *bcs.bcp() = Bytecodes::_nofast_aload_0;  break;
    case Bytecodes::_iload: {
      if (!bcs.is_wide()) {
        *bcs.bcp() = Bytecodes::_nofast_iload;
      }
      break;
    }
    default: break;
    }
  }
}

// Walk all methods in the class list to ensure that they won't be modified at
// run time. This includes:
// [1] Rewrite all bytecodes as needed, so that the ConstMethod* will not be modified
//     at run time by RewriteBytecodes/RewriteFrequentPairs
// [2] Assign a fingerprint, so one doesn't need to be assigned at run-time.
static void rewrite_nofast_bytecodes_and_calculate_fingerprints() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      for (int j = 0; j < ik->methods()->length(); j++) {
        Method* m = ik->methods()->at(j);
        rewrite_nofast_bytecode(m);
        Fingerprinter fp(m);
        // The side effect of this call sets the method's fingerprint field.
        fp.fingerprint();
      }
    }
  }
}

static void relocate_cached_class_file() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      JvmtiCachedClassFileData* p = ik->get_archived_class_data();
      if (p != NULL) {
        int size = offset_of(JvmtiCachedClassFileData, data) + p->length;
        JvmtiCachedClassFileData* q = (JvmtiCachedClassFileData*)_od_region.allocate(size);
        q->length = p->length;
        memcpy(q->data, p->data, p->length);
        ik->set_archived_class_data(q);
      }
    }
  }
}

NOT_PRODUCT(
static void assert_not_anonymous_class(InstanceKlass* k) {
  assert(!(k->is_anonymous()), "cannot archive anonymous classes");
}

// Anonymous classes are not stored inside any dictionaries. They are created by
// SystemDictionary::parse_stream() with a non-null host_klass.
static void assert_no_anonymous_classes_in_dictionaries() {
  ClassLoaderDataGraph::dictionary_classes_do(assert_not_anonymous_class);
})

// Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
// (In GCC this is the field <Type>::_vptr, i.e., the first word in the object.)
//
// Addresses of the vtables and the methods may be different across JVM runs,
// if libjvm.so is dynamically loaded at a different base address.
//
// To ensure that the Metadata objects in the CDS archive always have the correct vtable:
//
// + at dump time: we redirect the _vptr to point to our own vtables inside
//                 the CDS image
// + at run time:  we clone the actual contents of the vtables from libjvm.so
//                 into our own tables.

// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
#define CPP_VTABLE_PATCH_TYPES_DO(f) \
  f(ConstantPool) \
  f(InstanceKlass) \
  f(InstanceClassLoaderKlass) \
  f(InstanceMirrorKlass) \
  f(InstanceRefKlass) \
  f(Method) \
  f(ObjArrayKlass) \
  f(TypeArrayKlass)

class CppVtableInfo {
  intptr_t _vtable_size;
  intptr_t _cloned_vtable[1];
public:
  static int num_slots(int vtable_size) {
    return 1 + vtable_size; // Need to add the space occupied by _vtable_size.
  }
  int vtable_size()           { return int(uintx(_vtable_size)); }
  void set_vtable_size(int n) { _vtable_size = intptr_t(n); }
  intptr_t* cloned_vtable()   { return &_cloned_vtable[0]; }
  void zero()                 { memset(_cloned_vtable, 0, sizeof(intptr_t) * vtable_size()); }
  // Returns the address of the next CppVtableInfo that can be placed immediately after this CppVtableInfo.
  static size_t byte_size(int vtable_size) {
    CppVtableInfo i;
    return pointer_delta(&i._cloned_vtable[vtable_size], &i, sizeof(u1));
  }
};

template <class T> class CppVtableCloner : public T {
  static intptr_t* vtable_of(Metadata& m) {
    return *((intptr_t**)&m);
  }
  static CppVtableInfo* _info;

  static int get_vtable_length(const char* name);

public:
  // Allocate a clone of T's C++ vtable in the md region and initialize it
  // with a copy of the current process's vtable for T.
  static intptr_t* allocate(const char* name);

  // Clone T's vtable into the buffer described by info.
  static intptr_t* clone_vtable(const char* name, CppVtableInfo* info);

  static void zero_vtable_clone() {
    assert(DumpSharedSpaces, "dump-time only");
    _info->zero();
  }

  // Switch the vtable pointer to point to the cloned vtable.
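  // (After patch(), the first word of obj is _info->cloned_vtable(), so virtual
  // calls on the archived object dispatch through the cloned table.)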
  static void patch(Metadata* obj) {
    assert(DumpSharedSpaces, "dump-time only");
    *(void**)obj = (void*)(_info->cloned_vtable());
  }

  static bool is_valid_shared_object(const T* obj) {
    intptr_t* vptr = *(intptr_t**)obj;
    return vptr == _info->cloned_vtable();
  }
};

template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;

template <class T>
intptr_t* CppVtableCloner<T>::allocate(const char* name) {
  assert(is_aligned(_md_region.top(), sizeof(intptr_t)), "bad alignment");
  int n = get_vtable_length(name);
  _info = (CppVtableInfo*)_md_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
  _info->set_vtable_size(n);

  intptr_t* p = clone_vtable(name, _info);
  assert((char*)p == _md_region.top(), "must be");

  return p;
}

template <class T>
intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) {
  if (!DumpSharedSpaces) {
    assert(_info == 0, "_info is initialized only at dump time");
    _info = info; // Remember it -- it will be used by MetaspaceShared::is_valid_shared_method()
  }
  T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
  int n = info->vtable_size();
  intptr_t* srcvtable = vtable_of(tmp);
  intptr_t* dstvtable = info->cloned_vtable();

  // We already checked (and, if necessary, adjusted n) when the vtables were allocated, so we are
  // safe to do memcpy.
  log_debug(cds, vtables)("Copying %3d vtable entries for %s", n, name);
  memcpy(dstvtable, srcvtable, sizeof(intptr_t) * n);
  return dstvtable + n;
}

// To determine the size of the vtable for each type, we use the following
// trick by declaring 2 subclasses:
//
//   class CppVtableTesterA: public InstanceKlass {virtual void* last_virtual_method() {return NULL;} };
//   class CppVtableTesterB: public InstanceKlass {virtual int   last_virtual_method() {return 1;}    };
//
// CppVtableTesterA and CppVtableTesterB's vtables have the following properties:
// - Their size (N+1) is exactly one more than the size of InstanceKlass's vtable (N)
// - The first N entries are exactly the same as in InstanceKlass's vtable.
// - Their last entry is different.
//
// So to determine the value of N, we just walk CppVtableTesterA and CppVtableTesterB's tables
// and find the first entry that's different.
//
// This works on all C++ compilers supported by Oracle, but you may need to tweak it for more
// esoteric compilers.

template <class T> class CppVtableTesterB: public T {
public:
  virtual int last_virtual_method() {return 1;}
};

template <class T> class CppVtableTesterA : public T {
public:
  virtual void* last_virtual_method() {
    // Make this different than CppVtableTesterB::last_virtual_method so the C++
    // compiler/linker won't alias the two functions.
    return NULL;
  }
};

template <class T>
int CppVtableCloner<T>::get_vtable_length(const char* name) {
  CppVtableTesterA<T> a;
  CppVtableTesterB<T> b;

  intptr_t* avtable = vtable_of(a);
  intptr_t* bvtable = vtable_of(b);

  // Start at slot 1, because slot 0 may be RTTI (on Solaris/Sparc)
  int vtable_len = 1;
  for (; ; vtable_len++) {
    if (avtable[vtable_len] != bvtable[vtable_len]) {
      break;
    }
  }
  log_debug(cds, vtables)("Found %3d vtable entries for %s", vtable_len, name);

  return vtable_len;
}

#define ALLOC_CPP_VTABLE_CLONE(c) \
  CppVtableCloner<c>::allocate(#c);

#define CLONE_CPP_VTABLE(c) \
  p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);

#define ZERO_CPP_VTABLE(c) \
  CppVtableCloner<c>::zero_vtable_clone();

// This can be called at both dump time and run time.
intptr_t* MetaspaceShared::clone_cpp_vtables(intptr_t* p) {
  assert(DumpSharedSpaces || UseSharedSpaces, "sanity");
  CPP_VTABLE_PATCH_TYPES_DO(CLONE_CPP_VTABLE);
  return p;
}

void MetaspaceShared::zero_cpp_vtable_clones_for_writing() {
  assert(DumpSharedSpaces, "dump-time only");
  CPP_VTABLE_PATCH_TYPES_DO(ZERO_CPP_VTABLE);
}

// Allocate and initialize the C++ vtable clones in the md region.
void MetaspaceShared::allocate_cpp_vtable_clones() {
  assert(DumpSharedSpaces, "dump-time only");
  // Layout (each slot is an intptr_t):
  //   [number of slots in the first vtable = n1]
  //   [ <n1> slots for the first vtable]
  //   [number of slots in the second vtable = n2]
  //   [ <n2> slots for the second vtable]
  //   ...
  // The order of the vtables is the same as the CPP_VTABLE_PATCH_TYPES_DO macro.
  CPP_VTABLE_PATCH_TYPES_DO(ALLOC_CPP_VTABLE_CLONE);
}

// Switch the vtable pointer to point to the cloned vtable. We assume the
// vtable pointer is in the first slot of the object.
void MetaspaceShared::patch_cpp_vtable_pointers() {
  int n = _global_klass_objects->length();
  for (int i = 0; i < n; i++) {
    Klass* obj = _global_klass_objects->at(i);
    if (obj->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(obj);
      if (ik->is_class_loader_instance_klass()) {
        CppVtableCloner<InstanceClassLoaderKlass>::patch(ik);
      } else if (ik->is_reference_instance_klass()) {
        CppVtableCloner<InstanceRefKlass>::patch(ik);
      } else if (ik->is_mirror_instance_klass()) {
        CppVtableCloner<InstanceMirrorKlass>::patch(ik);
      } else {
        CppVtableCloner<InstanceKlass>::patch(ik);
      }
      ConstantPool* cp = ik->constants();
      CppVtableCloner<ConstantPool>::patch(cp);
      for (int j = 0; j < ik->methods()->length(); j++) {
        Method* m = ik->methods()->at(j);
        CppVtableCloner<Method>::patch(m);
        assert(CppVtableCloner<Method>::is_valid_shared_object(m), "must be");
      }
    } else if (obj->is_objArray_klass()) {
      CppVtableCloner<ObjArrayKlass>::patch(obj);
    } else {
      assert(obj->is_typeArray_klass(), "sanity");
      CppVtableCloner<TypeArrayKlass>::patch(obj);
    }
  }
}

bool MetaspaceShared::is_valid_shared_method(const Method* m) {
  assert(is_in_shared_metaspace(m), "must be");
  return CppVtableCloner<Method>::is_valid_shared_object(m);
}

// Closure for serializing initialization data out to a data area to be
// written to the shared file.
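//
// Illustrative dump-time flow (this exact pairing appears in
// dump_read_only_tables() below):
//
//   WriteClosure wc(&_ro_region);
//   MetaspaceShared::serialize(&wc);   // appends pointers/tags into the ro region
//
// At run time, a reading SerializeClosure walks the same data in the mapped
// archive, restoring the pointers and verifying the tags in the same order.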

class WriteClosure : public SerializeClosure {
private:
  DumpRegion* _dump_region;

public:
  WriteClosure(DumpRegion* r) {
    _dump_region = r;
  }

  void do_ptr(void** p) {
    _dump_region->append_intptr_t((intptr_t)*p);
  }

  void do_u4(u4* p) {
    void* ptr = (void*)(uintx(*p));
    do_ptr(&ptr);
  }

  void do_tag(int tag) {
    _dump_region->append_intptr_t((intptr_t)tag);
  }

  void do_region(u_char* start, size_t size) {
    assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
    assert(size % sizeof(intptr_t) == 0, "bad size");
    do_tag((int)size);
    while (size > 0) {
      _dump_region->append_intptr_t(*(intptr_t*)start);
      start += sizeof(intptr_t);
      size -= sizeof(intptr_t);
    }
  }

  bool reading() const { return false; }
};

// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public ResourceObj {
public:

  // Here's poor man's enum inheritance
#define SHAREDSPACE_OBJ_TYPES_DO(f) \
  METASPACE_OBJ_TYPES_DO(f) \
  f(SymbolHashentry) \
  f(SymbolBucket) \
  f(StringHashentry) \
  f(StringBucket) \
  f(Other)

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  static const char* type_name(Type type) {
    switch(type) {
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
    default:
      ShouldNotReachHere();
      return NULL;
    }
  }

public:
  enum { RO = 0, RW = 1 };

  int _counts[2][_number_of_types];
  int _bytes [2][_number_of_types];

  DumpAllocStats() {
    memset(_counts, 0, sizeof(_counts));
    memset(_bytes, 0, sizeof(_bytes));
  };

  void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
    int which = (read_only) ? RO : RW;
    _counts[which][type] ++;
    _bytes [which][type] += byte_size;
  }

  void record_other_type(int byte_size, bool read_only) {
    int which = (read_only) ? RO : RW;
    _bytes [which][OtherType] += byte_size;
  }
  void print_stats(int ro_all, int rw_all, int mc_all, int md_all);
};

void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all, int md_all) {
  // Calculate size of data that was not allocated by Metaspace::allocate()
  MetaspaceSharedStats *stats = MetaspaceShared::stats();

  // symbols
  _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
  _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;

  _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
  _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;

  // strings
  _counts[RO][StringHashentryType] = stats->string.hashentry_count;
  _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;

  _counts[RO][StringBucketType] = stats->string.bucket_count;
  _bytes [RO][StringBucketType] = stats->string.bucket_bytes;

  // TODO: count things like dictionary, vtable, etc
  _bytes[RW][OtherType] += mc_all + md_all;
  rw_all += mc_all + md_all; // mc/md are mapped Read/Write

  // prevent divide-by-zero
  if (ro_all < 1) {
    ro_all = 1;
  }
  if (rw_all < 1) {
    rw_all = 1;
  }

  int all_ro_count = 0;
  int all_ro_bytes = 0;
  int all_rw_count = 0;
  int all_rw_bytes = 0;

// To make fmt_stats be a syntactic constant (for format warnings), use #define.
#define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
  const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
  const char *hdr = "                        ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";

  LogMessage(cds) msg;

  msg.info("Detailed metadata info (excluding od/st regions; rw stats include md/mc regions):");
  msg.info("%s", hdr);
  msg.info("%s", sep);
  for (int type = 0; type < int(_number_of_types); type ++) {
    const char *name = type_name((Type)type);
    int ro_count = _counts[RO][type];
    int ro_bytes = _bytes [RO][type];
    int rw_count = _counts[RW][type];
    int rw_bytes = _bytes [RW][type];
    int count = ro_count + rw_count;
    int bytes = ro_bytes + rw_bytes;

    double ro_perc = percent_of(ro_bytes, ro_all);
    double rw_perc = percent_of(rw_bytes, rw_all);
    double perc    = percent_of(bytes, ro_all + rw_all);

    msg.info(fmt_stats, name,
             ro_count, ro_bytes, ro_perc,
             rw_count, rw_bytes, rw_perc,
             count, bytes, perc);

    all_ro_count += ro_count;
    all_ro_bytes += ro_bytes;
    all_rw_count += rw_count;
    all_rw_bytes += rw_bytes;
  }

  int all_count = all_ro_count + all_rw_count;
  int all_bytes = all_ro_bytes + all_rw_bytes;

  double all_ro_perc = percent_of(all_ro_bytes, ro_all);
  double all_rw_perc = percent_of(all_rw_bytes, rw_all);
  double all_perc    = percent_of(all_bytes, ro_all + rw_all);

  msg.info("%s", sep);
  msg.info(fmt_stats, "Total",
           all_ro_count, all_ro_bytes, all_ro_perc,
           all_rw_count, all_rw_bytes, all_rw_perc,
           all_count, all_bytes, all_perc);

  assert(all_ro_bytes == ro_all, "everything should have been counted");
  assert(all_rw_bytes == rw_all, "everything should have been counted");

#undef fmt_stats
}

// Populate the shared space.
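//
// VM_PopulateDumpSharedSpace runs at a safepoint. In outline, doit() below:
//   [1] gathers all loaded classes into _global_klass_objects;
//   [2] rewrites bytecodes and computes method fingerprints;
//   [3] runs ArchiveCompactor to copy metadata into the rw/ro regions;
//   [4] dumps the symbol/string tables and the archivable java heap objects;
//   [5] allocates the C++ vtable clones in md and patches the _vptr of every
//       archived metadata object to point at them;
//   [6] writes all regions to the archive file and exits the VM.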

class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  GrowableArray<MemRegion> *_string_regions;
  GrowableArray<MemRegion> *_open_archive_heap_regions;

  void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_symbols();
  char* dump_read_only_tables();
  void print_region_stats();
  void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                               const char *name, const size_t total_size);
public:

  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit();   // outline because gdb sucks
  static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only, bool allow_exec);
}; // class VM_PopulateDumpSharedSpace

class SortedSymbolClosure: public SymbolClosure {
  GrowableArray<Symbol*> _symbols;
  virtual void do_symbol(Symbol** sym) {
    assert((*sym)->is_permanent(), "archived symbols must be permanent");
    _symbols.append(*sym);
  }
  static int compare_symbols_by_address(Symbol** a, Symbol** b) {
    if (a[0] < b[0]) {
      return -1;
    } else if (a[0] == b[0]) {
      return 0;
    } else {
      return 1;
    }
  }

public:
  SortedSymbolClosure() {
    SymbolTable::symbols_do(this);
    _symbols.sort(compare_symbols_by_address);
  }
  GrowableArray<Symbol*>* get_sorted_symbols() {
    return &_symbols;
  }
};

// ArchiveCompactor --
//
// This class is the central piece of shared archive compaction -- all metaspace data are
// initially allocated outside of the shared regions. ArchiveCompactor copies the
// metaspace data into their final location in the shared regions.

class ArchiveCompactor : AllStatic {
  static DumpAllocStats* _alloc_stats;
  static SortedSymbolClosure* _ssc;

  static unsigned my_hash(const address& a) {
    return primitive_hash<address>(a);
  }
  static bool my_equals(const address& a0, const address& a1) {
    return primitive_equals<address>(a0, a1);
  }
  typedef ResourceHashtable<
      address, address,
      ArchiveCompactor::my_hash,   // solaris compiler doesn't like: primitive_hash<address>
      ArchiveCompactor::my_equals, // solaris compiler doesn't like: primitive_equals<address>
      16384, ResourceObj::C_HEAP> RelocationTable;
  static RelocationTable* _new_loc_table;

public:
  static void initialize() {
    _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats;
    _new_loc_table = new(ResourceObj::C_HEAP, mtInternal)RelocationTable;
  }
  static DumpAllocStats* alloc_stats() {
    return _alloc_stats;
  }

  static void allocate(MetaspaceClosure::Ref* ref, bool read_only) {
    address obj = ref->obj();
    int bytes = ref->size() * BytesPerWord;
    char* p;
    size_t alignment = BytesPerWord;
    char* oldtop;
    char* newtop;

    if (read_only) {
      oldtop = _ro_region.top();
      p = _ro_region.allocate(bytes, alignment);
      newtop = _ro_region.top();
    } else {
      oldtop = _rw_region.top();
      p = _rw_region.allocate(bytes, alignment);
      newtop = _rw_region.top();
    }
    memcpy(p, obj, bytes);
    bool isnew = _new_loc_table->put(obj, (address)p);
    log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
    assert(isnew, "must be");

    _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
    if (ref->msotype() == MetaspaceObj::SymbolType) {
      uintx delta = MetaspaceShared::object_delta(p);
      if (delta > MAX_SHARED_DELTA) {
        // This is just a sanity check and should not appear in any real world usage. This
        // happens only if you allocate more than 2GB of Symbols and would require
        // millions of shared classes.
        vm_exit_during_initialization("Too many Symbols in the CDS archive",
                                      "Please reduce the number of shared classes.");
      }
    }
  }

  static address get_new_loc(MetaspaceClosure::Ref* ref) {
    address* pp = _new_loc_table->get(ref->obj());
    assert(pp != NULL, "must be");
    return *pp;
  }

private:
  // Makes a shallow copy of visited MetaspaceObj's
  class ShallowCopier: public UniqueMetaspaceClosure {
    bool _read_only;
  public:
    ShallowCopier(bool read_only) : _read_only(read_only) {}

    virtual void do_unique_ref(Ref* ref, bool read_only) {
      if (read_only == _read_only) {
        allocate(ref, read_only);
      }
    }
  };

  // Relocate embedded pointers within a MetaspaceObj's shallow copy
  class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
  public:
    virtual void do_unique_ref(Ref* ref, bool read_only) {
      address new_loc = get_new_loc(ref);
      RefRelocator refer;
      ref->metaspace_pointers_do_at(&refer, new_loc);
    }
  };

  // Relocate a reference to point to its shallow copy
  class RefRelocator: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        ref->update(get_new_loc(ref));
      }
      return false; // Do not recurse.
    }
  };

#ifdef ASSERT
  class IsRefInArchiveChecker: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        char* obj = (char*)ref->obj();
        assert(_ro_region.contains(obj) || _rw_region.contains(obj),
               "must be relocated to point to CDS archive");
      }
      return false; // Do not recurse.
    }
  };
#endif

public:
  static void copy_and_compact() {
    // We should no longer allocate anything from the metaspace, so that
    // we can have a stable set of MetaspaceObjs to work with.
    Metaspace::freeze();

    ResourceMark rm;
    SortedSymbolClosure the_ssc; // StackObj
    _ssc = &the_ssc;

    tty->print_cr("Scanning all metaspace objects ... ");
    {
      // allocate and shallow-copy RW objects, immediately following the MC region
      tty->print_cr("Allocating RW objects ... ");
      _mc_region.pack(&_rw_region);

      ResourceMark rm;
      ShallowCopier rw_copier(false);
      iterate_roots(&rw_copier);
    }
    {
      // allocate and shallow-copy RO objects, immediately following the RW region
      tty->print_cr("Allocating RO objects ... ");
      _rw_region.pack(&_ro_region);

      ResourceMark rm;
      ShallowCopier ro_copier(true);
      iterate_roots(&ro_copier);
    }
    {
      tty->print_cr("Relocating embedded pointers ... ");
      ResourceMark rm;
      ShallowCopyEmbeddedRefRelocator emb_reloc;
      iterate_roots(&emb_reloc);
    }
    {
      tty->print_cr("Relocating external roots ... ");
      ResourceMark rm;
      RefRelocator ext_reloc;
      iterate_roots(&ext_reloc);
    }

#ifdef ASSERT
    {
      tty->print_cr("Verifying external roots ... ");
      ResourceMark rm;
      IsRefInArchiveChecker checker;
      iterate_roots(&checker);
    }
#endif

    // cleanup
    _ssc = NULL;
  }

  // We must relocate SystemDictionary::_well_known_klasses[] only after we have copied in
  // the java objects during dump_java_heap_objects(): during the object copy, we operate on
  // old objects which assert that their klass is the original klass.
  static void relocate_well_known_klasses() {
    {
      tty->print_cr("Relocating SystemDictionary::_well_known_klasses[] ... ");
      ResourceMark rm;
      RefRelocator ext_reloc;
      SystemDictionary::well_known_klasses_do(&ext_reloc);
    }
    // NOTE: after this point, we shouldn't have any globals that can reach the old
    // objects.

    // We cannot use any of the objects in the heap anymore (except for the objects
    // in the CDS shared string regions) because their headers no longer point to
    // valid Klasses.
  }

  static void iterate_roots(MetaspaceClosure* it) {
    GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols();
    for (int i=0; i<symbols->length(); i++) {
      it->push(symbols->adr_at(i));
    }
    if (_global_klass_objects != NULL) {
      // Need to fix up the pointers
      for (int i = 0; i < _global_klass_objects->length(); i++) {
        // NOTE -- this requires that the vtable is NOT yet patched, or else we are hosed.
        it->push(_global_klass_objects->adr_at(i));
      }
    }
    FileMapInfo::metaspace_pointers_do(it);
    SystemDictionary::classes_do(it);
    Universe::metaspace_pointers_do(it);
    SymbolTable::metaspace_pointers_do(it);
    vmSymbols::metaspace_pointers_do(it);
  }

  static Klass* get_relocated_klass(Klass* orig_klass) {
    address* pp = _new_loc_table->get((address)orig_klass);
    assert(pp != NULL, "must be");
    Klass* klass = (Klass*)(*pp);
    assert(klass->is_klass(), "must be");
    return klass;
  }
};

DumpAllocStats* ArchiveCompactor::_alloc_stats;
SortedSymbolClosure* ArchiveCompactor::_ssc;
ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table;

void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx,
                                              DumpRegion* dump_region, bool read_only, bool allow_exec) {
  mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
}

void VM_PopulateDumpSharedSpace::dump_symbols() {
  tty->print_cr("Dumping symbol table ...");

  NOT_PRODUCT(SymbolTable::verify());
  SymbolTable::write_to_archive();
}

char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
  char* oldtop = _ro_region.top();
  // Reorder the system dictionary. Moving the symbols affects
  // how the hash table indices are calculated.
  SystemDictionary::reorder_dictionary_for_sharing();
  tty->print("Removing java_mirror ... ");
  remove_java_mirror_in_classes();
  tty->print_cr("done.");
  NOT_PRODUCT(SystemDictionary::verify();)

  size_t buckets_bytes = SystemDictionary::count_bytes_for_buckets();
  char* buckets_top = _ro_region.allocate(buckets_bytes, sizeof(intptr_t));
  SystemDictionary::copy_buckets(buckets_top, _ro_region.top());

  size_t table_bytes = SystemDictionary::count_bytes_for_table();
  char* table_top = _ro_region.allocate(table_bytes, sizeof(intptr_t));
  SystemDictionary::copy_table(table_top, _ro_region.top());

  // Write the other data to the output array.
  WriteClosure wc(&_ro_region);
  MetaspaceShared::serialize(&wc);

  char* newtop = _ro_region.top();
  ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - oldtop), true);
  return buckets_top;
}

void VM_PopulateDumpSharedSpace::doit() {
  Thread* THREAD = VMThread::vm_thread();

  NOT_PRODUCT(SystemDictionary::verify();)
  // The following guarantee is meant to ensure that no loader constraints
  // exist yet, since the constraints table is not shared. This becomes
  // more important now that we don't re-initialize vtables/itables for
  // shared classes at runtime, where constraints were previously created.
  guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
            "loader constraints are not saved");
  guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
            "placeholders are not saved");
  // Revisit and implement this if we prelink method handle call sites:
  guarantee(SystemDictionary::invoke_method_table() == NULL ||
            SystemDictionary::invoke_method_table()->number_of_entries() == 0,
            "invoke method table is not saved");

  // At this point, many classes have been loaded.
  // Gather systemDictionary classes in a global array so we don't have to
  // walk the SystemDictionary again.
  _global_klass_objects = new GrowableArray<Klass*>(1000);
  CollectClassesClosure collect_classes;
  ClassLoaderDataGraph::loaded_classes_do(&collect_classes);

  tty->print_cr("Number of classes %d", _global_klass_objects->length());
  {
    int num_type_array = 0, num_obj_array = 0, num_inst = 0;
    for (int i = 0; i < _global_klass_objects->length(); i++) {
      Klass* k = _global_klass_objects->at(i);
      if (k->is_instance_klass()) {
        num_inst ++;
      } else if (k->is_objArray_klass()) {
        num_obj_array ++;
      } else {
        assert(k->is_typeArray_klass(), "sanity");
        num_type_array ++;
      }
    }
    tty->print_cr("    instance classes   = %5d", num_inst);
    tty->print_cr("    obj array classes  = %5d", num_obj_array);
    tty->print_cr("    type array classes = %5d", num_type_array);
  }

  // Ensure the ConstMethods won't be modified at run-time
  tty->print("Updating ConstMethods ... ");
  rewrite_nofast_bytecodes_and_calculate_fingerprints();
  tty->print_cr("done.");

  // Move classes from platform/system dictionaries into the boot dictionary
  SystemDictionary::combine_shared_dictionaries();

  // Remove all references outside the metadata
  tty->print("Removing unshareable information ... ");
  remove_unshareable_in_classes();
  tty->print_cr("done.");

  // We don't support archiving anonymous classes. Verify that they are not stored
  // in any of the dictionaries.
  NOT_PRODUCT(assert_no_anonymous_classes_in_dictionaries());

  SystemDictionaryShared::finalize_verification_constraints();

  ArchiveCompactor::initialize();
  ArchiveCompactor::copy_and_compact();

  dump_symbols();

  // Dump supported java heap objects
  _string_regions = NULL;
  _open_archive_heap_regions = NULL;
  dump_java_heap_objects();

  ArchiveCompactor::relocate_well_known_klasses();

  char* read_only_tables_start = dump_read_only_tables();
  _ro_region.pack(&_md_region);

  char* vtbl_list = _md_region.top();
  MetaspaceShared::allocate_cpp_vtable_clones();
  _md_region.pack(&_od_region);

  // Relocate the archived class file data into the od region
  relocate_cached_class_file();
  _od_region.pack();

  // The 5 core spaces are allocated consecutively mc->rw->ro->md->od, so their total size
  // is just the space between the two ends.
  size_t core_spaces_size = _od_region.end() - _mc_region.base();
  assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
         "should already be aligned");

  // During patching, some virtual methods may be called, so at this point
  // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
  MetaspaceShared::patch_cpp_vtable_pointers();

  // The vtable clones contain addresses of the current process.
  // We don't want to write these addresses into the archive.
  MetaspaceShared::zero_cpp_vtable_clones_for_writing();

  // Create and write the archive file that maps the shared spaces.

  FileMapInfo* mapinfo = new FileMapInfo();
  mapinfo->populate_header(os::vm_allocation_granularity());
  mapinfo->set_read_only_tables_start(read_only_tables_start);
  mapinfo->set_misc_data_patching_start(vtbl_list);
  mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers());
  mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size());
  mapinfo->set_core_spaces_size(core_spaces_size);

  for (int pass=1; pass<=2; pass++) {
    if (pass == 1) {
      // The first pass doesn't actually write the data to disk. All it
      // does is to update the fields in the mapinfo->_header.
    } else {
      // After the first pass, the contents of mapinfo->_header are finalized,
      // so we can compute the header's CRC, and write the contents of the header
      // and the regions into disk.
      mapinfo->open_for_write();
      mapinfo->set_header_crc(mapinfo->compute_header_crc());
    }
    mapinfo->write_header();

    // NOTE: mc contains the trampoline code for method entries, and md contains the
    // C++ vtable clones; both are patched at run time, so they need to be read/write.
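    // The (read_only, allow_exec) attributes below determine how each region is
    // mapped at run time: mc (false, true), rw (false, false), ro (true, false),
    // md (false, false), od (true, false).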
    write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
    write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
    write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
    write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
    write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);

    _total_string_region_size = mapinfo->write_archive_heap_regions(
                                      _string_regions,
                                      MetaspaceShared::first_string,
                                      MetaspaceShared::max_strings);
    _total_open_archive_region_size = mapinfo->write_archive_heap_regions(
                                      _open_archive_heap_regions,
                                      MetaspaceShared::first_open_archive_heap_region,
                                      MetaspaceShared::max_open_archive_heap_region);
  }

  mapinfo->close();

  // Restore the vtable in case we invoke any virtual methods.
  MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);

  print_region_stats();

  if (log_is_enabled(Info, cds)) {
    ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
                                                 int(_mc_region.used()), int(_md_region.used()));
  }

  if (PrintSystemDictionaryAtExit) {
    SystemDictionary::print();
  }
  // There may be other pending VM operations that operate on the InstanceKlasses,
  // which will fail because InstanceKlasses::remove_unshareable_info()
  // has been called. Forget these operations and exit the VM directly.
  vm_direct_exit(0);
}

void VM_PopulateDumpSharedSpace::print_region_stats() {
  // Print statistics of all the regions
  const size_t total_reserved = _ro_region.reserved()  + _rw_region.reserved() +
                                _mc_region.reserved()  + _md_region.reserved() +
                                _od_region.reserved()  +
                                _total_string_region_size +
                                _total_open_archive_region_size;
  const size_t total_bytes = _ro_region.used()  + _rw_region.used() +
                             _mc_region.used()  + _md_region.used() +
                             _od_region.used()  +
                             _total_string_region_size +
                             _total_open_archive_region_size;
  const double total_u_perc = percent_of(total_bytes, total_reserved);

  _mc_region.print(total_reserved);
  _rw_region.print(total_reserved);
  _ro_region.print(total_reserved);
  _md_region.print(total_reserved);
  _od_region.print(total_reserved);
  print_heap_region_stats(_string_regions, "st", total_reserved);
  print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);

  tty->print_cr("total    : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
                total_bytes, total_reserved, total_u_perc);
}

void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                                                         const char *name, const size_t total_size) {
  int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
  for (int i = 0; i < arr_len; i++) {
    char* start = (char*)heap_mem->at(i).start();
    size_t size = heap_mem->at(i).byte_size();
    char* top = start + size;
    tty->print_cr("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
                  name, i, size, size/double(total_size)*100.0, size, p2i(start));
  }
}

// Update a Java object to point its Klass* to the new location after
// the shared archive has been compacted.
void MetaspaceShared::relocate_klass_ptr(oop o) {
  assert(DumpSharedSpaces, "sanity");
  Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
  o->set_klass(k);
}

class LinkSharedClassesClosure : public KlassClosure {
  Thread* THREAD;
  bool    _made_progress;
public:
  LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {}

  void reset()               { _made_progress = false; }
  bool made_progress() const { return _made_progress; }

  void do_klass(Klass* k) {
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      // Link the class to cause the bytecodes to be rewritten and the
      // cpcache to be created. Class verification is done according
      // to -Xverify setting.
      _made_progress |= MetaspaceShared::try_link_class(ik, THREAD);
      guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");

      ik->constants()->resolve_class_constants(THREAD);
    }
  }
};

class CheckSharedClassesClosure : public KlassClosure {
  bool _made_progress;
public:
  CheckSharedClassesClosure() : _made_progress(false) {}

  void reset()               { _made_progress = false; }
  bool made_progress() const { return _made_progress; }
  void do_klass(Klass* k) {
    if (k->is_instance_klass() && InstanceKlass::cast(k)->check_sharing_error_state()) {
      _made_progress = true;
    }
  }
};

void MetaspaceShared::check_shared_class_loader_type(Klass* k) {
  if (k->is_instance_klass()) {
    InstanceKlass* ik = InstanceKlass::cast(k);
    u2 loader_type = ik->loader_type();
    ResourceMark rm;
    guarantee(loader_type != 0,
              "Class loader type is not set for this class %s", ik->name()->as_C_string());
  }
}

void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) {
  // We need to iterate because verification may cause additional classes
  // to be loaded.
  LinkSharedClassesClosure link_closure(THREAD);
  do {
    link_closure.reset();
    ClassLoaderDataGraph::loaded_classes_do(&link_closure);
    guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
  } while (link_closure.made_progress());

  if (_has_error_classes) {
    // Mark all classes whose super class or interfaces failed verification.
    CheckSharedClassesClosure check_closure;
    do {
      // Not completely sure if we need to do this iteratively. Anyway,
      // we should come here only if there are unverifiable classes, which
      // shouldn't happen in normal cases. So better safe than sorry.
      check_closure.reset();
      ClassLoaderDataGraph::loaded_classes_do(&check_closure);
    } while (check_closure.made_progress());

    if (IgnoreUnverifiableClassesDuringDump) {
      // This is useful when running JCK or SQE tests. You should not
      // enable this when running real apps.
      SystemDictionary::remove_classes_in_error_state();
    } else {
      tty->print_cr("Please remove the unverifiable classes from your class list and try again");
      exit(1);
    }
  }
}

void MetaspaceShared::prepare_for_dumping() {
  Arguments::check_unsupported_dumping_properties();
  ClassLoader::initialize_shared_path();
  FileMapInfo::allocate_classpath_entry_table();
}

// Preload classes from a list, populate the shared spaces and dump to a
// file.
void MetaspaceShared::preload_and_dump(TRAPS) {
  { TraceTime timer("Dump Shared Spaces", TRACETIME_LOG(Info, startuptime));
    ResourceMark rm;
    char class_list_path_str[JVM_MAXPATHLEN];
    // Preload classes to be shared.
    // Should use some os:: method rather than fopen() here.
    const char* class_list_path;
    if (SharedClassListFile == NULL) {
      // Construct the default path to the class list (<java_home>/lib/classlist):
      // walk up from the location of the VM and optionally tack on "lib"
      // (depending on platform).
      os::jvm_path(class_list_path_str, sizeof(class_list_path_str));
      for (int i = 0; i < 3; i++) {
        char* end = strrchr(class_list_path_str, *os::file_separator());
        if (end != NULL) *end = '\0';
      }
      int class_list_path_len = (int)strlen(class_list_path_str);
      if (class_list_path_len >= 3) {
        if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
          if (class_list_path_len < JVM_MAXPATHLEN - 4) {
            jio_snprintf(class_list_path_str + class_list_path_len,
                         sizeof(class_list_path_str) - class_list_path_len,
                         "%slib", os::file_separator());
            class_list_path_len += 4;
          }
        }
      }
      if (class_list_path_len < JVM_MAXPATHLEN - 10) {
        jio_snprintf(class_list_path_str + class_list_path_len,
                     sizeof(class_list_path_str) - class_list_path_len,
                     "%sclasslist", os::file_separator());
      }
      class_list_path = class_list_path_str;
    } else {
      class_list_path = SharedClassListFile;
    }

    tty->print_cr("Loading classes to share ...");
    _has_error_classes = false;
    int class_count = preload_classes(class_list_path, THREAD);
    if (ExtraSharedClassListFile != NULL) {
      class_count += preload_classes(ExtraSharedClassListFile, THREAD);
    }
    tty->print_cr("Loading classes to share: done.");

    log_info(cds)("Shared spaces: preloaded %d classes", class_count);

    // Rewrite and link classes.
    tty->print_cr("Rewriting and linking classes ...");

    // Link any classes which got missed. This would happen if we have loaded classes that
    // were not explicitly specified in the classlist. E.g., if an interface implemented by class K
    // fails verification, all other interfaces that were not specified in the classlist but
    // are implemented by K are not verified.
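    // (For reference: a classlist is a plain text file with one class name per
    // line, in internal form with '/' separators, e.g. java/lang/Object. This
    // sketch of the format is illustrative; see ClassListParser for the
    // authoritative grammar.)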
    link_and_cleanup_shared_classes(CATCH);
    tty->print_cr("Rewriting and linking classes: done");

    SystemDictionary::clear_invoke_method_table();

    VM_PopulateDumpSharedSpace op;
    VMThread::execute(&op);
  }
}

int MetaspaceShared::preload_classes(const char* class_list_path, TRAPS) {
  ClassListParser parser(class_list_path);
  int class_count = 0;

  while (parser.parse_one_line()) {
    Klass* klass = ClassLoaderExt::load_one_class(&parser, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      if (klass == NULL &&
          (PENDING_EXCEPTION->klass()->name() == vmSymbols::java_lang_ClassNotFoundException())) {
        // Print a warning only when the pending exception is ClassNotFoundException.
        tty->print_cr("Preload Warning: Cannot find %s", parser.current_class_name());
      }
      CLEAR_PENDING_EXCEPTION;
    }
    if (klass != NULL) {
      if (log_is_enabled(Trace, cds)) {
        ResourceMark rm;
        log_trace(cds)("Shared spaces preloaded: %s", klass->external_name());
      }

      if (klass->is_instance_klass()) {
        InstanceKlass* ik = InstanceKlass::cast(klass);

        // Link the class to cause the bytecodes to be rewritten and the
        // cpcache to be created. Linking is done as soon as classes are
        // loaded, so that the related data structures (klass and cpCache)
        // are located together.
        try_link_class(ik, THREAD);
        guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
      }

      class_count++;
    }
  }

  return class_count;
}

// Returns true if the class's status has changed.
bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
  assert(DumpSharedSpaces, "should only be called during dumping");
  if (ik->init_state() < InstanceKlass::linked) {
    bool saved = BytecodeVerificationLocal;
    if (!(ik->is_shared_boot_class())) {
      // The verification decision is based on BytecodeVerificationRemote
      // for non-system classes. Since we are using the NULL classloader
      // to load non-system classes during dumping, we need to temporarily
      // change BytecodeVerificationLocal to be the same as
      // BytecodeVerificationRemote. Note that this can cause the parent
      // system classes to be verified as well. The extra overhead is
      // acceptable during dumping.
      BytecodeVerificationLocal = BytecodeVerificationRemote;
    }
    ik->link_class(THREAD);
    if (HAS_PENDING_EXCEPTION) {
      ResourceMark rm;
      tty->print_cr("Preload Warning: Verification failed for %s",
                    ik->external_name());
      CLEAR_PENDING_EXCEPTION;
      ik->set_in_error_state();
      _has_error_classes = true;
    }
    BytecodeVerificationLocal = saved;
    return true;
  } else {
    return false;
  }
}

#if INCLUDE_CDS_JAVA_HEAP
void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
  if (!MetaspaceShared::is_heap_object_archiving_allowed()) {
    if (log_is_enabled(Info, cds)) {
      log_info(cds)(
        "Archived java heap is not supported as UseG1GC, "
        "UseCompressedOops and UseCompressedClassPointers are required. "
1688 "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.", 1689 BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops), 1690 BOOL_TO_STR(UseCompressedClassPointers)); 1691 } 1692 return; 1693 } 1694 1695 { 1696 NoSafepointVerifier nsv; 1697 1698 // Cache for recording where the archived objects are copied to 1699 MetaspaceShared::create_archive_object_cache(); 1700 1701 tty->print_cr("Dumping String objects to closed archive heap region ..."); 1702 NOT_PRODUCT(StringTable::verify()); 1703 // The string space has maximum two regions. See FileMapInfo::write_archive_heap_regions() for details. 1704 _string_regions = new GrowableArray<MemRegion>(2); 1705 StringTable::write_to_archive(_string_regions); 1706 1707 tty->print_cr("Dumping objects to open archive heap region ..."); 1708 _open_archive_heap_regions = new GrowableArray<MemRegion>(2); 1709 MetaspaceShared::dump_open_archive_heap_objects(_open_archive_heap_regions); 1710 1711 MetaspaceShared::destroy_archive_object_cache(); 1712 } 1713 1714 G1HeapVerifier::verify_archive_regions(); 1715 } 1716 1717 void MetaspaceShared::dump_open_archive_heap_objects( 1718 GrowableArray<MemRegion> * open_archive) { 1719 assert(UseG1GC, "Only support G1 GC"); 1720 assert(UseCompressedOops && UseCompressedClassPointers, 1721 "Only support UseCompressedOops and UseCompressedClassPointers enabled"); 1722 1723 Thread* THREAD = Thread::current(); 1724 G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */); 1725 1726 MetaspaceShared::archive_resolved_constants(THREAD); 1727 1728 G1CollectedHeap::heap()->end_archive_alloc_range(open_archive, 1729 os::vm_allocation_granularity()); 1730 } 1731 1732 MetaspaceShared::ArchivedObjectCache* MetaspaceShared::_archive_object_cache = NULL; 1733 oop MetaspaceShared::archive_heap_object(oop obj, Thread* THREAD) { 1734 assert(DumpSharedSpaces, "dump-time only"); 1735 1736 ArchivedObjectCache* cache = MetaspaceShared::archive_object_cache(); 1737 oop* p = cache->get(obj); 1738 if (p != NULL) { 1739 // already archived 1740 return *p; 1741 } 1742 1743 int len = obj->size(); 1744 if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) { 1745 return NULL; 1746 } 1747 1748 int hash = obj->identity_hash(); 1749 oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len); 1750 if (archived_oop != NULL) { 1751 Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)archived_oop, len); 1752 relocate_klass_ptr(archived_oop); 1753 cache->put(obj, archived_oop); 1754 } 1755 return archived_oop; 1756 } 1757 1758 void MetaspaceShared::archive_resolved_constants(Thread* THREAD) { 1759 int i; 1760 for (i = 0; i < _global_klass_objects->length(); i++) { 1761 Klass* k = _global_klass_objects->at(i); 1762 if (k->is_instance_klass()) { 1763 InstanceKlass* ik = InstanceKlass::cast(k); 1764 ik->constants()->archive_resolved_references(THREAD); 1765 } 1766 } 1767 } 1768 1769 void MetaspaceShared::fixup_mapped_heap_regions() { 1770 FileMapInfo *mapinfo = FileMapInfo::current_info(); 1771 mapinfo->fixup_mapped_heap_regions(); 1772 } 1773 #endif // INCLUDE_CDS_JAVA_HEAP 1774 1775 // Closure for serializing initialization data in from a data area 1776 // (ptr_array) read from the shared file. 
class ReadClosure : public SerializeClosure {
 private:
  intptr_t** _ptr_array;

  inline intptr_t nextPtr() {
    return *(*_ptr_array)++;
  }

 public:
  ReadClosure(intptr_t** ptr_array) : _ptr_array(ptr_array) {}

  void do_ptr(void** p) {
    assert(*p == NULL, "initializing a previously initialized pointer.");
    intptr_t obj = nextPtr();
    assert(obj >= 0 || obj < -100,
           "hit tag while initializing ptrs.");
    *p = (void*)obj;
  }

  void do_u4(u4* p) {
    intptr_t obj = nextPtr();
    *p = (u4)(uintx(obj));
  }

  void do_tag(int tag) {
    int old_tag = (int)(intptr_t)nextPtr();
    assert(tag == old_tag, "old tag doesn't match");
    FileMapInfo::assert_mark(tag == old_tag);
  }

  void do_region(u_char* start, size_t size) {
    assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
    assert(size % sizeof(intptr_t) == 0, "bad size");
    do_tag((int)size);
    while (size > 0) {
      *(intptr_t*)start = nextPtr();
      start += sizeof(intptr_t);
      size  -= sizeof(intptr_t);
    }
  }

  bool reading() const { return true; }
};

// Return true if the given address is within the shared region identified by idx.
bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {
  return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx);
}

bool MetaspaceShared::is_in_trampoline_frame(address addr) {
  return UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc);
}

void MetaspaceShared::print_shared_spaces() {
  if (UseSharedSpaces) {
    FileMapInfo::current_info()->print_shared_spaces();
  }
}

// Map the shared spaces at the requested addresses; return true on success.
bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
  size_t image_alignment = mapinfo->alignment();

#ifndef _WINDOWS
  // Map in the shared memory and then map the regions on top of it.
  // On Windows, don't map the memory here because it will cause the
  // mappings of the regions to fail.
  ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
  if (!shared_rs.is_reserved()) return false;
#endif

  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  char* ro_base = NULL; char* ro_top;
  char* rw_base = NULL; char* rw_top;
  char* mc_base = NULL; char* mc_top;
  char* md_base = NULL; char* md_top;
  char* od_base = NULL; char* od_top;

  // Map each shared region.
  if ((mc_base = mapinfo->map_region(mc, &mc_top)) != NULL &&
      (rw_base = mapinfo->map_region(rw, &rw_top)) != NULL &&
      (ro_base = mapinfo->map_region(ro, &ro_top)) != NULL &&
      (md_base = mapinfo->map_region(md, &md_top)) != NULL &&
      (od_base = mapinfo->map_region(od, &od_top)) != NULL &&
      (image_alignment == (size_t)os::vm_allocation_granularity()) &&
      mapinfo->validate_classpath_entry_table()) {
    // Success -- set up MetaspaceObj::_shared_metaspace_{base,top} for
    // fast checking in MetaspaceShared::is_in_shared_metaspace() and
    // MetaspaceObj::is_shared().
    //
    // We require the mc->rw->ro->md->od regions to be laid out consecutively,
    // with no gaps between them.
    // That way, we can ensure that the OS won't be able to allocate any new
    // memory inside _shared_metaspace_{base,top}, which would mess up the
    // simple comparison in MetaspaceShared::is_in_shared_metaspace().
    assert(mc_base < ro_base && mc_base < rw_base && mc_base < md_base && mc_base < od_base, "must be");
    assert(od_top  > ro_top  && od_top  > rw_top  && od_top  > md_top  && od_top  > mc_top,  "must be");
    assert(mc_top == rw_base, "must be");
    assert(rw_top == ro_base, "must be");
    assert(ro_top == md_base, "must be");
    assert(md_top == od_base, "must be");

    MetaspaceObj::_shared_metaspace_base = (void*)mc_base;
    MetaspaceObj::_shared_metaspace_top  = (void*)od_top;
    return true;
  } else {
    // If there was a failure in mapping any of the spaces, unmap the ones
    // that succeeded.
    if (ro_base != NULL) mapinfo->unmap_region(ro);
    if (rw_base != NULL) mapinfo->unmap_region(rw);
    if (mc_base != NULL) mapinfo->unmap_region(mc);
    if (md_base != NULL) mapinfo->unmap_region(md);
    if (od_base != NULL) mapinfo->unmap_region(od);
#ifndef _WINDOWS
    // Release the entire mapped region.
    shared_rs.release();
#endif
    // If -Xshare:on is specified, print the error message and exit the VM;
    // otherwise, set UseSharedSpaces to false and continue.
    if (RequireSharedSpaces || PrintSharedArchiveAndExit) {
      vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
    } else {
      FLAG_SET_DEFAULT(UseSharedSpaces, false);
    }
    return false;
  }
}

// Read the miscellaneous data from the shared file and deserialize it into
// its various destinations.
void MetaspaceShared::initialize_shared_spaces() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();
  _cds_i2i_entry_code_buffers = mapinfo->cds_i2i_entry_code_buffers();
  _cds_i2i_entry_code_buffers_size = mapinfo->cds_i2i_entry_code_buffers_size();
  _core_spaces_size = mapinfo->core_spaces_size();
  char* buffer = mapinfo->misc_data_patching_start();
  clone_cpp_vtables((intptr_t*)buffer);

  // The rest of the data is stored in the RO region (the read-only tables).
  buffer = mapinfo->read_only_tables_start();
  int sharedDictionaryLen = *(intptr_t*)buffer;
  buffer += sizeof(intptr_t);
  int number_of_entries = *(intptr_t*)buffer;
  buffer += sizeof(intptr_t);
  SystemDictionary::set_shared_dictionary((HashtableBucket<mtClass>*)buffer,
                                          sharedDictionaryLen,
                                          number_of_entries);
  buffer += sharedDictionaryLen;

  // The following data are the linked list elements
  // (HashtableEntry objects) for the shared dictionary table.
  int len = *(intptr_t*)buffer; // skip over the shared dictionary entries
  buffer += sizeof(intptr_t);
  buffer += len;

  // Verify various attributes of the archive, plus initialize the
  // shared string/symbol tables.
  intptr_t* array = (intptr_t*)buffer;
  ReadClosure rc(&array);
  serialize(&rc);

  // Initialize the run-time symbol table.
  SymbolTable::create_table();

  // Close the mapinfo file.
  mapinfo->close();

  if (PrintSharedArchiveAndExit) {
    if (PrintSharedDictionary) {
      tty->print_cr("\nShared classes:\n");
      SystemDictionary::print_shared(tty);
    }
    if (_archive_loading_failed) {
      tty->print_cr("archive is invalid");
      vm_exit(1);
    } else {
      tty->print_cr("archive is valid");
      vm_exit(0);
    }
  }
}

// JVM/TI RedefineClasses() support:
bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (UseSharedSpaces) {
    // Remap the shared readonly space to shared readwrite, private.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (!mapinfo->remap_shared_readonly_as_readwrite()) {
      return false;
    }
    _remapped_readwrite = true;
  }
  return true;
}

void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
  // This is highly unlikely to happen on 64-bit platforms, because we have
  // reserved a 4GB space. On 32-bit platforms we reserve only 256MB, so you
  // could run out of space with 100,000 classes or so.
  _mc_region.print_out_of_space_msg(name, needed_bytes);
  _rw_region.print_out_of_space_msg(name, needed_bytes);
  _ro_region.print_out_of_space_msg(name, needed_bytes);
  _md_region.print_out_of_space_msg(name, needed_bytes);
  _od_region.print_out_of_space_msg(name, needed_bytes);

  vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
                                "Please reduce the number of shared classes.");
}
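// A hypothetical end-to-end usage of the code above (command lines are
// illustrative, not exhaustive):
//
//   # Create the archive. This triggers preload_and_dump(), reading the
//   # default lib/classlist or the file given via -XX:SharedClassListFile.
//   java -Xshare:dump
//
//   # Use the archive. map_shared_spaces() and initialize_shared_spaces()
//   # run during startup; with -Xshare:on a mapping failure is fatal,
//   # while -Xshare:auto silently falls back to non-shared startup.
//   java -Xshare:on -cp app.jar Main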