/*
 * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/classFileStream.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/classListParser.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/klassFactory.hpp"
#include "classfile/loaderConstraints.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/placeholders.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodes.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "memory/archiveUtils.inline.hpp"
#include "memory/dynamicArchive.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/signature.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/ostream.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/hashtable.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

ReservedSpace MetaspaceShared::_shared_rs;
VirtualSpace MetaspaceShared::_shared_vs;
ReservedSpace MetaspaceShared::_symbol_rs;
VirtualSpace MetaspaceShared::_symbol_vs;
MetaspaceSharedStats MetaspaceShared::_stats;
bool MetaspaceShared::_has_error_classes;
bool MetaspaceShared::_archive_loading_failed = false;
bool MetaspaceShared::_remapped_readwrite = false;
address MetaspaceShared::_i2i_entry_code_buffers = NULL;
size_t MetaspaceShared::_i2i_entry_code_buffers_size = 0;
void* MetaspaceShared::_shared_metaspace_static_top = NULL;
intx MetaspaceShared::_relocation_delta;
char* MetaspaceShared::_requested_base_address;
bool MetaspaceShared::_use_optimized_module_handling = true;

// The CDS archive is divided into the following regions:
//     mc  - misc code (the method entry trampolines, c++ vtables)
//     rw  - read-write metadata
//     ro  - read-only metadata and read-only tables
//
//     ca0 - closed archive heap space #0
//     ca1 - closed archive heap space #1 (may be empty)
//     oa0 - open archive heap space #0
//     oa1 - open archive heap space #1 (may be empty)
//
// The mc, rw, and ro regions are linearly allocated, starting from
// SharedBaseAddress, in the order of mc->rw->ro. The sizes of these 3 regions
// are page-aligned, and there's no gap between any consecutive regions.
//
// These 3 regions are populated in the following steps:
// [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
//     temporarily allocated outside of the shared regions. Only the method entry
//     trampolines are written into the mc region.
// [2] C++ vtables are copied into the mc region.
// [3] ArchiveCompactor copies RW metadata into the rw region.
// [4] ArchiveCompactor copies RO metadata into the ro region.
// [5] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
//     are copied into the ro region as read-only tables.
//
// The ca0/ca1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
// Their layout is independent of the mc/rw/ro regions.
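
// For example (an illustrative sketch, not used by this code): a static
// archive is typically produced and then consumed with command lines roughly
// like the following; exact flag spellings and defaults vary by release.
//
//   java -Xshare:dump -XX:SharedClassListFile=classes.lst \
//        -XX:SharedArchiveFile=app.jsa
//   java -Xshare:on -XX:SharedArchiveFile=app.jsa MyApp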
char* DumpRegion::expand_top_to(char* newtop) {
  assert(is_allocatable(), "must be initialized and not packed");
  assert(newtop >= _top, "must not grow backwards");
  if (newtop > _end) {
    MetaspaceShared::report_out_of_space(_name, newtop - _top);
    ShouldNotReachHere();
  }

  if (_rs == MetaspaceShared::shared_rs()) {
    uintx delta;
    if (DynamicDumpSharedSpaces) {
      delta = DynamicArchive::object_delta_uintx(newtop);
    } else {
      delta = MetaspaceShared::object_delta_uintx(newtop);
    }
    if (delta > MAX_SHARED_DELTA) {
      // This is just a sanity check and should not appear in any real world usage. This
      // happens only if you allocate more than 2GB of shared objects, which would require
      // millions of shared classes.
      vm_exit_during_initialization("Out of memory in the CDS archive",
                                    "Please reduce the number of shared classes.");
    }
  }

  MetaspaceShared::commit_to(_rs, _vs, newtop);
  _top = newtop;
  return _top;
}

char* DumpRegion::allocate(size_t num_bytes, size_t alignment) {
  char* p = (char*)align_up(_top, alignment);
  char* newtop = p + align_up(num_bytes, alignment);
  expand_top_to(newtop);
  memset(p, 0, newtop - p);
  return p;
}
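
// For example (a sketch of the arithmetic above): allocate(10, 8) aligns _top
// up to an 8-byte boundary, expands the region by align_up(10, 8) == 16 bytes,
// zeroes them, and returns the aligned start address.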
void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
  assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
  intptr_t *p = (intptr_t*)_top;
  char* newtop = _top + sizeof(intptr_t);
  expand_top_to(newtop);
  *p = n;
  if (need_to_mark) {
    ArchivePtrMarker::mark_pointer(p);
  }
}

void DumpRegion::print(size_t total_bytes) const {
  log_debug(cds)("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
                 _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()),
                 p2i(_base + MetaspaceShared::final_delta()));
}

void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
  log_error(cds)("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
                 _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
  if (strcmp(_name, failing_region) == 0) {
    log_error(cds)(" required = %d", int(needed_bytes));
  }
}

void DumpRegion::init(ReservedSpace* rs, VirtualSpace* vs) {
  _rs = rs;
  _vs = vs;
  // Start with 0 committed bytes. The memory will be committed as needed by
  // MetaspaceShared::commit_to().
  if (!_vs->initialize(*_rs, 0)) {
    fatal("Unable to allocate memory for shared space");
  }
  _base = _top = _rs->base();
  _end = _rs->end();
}

void DumpRegion::pack(DumpRegion* next) {
  assert(!is_packed(), "sanity");
  _end = (char*)align_up(_top, MetaspaceShared::reserved_space_alignment());
  _is_packed = true;
  if (next != NULL) {
    next->_rs = _rs;
    next->_vs = _vs;
    next->_base = next->_top = this->_end;
    next->_end = _rs->end();
  }
}

static DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _symbol_region("symbols");
static size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;

void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space) {
  first_space->init(&_shared_rs, &_shared_vs);
}

DumpRegion* MetaspaceShared::misc_code_dump_space() {
  return &_mc_region;
}

DumpRegion* MetaspaceShared::read_write_dump_space() {
  return &_rw_region;
}

DumpRegion* MetaspaceShared::read_only_dump_space() {
  return &_ro_region;
}

void MetaspaceShared::pack_dump_space(DumpRegion* current, DumpRegion* next,
                                      ReservedSpace* rs) {
  current->pack(next);
}

char* MetaspaceShared::symbol_space_alloc(size_t num_bytes) {
  return _symbol_region.allocate(num_bytes);
}

char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
  return _mc_region.allocate(num_bytes);
}

char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
  return _ro_region.allocate(num_bytes);
}

size_t MetaspaceShared::reserved_space_alignment() { return os::vm_allocation_granularity(); }

static bool shared_base_valid(char* shared_base) {
#ifdef _LP64
  return CompressedKlassPointers::is_valid_base((address)shared_base);
#else
  return true;
#endif
}

static bool shared_base_too_high(char* shared_base, size_t cds_total) {
  if (SharedBaseAddress != 0 && shared_base < (char*)SharedBaseAddress) {
    // SharedBaseAddress is very high (e.g., 0xffffffffffffff00) so
    // align_up(SharedBaseAddress, MetaspaceShared::reserved_space_alignment()) has wrapped around.
    return true;
  }
  if (max_uintx - uintx(shared_base) < uintx(cds_total)) {
    // The end of the archive will wrap around
    return true;
  }

  return false;
}

static char* compute_shared_base(size_t cds_total) {
  char* shared_base = (char*)align_up((char*)SharedBaseAddress, MetaspaceShared::reserved_space_alignment());
  const char* err = NULL;
  if (shared_base_too_high(shared_base, cds_total)) {
    err = "too high";
  } else if (!shared_base_valid(shared_base)) {
    err = "invalid for this platform";
  }
  if (err) {
    log_warning(cds)("SharedBaseAddress (" INTPTR_FORMAT ") is %s. Reverted to " INTPTR_FORMAT,
                     p2i((void*)SharedBaseAddress), err,
                     p2i((void*)Arguments::default_SharedBaseAddress()));
    SharedBaseAddress = Arguments::default_SharedBaseAddress();
    shared_base = (char*)align_up((char*)SharedBaseAddress, MetaspaceShared::reserved_space_alignment());
  }
  assert(!shared_base_too_high(shared_base, cds_total) && shared_base_valid(shared_base), "Sanity");
  return shared_base;
}
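
// For example (illustrative numbers only): on a platform whose allocation
// granularity is 64 KB, a user-specified -XX:SharedBaseAddress=0x800000001
// would first be aligned up to 0x800010000 before being range- and
// platform-checked above.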
void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
  assert(DumpSharedSpaces, "should be called for dump time only");

  const size_t reserve_alignment = MetaspaceShared::reserved_space_alignment();

#ifdef _LP64
  // On 64-bit VM we reserve a 4G range and, if UseCompressedClassPointers=1,
  // will use that to house both the archives and the ccs. See below for
  // details.
  const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
  const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
#else
  // We don't support archives larger than 256MB on 32-bit due to limited
  // virtual address space.
  size_t cds_total = align_down(256*M, reserve_alignment);
#endif

  char* shared_base = compute_shared_base(cds_total);
  _requested_base_address = shared_base;

  // Whether to use SharedBaseAddress as attach address.
  bool use_requested_base = true;

  if (shared_base == NULL) {
    use_requested_base = false;
  }

  if (ArchiveRelocationMode == 1) {
    log_info(cds)("ArchiveRelocationMode == 1: always allocate class space at an alternative address");
    use_requested_base = false;
  }

  // First try to reserve the space at the specified SharedBaseAddress.
  assert(!_shared_rs.is_reserved(), "must be");
  if (use_requested_base) {
    _shared_rs = ReservedSpace(cds_total, reserve_alignment,
                               false /* large */, (char*)shared_base);
    if (_shared_rs.is_reserved()) {
      assert(_shared_rs.base() == shared_base, "should match");
    } else {
      log_info(cds)("dumptime space reservation: failed to map at "
                    "SharedBaseAddress " PTR_FORMAT, p2i(shared_base));
    }
  }
  if (!_shared_rs.is_reserved()) {
    // Get a reserved space anywhere if attaching at the SharedBaseAddress
    // fails:
    if (UseCompressedClassPointers) {
      // If we need to reserve class space as well, let the platform handle
      // the reservation.
      LP64_ONLY(_shared_rs = Metaspace::reserve_address_space_for_compressed_classes(cds_total);)
      NOT_LP64(ShouldNotReachHere();)
    } else {
      // anywhere is fine.
      _shared_rs = ReservedSpace(cds_total, reserve_alignment,
                                 false /* large */, (char*)NULL);
    }
  }

  if (!_shared_rs.is_reserved()) {
    vm_exit_during_initialization("Unable to reserve memory for shared space",
                                  err_msg(SIZE_FORMAT " bytes.", cds_total));
  }

#ifdef _LP64

  if (UseCompressedClassPointers) {

    assert(CompressedKlassPointers::is_valid_base((address)_shared_rs.base()), "Sanity");

    // On 64-bit VM, if UseCompressedClassPointers=1, the compressed class space
    // must be allocated near the cds such that the compressed Klass pointer
    // encoding can be used to en/decode pointers from both cds and ccs. Since
    // Metaspace cannot do this (it knows nothing about cds), we do it for
    // Metaspace here and pass it the space to use for ccs.
    //
    // We do this by reserving space for the ccs behind the archives. Note
    // however that ccs follows a different alignment
    // (Metaspace::reserve_alignment), so there may be a gap between ccs and
    // cds.
    // We use a similar layout at runtime, see reserve_address_space_for_archives().
    //
    //                              +-- SharedBaseAddress (default = 0x800000000)
    //                              v
    // +-..---------+---------+ ... +----+----+----+--------+-----------------+
    // |    Heap    | Archive |     | MC | RW | RO | [gap]  |   class space   |
    // +-..---------+---------+ ... +----+----+----+--------+-----------------+
    // |<--   MaxHeapSize  -->|     |<-- UnscaledClassSpaceMax = 4GB -------->|
    //
    // Note: ccs must follow the archives, and the archives must start at the
    // encoding base. However, the exact placement of ccs does not matter as
    // long as it resides in the encoding range of CompressedKlassPointers
    // and comes after the archive.
    //
    // We do this by splitting up the allocated 4G into 3G of archive space,
    // followed by 1G for the ccs:
    // + The upper 1 GB is used as the "temporary compressed class space"
    //   -- preload_classes() will store Klasses into this space.
    // + The lower 3 GB is used for the archive -- when preload_classes()
    //   is done, ArchiveCompactor will copy the class metadata into this
    //   space, first the RW parts, then the RO parts.

    // Starting address of ccs must be aligned to Metaspace::reserve_alignment()...
    size_t class_space_size = align_down(_shared_rs.size() / 4, Metaspace::reserve_alignment());
    address class_space_start = (address)align_down(_shared_rs.end() - class_space_size, Metaspace::reserve_alignment());
    size_t archive_size = class_space_start - (address)_shared_rs.base();

    ReservedSpace tmp_class_space = _shared_rs.last_part(archive_size);
    _shared_rs = _shared_rs.first_part(archive_size);

    // ... as does the size of ccs.
    tmp_class_space = tmp_class_space.first_part(class_space_size);
    CompressedClassSpaceSize = class_space_size;

    // Let Metaspace initialize ccs
    Metaspace::initialize_class_space(tmp_class_space);

    // and set up CompressedKlassPointers encoding.
    CompressedKlassPointers::initialize((address)_shared_rs.base(), cds_total);

    log_info(cds)("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
                  p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());

    log_info(cds)("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                  CompressedClassSpaceSize, p2i(tmp_class_space.base()));

    assert(_shared_rs.end() == tmp_class_space.base() &&
           is_aligned(_shared_rs.base(), MetaspaceShared::reserved_space_alignment()) &&
           is_aligned(tmp_class_space.base(), Metaspace::reserve_alignment()) &&
           is_aligned(tmp_class_space.size(), Metaspace::reserve_alignment()), "Sanity");
  }

#endif

  init_shared_dump_space(&_mc_region);
  SharedBaseAddress = (size_t)_shared_rs.base();
  log_info(cds)("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                _shared_rs.size(), p2i(_shared_rs.base()));

  // We don't want any valid object to be at the very bottom of the archive.
  // See ArchivePtrMarker::mark_pointer().
  MetaspaceShared::misc_code_space_alloc(16);

  size_t symbol_rs_size = LP64_ONLY(3 * G) NOT_LP64(128 * M);
  _symbol_rs = ReservedSpace(symbol_rs_size);
  if (!_symbol_rs.is_reserved()) {
    vm_exit_during_initialization("Unable to reserve memory for symbols",
                                  err_msg(SIZE_FORMAT " bytes.", symbol_rs_size));
  }
  _symbol_region.init(&_symbol_rs, &_symbol_vs);
}

// Called by universe_post_init()
void MetaspaceShared::post_initialize(TRAPS) {
  if (UseSharedSpaces) {
    int size = FileMapInfo::get_number_of_shared_paths();
    if (size > 0) {
      SystemDictionaryShared::allocate_shared_data_arrays(size, THREAD);
      if (!DynamicDumpSharedSpaces) {
        FileMapInfo* info;
        if (FileMapInfo::dynamic_info() == NULL) {
          info = FileMapInfo::current_info();
        } else {
          info = FileMapInfo::dynamic_info();
        }
        ClassLoaderExt::init_paths_start_index(info->app_class_paths_start_index());
        ClassLoaderExt::init_app_module_paths_start_index(info->app_module_paths_start_index());
      }
    }
  }
}
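
// A sketch of the extra data file parsed below (typically supplied with
// -XX:SharedArchiveConfigFile). The entry syntax shown here is illustrative,
// inferred from the prefix scanning in read_extra_data(), not a normative
// specification:
//
//   VERSION: 1.0
//   @SECTION: Symbol
//   10 -1: linkMethod
//   @SECTION: String
//   5: hello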
static GrowableArrayCHeap<Handle, mtClassShared>* _extra_interned_strings = NULL;

void MetaspaceShared::read_extra_data(const char* filename, TRAPS) {
  _extra_interned_strings = new GrowableArrayCHeap<Handle, mtClassShared>(10000);

  HashtableTextDump reader(filename);
  reader.check_version("VERSION: 1.0");

  while (reader.remain() > 0) {
    int utf8_length;
    int prefix_type = reader.scan_prefix(&utf8_length);
    ResourceMark rm(THREAD);
    if (utf8_length == 0x7fffffff) {
      // buf_len would overflow a 32-bit int value.
      vm_exit_during_initialization(err_msg("string length too large: %d", utf8_length));
    }
    int buf_len = utf8_length+1;
    char* utf8_buffer = NEW_RESOURCE_ARRAY(char, buf_len);
    reader.get_utf8(utf8_buffer, utf8_length);
    utf8_buffer[utf8_length] = '\0';

    if (prefix_type == HashtableTextDump::SymbolPrefix) {
      SymbolTable::new_permanent_symbol(utf8_buffer);
    } else {
      assert(prefix_type == HashtableTextDump::StringPrefix, "Sanity");
      oop s = StringTable::intern(utf8_buffer, THREAD);

      if (HAS_PENDING_EXCEPTION) {
        log_warning(cds, heap)("[line %d] extra interned string allocation failed; size too large: %d",
                               reader.last_line_no(), utf8_length);
        CLEAR_PENDING_EXCEPTION;
      } else {
#if INCLUDE_G1GC
        if (UseG1GC) {
          typeArrayOop body = java_lang_String::value(s);
          const HeapRegion* hr = G1CollectedHeap::heap()->heap_region_containing(body);
          if (hr->is_humongous()) {
            // Don't keep it alive, so it will be GC'ed before we dump the strings, in order
            // to maximize free heap space and minimize fragmentation.
            log_warning(cds, heap)("[line %d] extra interned string ignored; size too large: %d",
                                   reader.last_line_no(), utf8_length);
            continue;
          }
        }
#endif
        // Interned strings are GC'ed if there are no references to them, so let's
        // add a reference to keep this string alive.
        assert(s != NULL, "must succeed");
        Handle h(THREAD, s);
        _extra_interned_strings->append(h);
      }
    }
  }
}

void MetaspaceShared::commit_to(ReservedSpace* rs, VirtualSpace* vs, char* newtop) {
  Arguments::assert_is_dumping_archive();
  char* base = rs->base();
  size_t need_committed_size = newtop - base;
  size_t has_committed_size = vs->committed_size();
  if (need_committed_size < has_committed_size) {
    return;
  }

  size_t min_bytes = need_committed_size - has_committed_size;
  size_t preferred_bytes = 1 * M;
  size_t uncommitted = vs->reserved_size() - has_committed_size;

  size_t commit = MAX2(min_bytes, preferred_bytes);
  commit = MIN2(commit, uncommitted);
  assert(commit <= uncommitted, "sanity");

  bool result = vs->expand_by(commit, false);
  if (rs == &_shared_rs) {
    ArchivePtrMarker::expand_ptr_end((address*)vs->high());
  }

  if (!result) {
    vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
                                          need_committed_size));
  }

  assert(rs == &_shared_rs || rs == &_symbol_rs, "must be");
  const char* which = (rs == &_shared_rs) ? "shared" : "symbol";
  log_debug(cds)("Expanding %s spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
                 which, commit, vs->actual_committed_size(), vs->high());
}
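
// For example: if 300 KB more must be committed and 5 MB of the reservation
// remains uncommitted, commit_to() commits a full 1 MB (the preferred
// granularity) to reduce the number of expand_by() calls; if only 600 KB
// remains uncommitted, it commits just those 600 KB.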
void MetaspaceShared::initialize_ptr_marker(CHeapBitMap* ptrmap) {
  ArchivePtrMarker::initialize(ptrmap, (address*)_shared_vs.low(), (address*)_shared_vs.high());
}

// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.

void MetaspaceShared::serialize(SerializeClosure* soc) {
  int tag = 0;
  soc->do_tag(--tag);

  // Verify the sizes of various metadata in the system.
  soc->do_tag(sizeof(Method));
  soc->do_tag(sizeof(ConstMethod));
  soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(ConstantPool));
  soc->do_tag(sizeof(ConstantPoolCache));
  soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
  soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(Symbol));

  // Dump/restore miscellaneous metadata.
  JavaClasses::serialize_offsets(soc);
  Universe::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore references to commonly used names and signatures.
  vmSymbols::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore the symbol/string/subgraph_info tables
  SymbolTable::serialize_shared_table_header(soc);
  StringTable::serialize_shared_table_header(soc);
  HeapShared::serialize_subgraph_info_table_header(soc);
  SystemDictionaryShared::serialize_dictionary_headers(soc);

  InstanceMirrorKlass::serialize_offsets(soc);

  // Dump/restore well known classes (pointers)
  SystemDictionaryShared::serialize_well_known_klasses(soc);
  soc->do_tag(--tag);

  serialize_cloned_cpp_vtptrs(soc);
  soc->do_tag(--tag);

  soc->do_tag(666);
}

address MetaspaceShared::i2i_entry_code_buffers(size_t total_size) {
  if (DumpSharedSpaces) {
    if (_i2i_entry_code_buffers == NULL) {
      _i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
      _i2i_entry_code_buffers_size = total_size;
    }
  } else if (UseSharedSpaces) {
    assert(_i2i_entry_code_buffers != NULL, "must have already been initialized");
  } else {
    return NULL;
  }

  assert(_i2i_entry_code_buffers_size == total_size, "must not change");
  return _i2i_entry_code_buffers;
}

uintx MetaspaceShared::object_delta_uintx(void* obj) {
  Arguments::assert_is_dumping_archive();
  if (DumpSharedSpaces) {
    assert(shared_rs()->contains(obj), "must be");
  } else {
    assert(is_in_shared_metaspace(obj) || DynamicArchive::is_in_target_space(obj), "must be");
  }
  address base_address = address(SharedBaseAddress);
  uintx deltax = address(obj) - base_address;
  return deltax;
}
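
// For example, with SharedBaseAddress == 0x800000000, an object located at
// 0x800001230 has object_delta_uintx() == 0x1230, i.e., its offset from the
// shared base. Offsets like this are what DumpRegion::expand_top_to() checks
// against MAX_SHARED_DELTA.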
// Global object for holding classes that have been loaded. Since this
// is run at a safepoint just before exit, this is the entire set of classes.
static GrowableArray<Klass*>* _global_klass_objects;

static int global_klass_compare(Klass** a, Klass **b) {
  return a[0]->name()->fast_compare(b[0]->name());
}

GrowableArray<Klass*>* MetaspaceShared::collected_klasses() {
  return _global_klass_objects;
}

static void collect_array_classes(Klass* k) {
  _global_klass_objects->append_if_missing(k);
  if (k->is_array_klass()) {
    // Add in the array classes too
    ArrayKlass* ak = ArrayKlass::cast(k);
    Klass* h = ak->higher_dimension();
    if (h != NULL) {
      h->array_klasses_do(collect_array_classes);
    }
  }
}

class CollectClassesClosure : public KlassClosure {
  void do_klass(Klass* k) {
    if (k->is_instance_klass() &&
        SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(k))) {
      // Don't add to the _global_klass_objects
    } else {
      _global_klass_objects->append_if_missing(k);
    }
    if (k->is_array_klass()) {
      // Add in the array classes too
      ArrayKlass* ak = ArrayKlass::cast(k);
      Klass* h = ak->higher_dimension();
      if (h != NULL) {
        h->array_klasses_do(collect_array_classes);
      }
    }
  }
};

// Global object for holding symbols that are created during class loading. See SymbolTable::new_symbol
static GrowableArray<Symbol*>* _global_symbol_objects = NULL;

static int compare_symbols_by_address(Symbol** a, Symbol** b) {
  if (a[0] < b[0]) {
    return -1;
  } else if (a[0] == b[0]) {
    ResourceMark rm;
    log_warning(cds)("Duplicated symbol %s unexpected", (*a)->as_C_string());
    return 0;
  } else {
    return 1;
  }
}

void MetaspaceShared::add_symbol(Symbol* sym) {
  MutexLocker ml(CDSAddSymbol_lock, Mutex::_no_safepoint_check_flag);
  if (_global_symbol_objects == NULL) {
    _global_symbol_objects = new (ResourceObj::C_HEAP, mtSymbol) GrowableArray<Symbol*>(2048, mtSymbol);
  }
  _global_symbol_objects->append(sym);
}

GrowableArray<Symbol*>* MetaspaceShared::collected_symbols() {
  return _global_symbol_objects;
}

static void remove_unshareable_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
      // on their array classes.
      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
      k->remove_unshareable_info();
    }
  }
}

static void remove_java_mirror_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
      // on their array classes.
      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
      k->remove_java_mirror();
    }
  }
}

static void rewrite_nofast_bytecode(const methodHandle& method) {
  BytecodeStream bcs(method);
  while (!bcs.is_last_bytecode()) {
    Bytecodes::Code opcode = bcs.next();
    switch (opcode) {
    case Bytecodes::_getfield:      *bcs.bcp() = Bytecodes::_nofast_getfield;      break;
    case Bytecodes::_putfield:      *bcs.bcp() = Bytecodes::_nofast_putfield;      break;
    case Bytecodes::_aload_0:       *bcs.bcp() = Bytecodes::_nofast_aload_0;       break;
    case Bytecodes::_iload: {
      if (!bcs.is_wide()) {
        *bcs.bcp() = Bytecodes::_nofast_iload;
      }
      break;
    }
    default: break;
    }
  }
}

// Walk all methods in the class list to ensure that they won't be modified at
// run time. This includes:
// [1] Rewrite all bytecodes as needed, so that the ConstMethod* will not be modified
//     at run time by RewriteBytecodes/RewriteFrequentPairs
// [2] Assign a fingerprint, so one doesn't need to be assigned at run-time.
static void rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread) {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(thread, ik);
    }
  }
}

void MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread, InstanceKlass* ik) {
  for (int i = 0; i < ik->methods()->length(); i++) {
    methodHandle m(thread, ik->methods()->at(i));
    rewrite_nofast_bytecode(m);
    Fingerprinter fp(m);
    // The side effect of this call sets method's fingerprint field.
    fp.fingerprint();
  }
}

// Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
// (In GCC this is the field <Type>::_vptr, i.e., first word in the object.)
//
// Addresses of the vtables and the methods may be different across JVM runs,
// if libjvm.so is dynamically loaded at a different base address.
//
// To ensure that the Metadata objects in the CDS archive always have the correct vtable:
//
// + at dump time:  we redirect the _vptr to point to our own vtables inside
//                  the CDS image
// + at run time:   we clone the actual contents of the vtables from libjvm.so
//                  into our own tables.

// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
#define CPP_VTABLE_PATCH_TYPES_DO(f) \
  f(ConstantPool) \
  f(InstanceKlass) \
  f(InstanceClassLoaderKlass) \
  f(InstanceMirrorKlass) \
  f(InstanceRefKlass) \
  f(Method) \
  f(ObjArrayKlass) \
  f(TypeArrayKlass)

class CppVtableInfo {
  intptr_t _vtable_size;
  intptr_t _cloned_vtable[1];
public:
  static int num_slots(int vtable_size) {
    return 1 + vtable_size; // Need to add the space occupied by _vtable_size;
  }
  int vtable_size()           { return int(uintx(_vtable_size)); }
  void set_vtable_size(int n) { _vtable_size = intptr_t(n); }
  intptr_t* cloned_vtable()   { return &_cloned_vtable[0]; }
  void zero()                 { memset(_cloned_vtable, 0, sizeof(intptr_t) * vtable_size()); }
  // Returns the address of the next CppVtableInfo that can be placed immediately after this CppVtableInfo
  static size_t byte_size(int vtable_size) {
    CppVtableInfo i;
    return pointer_delta(&i._cloned_vtable[vtable_size], &i, sizeof(u1));
  }
};
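
// For illustration: with vtable_size() == 3, a CppVtableInfo occupies
// num_slots(3) == 4 intptr_t slots, laid out as
//   [ _vtable_size = 3 | entry0 | entry1 | entry2 ]
// so byte_size(3) == 4 * sizeof(intptr_t).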
static inline intptr_t* vtable_of(Metadata* m) {
  return *((intptr_t**)m);
}

template <class T> class CppVtableCloner : public T {
  static CppVtableInfo* _info;

  static int get_vtable_length(const char* name);

public:
  // Allocate and initialize the C++ vtable, starting from top, but do not go past end.
  static intptr_t* allocate(const char* name);

  // Clone the vtable to ...
  static intptr_t* clone_vtable(const char* name, CppVtableInfo* info);

  static void zero_vtable_clone() {
    assert(DumpSharedSpaces, "dump-time only");
    _info->zero();
  }

  static bool is_valid_shared_object(const T* obj) {
    intptr_t* vptr = *(intptr_t**)obj;
    return vptr == _info->cloned_vtable();
  }

  static void init_orig_cpp_vtptr(int kind);
};

template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;

template <class T>
intptr_t* CppVtableCloner<T>::allocate(const char* name) {
  assert(is_aligned(_mc_region.top(), sizeof(intptr_t)), "bad alignment");
  int n = get_vtable_length(name);
  _info = (CppVtableInfo*)_mc_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
  _info->set_vtable_size(n);

  intptr_t* p = clone_vtable(name, _info);
  assert((char*)p == _mc_region.top(), "must be");

  return _info->cloned_vtable();
}

template <class T>
intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) {
  if (!DumpSharedSpaces) {
    assert(_info == 0, "_info is initialized only at dump time");
    _info = info; // Remember it -- it will be used by MetaspaceShared::is_valid_shared_method()
  }
  T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
  int n = info->vtable_size();
  intptr_t* srcvtable = vtable_of(&tmp);
  intptr_t* dstvtable = info->cloned_vtable();

  // We already checked (and, if necessary, adjusted n) when the vtables were allocated, so we are
  // safe to do memcpy.
  log_debug(cds, vtables)("Copying %3d vtable entries for %s", n, name);
  memcpy(dstvtable, srcvtable, sizeof(intptr_t) * n);
  return dstvtable + n;
}

// To determine the size of the vtable for each type, we use the following
// trick by declaring 2 subclasses:
//
//   class CppVtableTesterB: public InstanceKlass {virtual int   last_virtual_method() {return 1;}    };
//   class CppVtableTesterA: public InstanceKlass {virtual void* last_virtual_method() {return NULL;} };
//
// CppVtableTesterA and CppVtableTesterB's vtables have the following properties:
// - Their size (N+1) is exactly one more than the size of InstanceKlass's vtable (N)
// - The first N entries are exactly the same as in InstanceKlass's vtable.
// - Their last entry is different.
//
// So to determine the value of N, we just walk CppVtableTesterA and CppVtableTesterB's tables
// and find the first entry that's different.
//
// This works on all C++ compilers supported by Oracle, but you may need to tweak it for more
// esoteric compilers.

template <class T> class CppVtableTesterB: public T {
public:
  virtual int last_virtual_method() {return 1;}
};

template <class T> class CppVtableTesterA : public T {
public:
  virtual void* last_virtual_method() {
    // Make this different than CppVtableTesterB::last_virtual_method so the C++
    // compiler/linker won't alias the two functions.
    return NULL;
  }
};

template <class T>
int CppVtableCloner<T>::get_vtable_length(const char* name) {
  CppVtableTesterA<T> a;
  CppVtableTesterB<T> b;

  intptr_t* avtable = vtable_of(&a);
  intptr_t* bvtable = vtable_of(&b);

  // Start at slot 1, because slot 0 may be RTTI (on Solaris/Sparc)
  int vtable_len = 1;
  for (; ; vtable_len++) {
    if (avtable[vtable_len] != bvtable[vtable_len]) {
      break;
    }
  }
  log_debug(cds, vtables)("Found %3d vtable entries for %s", vtable_len, name);

  return vtable_len;
}

#define ALLOC_CPP_VTABLE_CLONE(c) \
  _cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner<c>::allocate(#c); \
  ArchivePtrMarker::mark_pointer(&_cloned_cpp_vtptrs[c##_Kind]);

#define CLONE_CPP_VTABLE(c) \
  p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);

#define ZERO_CPP_VTABLE(c) \
  CppVtableCloner<c>::zero_vtable_clone();

#define INIT_ORIG_CPP_VTPTRS(c) \
  CppVtableCloner<c>::init_orig_cpp_vtptr(c##_Kind);

#define DECLARE_CLONED_VTABLE_KIND(c) c ## _Kind,

enum ClonedVtableKind {
  // E.g., ConstantPool_Kind == 0, InstanceKlass_Kind == 1, etc.
  CPP_VTABLE_PATCH_TYPES_DO(DECLARE_CLONED_VTABLE_KIND)
  _num_cloned_vtable_kinds
};

// This is a map of all the original vtptrs. E.g., for
//     ConstantPool *cp = new (...) ConstantPool(...) ; // a dynamically allocated constant pool
// the following holds true:
//     _orig_cpp_vtptrs[ConstantPool_Kind] == ((intptr_t**)cp)[0]
static intptr_t* _orig_cpp_vtptrs[_num_cloned_vtable_kinds];
static bool _orig_cpp_vtptrs_inited = false;

template <class T>
void CppVtableCloner<T>::init_orig_cpp_vtptr(int kind) {
  assert(kind < _num_cloned_vtable_kinds, "sanity");
  T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
  intptr_t* srcvtable = vtable_of(&tmp);
  _orig_cpp_vtptrs[kind] = srcvtable;
}
// This is the index of all the cloned vtables. E.g., for
//     ConstantPool* cp = ....; // an archived constant pool
//     InstanceKlass* ik = ....;// an archived class
// the following holds true:
//     _cloned_cpp_vtptrs[ConstantPool_Kind]  == ((intptr_t**)cp)[0]
//     _cloned_cpp_vtptrs[InstanceKlass_Kind] == ((intptr_t**)ik)[0]
static intptr_t** _cloned_cpp_vtptrs = NULL;

void MetaspaceShared::allocate_cloned_cpp_vtptrs() {
  assert(DumpSharedSpaces, "must");
  size_t vtptrs_bytes = _num_cloned_vtable_kinds * sizeof(intptr_t*);
  _cloned_cpp_vtptrs = (intptr_t**)_mc_region.allocate(vtptrs_bytes, sizeof(intptr_t*));
}

void MetaspaceShared::serialize_cloned_cpp_vtptrs(SerializeClosure* soc) {
  soc->do_ptr((void**)&_cloned_cpp_vtptrs);
}

intptr_t* MetaspaceShared::get_archived_cpp_vtable(MetaspaceObj::Type msotype, address obj) {
  if (!_orig_cpp_vtptrs_inited) {
    CPP_VTABLE_PATCH_TYPES_DO(INIT_ORIG_CPP_VTPTRS);
    _orig_cpp_vtptrs_inited = true;
  }

  Arguments::assert_is_dumping_archive();
  int kind = -1;
  switch (msotype) {
  case MetaspaceObj::SymbolType:
  case MetaspaceObj::TypeArrayU1Type:
  case MetaspaceObj::TypeArrayU2Type:
  case MetaspaceObj::TypeArrayU4Type:
  case MetaspaceObj::TypeArrayU8Type:
  case MetaspaceObj::TypeArrayOtherType:
  case MetaspaceObj::ConstMethodType:
  case MetaspaceObj::ConstantPoolCacheType:
  case MetaspaceObj::AnnotationsType:
  case MetaspaceObj::MethodCountersType:
  case MetaspaceObj::RecordComponentType:
    // These have no vtables.
    break;
  case MetaspaceObj::MethodDataType:
    // We don't archive MethodData <-- it should have been removed in remove_unshareable_info()
    ShouldNotReachHere();
    break;
  default:
    for (kind = 0; kind < _num_cloned_vtable_kinds; kind ++) {
      if (vtable_of((Metadata*)obj) == _orig_cpp_vtptrs[kind]) {
        break;
      }
    }
    if (kind >= _num_cloned_vtable_kinds) {
      fatal("Cannot find C++ vtable for " INTPTR_FORMAT " -- you probably added"
            " a new subtype of Klass or MetaData without updating CPP_VTABLE_PATCH_TYPES_DO",
            p2i(obj));
    }
  }

  if (kind >= 0) {
    assert(kind < _num_cloned_vtable_kinds, "must be");
    return _cloned_cpp_vtptrs[kind];
  } else {
    return NULL;
  }
}

// This can be called at both dump time and run time:
// - clone the contents of the c++ vtables into the space
//   allocated by allocate_cpp_vtable_clones()
void MetaspaceShared::clone_cpp_vtables(intptr_t* p) {
  assert(DumpSharedSpaces || UseSharedSpaces, "sanity");
  CPP_VTABLE_PATCH_TYPES_DO(CLONE_CPP_VTABLE);
}

void MetaspaceShared::zero_cpp_vtable_clones_for_writing() {
  assert(DumpSharedSpaces, "dump-time only");
  CPP_VTABLE_PATCH_TYPES_DO(ZERO_CPP_VTABLE);
}

// Allocate and initialize the C++ vtables, starting from top, but do not go past end.
char* MetaspaceShared::allocate_cpp_vtable_clones() {
  char* cloned_vtables = _mc_region.top(); // This is the beginning of all the cloned vtables

  assert(DumpSharedSpaces, "dump-time only");
  // Layout (each slot is an intptr_t):
  //   [number of slots in the first vtable = n1]
  //   [ <n1> slots for the first vtable]
  //   [number of slots in the second vtable = n2]
  //   [ <n2> slots for the second vtable]
  //   ...
  // The order of the vtables is the same as the CPP_VTABLE_PATCH_TYPES_DO macro.
  CPP_VTABLE_PATCH_TYPES_DO(ALLOC_CPP_VTABLE_CLONE);

  return cloned_vtables;
}

bool MetaspaceShared::is_valid_shared_method(const Method* m) {
  assert(is_in_shared_metaspace(m), "must be");
  return CppVtableCloner<Method>::is_valid_shared_object(m);
}

void WriteClosure::do_oop(oop* o) {
  if (*o == NULL) {
    _dump_region->append_intptr_t(0);
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archiving heap object is not allowed");
    _dump_region->append_intptr_t(
      (intptr_t)CompressedOops::encode_not_null(*o));
  }
}

void WriteClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    _dump_region->append_intptr_t(*(intptr_t*)start, true);
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}

// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public ResourceObj {
public:

  // Here's poor man's enum inheritance
#define SHAREDSPACE_OBJ_TYPES_DO(f) \
  METASPACE_OBJ_TYPES_DO(f) \
  f(SymbolHashentry) \
  f(SymbolBucket) \
  f(StringHashentry) \
  f(StringBucket) \
  f(Other)

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  static const char* type_name(Type type) {
    switch(type) {
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
    default:
      ShouldNotReachHere();
      return NULL;
    }
  }

public:
  enum { RO = 0, RW = 1 };

  int _counts[2][_number_of_types];
  int _bytes [2][_number_of_types];

  DumpAllocStats() {
    memset(_counts, 0, sizeof(_counts));
    memset(_bytes, 0, sizeof(_bytes));
  };

  void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
    int which = (read_only) ? RO : RW;
    _counts[which][type] ++;
    _bytes [which][type] += byte_size;
  }
  void record_other_type(int byte_size, bool read_only) {
    int which = (read_only) ? RO : RW;
    _bytes [which][OtherType] += byte_size;
  }
  void print_stats(int ro_all, int rw_all, int mc_all);
};

void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all) {
  // Calculate size of data that was not allocated by Metaspace::allocate()
  MetaspaceSharedStats *stats = MetaspaceShared::stats();

  // symbols
  _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
  _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;

  _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
  _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;

  // strings
  _counts[RO][StringHashentryType] = stats->string.hashentry_count;
  _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;

  _counts[RO][StringBucketType] = stats->string.bucket_count;
  _bytes [RO][StringBucketType] = stats->string.bucket_bytes;

  // TODO: count things like dictionary, vtable, etc
  _bytes[RW][OtherType] += mc_all;
  rw_all += mc_all; // mc is mapped Read/Write

  // prevent divide-by-zero
  if (ro_all < 1) {
    ro_all = 1;
  }
  if (rw_all < 1) {
    rw_all = 1;
  }

  int all_ro_count = 0;
  int all_ro_bytes = 0;
  int all_rw_count = 0;
  int all_rw_bytes = 0;

// To make fmt_stats be a syntactic constant (for format warnings), use #define.
#define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
  const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
  const char *hdr = "                        ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";

  LogMessage(cds) msg;

  msg.debug("Detailed metadata info (excluding st regions; rw stats include mc regions):");
  msg.debug("%s", hdr);
  msg.debug("%s", sep);
  for (int type = 0; type < int(_number_of_types); type ++) {
    const char *name = type_name((Type)type);
    int ro_count = _counts[RO][type];
    int ro_bytes = _bytes [RO][type];
    int rw_count = _counts[RW][type];
    int rw_bytes = _bytes [RW][type];
    int count = ro_count + rw_count;
    int bytes = ro_bytes + rw_bytes;

    double ro_perc = percent_of(ro_bytes, ro_all);
    double rw_perc = percent_of(rw_bytes, rw_all);
    double perc    = percent_of(bytes, ro_all + rw_all);

    msg.debug(fmt_stats, name,
              ro_count, ro_bytes, ro_perc,
              rw_count, rw_bytes, rw_perc,
              count, bytes, perc);

    all_ro_count += ro_count;
    all_ro_bytes += ro_bytes;
    all_rw_count += rw_count;
    all_rw_bytes += rw_bytes;
  }

  int all_count = all_ro_count + all_rw_count;
  int all_bytes = all_ro_bytes + all_rw_bytes;

  double all_ro_perc = percent_of(all_ro_bytes, ro_all);
  double all_rw_perc = percent_of(all_rw_bytes, rw_all);
  double all_perc    = percent_of(all_bytes, ro_all + rw_all);

  msg.debug("%s", sep);
  msg.debug(fmt_stats, "Total",
            all_ro_count, all_ro_bytes, all_ro_perc,
            all_rw_count, all_rw_bytes, all_rw_perc,
            all_count, all_bytes, all_perc);

  assert(all_ro_bytes == ro_all, "everything should have been counted");
  assert(all_rw_bytes == rw_all, "everything should have been counted");

#undef fmt_stats
}

// Populate the shared space.
class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  GrowableArray<MemRegion> *_closed_archive_heap_regions;
  GrowableArray<MemRegion> *_open_archive_heap_regions;

  GrowableArray<ArchiveHeapOopmapInfo> *_closed_archive_heap_oopmaps;
  GrowableArray<ArchiveHeapOopmapInfo> *_open_archive_heap_oopmaps;

  void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                 GrowableArray<ArchiveHeapOopmapInfo>* oopmaps);
  void dump_symbols();
  char* dump_read_only_tables();
  void print_class_stats();
  void print_region_stats(FileMapInfo* map_info);
  void print_bitmap_region_stats(size_t size, size_t total_size);
  void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                               const char *name, size_t total_size);
  void relocate_to_requested_base_address(CHeapBitMap* ptrmap);

public:

  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit();   // outline because gdb sucks
  bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace

// ArchiveCompactor --
//
// This class is the central piece of shared archive compaction -- all metaspace data are
// initially allocated outside of the shared regions. ArchiveCompactor copies the
// metaspace data into their final location in the shared regions.

class ArchiveCompactor : AllStatic {
  static const int INITIAL_TABLE_SIZE = 8087;
  static const int MAX_TABLE_SIZE     = 1000000;

  static DumpAllocStats* _alloc_stats;

  typedef KVHashtable<address, address, mtInternal> RelocationTable;
  static RelocationTable* _new_loc_table;

public:
  static void initialize() {
    _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats;
    _new_loc_table = new RelocationTable(INITIAL_TABLE_SIZE);
  }
  static DumpAllocStats* alloc_stats() {
    return _alloc_stats;
  }

  // Use this when you allocate space with MetaspaceShared::read_only_space_alloc()
  // outside of ArchiveCompactor::allocate(). These are usually for misc tables
  // that are allocated in the RO space.
  class OtherROAllocMark {
    char* _oldtop;
  public:
    OtherROAllocMark() {
      _oldtop = _ro_region.top();
    }
    ~OtherROAllocMark() {
      char* newtop = _ro_region.top();
      ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
    }
  };
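
  // A sketch of the intended usage (dump_read_only_tables() below does
  // exactly this):
  //
  //   {
  //     ArchiveCompactor::OtherROAllocMark mark;
  //     ... allocate misc tables via MetaspaceShared::read_only_space_alloc() ...
  //   } // ~OtherROAllocMark() records the RO bytes allocated in the scope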
  static void allocate(MetaspaceClosure::Ref* ref, bool read_only) {
    address obj = ref->obj();
    int bytes = ref->size() * BytesPerWord;
    char* p;
    size_t alignment = BytesPerWord;
    char* oldtop;
    char* newtop;

    if (read_only) {
      oldtop = _ro_region.top();
      p = _ro_region.allocate(bytes, alignment);
      newtop = _ro_region.top();
    } else {
      oldtop = _rw_region.top();
      if (ref->msotype() == MetaspaceObj::ClassType) {
        // Save a pointer immediately in front of an InstanceKlass, so
        // we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo*
        // without building another hashtable. See RunTimeSharedClassInfo::get_for()
        // in systemDictionaryShared.cpp.
        Klass* klass = (Klass*)obj;
        if (klass->is_instance_klass()) {
          SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
          _rw_region.allocate(sizeof(address), BytesPerWord);
        }
      }
      p = _rw_region.allocate(bytes, alignment);
      newtop = _rw_region.top();
    }
    memcpy(p, obj, bytes);

    intptr_t* archived_vtable = MetaspaceShared::get_archived_cpp_vtable(ref->msotype(), (address)p);
    if (archived_vtable != NULL) {
      *(address*)p = (address)archived_vtable;
      ArchivePtrMarker::mark_pointer((address*)p);
    }

    assert(_new_loc_table->lookup(obj) == NULL, "each object can be relocated at most once");
    _new_loc_table->add(obj, (address)p);
    log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
    if (_new_loc_table->maybe_grow(MAX_TABLE_SIZE)) {
      log_info(cds, hashtables)("Expanded _new_loc_table to %d", _new_loc_table->table_size());
    }
    _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
  }

  static address get_new_loc(MetaspaceClosure::Ref* ref) {
    address* pp = _new_loc_table->lookup(ref->obj());
    assert(pp != NULL, "must be");
    return *pp;
  }

private:
  // Makes a shallow copy of visited MetaspaceObj's
  class ShallowCopier: public UniqueMetaspaceClosure {
    bool _read_only;
  public:
    ShallowCopier(bool read_only) : _read_only(read_only) {}

    virtual bool do_unique_ref(Ref* ref, bool read_only) {
      if (read_only == _read_only) {
        allocate(ref, read_only);
      }
      return true; // recurse into ref.obj()
    }
  };

  // Relocate embedded pointers within a MetaspaceObj's shallow copy
  class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
  public:
    virtual bool do_unique_ref(Ref* ref, bool read_only) {
      address new_loc = get_new_loc(ref);
      RefRelocator refer;
      ref->metaspace_pointers_do_at(&refer, new_loc);
      return true; // recurse into ref.obj()
    }
    virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) {
      assert(type == _method_entry_ref, "only special type allowed for now");
      address obj = ref->obj();
      address new_obj = get_new_loc(ref);
      size_t offset = pointer_delta(p, obj, sizeof(u1));
      intptr_t* new_p = (intptr_t*)(new_obj + offset);
      assert(*p == *new_p, "must be a copy");
      ArchivePtrMarker::mark_pointer((address*)new_p);
    }
  };

  // Relocate a reference to point to its shallow copy
  class RefRelocator: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        ref->update(get_new_loc(ref));
        ArchivePtrMarker::mark_pointer(ref->addr());
      }
      return false; // Do not recurse.
    }
  };

#ifdef ASSERT
  class IsRefInArchiveChecker: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        char* obj = (char*)ref->obj();
        assert(_ro_region.contains(obj) || _rw_region.contains(obj),
               "must be relocated to point to CDS archive");
      }
      return false; // Do not recurse.
    }
  };
#endif

public:
  static void copy_and_compact() {
    ResourceMark rm;

    log_info(cds)("Scanning all metaspace objects ... ");
    {
      // allocate and shallow-copy RW objects, immediately following the MC region
      log_info(cds)("Allocating RW objects ... ");
"); 1409 _mc_region.pack(&_rw_region); 1410 1411 ResourceMark rm; 1412 ShallowCopier rw_copier(false); 1413 iterate_roots(&rw_copier); 1414 } 1415 { 1416 // allocate and shallow-copy of RO object, immediately following the RW region 1417 log_info(cds)("Allocating RO objects ... "); 1418 _rw_region.pack(&_ro_region); 1419 1420 ResourceMark rm; 1421 ShallowCopier ro_copier(true); 1422 iterate_roots(&ro_copier); 1423 } 1424 { 1425 log_info(cds)("Relocating embedded pointers ... "); 1426 ResourceMark rm; 1427 ShallowCopyEmbeddedRefRelocator emb_reloc; 1428 iterate_roots(&emb_reloc); 1429 } 1430 { 1431 log_info(cds)("Relocating external roots ... "); 1432 ResourceMark rm; 1433 RefRelocator ext_reloc; 1434 iterate_roots(&ext_reloc); 1435 } 1436 { 1437 log_info(cds)("Fixing symbol identity hash ... "); 1438 os::init_random(0x12345678); 1439 GrowableArray<Symbol*>* all_symbols = MetaspaceShared::collected_symbols(); 1440 all_symbols->sort(compare_symbols_by_address); 1441 for (int i = 0; i < all_symbols->length(); i++) { 1442 assert(all_symbols->at(i)->is_permanent(), "archived symbols must be permanent"); 1443 all_symbols->at(i)->update_identity_hash(); 1444 } 1445 } 1446 #ifdef ASSERT 1447 { 1448 log_info(cds)("Verifying external roots ... "); 1449 ResourceMark rm; 1450 IsRefInArchiveChecker checker; 1451 iterate_roots(&checker); 1452 } 1453 #endif 1454 } 1455 1456 // We must relocate the System::_well_known_klasses only after we have copied the 1457 // java objects in during dump_java_heap_objects(): during the object copy, we operate on 1458 // old objects which assert that their klass is the original klass. 1459 static void relocate_well_known_klasses() { 1460 { 1461 log_info(cds)("Relocating SystemDictionary::_well_known_klasses[] ... "); 1462 ResourceMark rm; 1463 RefRelocator ext_reloc; 1464 SystemDictionary::well_known_klasses_do(&ext_reloc); 1465 } 1466 // NOTE: after this point, we shouldn't have any globals that can reach the old 1467 // objects. 1468 1469 // We cannot use any of the objects in the heap anymore (except for the 1470 // shared strings) because their headers no longer point to valid Klasses. 1471 } 1472 1473 static void iterate_roots(MetaspaceClosure* it) { 1474 // To ensure deterministic contents in the archive, we just need to ensure that 1475 // we iterate the MetsapceObjs in a deterministic order. It doesn't matter where 1476 // the MetsapceObjs are located originally, as they are copied sequentially into 1477 // the archive during the iteration. 1478 // 1479 // The only issue here is that the symbol table and the system directories may be 1480 // randomly ordered, so we copy the symbols and klasses into two arrays and sort 1481 // them deterministically. 1482 // 1483 // During -Xshare:dump, the order of Symbol creation is strictly determined by 1484 // the SharedClassListFile (class loading is done in a single thread and the JIT 1485 // is disabled). Also, Symbols are allocated in monotonically increasing addresses 1486 // (see Symbol::operator new(size_t, int)). So if we iterate the Symbols by 1487 // ascending address order, we ensure that all Symbols are copied into deterministic 1488 // locations in the archive. 
    GrowableArray<Symbol*>* symbols = _global_symbol_objects;
    for (int i = 0; i < symbols->length(); i++) {
      it->push(symbols->adr_at(i));
    }
    if (_global_klass_objects != NULL) {
      // Need to fix up the pointers
      for (int i = 0; i < _global_klass_objects->length(); i++) {
        // NOTE -- this requires that the vtable is NOT yet patched, or else we are hosed.
        it->push(_global_klass_objects->adr_at(i));
      }
    }
    FileMapInfo::metaspace_pointers_do(it, false);
    SystemDictionaryShared::dumptime_classes_do(it);
    Universe::metaspace_pointers_do(it);
    SymbolTable::metaspace_pointers_do(it);
    vmSymbols::metaspace_pointers_do(it);

    it->finish();
  }

  static Klass* get_relocated_klass(Klass* orig_klass) {
    assert(DumpSharedSpaces, "dump time only");
    address* pp = _new_loc_table->lookup((address)orig_klass);
    assert(pp != NULL, "must be");
    Klass* klass = (Klass*)(*pp);
    assert(klass->is_klass(), "must be");
    return klass;
  }
};

DumpAllocStats* ArchiveCompactor::_alloc_stats;
ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table;

void VM_PopulateDumpSharedSpace::dump_symbols() {
  log_info(cds)("Dumping symbol table ...");

  NOT_PRODUCT(SymbolTable::verify());
  SymbolTable::write_to_archive();
}

char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
  ArchiveCompactor::OtherROAllocMark mark;

  log_info(cds)("Removing java_mirror ... ");
  if (!HeapShared::is_heap_object_archiving_allowed()) {
    Universe::clear_basic_type_mirrors();
  }
  remove_java_mirror_in_classes();
  log_info(cds)("done. ");

  SystemDictionaryShared::write_to_archive();

  // Write the other data to the output array.
  char* start = _ro_region.top();
  WriteClosure wc(&_ro_region);
  MetaspaceShared::serialize(&wc);

  // Write the bitmaps for patching the archive heap regions
  _closed_archive_heap_oopmaps = NULL;
  _open_archive_heap_oopmaps = NULL;
  dump_archive_heap_oopmaps();

  return start;
}

void VM_PopulateDumpSharedSpace::print_class_stats() {
  log_info(cds)("Number of classes %d", _global_klass_objects->length());
  {
    int num_type_array = 0, num_obj_array = 0, num_inst = 0;
    for (int i = 0; i < _global_klass_objects->length(); i++) {
      Klass* k = _global_klass_objects->at(i);
      if (k->is_instance_klass()) {
        num_inst ++;
      } else if (k->is_objArray_klass()) {
        num_obj_array ++;
      } else {
        assert(k->is_typeArray_klass(), "sanity");
        num_type_array ++;
      }
    }
    log_info(cds)("    instance classes   = %5d", num_inst);
    log_info(cds)("    obj array classes  = %5d", num_obj_array);
    log_info(cds)("    type array classes = %5d", num_type_array);
  }
}

void VM_PopulateDumpSharedSpace::relocate_to_requested_base_address(CHeapBitMap* ptrmap) {
  intx addr_delta = MetaspaceShared::final_delta();
  if (addr_delta == 0) {
    ArchivePtrMarker::compact((address)SharedBaseAddress, (address)_ro_region.top());
  } else {
    // We are not able to reserve space at MetaspaceShared::requested_base_address() (due to ASLR).
    // This means that the current content of the archive is based on a random
    // address. Let's relocate all the pointers, so that it can be mapped to
    // MetaspaceShared::requested_base_address() without runtime relocation.
    //
    // Note: both the base and dynamic archive are written with
    // FileMapHeader::_requested_base_address == MetaspaceShared::requested_base_address()

    // Patch all pointers that are marked by ptrmap within this region,
    // where we have just dumped all the metaspace data.
    address patch_base = (address)SharedBaseAddress;
    address patch_end  = (address)_ro_region.top();
    size_t size = patch_end - patch_base;

    // the current value of the pointers to be patched must be within this
    // range (i.e., must point to valid metaspace objects)
    address valid_old_base = patch_base;
    address valid_old_end  = patch_end;

    // after patching, the pointers must point inside this range
    // (the requested location of the archive, as mapped at runtime).
    address valid_new_base = (address)MetaspaceShared::requested_base_address();
    address valid_new_end  = valid_new_base + size;

    log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT " ] to "
                   "[" INTPTR_FORMAT " - " INTPTR_FORMAT " ]", p2i(patch_base), p2i(patch_end),
                   p2i(valid_new_base), p2i(valid_new_end));

    SharedDataRelocator<true> patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
                                      valid_new_base, valid_new_end, addr_delta, ptrmap);
    ptrmap->iterate(&patcher);
    ArchivePtrMarker::compact(patcher.max_non_null_offset());
  }
}

void VM_PopulateDumpSharedSpace::doit() {
  HeapShared::run_full_gc_in_vm_thread();
  CHeapBitMap ptrmap;
  MetaspaceShared::initialize_ptr_marker(&ptrmap);

  // We should no longer allocate anything from the metaspace, because:
  //
  // (1) Metaspace::allocate might trigger GC if we have run out of
  //     committed metaspace, but we can't GC because we're running
  //     in the VM thread.
  // (2) ArchiveCompactor needs to work with a stable set of MetaspaceObjs.
  Metaspace::freeze();
  DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);

  Thread* THREAD = VMThread::vm_thread();

  FileMapInfo::check_nonempty_dir_in_shared_path_table();

  NOT_PRODUCT(SystemDictionary::verify();)
  // The following guarantee is meant to ensure that no loader constraints
  // exist yet, since the constraints table is not shared. This becomes
  // more important now that we don't re-initialize vtables/itables for
  // shared classes at runtime, where constraints were previously created.
  guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
            "loader constraints are not saved");
  guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
            "placeholders are not saved");

  // At this point, many classes have been loaded.
  // Gather systemDictionary classes into a global array and operate on that
  // array, so we don't have to walk the SystemDictionary again.
  SystemDictionaryShared::check_excluded_classes();
  _global_klass_objects = new GrowableArray<Klass*>(1000);
  CollectClassesClosure collect_classes;
  ClassLoaderDataGraph::loaded_classes_do(&collect_classes);
  _global_klass_objects->sort(global_klass_compare);

  print_class_stats();

  // Ensure the ConstMethods won't be modified at run-time
  log_info(cds)("Updating ConstMethods ... ");
  rewrite_nofast_bytecodes_and_calculate_fingerprints(THREAD);
  log_info(cds)("done. ");
"); 1658 1659 // Remove all references outside the metadata 1660 log_info(cds)("Removing unshareable information ... "); 1661 remove_unshareable_in_classes(); 1662 log_info(cds)("done. "); 1663 1664 MetaspaceShared::allocate_cloned_cpp_vtptrs(); 1665 char* cloned_vtables = _mc_region.top(); 1666 MetaspaceShared::allocate_cpp_vtable_clones(); 1667 1668 ArchiveCompactor::initialize(); 1669 ArchiveCompactor::copy_and_compact(); 1670 1671 dump_symbols(); 1672 1673 // Dump supported java heap objects 1674 _closed_archive_heap_regions = NULL; 1675 _open_archive_heap_regions = NULL; 1676 dump_java_heap_objects(); 1677 1678 ArchiveCompactor::relocate_well_known_klasses(); 1679 1680 char* serialized_data = dump_read_only_tables(); 1681 _ro_region.pack(); 1682 1683 // The vtable clones contain addresses of the current process. 1684 // We don't want to write these addresses into the archive. Same for i2i buffer. 1685 MetaspaceShared::zero_cpp_vtable_clones_for_writing(); 1686 memset(MetaspaceShared::i2i_entry_code_buffers(), 0, 1687 MetaspaceShared::i2i_entry_code_buffers_size()); 1688 1689 // relocate the data so that it can be mapped to MetaspaceShared::requested_base_address() 1690 // without runtime relocation. 1691 relocate_to_requested_base_address(&ptrmap); 1692 1693 // Create and write the archive file that maps the shared spaces. 1694 1695 FileMapInfo* mapinfo = new FileMapInfo(true); 1696 mapinfo->populate_header(os::vm_allocation_granularity()); 1697 mapinfo->set_serialized_data(serialized_data); 1698 mapinfo->set_cloned_vtables(cloned_vtables); 1699 mapinfo->set_i2i_entry_code_buffers(MetaspaceShared::i2i_entry_code_buffers(), 1700 MetaspaceShared::i2i_entry_code_buffers_size()); 1701 mapinfo->open_for_write(); 1702 MetaspaceShared::write_core_archive_regions(mapinfo, _closed_archive_heap_oopmaps, _open_archive_heap_oopmaps); 1703 _total_closed_archive_region_size = mapinfo->write_archive_heap_regions( 1704 _closed_archive_heap_regions, 1705 _closed_archive_heap_oopmaps, 1706 MetaspaceShared::first_closed_archive_heap_region, 1707 MetaspaceShared::max_closed_archive_heap_region); 1708 _total_open_archive_region_size = mapinfo->write_archive_heap_regions( 1709 _open_archive_heap_regions, 1710 _open_archive_heap_oopmaps, 1711 MetaspaceShared::first_open_archive_heap_region, 1712 MetaspaceShared::max_open_archive_heap_region); 1713 1714 mapinfo->set_final_requested_base((char*)MetaspaceShared::requested_base_address()); 1715 mapinfo->set_header_crc(mapinfo->compute_header_crc()); 1716 mapinfo->write_header(); 1717 print_region_stats(mapinfo); 1718 mapinfo->close(); 1719 1720 if (log_is_enabled(Info, cds)) { 1721 ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()), 1722 int(_mc_region.used())); 1723 } 1724 1725 if (PrintSystemDictionaryAtExit) { 1726 SystemDictionary::print(); 1727 } 1728 1729 if (AllowArchivingWithJavaAgent) { 1730 warning("This archive was created with AllowArchivingWithJavaAgent. It should be used " 1731 "for testing purposes only and should not be used in a production environment"); 1732 } 1733 1734 // There may be other pending VM operations that operate on the InstanceKlasses, 1735 // which will fail because InstanceKlasses::remove_unshareable_info() 1736 // has been called. Forget these operations and exit the VM directly. 

void VM_PopulateDumpSharedSpace::print_region_stats(FileMapInfo *map_info) {
  // Print statistics of all the regions
  const size_t bitmap_used = map_info->space_at(MetaspaceShared::bm)->used();
  const size_t bitmap_reserved = map_info->space_at(MetaspaceShared::bm)->used_aligned();
  const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
                                _mc_region.reserved() +
                                bitmap_reserved +
                                _total_closed_archive_region_size +
                                _total_open_archive_region_size;
  const size_t total_bytes = _ro_region.used() + _rw_region.used() +
                             _mc_region.used() +
                             bitmap_used +
                             _total_closed_archive_region_size +
                             _total_open_archive_region_size;
  const double total_u_perc = percent_of(total_bytes, total_reserved);

  _mc_region.print(total_reserved);
  _rw_region.print(total_reserved);
  _ro_region.print(total_reserved);
  print_bitmap_region_stats(bitmap_used, total_reserved);
  print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved);
  print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);

  log_debug(cds)("total    : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
                 total_bytes, total_reserved, total_u_perc);
}

void VM_PopulateDumpSharedSpace::print_bitmap_region_stats(size_t size, size_t total_size) {
  log_debug(cds)("bm  space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used]",
                 size, size/double(total_size)*100.0, size);
}

void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                                                         const char *name, size_t total_size) {
  int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
  for (int i = 0; i < arr_len; i++) {
    char* start = (char*)heap_mem->at(i).start();
    size_t size = heap_mem->at(i).byte_size();
    char* top = start + size;
    log_debug(cds)("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
                   name, i, size, size/double(total_size)*100.0, size, p2i(start));
  }
}

void MetaspaceShared::write_core_archive_regions(FileMapInfo* mapinfo,
                                                 GrowableArray<ArchiveHeapOopmapInfo>* closed_oopmaps,
                                                 GrowableArray<ArchiveHeapOopmapInfo>* open_oopmaps) {
  // Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with
  // MetaspaceShared::n_regions (internal to hotspot).
  assert(NUM_CDS_REGIONS == MetaspaceShared::n_regions, "sanity");

  // mc contains the trampoline code for method entries, which are patched at run time,
  // so it needs to be read/write.
  write_region(mapinfo, mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
  write_region(mapinfo, rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
  write_region(mapinfo, ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
  mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), closed_oopmaps, open_oopmaps);
}

void MetaspaceShared::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec) {
  mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
}

// Update a Java object to point its Klass* to the new location after
// the shared archive has been compacted.
void MetaspaceShared::relocate_klass_ptr(oop o) {
  assert(DumpSharedSpaces, "sanity");
  Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
  o->set_klass(k);
}

Klass* MetaspaceShared::get_relocated_klass(Klass *k, bool is_final) {
  assert(DumpSharedSpaces, "sanity");
  k = ArchiveCompactor::get_relocated_klass(k);
  if (is_final) {
    k = (Klass*)(address(k) + final_delta());
  }
  return k;
}

class LinkSharedClassesClosure : public KlassClosure {
  Thread* THREAD;
  bool    _made_progress;
 public:
  LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {}

  void reset()               { _made_progress = false; }
  bool made_progress() const { return _made_progress; }

  void do_klass(Klass* k) {
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      // For dynamic CDS dump, only link classes loaded by the builtin class loaders.
      bool do_linking = DumpSharedSpaces ? true : !ik->is_shared_unregistered_class();
      if (do_linking) {
        // Link the class to cause the bytecodes to be rewritten and the
        // cpcache to be created. Class verification is done according
        // to the -Xverify setting.
        _made_progress |= MetaspaceShared::try_link_class(ik, THREAD);
        guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");

        if (DumpSharedSpaces) {
          // The following function is used to resolve all Strings in the statically
          // dumped classes to archive all the Strings. The archive heap is not supported
          // for the dynamic archive.
          ik->constants()->resolve_class_constants(THREAD);
        }
      }
    }
  }
};

void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) {
  // We need to iterate because verification may cause additional classes
  // to be loaded.
  LinkSharedClassesClosure link_closure(THREAD);
  do {
    link_closure.reset();
    ClassLoaderDataGraph::unlocked_loaded_classes_do(&link_closure);
    guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
  } while (link_closure.made_progress());
}

void MetaspaceShared::prepare_for_dumping() {
  Arguments::check_unsupported_dumping_properties();
  ClassLoader::initialize_shared_path();
}

// Preload classes from a list, populate the shared spaces and dump to a
// file.
void MetaspaceShared::preload_and_dump(TRAPS) {
  { TraceTime timer("Dump Shared Spaces", TRACETIME_LOG(Info, startuptime));
    ResourceMark rm(THREAD);
    char class_list_path_str[JVM_MAXPATHLEN];
    // Preload classes to be shared.
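    // (Illustration, not part of the parser: a classlist is a plain-text file
    // with one slash-separated class name per line, e.g.
    //
    //   java/lang/Object
    //   java/util/ArrayList
    //
    // typically produced by a training run with -XX:DumpLoadedClassList=<file>.)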
    const char* class_list_path;
    if (SharedClassListFile == NULL) {
      // Construct the path to the class list (in jre/lib)
      // Walk up two directories from the location of the VM and
      // optionally tack on "lib" (depending on platform)
      os::jvm_path(class_list_path_str, sizeof(class_list_path_str));
      for (int i = 0; i < 3; i++) {
        char *end = strrchr(class_list_path_str, *os::file_separator());
        if (end != NULL) *end = '\0';
      }
      int class_list_path_len = (int)strlen(class_list_path_str);
      if (class_list_path_len >= 3) {
        if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
          if (class_list_path_len < JVM_MAXPATHLEN - 4) {
            jio_snprintf(class_list_path_str + class_list_path_len,
                         sizeof(class_list_path_str) - class_list_path_len,
                         "%slib", os::file_separator());
            class_list_path_len += 4;
          }
        }
      }
      if (class_list_path_len < JVM_MAXPATHLEN - 10) {
        jio_snprintf(class_list_path_str + class_list_path_len,
                     sizeof(class_list_path_str) - class_list_path_len,
                     "%sclasslist", os::file_separator());
      }
      class_list_path = class_list_path_str;
    } else {
      class_list_path = SharedClassListFile;
    }

    log_info(cds)("Loading classes to share ...");
    _has_error_classes = false;
    int class_count = preload_classes(class_list_path, THREAD);
    if (ExtraSharedClassListFile) {
      class_count += preload_classes(ExtraSharedClassListFile, THREAD);
    }
    log_info(cds)("Loading classes to share: done.");

    log_info(cds)("Shared spaces: preloaded %d classes", class_count);

    if (SharedArchiveConfigFile) {
      log_info(cds)("Reading extra data from %s ...", SharedArchiveConfigFile);
      read_extra_data(SharedArchiveConfigFile, THREAD);
    }
    log_info(cds)("Reading extra data: done.");

    HeapShared::init_subgraph_entry_fields(THREAD);

    // Rewrite and link classes
    log_info(cds)("Rewriting and linking classes ...");

    // Link any classes which got missed. This would happen if we have loaded classes that
    // were not explicitly specified in the classlist. E.g., if an interface implemented by class K
    // fails verification, all other interfaces that were not specified in the classlist but
    // are implemented by K are not verified.
    link_and_cleanup_shared_classes(CATCH);
    log_info(cds)("Rewriting and linking classes: done");

    VM_PopulateDumpSharedSpace op;
    MutexLocker ml(THREAD, HeapShared::is_heap_object_archiving_allowed() ?
                   Heap_lock : NULL);     // needed by HeapShared::run_gc()
    VMThread::execute(&op);
  }
}
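
// A hedged sketch of the Java-side contract assumed by
// regenerate_holder_classes() below (simplified; the actual helper class and
// method live in java.base, and the exact signature may differ):
//
//   // java.lang.invoke.InvokerBytecodeGeneratorHelper
//   static Object[] generateMethodHandleHolderClasses(String[] lines);
//   //   returns { name0, byte[]0, name1, byte[]1, ... }
//
// Each (name, byte[]) pair is then fed to reload_class() to replace a
// dynamically generated holder class with a regenerated, archivable one.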

static GrowableArray<char *>* lambda_list = NULL;

void MetaspaceShared::regenerate_holder_classes(TRAPS) {
  assert(lambda_list != NULL, "Bad List");
  ResourceMark rm(THREAD);

  Symbol* helper_name = vmSymbols::java_lang_invoke_InvokerBytecodeGeneratorHelper();
  Klass* helper_klass = SystemDictionary::resolve_or_null(helper_name, THREAD);
  guarantee(helper_klass != NULL, "java/lang/invoke/InvokerBytecodeGeneratorHelper must exist!");

  int len = lambda_list->length();
  objArrayHandle list_lines = oopFactory::new_objArray_handle(SystemDictionary::String_klass(), len, CHECK);
  for (int i = 0; i < len; i++) {
    Handle h_line = java_lang_String::create_from_str(lambda_list->at(i), CHECK);
    list_lines->obj_at_put(i, h_line());
  }

  //
  // Object[] InvokerBytecodeGeneratorHelper.generateMethodHandleHolderClasses(String[] lines)
  // the returned Object[] layout:
  //   name, byte[], name, byte[] ....
  Symbol* method = vmSymbols::generateMethodHandleHolderClasses();
  Symbol* signrs = vmSymbols::generateMethodHandleHolderClasses_signature();

  jobject ret_obj;
  JavaValue result(T_OBJECT);
  JavaCalls::call_static(&result, helper_klass, method, signrs, list_lines, THREAD);
  ret_obj = result.get_jobject();
  if (!HAS_PENDING_EXCEPTION) {
    if (ret_obj == NULL) {
      log_info(cds)("Failed call to %s.%s", helper_name->as_C_string(), method->as_C_string());
      return;
    }
  } else {
    log_info(cds)("Exception happened: %s", PENDING_EXCEPTION->klass()->name()->as_C_string());
    CLEAR_PENDING_EXCEPTION;
    return;
  }

  objArrayHandle h_array(THREAD, (objArrayOop)ret_obj);
  int sz = h_array->length();
  assert(sz % 2 == 0 && sz >= 2, "the returned array length must be even and at least 2");
  for (int i = 0; i < sz; i += 2) {
    Handle h_name(THREAD, h_array->obj_at(i));
    Handle h_bytes(THREAD, h_array->obj_at(i+1));
    assert(h_name != NULL, "Class name is NULL");
    assert(h_bytes != NULL, "Class bytes is NULL");
    reload_class(h_name, h_bytes, THREAD);
  }
}

// the path name may be of the form "/java.base/package/class_name.class"
char* get_full_class_name(char* path_name) {
  char* end = strstr(path_name, ".class");
  if (end == NULL) {
    end = path_name + strlen(path_name);
  }
  char* start = strstr(path_name, "/java.base/");
  if (start == NULL) {
    start = path_name;
  } else {
    start = path_name + strlen("/java.base/");
  }
  assert(start < end, "Sanity check");
  size_t size = end - start + 1;

  char* full_name = (char*)os::malloc(size, mtInternal);
  size_t i = 0;
  while (i < size) {
    full_name[i++] = *start++;
  }
  full_name[size - 1] = '\0';
  return full_name;
}
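
// For example (input hypothetical): get_full_class_name() maps
// "/java.base/java/lang/invoke/LambdaForm$MH.class" to
// "java/lang/invoke/LambdaForm$MH", which reload_class() below then probes
// in the SymbolTable.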

// k - the class full name
// v - the class bytes
void MetaspaceShared::reload_class(Handle k, Handle v, TRAPS) {
  char* path_name = java_lang_String::as_utf8_string(k());
  char* class_name = get_full_class_name(path_name);
  Symbol* sym = SymbolTable::probe((const char*)class_name, (int)strlen(class_name));
  assert(sym != NULL, "The class should be loaded already");
  // the class must exist
  Klass* klass = SystemDictionary::resolve_or_null(sym, THREAD);
  if (klass == NULL) {
    log_info(cds)("Class %s not present, skip", class_name);
    return;
  }

  typeArrayOop bytes = (typeArrayOop)v();
  int len = bytes->length();
  u1* buf = (u1*)bytes->byte_at_addr(0);
  ClassFileStream st(buf, len, NULL, ClassFileStream::verify);
  ClassLoaderData* cld = ClassLoaderData::the_null_class_loader_data();
  Handle protection_domain;
  ClassLoadInfo cl_info(protection_domain);

  InstanceKlass* result = KlassFactory::create_from_stream(&st,
                                                           sym,
                                                           cld,
                                                           cl_info,
                                                           CHECK);

  if (HAS_PENDING_EXCEPTION) {
    log_info(cds)("Exception happened: %s", PENDING_EXCEPTION->klass()->name()->as_C_string());
    log_info(cds)("Could not create InstanceKlass for class %s", class_name);
    CLEAR_PENDING_EXCEPTION;
    return;
  }

  // replace with the newly created klass.
  {
    MutexLocker lock(THREAD, SystemDictionary_lock);
    InstanceKlass* old = cld->replace_class(sym, result);
    SystemDictionaryShared::set_excluded(old);
    log_info(cds)("Replace class %s, old: %p new: %p", class_name, old, result);
  }

  // add to hierarchy and set state to loaded.
  SystemDictionaryShared::add_replaced_class(result, THREAD);
  // new class not linked yet.
  try_link_class(result, THREAD);
  assert(!HAS_PENDING_EXCEPTION, "Invariant");
}

int MetaspaceShared::preload_classes(const char* class_list_path, TRAPS) {
  ClassListParser parser(class_list_path);
  int class_count = 0;

  while (parser.parse_one_line()) {
    if (!parser.is_lambda_format()) {
      Klass* klass = parser.load_current_class(THREAD);
      if (HAS_PENDING_EXCEPTION) {
        if (klass == NULL &&
            (PENDING_EXCEPTION->klass()->name() == vmSymbols::java_lang_ClassNotFoundException())) {
          // print a warning only when the pending exception is class not found
          log_warning(cds)("Preload Warning: Cannot find %s", parser.current_class_name());
        }
        CLEAR_PENDING_EXCEPTION;
      }
      if (klass != NULL) {
        if (log_is_enabled(Trace, cds)) {
          ResourceMark rm(THREAD);
          log_trace(cds)("Shared spaces preloaded: %s", klass->external_name());
        }

        if (klass->is_instance_klass()) {
          InstanceKlass* ik = InstanceKlass::cast(klass);

          // Link the class to cause the bytecodes to be rewritten and the
          // cpcache to be created. The linking is done as soon as classes
          // are loaded, so that the related data structures (klass and
          // cpCache) are located together.
          try_link_class(ik, THREAD);
          guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
        }
        class_count++;
      }
    } else {
      if (lambda_list == NULL) {
        lambda_list = new GrowableArray<char*>(8);
      }
      lambda_list->append(parser.current_line());
    }
  }

  // Call Java to generate the holder classes, then replace them in the dictionary.
  if (lambda_list != NULL) {
    regenerate_holder_classes(THREAD);
  }
  return class_count;
}

// Returns true if the class's status has changed
bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
  Arguments::assert_is_dumping_archive();
  if (ik->init_state() < InstanceKlass::linked &&
      !SystemDictionaryShared::has_class_failed_verification(ik)) {
    bool saved = BytecodeVerificationLocal;
    if (ik->is_shared_unregistered_class() && ik->class_loader() == NULL) {
      // The verification decision is based on BytecodeVerificationRemote
      // for non-system classes.
      // Since we are using the NULL classloader to load
      // non-system classes for customized class loaders during dumping, we need
      // to temporarily change BytecodeVerificationLocal to be the same as
      // BytecodeVerificationRemote. Note this can cause the parent system
      // classes to also be verified. The extra overhead is acceptable during
      // dumping.
      BytecodeVerificationLocal = BytecodeVerificationRemote;
    }
    ik->link_class(THREAD);
    if (HAS_PENDING_EXCEPTION) {
      ResourceMark rm(THREAD);
      log_warning(cds)("Preload Warning: Verification failed for %s",
                       ik->external_name());
      CLEAR_PENDING_EXCEPTION;
      SystemDictionaryShared::set_class_has_failed_verification(ik);
      _has_error_classes = true;
    }
    BytecodeVerificationLocal = saved;
    return true;
  } else {
    return false;
  }
}

#if INCLUDE_CDS_JAVA_HEAP
void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
  // The closed and open archive heap spaces have a maximum of two regions each.
  // See FileMapInfo::write_archive_heap_regions() for details.
  _closed_archive_heap_regions = new GrowableArray<MemRegion>(2);
  _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
  HeapShared::archive_java_heap_objects(_closed_archive_heap_regions,
                                        _open_archive_heap_regions);
  ArchiveCompactor::OtherROAllocMark mark;
  HeapShared::write_subgraph_info_table();
}

void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps() {
  if (HeapShared::is_heap_object_archiving_allowed()) {
    _closed_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
    dump_archive_heap_oopmaps(_closed_archive_heap_regions, _closed_archive_heap_oopmaps);

    _open_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
    dump_archive_heap_oopmaps(_open_archive_heap_regions, _open_archive_heap_oopmaps);
  }
}

void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                                           GrowableArray<ArchiveHeapOopmapInfo>* oopmaps) {
  for (int i = 0; i < regions->length(); i++) {
    ResourceBitMap oopmap = HeapShared::calculate_oopmap(regions->at(i));
    size_t size_in_bits = oopmap.size();
    size_t size_in_bytes = oopmap.size_in_bytes();
    uintptr_t* buffer = (uintptr_t*)NEW_C_HEAP_ARRAY(char, size_in_bytes, mtInternal);
    oopmap.write_to(buffer, size_in_bytes);
    log_info(cds, heap)("Oopmap = " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for heap region "
                        INTPTR_FORMAT " (" SIZE_FORMAT_W(8) " bytes)",
                        p2i(buffer), size_in_bytes,
                        p2i(regions->at(i).start()), regions->at(i).byte_size());

    ArchiveHeapOopmapInfo info;
    info._oopmap = (address)buffer;
    info._oopmap_size_in_bits = size_in_bits;
    info._oopmap_size_in_bytes = size_in_bytes;
    oopmaps->append(info);
  }
}
#endif // INCLUDE_CDS_JAVA_HEAP

void ReadClosure::do_ptr(void** p) {
  assert(*p == NULL, "initializing previously initialized pointer.");
  intptr_t obj = nextPtr();
  assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
         "hit tag while initializing ptrs.");
  *p = (void*)obj;
}

void ReadClosure::do_u4(u4* p) {
  intptr_t obj = nextPtr();
  *p = (u4)(uintx(obj));
}

void ReadClosure::do_bool(bool* p) {
  intptr_t obj = nextPtr();
  *p = (bool)(uintx(obj));
}
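
// For orientation: ReadClosure consumes the same flat stream of intptr_t slots
// that the dump-time WriteClosure produced, so reads must happen in exactly the
// order of the writes. A hedged sketch of the pairing (names illustrative only):
//
//   // dump time (WriteClosure wc)       // run time (ReadClosure rc)
//   wc.do_tag(some_tag);                 rc.do_tag(some_tag);   // must match
//   wc.do_ptr((void**)&_some_table);     rc.do_ptr((void**)&_some_table);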

void ReadClosure::do_tag(int tag) {
  int old_tag;
  old_tag = (int)(intptr_t)nextPtr();
  // do_int(&old_tag);
  assert(tag == old_tag, "old tag doesn't match");
  FileMapInfo::assert_mark(tag == old_tag);
}

void ReadClosure::do_oop(oop *p) {
  narrowOop o = (narrowOop)nextPtr();
  if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) {
    *p = NULL;
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archived heap object is not allowed");
    assert(HeapShared::open_archive_heap_region_mapped(),
           "Open archive heap region is not mapped");
    *p = HeapShared::decode_from_archive(o);
  }
}

void ReadClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    *(intptr_t*)start = nextPtr();
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}

void MetaspaceShared::set_shared_metaspace_range(void* base, void *static_top, void* top) {
  assert(base <= static_top && static_top <= top, "must be");
  _shared_metaspace_static_top = static_top;
  MetaspaceObj::set_shared_metaspace_range(base, top);
}

// Return true if given address is in the misc data region
bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {
  return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx);
}

bool MetaspaceShared::is_in_trampoline_frame(address addr) {
  if (UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc)) {
    return true;
  }
  return false;
}

bool MetaspaceShared::is_shared_dynamic(void* p) {
  if ((p < MetaspaceObj::shared_metaspace_top()) &&
      (p >= _shared_metaspace_static_top)) {
    return true;
  } else {
    return false;
  }
}

void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
  assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");
  MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;

  FileMapInfo* static_mapinfo = open_static_archive();
  FileMapInfo* dynamic_mapinfo = NULL;

  if (static_mapinfo != NULL) {
    dynamic_mapinfo = open_dynamic_archive();

    // First try to map at the requested address
    result = map_archives(static_mapinfo, dynamic_mapinfo, true);
    if (result == MAP_ARCHIVE_MMAP_FAILURE) {
      // Mapping has failed (probably due to ASLR). Let's map at an address chosen
      // by the OS.
      log_info(cds)("Try to map archive(s) at an alternative address");
      result = map_archives(static_mapinfo, dynamic_mapinfo, false);
    }
  }

  if (result == MAP_ARCHIVE_SUCCESS) {
    bool dynamic_mapped = (dynamic_mapinfo != NULL && dynamic_mapinfo->is_mapped());
    char* cds_base = static_mapinfo->mapped_base();
    char* cds_end = dynamic_mapped ? dynamic_mapinfo->mapped_end() : static_mapinfo->mapped_end();
    set_shared_metaspace_range(cds_base, static_mapinfo->mapped_end(), cds_end);
    _relocation_delta = static_mapinfo->relocation_delta();
    if (dynamic_mapped) {
      FileMapInfo::set_shared_path_table(dynamic_mapinfo);
    } else {
      FileMapInfo::set_shared_path_table(static_mapinfo);
    }
    _requested_base_address = static_mapinfo->requested_base_address();
  } else {
    set_shared_metaspace_range(NULL, NULL, NULL);
    UseSharedSpaces = false;
    FileMapInfo::fail_continue("Unable to map shared spaces");
    if (PrintSharedArchiveAndExit) {
      vm_exit_during_initialization("Unable to use shared archive.");
    }
  }

  if (static_mapinfo != NULL && !static_mapinfo->is_mapped()) {
    delete static_mapinfo;
  }
  if (dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped()) {
    delete dynamic_mapinfo;
  }
}

FileMapInfo* MetaspaceShared::open_static_archive() {
  FileMapInfo* mapinfo = new FileMapInfo(true);
  if (!mapinfo->initialize()) {
    delete(mapinfo);
    return NULL;
  }
  return mapinfo;
}

FileMapInfo* MetaspaceShared::open_dynamic_archive() {
  if (DynamicDumpSharedSpaces) {
    return NULL;
  }
  if (Arguments::GetSharedDynamicArchivePath() == NULL) {
    return NULL;
  }

  FileMapInfo* mapinfo = new FileMapInfo(false);
  if (!mapinfo->initialize()) {
    delete(mapinfo);
    return NULL;
  }
  return mapinfo;
}

// use_requested_addr:
//   true  = map at FileMapHeader::_requested_base_address
//   false = map at an alternative address picked by the OS.
MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, FileMapInfo* dynamic_mapinfo,
                                               bool use_requested_addr) {
  if (use_requested_addr && static_mapinfo->requested_base_address() == NULL) {
    log_info(cds)("Archive(s) were created with -XX:SharedBaseAddress=0. Always map at os-selected address.");
    return MAP_ARCHIVE_MMAP_FAILURE;
  }

  PRODUCT_ONLY(if (ArchiveRelocationMode == 1 && use_requested_addr) {
    // For product build only -- this is for benchmarking the cost of doing relocation.
    // For debug builds, the check is done below, after reserving the space, for better test coverage
    // (see comment below).
    log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address");
    return MAP_ARCHIVE_MMAP_FAILURE;
  });

  if (ArchiveRelocationMode == 2 && !use_requested_addr) {
    log_info(cds)("ArchiveRelocationMode == 2: never map archive(s) at an alternative address");
    return MAP_ARCHIVE_MMAP_FAILURE;
  };

  if (dynamic_mapinfo != NULL) {
    // Ensure that the OS won't be able to allocate new memory spaces between the two
    // archives, or else it would mess up the simple comparison in MetaspaceObj::is_shared().
    assert(static_mapinfo->mapping_end_offset() == dynamic_mapinfo->mapping_base_offset(), "no gap");
  }

  ReservedSpace archive_space_rs, class_space_rs;
  MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
  char* mapped_base_address = reserve_address_space_for_archives(static_mapinfo, dynamic_mapinfo,
                                                                 use_requested_addr, archive_space_rs,
                                                                 class_space_rs);
  if (mapped_base_address == NULL) {
    result = MAP_ARCHIVE_MMAP_FAILURE;
    log_debug(cds)("Failed to reserve spaces (use_requested_addr=%u)", (unsigned)use_requested_addr);
  } else {

#ifdef ASSERT
    // Some sanity checks after reserving address spaces for archives
    // and class space.
    assert(archive_space_rs.is_reserved(), "Sanity");
    if (Metaspace::using_class_space()) {
      // Class space must closely follow the archive space. Both spaces
      // must be aligned correctly.
      assert(class_space_rs.is_reserved(),
             "A class space should have been reserved");
      assert(class_space_rs.base() >= archive_space_rs.end(),
             "class space should follow the cds archive space");
      assert(is_aligned(archive_space_rs.base(),
                        MetaspaceShared::reserved_space_alignment()),
             "Archive space misaligned");
      assert(is_aligned(class_space_rs.base(),
                        Metaspace::reserve_alignment()),
             "class space misaligned");
    }
#endif // ASSERT

    log_debug(cds)("Reserved archive_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
                   p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size());
    log_debug(cds)("Reserved class_space_rs   [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
                   p2i(class_space_rs.base()), p2i(class_space_rs.end()), class_space_rs.size());

    if (MetaspaceShared::use_windows_memory_mapping()) {
      // We have now reserved address space for the archives, and will map
      // the archive files into this space.
      //
      // Special handling for Windows: on Windows we cannot map a file view
      // into an existing memory mapping. So, we unmap the address range we
      // just reserved again, which will make it available for mapping the
      // archives.
      // Reserving this range has not been for naught, however, since it makes
      // us reasonably sure the address range is available.
      //
      // But still it may fail, since between unmapping the range and mapping
      // in the archive someone else may grab the address space. Therefore
      // there is a fallback in FileMap::map_region() where we just read in
      // the archive files sequentially instead of mapping them in. We couple
      // this with use_requested_addr, since we're going to patch all the
      // pointers anyway so there's no benefit to mmap.
      if (use_requested_addr) {
        log_info(cds)("Windows mmap workaround: releasing archive space.");
        archive_space_rs.release();
      }
    }
    MapArchiveResult static_result = map_archive(static_mapinfo, mapped_base_address, archive_space_rs);
    MapArchiveResult dynamic_result = (static_result == MAP_ARCHIVE_SUCCESS) ?
      map_archive(dynamic_mapinfo, mapped_base_address, archive_space_rs) : MAP_ARCHIVE_OTHER_FAILURE;

    DEBUG_ONLY(if (ArchiveRelocationMode == 1 && use_requested_addr) {
      // This is for simulating mmap failures at the requested address.
      // In debug builds, we do it here (after all archives have possibly been
      // mapped), so we can thoroughly test the code for failure handling
      // (releasing all allocated resources, etc).
      log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address");
      if (static_result == MAP_ARCHIVE_SUCCESS) {
        static_result = MAP_ARCHIVE_MMAP_FAILURE;
      }
      if (dynamic_result == MAP_ARCHIVE_SUCCESS) {
        dynamic_result = MAP_ARCHIVE_MMAP_FAILURE;
      }
    });

    if (static_result == MAP_ARCHIVE_SUCCESS) {
      if (dynamic_result == MAP_ARCHIVE_SUCCESS) {
        result = MAP_ARCHIVE_SUCCESS;
      } else if (dynamic_result == MAP_ARCHIVE_OTHER_FAILURE) {
        assert(dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped(), "must have failed");
        // No need to retry mapping the dynamic archive again, as it will never succeed
        // (bad file, etc) -- just keep the base archive.
        log_warning(cds, dynamic)("Unable to use shared archive. The top archive failed to load: %s",
                                  dynamic_mapinfo->full_path());
        result = MAP_ARCHIVE_SUCCESS;
        // TODO: we could give the unused space for the dynamic archive to class_space_rs, but there's no
        // easy API to do that right now.
      } else {
        result = MAP_ARCHIVE_MMAP_FAILURE;
      }
    } else if (static_result == MAP_ARCHIVE_OTHER_FAILURE) {
      result = MAP_ARCHIVE_OTHER_FAILURE;
    } else {
      result = MAP_ARCHIVE_MMAP_FAILURE;
    }
  }

  if (result == MAP_ARCHIVE_SUCCESS) {
    SharedBaseAddress = (size_t)mapped_base_address;
    LP64_ONLY({
      if (Metaspace::using_class_space()) {
        // Set up ccs in metaspace.
        Metaspace::initialize_class_space(class_space_rs);

        // Set up compressed Klass pointer encoding: the encoding range must
        // cover both archive and class space.
        address cds_base = (address)static_mapinfo->mapped_base();
        address ccs_end = (address)class_space_rs.end();
        CompressedKlassPointers::initialize(cds_base, ccs_end - cds_base);

        // map_heap_regions() compares the current narrow oop and klass encodings
        // with the archived ones, so it must be done after all encodings are determined.
        static_mapinfo->map_heap_regions();
      }
    });
    log_info(cds)("Using optimized module handling %s", MetaspaceShared::use_optimized_module_handling() ? "enabled" : "disabled");
  } else {
    unmap_archive(static_mapinfo);
    unmap_archive(dynamic_mapinfo);
    release_reserved_spaces(archive_space_rs, class_space_rs);
  }

  return result;
}
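
// For reference, the ArchiveRelocationMode values exercised above (this only
// restates the checks in map_archives(); 0 is assumed to be the default):
//   0 - map at the requested address; fall back to an OS-chosen one on failure
//   1 - always map at an alternative address (forces relocation)
//   2 - only map at the requested address; never fall back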

// This will reserve two address spaces suitable to house Klass structures, one
// for the cds archives (static archive and optionally dynamic archive) and
// optionally one more for ccs.
//
// Since both spaces must fall within the compressed class pointer encoding
// range, they are allocated close to each other.
//
// Space for archives will be reserved first, followed by a potential gap,
// followed by the space for ccs:
//
// +-- Base address             A        B                     End
// |                            |        |                      |
// v                            v        v                      v
// +-------------+--------------+        +----------------------+
// | static arc  | [dyn. arch]  | [gap]  | compr. class space   |
// +-------------+--------------+        +----------------------+
//
// (The gap may result from different alignment requirements between metaspace
//  and CDS)
//
// If UseCompressedClassPointers is disabled, only one address space will be
// reserved:
//
// +-- Base address             End
// |                             |
// v                             v
// +-------------+--------------+
// | static arc  | [dyn. arch]  |
// +-------------+--------------+
//
// Base address: If use_archive_base_addr is true, the Base address is
// determined by the address stored in the static archive. If
// use_archive_base_addr is false, this base address is determined
// by the platform.
//
// If UseCompressedClassPointers=1, the range encompassing both spaces will be
// suitable to en/decode narrow Klass pointers: the base will be valid for
// encoding, and the range [Base, End) must not surpass KlassEncodingMetaspaceMax.
//
// Return:
//
// - On success:
//    - archive_space_rs will be reserved and large enough to host the static
//      and, if needed, the dynamic archive: [Base, A).
//      archive_space_rs.base and size will be aligned to CDS reserve
//      granularity.
//    - class_space_rs: If UseCompressedClassPointers=1, class_space_rs will
//      be reserved. Its start address will be aligned to metaspace reserve
//      alignment, which may differ from CDS alignment. It will follow the cds
//      archive space, close enough such that narrow class pointer encoding
//      covers both spaces.
//      If UseCompressedClassPointers=0, class_space_rs remains unreserved.
// - On error: NULL is returned and the spaces remain unreserved.
char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_mapinfo,
                                                          FileMapInfo* dynamic_mapinfo,
                                                          bool use_archive_base_addr,
                                                          ReservedSpace& archive_space_rs,
                                                          ReservedSpace& class_space_rs) {

  address const base_address = (address) (use_archive_base_addr ? static_mapinfo->requested_base_address() : NULL);
  const size_t archive_space_alignment = MetaspaceShared::reserved_space_alignment();

  // Size and requested location of the archive_space_rs (for both static and dynamic archives)
  assert(static_mapinfo->mapping_base_offset() == 0, "Must be");
  size_t archive_end_offset = (dynamic_mapinfo == NULL) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset();
  size_t archive_space_size = align_up(archive_end_offset, archive_space_alignment);

  // If a base address is given, it must have valid alignment and be suitable as encoding base.
  if (base_address != NULL) {
    assert(is_aligned(base_address, archive_space_alignment),
           "Archive base address invalid: " PTR_FORMAT ".", p2i(base_address));
    if (Metaspace::using_class_space()) {
      assert(CompressedKlassPointers::is_valid_base(base_address),
             "Archive base address invalid: " PTR_FORMAT ".", p2i(base_address));
    }
  }

  if (!Metaspace::using_class_space()) {
    // Get the simple case out of the way first:
    // no compressed class space, simple allocation.
    archive_space_rs = ReservedSpace(archive_space_size, archive_space_alignment,
                                     false /* bool large */, (char*)base_address);
    if (archive_space_rs.is_reserved()) {
      assert(base_address == NULL ||
             (address)archive_space_rs.base() == base_address, "Sanity");
      // Register archive space with NMT.
      MemTracker::record_virtual_memory_type(archive_space_rs.base(), mtClassShared);
      return archive_space_rs.base();
    }
    return NULL;
  }

#ifdef _LP64

  // Complex case: two spaces adjacent to each other, both to be addressable
  // with narrow class pointers.
  // We reserve the whole range spanning both spaces, then split that range up.

  const size_t class_space_alignment = Metaspace::reserve_alignment();

  // To simplify matters, let's assume that metaspace alignment will always be
  // equal to or a multiple of archive alignment.
  assert(is_power_of_2(class_space_alignment) &&
         is_power_of_2(archive_space_alignment) &&
         class_space_alignment >= archive_space_alignment,
         "Sanity");

  const size_t class_space_size = CompressedClassSpaceSize;
  assert(CompressedClassSpaceSize > 0 &&
         is_aligned(CompressedClassSpaceSize, class_space_alignment),
         "CompressedClassSpaceSize malformed: "
         SIZE_FORMAT, CompressedClassSpaceSize);

  const size_t ccs_begin_offset = align_up(base_address + archive_space_size,
                                           class_space_alignment) - base_address;
  const size_t gap_size = ccs_begin_offset - archive_space_size;

  const size_t total_range_size =
      align_up(archive_space_size + gap_size + class_space_size,
               os::vm_allocation_granularity());

  ReservedSpace total_rs;
  if (base_address != NULL) {
    // Reserve at the given archive base address, or not at all.
    total_rs = ReservedSpace(total_range_size, archive_space_alignment,
                             false /* bool large */, (char*) base_address);
  } else {
    // Reserve at any address, but leave it up to the platform to choose a good one.
    total_rs = Metaspace::reserve_address_space_for_compressed_classes(total_range_size);
  }

  if (!total_rs.is_reserved()) {
    return NULL;
  }

  // Paranoid checks:
  assert(base_address == NULL || (address)total_rs.base() == base_address,
         "Sanity (" PTR_FORMAT " vs " PTR_FORMAT ")", p2i(base_address), p2i(total_rs.base()));
  assert(is_aligned(total_rs.base(), archive_space_alignment), "Sanity");
  assert(total_rs.size() == total_range_size, "Sanity");
  assert(CompressedKlassPointers::is_valid_base((address)total_rs.base()), "Sanity");
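
  // Illustrative numbers (hypothetical): with base_address = 0x800000000,
  // archive_space_size = 20 MB and class_space_alignment = 16 MB,
  // ccs_begin_offset rounds 20 MB up to the next 16 MB boundary (32 MB),
  // leaving a 12 MB gap between the end of the archives and the start of ccs.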
  // Now split up the space into ccs and cds archive. For simplicity, just leave
  // the gap reserved at the end of the archive space.
  archive_space_rs = total_rs.first_part(ccs_begin_offset,
                                         (size_t)os::vm_allocation_granularity(),
                                         /*split=*/true);
  class_space_rs = total_rs.last_part(ccs_begin_offset);

  assert(is_aligned(archive_space_rs.base(), archive_space_alignment), "Sanity");
  assert(is_aligned(archive_space_rs.size(), archive_space_alignment), "Sanity");
  assert(is_aligned(class_space_rs.base(), class_space_alignment), "Sanity");
  assert(is_aligned(class_space_rs.size(), class_space_alignment), "Sanity");

  // NMT: fix up the space tags
  MemTracker::record_virtual_memory_type(archive_space_rs.base(), mtClassShared);
  MemTracker::record_virtual_memory_type(class_space_rs.base(), mtClass);

  return archive_space_rs.base();

#else
  ShouldNotReachHere();
  return NULL;
#endif

}

void MetaspaceShared::release_reserved_spaces(ReservedSpace& archive_space_rs,
                                              ReservedSpace& class_space_rs) {
  if (archive_space_rs.is_reserved()) {
    log_debug(cds)("Released shared space (archive) " INTPTR_FORMAT, p2i(archive_space_rs.base()));
    archive_space_rs.release();
  }
  if (class_space_rs.is_reserved()) {
    log_debug(cds)("Released shared space (classes) " INTPTR_FORMAT, p2i(class_space_rs.base()));
    class_space_rs.release();
  }
}

static int archive_regions[] = {MetaspaceShared::mc,
                                MetaspaceShared::rw,
                                MetaspaceShared::ro};
static int archive_regions_count = 3;

MapArchiveResult MetaspaceShared::map_archive(FileMapInfo* mapinfo, char* mapped_base_address, ReservedSpace rs) {
  assert(UseSharedSpaces, "must be runtime");
  if (mapinfo == NULL) {
    return MAP_ARCHIVE_SUCCESS; // The dynamic archive has not been specified. No error has happened -- trivially succeeded.
  }

  mapinfo->set_is_mapped(false);

  if (mapinfo->alignment() != (size_t)os::vm_allocation_granularity()) {
    log_error(cds)("Unable to map CDS archive -- os::vm_allocation_granularity() expected: " SIZE_FORMAT
                   " actual: %d", mapinfo->alignment(), os::vm_allocation_granularity());
    return MAP_ARCHIVE_OTHER_FAILURE;
  }

  MapArchiveResult result =
    mapinfo->map_regions(archive_regions, archive_regions_count, mapped_base_address, rs);

  if (result != MAP_ARCHIVE_SUCCESS) {
    unmap_archive(mapinfo);
    return result;
  }

  if (!mapinfo->validate_shared_path_table()) {
    unmap_archive(mapinfo);
    return MAP_ARCHIVE_OTHER_FAILURE;
  }

  mapinfo->set_is_mapped(true);
  return MAP_ARCHIVE_SUCCESS;
}

void MetaspaceShared::unmap_archive(FileMapInfo* mapinfo) {
  assert(UseSharedSpaces, "must be runtime");
  if (mapinfo != NULL) {
    mapinfo->unmap_regions(archive_regions, archive_regions_count);
    mapinfo->set_is_mapped(false);
  }
}
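
// A hedged sketch of how the mapping code above is reached at runtime
// (standard HotSpot flags; the archive name is a placeholder):
//
//   java -Xshare:on -XX:SharedArchiveFile=app.jsa -cp app.jar Main
//
// With -Xshare:auto (the default), a mapping failure falls back to
// UseSharedSpaces = false instead of exiting the VM.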

// Read the miscellaneous data from the shared file, and
// serialize it out to its various destinations.

void MetaspaceShared::initialize_shared_spaces() {
  FileMapInfo *static_mapinfo = FileMapInfo::current_info();
  _i2i_entry_code_buffers = static_mapinfo->i2i_entry_code_buffers();
  _i2i_entry_code_buffers_size = static_mapinfo->i2i_entry_code_buffers_size();
  char* buffer = static_mapinfo->cloned_vtables();
  clone_cpp_vtables((intptr_t*)buffer);

  // Verify various attributes of the archive, plus initialize the
  // shared string/symbol tables
  buffer = static_mapinfo->serialized_data();
  intptr_t* array = (intptr_t*)buffer;
  ReadClosure rc(&array);
  serialize(&rc);

  // Initialize the run-time symbol table.
  SymbolTable::create_table();

  static_mapinfo->patch_archived_heap_embedded_pointers();

  // Close the mapinfo file
  static_mapinfo->close();

  static_mapinfo->unmap_region(MetaspaceShared::bm);

  FileMapInfo *dynamic_mapinfo = FileMapInfo::dynamic_info();
  if (dynamic_mapinfo != NULL) {
    intptr_t* buffer = (intptr_t*)dynamic_mapinfo->serialized_data();
    ReadClosure rc(&buffer);
    SymbolTable::serialize_shared_table_header(&rc, false);
    SystemDictionaryShared::serialize_dictionary_headers(&rc, false);
    dynamic_mapinfo->close();
  }

  if (PrintSharedArchiveAndExit) {
    if (PrintSharedDictionary) {
      tty->print_cr("\nShared classes:\n");
      SystemDictionaryShared::print_on(tty);
    }
    if (FileMapInfo::current_info() == NULL || _archive_loading_failed) {
      tty->print_cr("archive is invalid");
      vm_exit(1);
    } else {
      tty->print_cr("archive is valid");
      vm_exit(0);
    }
  }
}

// JVM/TI RedefineClasses() support:
bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (UseSharedSpaces) {
    // remap the shared readonly space to shared readwrite, private
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (!mapinfo->remap_shared_readonly_as_readwrite()) {
      return false;
    }
    if (FileMapInfo::dynamic_info() != NULL) {
      mapinfo = FileMapInfo::dynamic_info();
      if (!mapinfo->remap_shared_readonly_as_readwrite()) {
        return false;
      }
    }
    _remapped_readwrite = true;
  }
  return true;
}

void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
  // This is highly unlikely to happen on 64-bit because we have reserved a 4GB space.
  // On 32-bit we reserve only 256MB, so you could run out of space with 100,000 classes
  // or so.
  _mc_region.print_out_of_space_msg(name, needed_bytes);
  _rw_region.print_out_of_space_msg(name, needed_bytes);
  _ro_region.print_out_of_space_msg(name, needed_bytes);

  vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
                                "Please reduce the number of shared classes.");
}

// This is used to relocate the pointers so that the base archive can be mapped at
// MetaspaceShared::requested_base_address() without runtime relocation.
intx MetaspaceShared::final_delta() {
  return intx(MetaspaceShared::requested_base_address())  // We want the base archive to be mapped here at runtime
       - intx(SharedBaseAddress);                         // .. but this is where the base archive is mapped at dump time
}
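
// Example of the output produced by print_on() below, with hypothetical
// addresses (runtime case; the three addresses are base, static top, and top):
//
//   CDS archive(s) mapped at: [0x0000000800000000-0x0000000800b00000-0x0000000800c00000), size 12582912, SharedBaseAddress: 0x0000000800000000, ArchiveRelocationMode: 0.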

void MetaspaceShared::print_on(outputStream* st) {
  if (UseSharedSpaces || DumpSharedSpaces) {
    st->print("CDS archive(s) mapped at: ");
    address base;
    address top;
    if (UseSharedSpaces) { // Runtime
      base = (address)MetaspaceObj::shared_metaspace_base();
      address static_top = (address)_shared_metaspace_static_top;
      top = (address)MetaspaceObj::shared_metaspace_top();
      st->print("[" PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "), ", p2i(base), p2i(static_top), p2i(top));
    } else if (DumpSharedSpaces) { // Dump Time
      base = (address)_shared_rs.base();
      top  = (address)_shared_rs.end();
      st->print("[" PTR_FORMAT "-" PTR_FORMAT "), ", p2i(base), p2i(top));
    }
    st->print("size " SIZE_FORMAT ", ", top - base);
    st->print("SharedBaseAddress: " PTR_FORMAT ", ArchiveRelocationMode: %d.", SharedBaseAddress, (int)ArchiveRelocationMode);
  } else {
    st->print("CDS disabled.");
  }
  st->cr();
}