/*
 * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/classListParser.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/loaderConstraints.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/placeholders.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodes.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "memory/archiveUtils.inline.hpp"
#include "memory/dynamicArchive.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/signature.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/ostream.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/hashtable.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

ReservedSpace MetaspaceShared::_shared_rs;
VirtualSpace MetaspaceShared::_shared_vs;
ReservedSpace MetaspaceShared::_symbol_rs;
VirtualSpace MetaspaceShared::_symbol_vs;
MetaspaceSharedStats MetaspaceShared::_stats;
bool MetaspaceShared::_has_error_classes;
bool MetaspaceShared::_archive_loading_failed = false;
bool MetaspaceShared::_remapped_readwrite = false;
address MetaspaceShared::_i2i_entry_code_buffers = NULL;
size_t MetaspaceShared::_i2i_entry_code_buffers_size = 0;
void* MetaspaceShared::_shared_metaspace_static_top = NULL;
intx MetaspaceShared::_relocation_delta;
char* MetaspaceShared::_requested_base_address;
bool MetaspaceShared::_use_optimized_module_handling = true;
bool MetaspaceShared::_use_full_module_graph = true;

// The CDS archive is divided into the following regions:
//     mc  - misc code (the method entry trampolines, c++ vtables)
//     rw  - read-write metadata
//     ro  - read-only metadata and read-only tables
//
//     ca0 - closed archive heap space #0
//     ca1 - closed archive heap space #1 (may be empty)
//     oa0 - open archive heap space #0
//     oa1 - open archive heap space #1 (may be empty)
//
// The mc, rw, and ro regions are linearly allocated, starting from
// SharedBaseAddress, in the order of mc->rw->ro. The sizes of these 3 regions
// are page-aligned, and there's no gap between any consecutive regions.
//
// These 3 regions are populated in the following steps:
// [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
//     temporarily allocated outside of the shared regions. Only the method entry
//     trampolines are written into the mc region.
// [2] C++ vtables are copied into the mc region.
// [3] ArchiveCompactor copies RW metadata into the rw region.
// [4] ArchiveCompactor copies RO metadata into the ro region.
// [5] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
//     are copied into the ro region as read-only tables.
//
// The ca0/ca1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
// Their layout is independent of the mc/rw/ro regions.

char* DumpRegion::expand_top_to(char* newtop) {
  assert(is_allocatable(), "must be initialized and not packed");
  assert(newtop >= _top, "must not grow backwards");
  if (newtop > _end) {
    MetaspaceShared::report_out_of_space(_name, newtop - _top);
    ShouldNotReachHere();
  }

  if (_rs == MetaspaceShared::shared_rs()) {
    uintx delta;
    if (DynamicDumpSharedSpaces) {
      delta = DynamicArchive::object_delta_uintx(newtop);
    } else {
      delta = MetaspaceShared::object_delta_uintx(newtop);
    }
    if (delta > MAX_SHARED_DELTA) {
      // This is just a sanity check and should not appear in any real world usage. This
      // happens only if you allocate more than 2GB of shared objects and would require
      // millions of shared classes.
      vm_exit_during_initialization("Out of memory in the CDS archive",
                                    "Please reduce the number of shared classes.");
    }
  }

  MetaspaceShared::commit_to(_rs, _vs, newtop);
  _top = newtop;
  return _top;
}

char* DumpRegion::allocate(size_t num_bytes, size_t alignment) {
  char* p = (char*)align_up(_top, alignment);
  char* newtop = p + align_up(num_bytes, alignment);
  expand_top_to(newtop);
  memset(p, 0, newtop - p);
  return p;
}

void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
  assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
  intptr_t *p = (intptr_t*)_top;
  char* newtop = _top + sizeof(intptr_t);
  expand_top_to(newtop);
  *p = n;
  if (need_to_mark) {
    ArchivePtrMarker::mark_pointer(p);
  }
}

void DumpRegion::print(size_t total_bytes) const {
  log_debug(cds)("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
                 _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()),
                 p2i(_base + MetaspaceShared::final_delta()));
}

void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
  log_error(cds)("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
                 _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
  if (strcmp(_name, failing_region) == 0) {
    log_error(cds)(" required = %d", int(needed_bytes));
  }
}

void DumpRegion::init(ReservedSpace* rs, VirtualSpace* vs) {
  _rs = rs;
  _vs = vs;
  // Start with 0 committed bytes. The memory will be committed as needed by
  // MetaspaceShared::commit_to().
  if (!_vs->initialize(*_rs, 0)) {
    fatal("Unable to allocate memory for shared space");
  }
  _base = _top = _rs->base();
  _end = _rs->end();
}

void DumpRegion::pack(DumpRegion* next) {
  assert(!is_packed(), "sanity");
  _end = (char*)align_up(_top, MetaspaceShared::reserved_space_alignment());
  _is_packed = true;
  if (next != NULL) {
    next->_rs = _rs;
    next->_vs = _vs;
    next->_base = next->_top = this->_end;
    next->_end = _rs->end();
  }
}

static DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _symbol_region("symbols");
static size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;

void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space) {
  first_space->init(&_shared_rs, &_shared_vs);
}

DumpRegion* MetaspaceShared::misc_code_dump_space() {
  return &_mc_region;
}

DumpRegion* MetaspaceShared::read_write_dump_space() {
  return &_rw_region;
}

DumpRegion* MetaspaceShared::read_only_dump_space() {
  return &_ro_region;
}

void MetaspaceShared::pack_dump_space(DumpRegion* current, DumpRegion* next,
                                      ReservedSpace* rs) {
  current->pack(next);
}

char* MetaspaceShared::symbol_space_alloc(size_t num_bytes) {
  return _symbol_region.allocate(num_bytes);
}

char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
  return _mc_region.allocate(num_bytes);
}

char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
  return _ro_region.allocate(num_bytes);
}

char* MetaspaceShared::read_write_space_alloc(size_t num_bytes) {
  return _rw_region.allocate(num_bytes);
}

size_t MetaspaceShared::reserved_space_alignment() { return os::vm_allocation_granularity(); }

static bool shared_base_valid(char* shared_base) {
#ifdef _LP64
  return CompressedKlassPointers::is_valid_base((address)shared_base);
#else
  return true;
#endif
}

static bool shared_base_too_high(char* shared_base, size_t cds_total) {
  if (SharedBaseAddress != 0 && shared_base < (char*)SharedBaseAddress) {
    // SharedBaseAddress is very high (e.g., 0xffffffffffffff00) so
    // align_up(SharedBaseAddress, MetaspaceShared::reserved_space_alignment()) has wrapped around.
    return true;
  }
  if (max_uintx - uintx(shared_base) < uintx(cds_total)) {
    // The end of the archive will wrap around.
    return true;
  }

  return false;
}

static char* compute_shared_base(size_t cds_total) {
  char* shared_base = (char*)align_up((char*)SharedBaseAddress, MetaspaceShared::reserved_space_alignment());
  const char* err = NULL;
  if (shared_base_too_high(shared_base, cds_total)) {
    err = "too high";
  } else if (!shared_base_valid(shared_base)) {
    err = "invalid for this platform";
  }
  if (err) {
    log_warning(cds)("SharedBaseAddress (" INTPTR_FORMAT ") is %s. Reverted to " INTPTR_FORMAT,
                     p2i((void*)SharedBaseAddress), err,
                     p2i((void*)Arguments::default_SharedBaseAddress()));
    SharedBaseAddress = Arguments::default_SharedBaseAddress();
    shared_base = (char*)align_up((char*)SharedBaseAddress, MetaspaceShared::reserved_space_alignment());
  }
  assert(!shared_base_too_high(shared_base, cds_total) && shared_base_valid(shared_base), "Sanity");
  return shared_base;
}

void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
  assert(DumpSharedSpaces, "should be called for dump time only");

  const size_t reserve_alignment = MetaspaceShared::reserved_space_alignment();

#ifdef _LP64
  // On 64-bit VM we reserve a 4G range and, if UseCompressedClassPointers=1,
  // will use that to house both the archives and the ccs. See below for
  // details.
  const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
  const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
#else
  // We don't support archives larger than 256MB on 32-bit due to limited
  // virtual address space.
  size_t cds_total = align_down(256*M, reserve_alignment);
#endif

  char* shared_base = compute_shared_base(cds_total);
  _requested_base_address = shared_base;

  // Whether to use SharedBaseAddress as attach address.
  bool use_requested_base = true;

  if (shared_base == NULL) {
    use_requested_base = false;
  }

  if (ArchiveRelocationMode == 1) {
    log_info(cds)("ArchiveRelocationMode == 1: always allocate class space at an alternative address");
    use_requested_base = false;
  }

  // First try to reserve the space at the specified SharedBaseAddress.
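  // (Illustrative note added for orientation, not taken from the upstream
  // sources: with the default -XX:SharedBaseAddress=0x800000000 on a 64-bit
  // build, the code below first attempts roughly
  //     ReservedSpace(4G, reserve_alignment, false, (char*)0x800000000);
  // If ASLR or an existing mapping makes that address unavailable, we fall
  // through to the "anywhere" reservations, and the dump later relies on
  // relocate_to_requested_base_address() to patch all pointers so the
  // archive can still be mapped at the requested base at run time.)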
  assert(!_shared_rs.is_reserved(), "must be");
  if (use_requested_base) {
    _shared_rs = ReservedSpace(cds_total, reserve_alignment,
                               false /* large */, (char*)shared_base);
    if (_shared_rs.is_reserved()) {
      assert(_shared_rs.base() == shared_base, "should match");
    } else {
      log_info(cds)("dumptime space reservation: failed to map at "
                    "SharedBaseAddress " PTR_FORMAT, p2i(shared_base));
    }
  }
  if (!_shared_rs.is_reserved()) {
    // Get a reserved space anywhere if attaching at the SharedBaseAddress
    // fails:
    if (UseCompressedClassPointers) {
      // If we need to reserve class space as well, let the platform handle
      // the reservation.
      LP64_ONLY(_shared_rs =
                Metaspace::reserve_address_space_for_compressed_classes(cds_total);)
      NOT_LP64(ShouldNotReachHere();)
    } else {
      // Anywhere is fine.
      _shared_rs = ReservedSpace(cds_total, reserve_alignment,
                                 false /* large */, (char*)NULL);
    }
  }

  if (!_shared_rs.is_reserved()) {
    vm_exit_during_initialization("Unable to reserve memory for shared space",
                                  err_msg(SIZE_FORMAT " bytes.", cds_total));
  }

#ifdef _LP64

  if (UseCompressedClassPointers) {

    assert(CompressedKlassPointers::is_valid_base((address)_shared_rs.base()), "Sanity");

    // On 64-bit VM, if UseCompressedClassPointers=1, the compressed class space
    // must be allocated near the cds such that the compressed Klass pointer
    // encoding can be used to en/decode pointers from both cds and ccs. Since
    // Metaspace cannot do this (it knows nothing about cds), we do it for
    // Metaspace here and pass it the space to use for ccs.
    //
    // We do this by reserving space for the ccs behind the archives. Note
    // however that ccs follows a different alignment
    // (Metaspace::reserve_alignment), so there may be a gap between ccs and
    // cds.
    // We use a similar layout at runtime, see reserve_address_space_for_archives().
    //
    //                              +-- SharedBaseAddress (default = 0x800000000)
    //                              v
    // +-..---------+---------+ ... +----+----+----+--------+----------------+
    // |    Heap    | Archive |     | MC | RW | RO | [gap]  |   class space  |
    // +-..---------+---------+ ... +----+----+----+--------+----------------+
    // |<--   MaxHeapSize  -->|     |<-- UnscaledClassSpaceMax = 4GB ------->|
    //
    // Note: ccs must follow the archives, and the archives must start at the
    // encoding base. However, the exact placement of ccs does not matter as
    // long as it resides in the encoding range of CompressedKlassPointers
    // and comes after the archive.
    //
    // We do this by splitting up the allocated 4G into 3G of archive space,
    // followed by 1G for the ccs:
    // + The upper 1 GB is used as the "temporary compressed class space"
    //   -- preload_classes() will store Klasses into this space.
    // + The lower 3 GB is used for the archive -- when preload_classes()
    //   is done, ArchiveCompactor will copy the class metadata into this
    //   space, first the RW parts, then the RO parts.

    // Starting address of ccs must be aligned to Metaspace::reserve_alignment()...
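    // (Worked example, assuming the full 4G reservation described above and a
    // Metaspace::reserve_alignment() that evenly divides 1G:
    //     class_space_size  = align_down(4G / 4, alignment)           = 1G
    //     class_space_start = align_down(_shared_rs.end() - 1G, alignment)
    //     archive_size      = class_space_start - _shared_rs.base()   ~ 3G
    // i.e. the top quarter of the reservation becomes the temporary ccs and
    // the lower ~3G the archive. The concrete numbers are for illustration
    // only; the code below works for any alignment.)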
    size_t class_space_size = align_down(_shared_rs.size() / 4, Metaspace::reserve_alignment());
    address class_space_start = (address)align_down(_shared_rs.end() - class_space_size, Metaspace::reserve_alignment());
    size_t archive_size = class_space_start - (address)_shared_rs.base();

    ReservedSpace tmp_class_space = _shared_rs.last_part(archive_size);
    _shared_rs = _shared_rs.first_part(archive_size);

    // ... as does the size of ccs.
    tmp_class_space = tmp_class_space.first_part(class_space_size);
    CompressedClassSpaceSize = class_space_size;

    // Let Metaspace initialize ccs
    Metaspace::initialize_class_space(tmp_class_space);

    // and set up CompressedKlassPointers encoding.
    CompressedKlassPointers::initialize((address)_shared_rs.base(), cds_total);

    log_info(cds)("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
                  p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());

    log_info(cds)("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                  CompressedClassSpaceSize, p2i(tmp_class_space.base()));

    assert(_shared_rs.end() == tmp_class_space.base() &&
           is_aligned(_shared_rs.base(), MetaspaceShared::reserved_space_alignment()) &&
           is_aligned(tmp_class_space.base(), Metaspace::reserve_alignment()) &&
           is_aligned(tmp_class_space.size(), Metaspace::reserve_alignment()), "Sanity");
  }

#endif

  init_shared_dump_space(&_mc_region);
  SharedBaseAddress = (size_t)_shared_rs.base();
  log_info(cds)("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                _shared_rs.size(), p2i(_shared_rs.base()));

  // We don't want any valid object to be at the very bottom of the archive.
  // See ArchivePtrMarker::mark_pointer().
  MetaspaceShared::misc_code_space_alloc(16);

  size_t symbol_rs_size = LP64_ONLY(3 * G) NOT_LP64(128 * M);
  _symbol_rs = ReservedSpace(symbol_rs_size);
  if (!_symbol_rs.is_reserved()) {
    vm_exit_during_initialization("Unable to reserve memory for symbols",
                                  err_msg(SIZE_FORMAT " bytes.", symbol_rs_size));
  }
  _symbol_region.init(&_symbol_rs, &_symbol_vs);
}

// Called by universe_post_init()
void MetaspaceShared::post_initialize(TRAPS) {
  if (UseSharedSpaces) {
    int size = FileMapInfo::get_number_of_shared_paths();
    if (size > 0) {
      SystemDictionaryShared::allocate_shared_data_arrays(size, THREAD);
      if (!DynamicDumpSharedSpaces) {
        FileMapInfo* info;
        if (FileMapInfo::dynamic_info() == NULL) {
          info = FileMapInfo::current_info();
        } else {
          info = FileMapInfo::dynamic_info();
        }
        ClassLoaderExt::init_paths_start_index(info->app_class_paths_start_index());
        ClassLoaderExt::init_app_module_paths_start_index(info->app_module_paths_start_index());
      }
    }
  }
}

static GrowableArrayCHeap<Handle, mtClassShared>* _extra_interned_strings = NULL;

void MetaspaceShared::read_extra_data(const char* filename, TRAPS) {
  _extra_interned_strings = new GrowableArrayCHeap<Handle, mtClassShared>(10000);

  HashtableTextDump reader(filename);
  reader.check_version("VERSION: 1.0");

  while (reader.remain() > 0) {
    int utf8_length;
    int prefix_type = reader.scan_prefix(&utf8_length);
    ResourceMark rm(THREAD);
    if (utf8_length == 0x7fffffff) {
      // buf_len would overflow a 32-bit value.
      vm_exit_during_initialization(err_msg("string length too large: %d", utf8_length));
    }
    int buf_len = utf8_length+1;
    char* utf8_buffer = NEW_RESOURCE_ARRAY(char, buf_len);
    reader.get_utf8(utf8_buffer, utf8_length);
    utf8_buffer[utf8_length] = '\0';

    if (prefix_type == HashtableTextDump::SymbolPrefix) {
      SymbolTable::new_permanent_symbol(utf8_buffer);
    } else {
      assert(prefix_type == HashtableTextDump::StringPrefix, "Sanity");
      oop s = StringTable::intern(utf8_buffer, THREAD);

      if (HAS_PENDING_EXCEPTION) {
        log_warning(cds, heap)("[line %d] extra interned string allocation failed; size too large: %d",
                               reader.last_line_no(), utf8_length);
        CLEAR_PENDING_EXCEPTION;
      } else {
#if INCLUDE_G1GC
        if (UseG1GC) {
          typeArrayOop body = java_lang_String::value(s);
          const HeapRegion* hr = G1CollectedHeap::heap()->heap_region_containing(body);
          if (hr->is_humongous()) {
            // Don't keep it alive, so it will be GC'ed before we dump the strings, in order
            // to maximize free heap space and minimize fragmentation.
            log_warning(cds, heap)("[line %d] extra interned string ignored; size too large: %d",
                                   reader.last_line_no(), utf8_length);
            continue;
          }
        }
#endif
        // Interned strings are GC'ed if there are no references to them, so let's
        // add a reference to keep this string alive.
        assert(s != NULL, "must succeed");
        Handle h(THREAD, s);
        _extra_interned_strings->append(h);
      }
    }
  }
}

void MetaspaceShared::commit_to(ReservedSpace* rs, VirtualSpace* vs, char* newtop) {
  Arguments::assert_is_dumping_archive();
  char* base = rs->base();
  size_t need_committed_size = newtop - base;
  size_t has_committed_size = vs->committed_size();
  if (need_committed_size < has_committed_size) {
    return;
  }

  size_t min_bytes = need_committed_size - has_committed_size;
  size_t preferred_bytes = 1 * M;
  size_t uncommitted = vs->reserved_size() - has_committed_size;

  size_t commit = MAX2(min_bytes, preferred_bytes);
  commit = MIN2(commit, uncommitted);
  assert(commit <= uncommitted, "sanity");

  bool result = vs->expand_by(commit, false);
  if (rs == &_shared_rs) {
    ArchivePtrMarker::expand_ptr_end((address*)vs->high());
  }

  if (!result) {
    vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
                                          need_committed_size));
  }

  assert(rs == &_shared_rs || rs == &_symbol_rs, "must be");
  const char* which = (rs == &_shared_rs) ? "shared" : "symbol";
  log_debug(cds)("Expanding %s spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
                 which, commit, vs->actual_committed_size(), vs->high());
}

void MetaspaceShared::initialize_ptr_marker(CHeapBitMap* ptrmap) {
  ArchivePtrMarker::initialize(ptrmap, (address*)_shared_vs.low(), (address*)_shared_vs.high());
}

// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.

void MetaspaceShared::serialize(SerializeClosure* soc) {
  int tag = 0;
  soc->do_tag(--tag);

  // Verify the sizes of various metadata in the system.
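  // (Sketch of the mechanism, added for orientation and summarized rather
  // than quoted from the reader side: at dump time the WriteClosure defined
  // later in this file appends each tag value to the ro region; at run time
  // the matching read closure compares every tag against the value computed
  // by the current VM, so an archive whose metadata layout disagrees with
  // this build -- e.g. a different sizeof(Method) -- is rejected up front
  // instead of being misparsed.)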
  soc->do_tag(sizeof(Method));
  soc->do_tag(sizeof(ConstMethod));
  soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(ConstantPool));
  soc->do_tag(sizeof(ConstantPoolCache));
  soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
  soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(Symbol));

  // Dump/restore miscellaneous metadata.
  JavaClasses::serialize_offsets(soc);
  Universe::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore references to commonly used names and signatures.
  vmSymbols::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore the symbol/string/subgraph_info tables
  SymbolTable::serialize_shared_table_header(soc);
  StringTable::serialize_shared_table_header(soc);
  HeapShared::serialize_subgraph_info_table_header(soc);
  SystemDictionaryShared::serialize_dictionary_headers(soc);

  InstanceMirrorKlass::serialize_offsets(soc);

  // Dump/restore well known classes (pointers)
  SystemDictionaryShared::serialize_well_known_klasses(soc);
  soc->do_tag(--tag);

  serialize_cloned_cpp_vtptrs(soc);
  soc->do_tag(--tag);

  CDS_JAVA_HEAP_ONLY(ClassLoaderData::serialize(soc));

  soc->do_tag(666);
}

address MetaspaceShared::i2i_entry_code_buffers(size_t total_size) {
  if (DumpSharedSpaces) {
    if (_i2i_entry_code_buffers == NULL) {
      _i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
      _i2i_entry_code_buffers_size = total_size;
    }
  } else if (UseSharedSpaces) {
    assert(_i2i_entry_code_buffers != NULL, "must already be initialized");
  } else {
    return NULL;
  }

  assert(_i2i_entry_code_buffers_size == total_size, "must not change");
  return _i2i_entry_code_buffers;
}

uintx MetaspaceShared::object_delta_uintx(void* obj) {
  Arguments::assert_is_dumping_archive();
  if (DumpSharedSpaces) {
    assert(shared_rs()->contains(obj), "must be");
  } else {
    assert(is_in_shared_metaspace(obj) || DynamicArchive::is_in_target_space(obj), "must be");
  }
  address base_address = address(SharedBaseAddress);
  uintx deltax = address(obj) - base_address;
  return deltax;
}

// Global object for holding classes that have been loaded.  Since this
// is run at a safepoint just before exit, this is the entire set of classes.
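// (Descriptive note added for orientation; the flow is inferred from the code
// in this file: CollectClassesClosure below fills this array, pulling in array
// classes recursively via collect_array_classes(); global_klass_compare() is
// presumably used to sort the list by class name for deterministic archive
// contents; and ArchiveCompactor::iterate_roots() finally pushes every
// collected Klass so it gets copied into the archive.)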
static GrowableArray<Klass*>* _global_klass_objects;

static int global_klass_compare(Klass** a, Klass** b) {
  return a[0]->name()->fast_compare(b[0]->name());
}

GrowableArray<Klass*>* MetaspaceShared::collected_klasses() {
  return _global_klass_objects;
}

static void collect_array_classes(Klass* k) {
  _global_klass_objects->append_if_missing(k);
  if (k->is_array_klass()) {
    // Add in the array classes too
    ArrayKlass* ak = ArrayKlass::cast(k);
    Klass* h = ak->higher_dimension();
    if (h != NULL) {
      h->array_klasses_do(collect_array_classes);
    }
  }
}

class CollectClassesClosure : public KlassClosure {
  void do_klass(Klass* k) {
    if (k->is_instance_klass() &&
        SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(k))) {
      // Don't add to the _global_klass_objects
    } else {
      _global_klass_objects->append_if_missing(k);
    }
    if (k->is_array_klass()) {
      // Add in the array classes too
      ArrayKlass* ak = ArrayKlass::cast(k);
      Klass* h = ak->higher_dimension();
      if (h != NULL) {
        h->array_klasses_do(collect_array_classes);
      }
    }
  }
};

// Global array for holding symbols created during class loading. See SymbolTable::new_symbol.
static GrowableArray<Symbol*>* _global_symbol_objects = NULL;

static int compare_symbols_by_address(Symbol** a, Symbol** b) {
  if (a[0] < b[0]) {
    return -1;
  } else if (a[0] == b[0]) {
    ResourceMark rm;
    log_warning(cds)("Duplicated symbol %s unexpected", (*a)->as_C_string());
    return 0;
  } else {
    return 1;
  }
}

void MetaspaceShared::add_symbol(Symbol* sym) {
  MutexLocker ml(CDSAddSymbol_lock, Mutex::_no_safepoint_check_flag);
  if (_global_symbol_objects == NULL) {
    _global_symbol_objects = new (ResourceObj::C_HEAP, mtSymbol) GrowableArray<Symbol*>(2048, mtSymbol);
  }
  _global_symbol_objects->append(sym);
}

GrowableArray<Symbol*>* MetaspaceShared::collected_symbols() {
  return _global_symbol_objects;
}

static void remove_unshareable_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
      // on their array classes.
      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
      k->remove_unshareable_info();
    }
  }
}

static void remove_java_mirror_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
      // on their array classes.
      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
      k->remove_java_mirror();
    }
  }
}

static void clear_basic_type_mirrors() {
  assert(!HeapShared::is_heap_object_archiving_allowed(), "Sanity");
  Universe::set_int_mirror(NULL);
  Universe::set_float_mirror(NULL);
  Universe::set_double_mirror(NULL);
  Universe::set_byte_mirror(NULL);
  Universe::set_bool_mirror(NULL);
  Universe::set_char_mirror(NULL);
  Universe::set_long_mirror(NULL);
  Universe::set_short_mirror(NULL);
  Universe::set_void_mirror(NULL);
}

static void rewrite_nofast_bytecode(const methodHandle& method) {
  BytecodeStream bcs(method);
  while (!bcs.is_last_bytecode()) {
    Bytecodes::Code opcode = bcs.next();
    switch (opcode) {
    case Bytecodes::_getfield: *bcs.bcp() = Bytecodes::_nofast_getfield; break;
    case Bytecodes::_putfield: *bcs.bcp() = Bytecodes::_nofast_putfield; break;
    case Bytecodes::_aload_0:  *bcs.bcp() = Bytecodes::_nofast_aload_0;  break;
    case Bytecodes::_iload: {
      if (!bcs.is_wide()) {
        *bcs.bcp() = Bytecodes::_nofast_iload;
      }
      break;
    }
    default: break;
    }
  }
}

// Walk all methods in the class list to ensure that they won't be modified at
// run time. This includes:
// [1] Rewrite all bytecodes as needed, so that the ConstMethod* will not be modified
//     at run time by RewriteBytecodes/RewriteFrequentPairs
// [2] Assign a fingerprint, so one doesn't need to be assigned at run-time.
static void rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread) {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(thread, ik);
    }
  }
}

void MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread, InstanceKlass* ik) {
  for (int i = 0; i < ik->methods()->length(); i++) {
    methodHandle m(thread, ik->methods()->at(i));
    rewrite_nofast_bytecode(m);
    Fingerprinter fp(m);
    // The side effect of this call sets method's fingerprint field.
    fp.fingerprint();
  }
}

// Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
// (In GCC this is the field <Type>::_vptr, i.e., first word in the object.)
//
// Addresses of the vtables and the methods may be different across JVM runs,
// if libjvm.so is dynamically loaded at a different base address.
//
// To ensure that the Metadata objects in the CDS archive always have the correct vtable:
//
// + at dump time:  we redirect the _vptr to point to our own vtables inside
//                  the CDS image
// + at run time:   we clone the actual contents of the vtables from libjvm.so
//                  into our own tables.

// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
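// (Illustrative note: CPP_VTABLE_PATCH_TYPES_DO below is an X-macro. For
// example, CPP_VTABLE_PATCH_TYPES_DO(DECLARE_CLONED_VTABLE_KIND) expands to
//     ConstantPool_Kind, InstanceKlass_Kind, ..., TypeArrayKlass_Kind,
// and CPP_VTABLE_PATCH_TYPES_DO(ALLOC_CPP_VTABLE_CLONE) expands to one
//     _cloned_cpp_vtptrs[c_Kind] = CppVtableCloner<c>::allocate("c");
// statement per type. Adding a new Metadata subtype with a vtable therefore
// requires adding an f(...) line here, as the fatal() check in
// get_archived_cpp_vtable() below reminds you.)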
#define CPP_VTABLE_PATCH_TYPES_DO(f) \
  f(ConstantPool) \
  f(InstanceKlass) \
  f(InstanceClassLoaderKlass) \
  f(InstanceMirrorKlass) \
  f(InstanceRefKlass) \
  f(Method) \
  f(ObjArrayKlass) \
  f(TypeArrayKlass)

class CppVtableInfo {
  intptr_t _vtable_size;
  intptr_t _cloned_vtable[1];
public:
  static int num_slots(int vtable_size) {
    return 1 + vtable_size; // Need to add the space occupied by _vtable_size;
  }
  int vtable_size()           { return int(uintx(_vtable_size)); }
  void set_vtable_size(int n) { _vtable_size = intptr_t(n); }
  intptr_t* cloned_vtable()   { return &_cloned_vtable[0]; }
  void zero()                 { memset(_cloned_vtable, 0, sizeof(intptr_t) * vtable_size()); }
  // Returns the address of the next CppVtableInfo that can be placed immediately after this CppVtableInfo
  static size_t byte_size(int vtable_size) {
    CppVtableInfo i;
    return pointer_delta(&i._cloned_vtable[vtable_size], &i, sizeof(u1));
  }
};

static inline intptr_t* vtable_of(Metadata* m) {
  return *((intptr_t**)m);
}

template <class T> class CppVtableCloner : public T {
  static CppVtableInfo* _info;

  static int get_vtable_length(const char* name);

public:
  // Allocate and initialize the C++ vtable, starting from top, but do not go past end.
  static intptr_t* allocate(const char* name);

  // Clone the vtable to ...
  static intptr_t* clone_vtable(const char* name, CppVtableInfo* info);

  static void zero_vtable_clone() {
    assert(DumpSharedSpaces, "dump-time only");
    _info->zero();
  }

  static bool is_valid_shared_object(const T* obj) {
    intptr_t* vptr = *(intptr_t**)obj;
    return vptr == _info->cloned_vtable();
  }

  static void init_orig_cpp_vtptr(int kind);
};

template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;

template <class T>
intptr_t* CppVtableCloner<T>::allocate(const char* name) {
  assert(is_aligned(_mc_region.top(), sizeof(intptr_t)), "bad alignment");
  int n = get_vtable_length(name);
  _info = (CppVtableInfo*)_mc_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
  _info->set_vtable_size(n);

  intptr_t* p = clone_vtable(name, _info);
  assert((char*)p == _mc_region.top(), "must be");

  return _info->cloned_vtable();
}

template <class T>
intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) {
  if (!DumpSharedSpaces) {
    assert(_info == 0, "_info is initialized only at dump time");
    _info = info; // Remember it -- it will be used by MetaspaceShared::is_valid_shared_method()
  }
  T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
  int n = info->vtable_size();
  intptr_t* srcvtable = vtable_of(&tmp);
  intptr_t* dstvtable = info->cloned_vtable();

  // We already checked (and, if necessary, adjusted n) when the vtables were allocated, so we are
  // safe to do memcpy.
  log_debug(cds, vtables)("Copying %3d vtable entries for %s", n, name);
  memcpy(dstvtable, srcvtable, sizeof(intptr_t) * n);
  return dstvtable + n;
}

// To determine the size of the vtable for each type, we use the following
// trick by declaring 2 subclasses:
//
//   class CppVtableTesterA: public InstanceKlass {virtual int   last_virtual_method() {return 1;}    };
//   class CppVtableTesterB: public InstanceKlass {virtual void* last_virtual_method() {return NULL;} };
//
// CppVtableTesterA and CppVtableTesterB's vtables have the following properties:
// - Their size (N+1) is exactly one more than the size of InstanceKlass's vtable (N)
// - The first N entries are exactly the same as in InstanceKlass's vtable.
// - Their last entry is different.
//
// So to determine the value of N, we just walk CppVtableTesterA and CppVtableTesterB's tables
// and find the first entry that's different.
//
// This works on all C++ compilers supported by Oracle, but you may need to tweak it for more
// esoteric compilers.

template <class T> class CppVtableTesterB: public T {
public:
  virtual int last_virtual_method() {return 1;}
};

template <class T> class CppVtableTesterA : public T {
public:
  virtual void* last_virtual_method() {
    // Make this different than CppVtableTesterB::last_virtual_method so the C++
    // compiler/linker won't alias the two functions.
    return NULL;
  }
};

template <class T>
int CppVtableCloner<T>::get_vtable_length(const char* name) {
  CppVtableTesterA<T> a;
  CppVtableTesterB<T> b;

  intptr_t* avtable = vtable_of(&a);
  intptr_t* bvtable = vtable_of(&b);

  // Start at slot 1, because slot 0 may be RTTI (on Solaris/Sparc)
  int vtable_len = 1;
  for (; ; vtable_len++) {
    if (avtable[vtable_len] != bvtable[vtable_len]) {
      break;
    }
  }
  log_debug(cds, vtables)("Found   %3d vtable entries for %s", vtable_len, name);

  return vtable_len;
}

#define ALLOC_CPP_VTABLE_CLONE(c) \
  _cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner<c>::allocate(#c); \
  ArchivePtrMarker::mark_pointer(&_cloned_cpp_vtptrs[c##_Kind]);

#define CLONE_CPP_VTABLE(c) \
  p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);

#define ZERO_CPP_VTABLE(c) \
  CppVtableCloner<c>::zero_vtable_clone();

#define INIT_ORIG_CPP_VTPTRS(c) \
  CppVtableCloner<c>::init_orig_cpp_vtptr(c##_Kind);

#define DECLARE_CLONED_VTABLE_KIND(c) c ## _Kind,

enum ClonedVtableKind {
  // E.g., ConstantPool_Kind == 0, InstanceKlass_Kind == 1, etc.
  CPP_VTABLE_PATCH_TYPES_DO(DECLARE_CLONED_VTABLE_KIND)
  _num_cloned_vtable_kinds
};

// This is a map of all the original vtptrs. E.g., for
//     ConstantPool *cp = new (...) ConstantPool(...); // a dynamically allocated constant pool
// the following holds true:
//     _orig_cpp_vtptrs[ConstantPool_Kind] == ((intptr_t**)cp)[0]
static intptr_t* _orig_cpp_vtptrs[_num_cloned_vtable_kinds];
static bool _orig_cpp_vtptrs_inited = false;

template <class T>
void CppVtableCloner<T>::init_orig_cpp_vtptr(int kind) {
  assert(kind < _num_cloned_vtable_kinds, "sanity");
  T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
  intptr_t* srcvtable = vtable_of(&tmp);
  _orig_cpp_vtptrs[kind] = srcvtable;
}

// This is the index of all the cloned vtables.
// E.g., for
//     ConstantPool* cp = ....;  // an archived constant pool
//     InstanceKlass* ik = ....; // an archived class
// the following holds true:
//     _cloned_cpp_vtptrs[ConstantPool_Kind]  == ((intptr_t**)cp)[0]
//     _cloned_cpp_vtptrs[InstanceKlass_Kind] == ((intptr_t**)ik)[0]
static intptr_t** _cloned_cpp_vtptrs = NULL;

void MetaspaceShared::allocate_cloned_cpp_vtptrs() {
  assert(DumpSharedSpaces, "must");
  size_t vtptrs_bytes = _num_cloned_vtable_kinds * sizeof(intptr_t*);
  _cloned_cpp_vtptrs = (intptr_t**)_mc_region.allocate(vtptrs_bytes, sizeof(intptr_t*));
}

void MetaspaceShared::serialize_cloned_cpp_vtptrs(SerializeClosure* soc) {
  soc->do_ptr((void**)&_cloned_cpp_vtptrs);
}

intptr_t* MetaspaceShared::get_archived_cpp_vtable(MetaspaceObj::Type msotype, address obj) {
  if (!_orig_cpp_vtptrs_inited) {
    CPP_VTABLE_PATCH_TYPES_DO(INIT_ORIG_CPP_VTPTRS);
    _orig_cpp_vtptrs_inited = true;
  }

  Arguments::assert_is_dumping_archive();
  int kind = -1;
  switch (msotype) {
  case MetaspaceObj::SymbolType:
  case MetaspaceObj::TypeArrayU1Type:
  case MetaspaceObj::TypeArrayU2Type:
  case MetaspaceObj::TypeArrayU4Type:
  case MetaspaceObj::TypeArrayU8Type:
  case MetaspaceObj::TypeArrayOtherType:
  case MetaspaceObj::ConstMethodType:
  case MetaspaceObj::ConstantPoolCacheType:
  case MetaspaceObj::AnnotationsType:
  case MetaspaceObj::MethodCountersType:
  case MetaspaceObj::RecordComponentType:
    // These have no vtables.
    break;
  case MetaspaceObj::MethodDataType:
    // We don't archive MethodData <-- should have been removed in remove_unshareable_info
    ShouldNotReachHere();
    break;
  default:
    for (kind = 0; kind < _num_cloned_vtable_kinds; kind ++) {
      if (vtable_of((Metadata*)obj) == _orig_cpp_vtptrs[kind]) {
        break;
      }
    }
    if (kind >= _num_cloned_vtable_kinds) {
      fatal("Cannot find C++ vtable for " INTPTR_FORMAT " -- you probably added"
            " a new subtype of Klass or MetaData without updating CPP_VTABLE_PATCH_TYPES_DO",
            p2i(obj));
    }
  }

  if (kind >= 0) {
    assert(kind < _num_cloned_vtable_kinds, "must be");
    return _cloned_cpp_vtptrs[kind];
  } else {
    return NULL;
  }
}

// This can be called at both dump time and run time:
// - clone the contents of the c++ vtables into the space
//   allocated by allocate_cpp_vtable_clones()
void MetaspaceShared::clone_cpp_vtables(intptr_t* p) {
  assert(DumpSharedSpaces || UseSharedSpaces, "sanity");
  CPP_VTABLE_PATCH_TYPES_DO(CLONE_CPP_VTABLE);
}

void MetaspaceShared::zero_cpp_vtable_clones_for_writing() {
  assert(DumpSharedSpaces, "dump-time only");
  CPP_VTABLE_PATCH_TYPES_DO(ZERO_CPP_VTABLE);
}

// Allocate and initialize the C++ vtables, starting from top, but do not go past end.
char* MetaspaceShared::allocate_cpp_vtable_clones() {
  char* cloned_vtables = _mc_region.top(); // This is the beginning of all the cloned vtables

  assert(DumpSharedSpaces, "dump-time only");
  // Layout (each slot is an intptr_t):
  //   [number of slots in the first vtable = n1]
  //   [ <n1> slots for the first vtable]
  //   [number of slots in the second vtable = n2]
  //   [ <n2> slots for the second vtable]
  //   ...
  // The order of the vtables is the same as the CPP_VTABLE_PATCH_TYPES_DO macro.
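  // (Orientation note: at run time, clone_cpp_vtables(p) above walks this
  // same layout -- each CLONE_CPP_VTABLE expansion consumes one
  // [n][n slots] record and leaves p pointing at the next record, because
  // clone_vtable() returns dstvtable + n. The dump-time expansion below must
  // therefore emit the records in exactly the same
  // CPP_VTABLE_PATCH_TYPES_DO order.)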
  CPP_VTABLE_PATCH_TYPES_DO(ALLOC_CPP_VTABLE_CLONE);

  return cloned_vtables;
}

bool MetaspaceShared::is_valid_shared_method(const Method* m) {
  assert(is_in_shared_metaspace(m), "must be");
  return CppVtableCloner<Method>::is_valid_shared_object(m);
}

void WriteClosure::do_oop(oop* o) {
  if (*o == NULL) {
    _dump_region->append_intptr_t(0);
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archiving heap object is not allowed");
    _dump_region->append_intptr_t(
      (intptr_t)CompressedOops::encode_not_null(*o));
  }
}

void WriteClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    _dump_region->append_intptr_t(*(intptr_t*)start, true);
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}

// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public ResourceObj {
public:

  // Here's poor man's enum inheritance
#define SHAREDSPACE_OBJ_TYPES_DO(f) \
  METASPACE_OBJ_TYPES_DO(f) \
  f(SymbolHashentry) \
  f(SymbolBucket) \
  f(StringHashentry) \
  f(StringBucket) \
  f(ModulesNatives) \
  f(Other)

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  static const char* type_name(Type type) {
    switch(type) {
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
    default:
      ShouldNotReachHere();
      return NULL;
    }
  }

public:
  enum { RO = 0, RW = 1 };

  int _counts[2][_number_of_types];
  int _bytes [2][_number_of_types];

  DumpAllocStats() {
    memset(_counts, 0, sizeof(_counts));
    memset(_bytes, 0, sizeof(_bytes));
  };

  void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
    int which = (read_only) ? RO : RW;
    _counts[which][type] ++;
    _bytes [which][type] += byte_size;
  }

  void record_modules(int byte_size, bool read_only) {
    int which = (read_only) ? RO : RW;
    _bytes [which][ModulesNativesType] += byte_size;
  }

  void record_other_type(int byte_size, bool read_only) {
    int which = (read_only) ? RO : RW;
    _bytes [which][OtherType] += byte_size;
  }
  void print_stats(int ro_all, int rw_all, int mc_all);
};

void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all) {
  // Calculate size of data that was not allocated by Metaspace::allocate()
  MetaspaceSharedStats *stats = MetaspaceShared::stats();

  // symbols
  _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
  _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;

  _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
  _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;

  // strings
  _counts[RO][StringHashentryType] = stats->string.hashentry_count;
  _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;

  _counts[RO][StringBucketType] = stats->string.bucket_count;
  _bytes [RO][StringBucketType] = stats->string.bucket_bytes;

  // TODO: count things like dictionary, vtable, etc
  _bytes[RW][OtherType] += mc_all;
  rw_all += mc_all; // mc is mapped Read/Write

  // prevent divide-by-zero
  if (ro_all < 1) {
    ro_all = 1;
  }
  if (rw_all < 1) {
    rw_all = 1;
  }

  int all_ro_count = 0;
  int all_ro_bytes = 0;
  int all_rw_count = 0;
  int all_rw_bytes = 0;

// To make fmt_stats be a syntactic constant (for format warnings), use #define.
#define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
  const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
  const char *hdr = "                        ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";

  LogMessage(cds) msg;

  msg.debug("Detailed metadata info (excluding st regions; rw stats include mc regions):");
  msg.debug("%s", hdr);
  msg.debug("%s", sep);
  for (int type = 0; type < int(_number_of_types); type ++) {
    const char *name = type_name((Type)type);
    int ro_count = _counts[RO][type];
    int ro_bytes = _bytes [RO][type];
    int rw_count = _counts[RW][type];
    int rw_bytes = _bytes [RW][type];
    int count = ro_count + rw_count;
    int bytes = ro_bytes + rw_bytes;

    double ro_perc = percent_of(ro_bytes, ro_all);
    double rw_perc = percent_of(rw_bytes, rw_all);
    double perc    = percent_of(bytes, ro_all + rw_all);

    msg.debug(fmt_stats, name,
              ro_count, ro_bytes, ro_perc,
              rw_count, rw_bytes, rw_perc,
              count, bytes, perc);

    all_ro_count += ro_count;
    all_ro_bytes += ro_bytes;
    all_rw_count += rw_count;
    all_rw_bytes += rw_bytes;
  }

  int all_count = all_ro_count + all_rw_count;
  int all_bytes = all_ro_bytes + all_rw_bytes;

  double all_ro_perc = percent_of(all_ro_bytes, ro_all);
  double all_rw_perc = percent_of(all_rw_bytes, rw_all);
  double all_perc    = percent_of(all_bytes, ro_all + rw_all);

  msg.debug("%s", sep);
  msg.debug(fmt_stats, "Total",
            all_ro_count, all_ro_bytes, all_ro_perc,
            all_rw_count, all_rw_bytes, all_rw_perc,
            all_count, all_bytes, all_perc);

  assert(all_ro_bytes == ro_all, "everything should have been counted");
  assert(all_rw_bytes == rw_all, "everything should have been counted");

#undef fmt_stats
}

// Populate the shared space.
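// (Orientation sketch, pieced together from the declarations below rather
// than quoted from code elsewhere: the dump is driven by executing this VM
// operation at a safepoint. Its doit() -- defined further down in this
// file -- is expected to collect the loaded classes, run
// ArchiveCompactor::copy_and_compact(), write the read-only tables via
// dump_read_only_tables(), patch pointers via
// relocate_to_requested_base_address(), and emit the region/bitmap/heap
// statistics printed by the print_*_stats() helpers.)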

class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  GrowableArray<MemRegion> *_closed_archive_heap_regions;
  GrowableArray<MemRegion> *_open_archive_heap_regions;

  GrowableArray<ArchiveHeapOopmapInfo> *_closed_archive_heap_oopmaps;
  GrowableArray<ArchiveHeapOopmapInfo> *_open_archive_heap_oopmaps;

  void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                 GrowableArray<ArchiveHeapOopmapInfo>* oopmaps);
  void dump_symbols();
  char* dump_read_only_tables();
  void print_class_stats();
  void print_region_stats(FileMapInfo* map_info);
  void print_bitmap_region_stats(size_t size, size_t total_size);
  void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                               const char *name, size_t total_size);
  void relocate_to_requested_base_address(CHeapBitMap* ptrmap);

public:

  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit();   // outline because gdb sucks
  bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace

// ArchiveCompactor --
//
// This class is the central piece of shared archive compaction -- all metaspace data are
// initially allocated outside of the shared regions. ArchiveCompactor copies the
// metaspace data into their final location in the shared regions.

class ArchiveCompactor : AllStatic {
  static const int INITIAL_TABLE_SIZE = 8087;
  static const int MAX_TABLE_SIZE     = 1000000;

  static DumpAllocStats* _alloc_stats;

  typedef KVHashtable<address, address, mtInternal> RelocationTable;
  static RelocationTable* _new_loc_table;

public:
  static void initialize() {
    _alloc_stats = new (ResourceObj::C_HEAP, mtInternal) DumpAllocStats;
    _new_loc_table = new RelocationTable(INITIAL_TABLE_SIZE);
  }
  static DumpAllocStats* alloc_stats() {
    return _alloc_stats;
  }

  // Use this when you allocate space with MetaspaceShared::read_only_space_alloc()
  // outside of ArchiveCompactor::allocate(). These are usually for misc tables
  // that are allocated in the RO space.
  class OtherROAllocMark {
    char* _oldtop;
  public:
    OtherROAllocMark() {
      _oldtop = _ro_region.top();
    }
    ~OtherROAllocMark() {
      char* newtop = _ro_region.top();
      ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
    }
  };

  static void allocate(MetaspaceClosure::Ref* ref, bool read_only) {
    address obj = ref->obj();
    int bytes = ref->size() * BytesPerWord;
    char* p;
    size_t alignment = BytesPerWord;
    char* oldtop;
    char* newtop;

    if (read_only) {
      oldtop = _ro_region.top();
      p = _ro_region.allocate(bytes, alignment);
      newtop = _ro_region.top();
    } else {
      oldtop = _rw_region.top();
      if (ref->msotype() == MetaspaceObj::ClassType) {
        // Save a pointer immediate in front of an InstanceKlass, so
        // we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo*
        // without building another hashtable. See RunTimeSharedClassInfo::get_for()
        // in systemDictionaryShared.cpp.
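        // (Layout sketch of this trick, added for clarity and inferred from
        // the two allocate() calls below: the extra word is carved out
        // immediately before the InstanceKlass copy,
        //     [RunTimeSharedClassInfo* slot][InstanceKlass body ...]
        //                                   ^-- "p" returned below
        // so the runtime lookup can find the slot at a fixed negative offset
        // from an archived InstanceKlass, without another hashtable.)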
        Klass* klass = (Klass*)obj;
        if (klass->is_instance_klass()) {
          SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
          _rw_region.allocate(sizeof(address), BytesPerWord);
        }
      }
      p = _rw_region.allocate(bytes, alignment);
      newtop = _rw_region.top();
    }
    memcpy(p, obj, bytes);

    intptr_t* archived_vtable = MetaspaceShared::get_archived_cpp_vtable(ref->msotype(), (address)p);
    if (archived_vtable != NULL) {
      *(address*)p = (address)archived_vtable;
      ArchivePtrMarker::mark_pointer((address*)p);
    }

    assert(_new_loc_table->lookup(obj) == NULL, "each object can be relocated at most once");
    _new_loc_table->add(obj, (address)p);
    log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
    if (_new_loc_table->maybe_grow(MAX_TABLE_SIZE)) {
      log_info(cds, hashtables)("Expanded _new_loc_table to %d", _new_loc_table->table_size());
    }
    _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
  }

  static address get_new_loc(MetaspaceClosure::Ref* ref) {
    address* pp = _new_loc_table->lookup(ref->obj());
    assert(pp != NULL, "must be");
    return *pp;
  }

private:
  // Makes a shallow copy of visited MetaspaceObj's
  class ShallowCopier: public UniqueMetaspaceClosure {
    bool _read_only;
  public:
    ShallowCopier(bool read_only) : _read_only(read_only) {}

    virtual bool do_unique_ref(Ref* ref, bool read_only) {
      if (read_only == _read_only) {
        allocate(ref, read_only);
      }
      return true; // recurse into ref.obj()
    }
  };

  // Relocate embedded pointers within a MetaspaceObj's shallow copy
  class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
  public:
    virtual bool do_unique_ref(Ref* ref, bool read_only) {
      address new_loc = get_new_loc(ref);
      RefRelocator refer;
      ref->metaspace_pointers_do_at(&refer, new_loc);
      return true; // recurse into ref.obj()
    }
    virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) {
      assert(type == _method_entry_ref, "only special type allowed for now");
      address obj = ref->obj();
      address new_obj = get_new_loc(ref);
      size_t offset = pointer_delta(p, obj, sizeof(u1));
      intptr_t* new_p = (intptr_t*)(new_obj + offset);
      assert(*p == *new_p, "must be a copy");
      ArchivePtrMarker::mark_pointer((address*)new_p);
    }
  };

  // Relocate a reference to point to its shallow copy
  class RefRelocator: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        ref->update(get_new_loc(ref));
        ArchivePtrMarker::mark_pointer(ref->addr());
      }
      return false; // Do not recurse.
    }
  };

#ifdef ASSERT
  class IsRefInArchiveChecker: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        char* obj = (char*)ref->obj();
        assert(_ro_region.contains(obj) || _rw_region.contains(obj),
               "must be relocated to point to CDS archive");
      }
      return false; // Do not recurse.
    }
  };
#endif

public:
  static void copy_and_compact() {
    ResourceMark rm;

    log_info(cds)("Scanning all metaspace objects ... ");
    {
      // allocate and shallow-copy RW objects, immediately following the MC region
      log_info(cds)("Allocating RW objects ... ");
      _mc_region.pack(&_rw_region);

      ResourceMark rm;
      ShallowCopier rw_copier(false);
      iterate_roots(&rw_copier);

#if INCLUDE_CDS_JAVA_HEAP
      // Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
      char* start = _rw_region.top();
      ClassLoaderData::allocate_archived_tables();
      ArchiveCompactor::alloc_stats()->record_modules(_rw_region.top() - start, /*read_only*/false);
#endif
    }
    {
      // allocate and shallow-copy RO objects, immediately following the RW region
      log_info(cds)("Allocating RO objects ... ");
      _rw_region.pack(&_ro_region);

      ResourceMark rm;
      ShallowCopier ro_copier(true);
      iterate_roots(&ro_copier);
#if INCLUDE_CDS_JAVA_HEAP
      char* start = _ro_region.top();
      ClassLoaderData::init_archived_tables();
      ArchiveCompactor::alloc_stats()->record_modules(_ro_region.top() - start, /*read_only*/true);
#endif
    }
    {
      log_info(cds)("Relocating embedded pointers ... ");
      ResourceMark rm;
      ShallowCopyEmbeddedRefRelocator emb_reloc;
      iterate_roots(&emb_reloc);
    }
    {
      log_info(cds)("Relocating external roots ... ");
      ResourceMark rm;
      RefRelocator ext_reloc;
      iterate_roots(&ext_reloc);
    }
    {
      log_info(cds)("Fixing symbol identity hash ... ");
      os::init_random(0x12345678);
      GrowableArray<Symbol*>* all_symbols = MetaspaceShared::collected_symbols();
      all_symbols->sort(compare_symbols_by_address);
      for (int i = 0; i < all_symbols->length(); i++) {
        assert(all_symbols->at(i)->is_permanent(), "archived symbols must be permanent");
        all_symbols->at(i)->update_identity_hash();
      }
    }
#ifdef ASSERT
    {
      log_info(cds)("Verifying external roots ... ");
      ResourceMark rm;
      IsRefInArchiveChecker checker;
      iterate_roots(&checker);
    }
#endif
  }

  // We must relocate SystemDictionary::_well_known_klasses only after we have copied the
  // java objects in during dump_java_heap_objects(): during the object copy, we operate on
  // old objects which assert that their klass is the original klass.
  static void relocate_well_known_klasses() {
    {
      log_info(cds)("Relocating SystemDictionary::_well_known_klasses[] ... ");
      ResourceMark rm;
      RefRelocator ext_reloc;
      SystemDictionary::well_known_klasses_do(&ext_reloc);
    }
    // NOTE: after this point, we shouldn't have any globals that can reach the old
    // objects.

    // We cannot use any of the objects in the heap anymore (except for the
    // shared strings) because their headers no longer point to valid Klasses.
  }

  static void iterate_roots(MetaspaceClosure* it) {
    // To ensure deterministic contents in the archive, we just need to ensure that
    // we iterate the MetaspaceObjs in a deterministic order. It doesn't matter where
    // the MetaspaceObjs are located originally, as they are copied sequentially into
    // the archive during the iteration.
    //
    // The only issue here is that the symbol table and the system dictionaries may be
    // randomly ordered, so we copy the symbols and klasses into two arrays and sort
    // them deterministically.
    //
    // During -Xshare:dump, the order of Symbol creation is strictly determined by
    // the SharedClassListFile (class loading is done in a single thread and the JIT
    // is disabled).
Also, Symbols are allocated in monotonically increasing addresses 1520 // (see Symbol::operator new(size_t, int)). So if we iterate the Symbols by 1521 // ascending address order, we ensure that all Symbols are copied into deterministic 1522 // locations in the archive. 1523 GrowableArray<Symbol*>* symbols = _global_symbol_objects; 1524 for (int i = 0; i < symbols->length(); i++) { 1525 it->push(symbols->adr_at(i)); 1526 } 1527 if (_global_klass_objects != NULL) { 1528 // Need to fix up the pointers 1529 for (int i = 0; i < _global_klass_objects->length(); i++) { 1530 // NOTE -- this requires that the vtable is NOT yet patched, or else we are hosed. 1531 it->push(_global_klass_objects->adr_at(i)); 1532 } 1533 } 1534 FileMapInfo::metaspace_pointers_do(it, false); 1535 SystemDictionaryShared::dumptime_classes_do(it); 1536 Universe::metaspace_pointers_do(it); 1537 SymbolTable::metaspace_pointers_do(it); 1538 vmSymbols::metaspace_pointers_do(it); 1539 1540 it->finish(); 1541 } 1542 1543 static Klass* get_relocated_klass(Klass* orig_klass) { 1544 assert(DumpSharedSpaces, "dump time only"); 1545 address* pp = _new_loc_table->lookup((address)orig_klass); 1546 assert(pp != NULL, "must be"); 1547 Klass* klass = (Klass*)(*pp); 1548 assert(klass->is_klass(), "must be"); 1549 return klass; 1550 } 1551 1552 static Symbol* get_relocated_symbol(Symbol* orig_symbol) { 1553 assert(DumpSharedSpaces, "dump time only"); 1554 address* pp = _new_loc_table->lookup((address)orig_symbol); 1555 assert(pp != NULL, "must be"); 1556 return (Symbol*)(*pp); 1557 } 1558 }; 1559 1560 DumpAllocStats* ArchiveCompactor::_alloc_stats; 1561 ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table; 1562 1563 void VM_PopulateDumpSharedSpace::dump_symbols() { 1564 log_info(cds)("Dumping symbol table ..."); 1565 1566 NOT_PRODUCT(SymbolTable::verify()); 1567 SymbolTable::write_to_archive(); 1568 } 1569 1570 char* VM_PopulateDumpSharedSpace::dump_read_only_tables() { 1571 ArchiveCompactor::OtherROAllocMark mark; 1572 1573 log_info(cds)("Removing java_mirror ... "); 1574 if (!HeapShared::is_heap_object_archiving_allowed()) { 1575 clear_basic_type_mirrors(); 1576 } 1577 remove_java_mirror_in_classes(); 1578 log_info(cds)("done. "); 1579 1580 SystemDictionaryShared::write_to_archive(); 1581 1582 // Write the other data to the output array. 
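// Illustrative sketch (comments only, not part of the build; the Mini* names
// below are hypothetical, not HotSpot API): serialize() emits a flat stream of
// pointer-sized words through the WriteClosure, and the very same serialize()
// routine replays that stream at runtime through a ReadClosure (defined later
// in this file), so the write order and the read order cannot drift apart:
//
//   struct MiniWriteClosure { std::vector<intptr_t> buf;
//     void do_tag(int t)    { buf.push_back(t); }
//     void do_ptr(void** p) { buf.push_back((intptr_t)*p); } };
//   struct MiniReadClosure  { const intptr_t* cur;
//     void do_tag(int t)    { assert(*cur++ == t); }  // order mismatch fails fast
//     void do_ptr(void** p) { *p = (void*)*cur++; } };
//
// The do_tag() words act as checkpoints: any divergence between the dump-time
// and runtime call sequences trips an assert instead of silently corrupting data.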
1583 char* start = _ro_region.top(); 1584 WriteClosure wc(&_ro_region); 1585 MetaspaceShared::serialize(&wc); 1586 1587 // Write the bitmaps for patching the archive heap regions 1588 _closed_archive_heap_oopmaps = NULL; 1589 _open_archive_heap_oopmaps = NULL; 1590 dump_archive_heap_oopmaps(); 1591 1592 return start; 1593 } 1594 1595 void VM_PopulateDumpSharedSpace::print_class_stats() { 1596 log_info(cds)("Number of classes %d", _global_klass_objects->length()); 1597 { 1598 int num_type_array = 0, num_obj_array = 0, num_inst = 0; 1599 for (int i = 0; i < _global_klass_objects->length(); i++) { 1600 Klass* k = _global_klass_objects->at(i); 1601 if (k->is_instance_klass()) { 1602 num_inst ++; 1603 } else if (k->is_objArray_klass()) { 1604 num_obj_array ++; 1605 } else { 1606 assert(k->is_typeArray_klass(), "sanity"); 1607 num_type_array ++; 1608 } 1609 } 1610 log_info(cds)(" instance classes = %5d", num_inst); 1611 log_info(cds)(" obj array classes = %5d", num_obj_array); 1612 log_info(cds)(" type array classes = %5d", num_type_array); 1613 } 1614 } 1615 1616 void VM_PopulateDumpSharedSpace::relocate_to_requested_base_address(CHeapBitMap* ptrmap) { 1617 intx addr_delta = MetaspaceShared::final_delta(); 1618 if (addr_delta == 0) { 1619 ArchivePtrMarker::compact((address)SharedBaseAddress, (address)_ro_region.top()); 1620 } else { 1621 // We are not able to reserve space at MetaspaceShared::requested_base_address() (due to ASLR). 1622 // This means that the current content of the archive is based on a random 1623 // address. Let's relocate all the pointers, so that it can be mapped to 1624 // MetaspaceShared::requested_base_address() without runtime relocation. 1625 // 1626 // Note: both the base and dynamic archive are written with 1627 // FileMapHeader::_requested_base_address == MetaspaceShared::requested_base_address() 1628 1629 // Patch all pointers that are marked by ptrmap within this region, 1630 // where we have just dumped all the metaspace data. 1631 address patch_base = (address)SharedBaseAddress; 1632 address patch_end = (address)_ro_region.top(); 1633 size_t size = patch_end - patch_base; 1634 1635 // the current value of the pointers to be patched must be within this 1636 // range (i.e., must point to valid metaspace objects) 1637 address valid_old_base = patch_base; 1638 address valid_old_end = patch_end; 1639 1640 // after patching, the pointers must point inside this range 1641 // (the requested location of the archive, as mapped at runtime). 
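// Illustrative sketch (comments only, not part of the build): the patching
// done below by SharedDataRelocator is plain delta relocation. Every pointer
// slot that ArchivePtrMarker recorded in ptrmap currently holds an address in
// [valid_old_base, valid_old_end); adding addr_delta must land it in
// [valid_new_base, valid_new_end). Roughly, with one bitmap bit per
// pointer-sized slot of the dumped region:
//
//   for (size_t slot = 0; slot < num_slots; slot++) {
//     if (bitmap_at(slot)) {                  // slot was marked as a pointer
//       address* loc = (address*)patch_base + slot;
//       if (*loc != NULL) {                   // NULL stays NULL
//         *loc += addr_delta;
//       }
//     }
//   }
//
// SharedDataRelocator additionally range-checks each old and new value, and
// remembers the highest non-null slot so ArchivePtrMarker::compact() can trim
// the bitmap that gets written into the archive.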
1642 address valid_new_base = (address)MetaspaceShared::requested_base_address();
1643 address valid_new_end = valid_new_base + size;
1644
1645 log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT " ] to "
1646 "[" INTPTR_FORMAT " - " INTPTR_FORMAT " ]", p2i(patch_base), p2i(patch_end),
1647 p2i(valid_new_base), p2i(valid_new_end));
1648
1649 SharedDataRelocator<true> patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
1650 valid_new_base, valid_new_end, addr_delta, ptrmap);
1651 ptrmap->iterate(&patcher);
1652 ArchivePtrMarker::compact(patcher.max_non_null_offset());
1653 }
1654 }
1655
1656 void VM_PopulateDumpSharedSpace::doit() {
1657 HeapShared::run_full_gc_in_vm_thread();
1658 CHeapBitMap ptrmap;
1659 MetaspaceShared::initialize_ptr_marker(&ptrmap);
1660
1661 // We should no longer allocate anything from the metaspace, because:
1662 //
1663 // (1) Metaspace::allocate might trigger GC if we have run out of
1664 // committed metaspace, but we can't GC because we're running
1665 // in the VM thread.
1666 // (2) ArchiveCompactor needs to work with a stable set of MetaspaceObjs.
1667 Metaspace::freeze();
1668 DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);
1669
1670 Thread* THREAD = VMThread::vm_thread();
1671
1672 FileMapInfo::check_nonempty_dir_in_shared_path_table();
1673
1674 NOT_PRODUCT(SystemDictionary::verify();)
1675 // The following guarantee is meant to ensure that no loader constraints
1676 // exist yet, since the constraints table is not shared. This becomes
1677 // more important now that we don't re-initialize vtables/itables for
1678 // shared classes at runtime, where constraints were previously created.
1679 guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
1680 "loader constraints are not saved");
1681 guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
1682 "placeholders are not saved");
1683
1684 // At this point, many classes have been loaded.
1685 // Gather the SystemDictionary classes in a global array and operate on that
1686 // array from here on, so we don't have to walk the SystemDictionary again.
1687 SystemDictionaryShared::check_excluded_classes();
1688 _global_klass_objects = new GrowableArray<Klass*>(1000);
1689 CollectClassesClosure collect_classes;
1690 ClassLoaderDataGraph::loaded_classes_do(&collect_classes);
1691 _global_klass_objects->sort(global_klass_compare);
1692
1693 print_class_stats();
1694
1695 // Ensure the ConstMethods won't be modified at run-time
1696 log_info(cds)("Updating ConstMethods ... ");
1697 rewrite_nofast_bytecodes_and_calculate_fingerprints(THREAD);
1698 log_info(cds)("done. ");
1699
1700 // Remove all references outside the metadata
1701 log_info(cds)("Removing unshareable information ... ");
1702 remove_unshareable_in_classes();
1703 log_info(cds)("done. ");
1704
1705 MetaspaceShared::allocate_cloned_cpp_vtptrs();
1706 char* cloned_vtables = _mc_region.top();
1707 MetaspaceShared::allocate_cpp_vtable_clones();
1708
1709 ArchiveCompactor::initialize();
1710 ArchiveCompactor::copy_and_compact();
1711
1712 dump_symbols();
1713
1714 // Dump supported java heap objects
1715 _closed_archive_heap_regions = NULL;
1716 _open_archive_heap_regions = NULL;
1717 dump_java_heap_objects();
1718
1719 ArchiveCompactor::relocate_well_known_klasses();
1720
1721 char* serialized_data = dump_read_only_tables();
1722 _ro_region.pack();
1723
1724 // The vtable clones contain addresses of the current process.
1725 // We don't want to write these addresses into the archive. Same for i2i buffer. 1726 MetaspaceShared::zero_cpp_vtable_clones_for_writing(); 1727 memset(MetaspaceShared::i2i_entry_code_buffers(), 0, 1728 MetaspaceShared::i2i_entry_code_buffers_size()); 1729 1730 // relocate the data so that it can be mapped to MetaspaceShared::requested_base_address() 1731 // without runtime relocation. 1732 relocate_to_requested_base_address(&ptrmap); 1733 1734 // Create and write the archive file that maps the shared spaces. 1735 1736 FileMapInfo* mapinfo = new FileMapInfo(true); 1737 mapinfo->populate_header(os::vm_allocation_granularity()); 1738 mapinfo->set_serialized_data(serialized_data); 1739 mapinfo->set_cloned_vtables(cloned_vtables); 1740 mapinfo->set_i2i_entry_code_buffers(MetaspaceShared::i2i_entry_code_buffers(), 1741 MetaspaceShared::i2i_entry_code_buffers_size()); 1742 mapinfo->open_for_write(); 1743 MetaspaceShared::write_core_archive_regions(mapinfo, _closed_archive_heap_oopmaps, _open_archive_heap_oopmaps); 1744 _total_closed_archive_region_size = mapinfo->write_archive_heap_regions( 1745 _closed_archive_heap_regions, 1746 _closed_archive_heap_oopmaps, 1747 MetaspaceShared::first_closed_archive_heap_region, 1748 MetaspaceShared::max_closed_archive_heap_region); 1749 _total_open_archive_region_size = mapinfo->write_archive_heap_regions( 1750 _open_archive_heap_regions, 1751 _open_archive_heap_oopmaps, 1752 MetaspaceShared::first_open_archive_heap_region, 1753 MetaspaceShared::max_open_archive_heap_region); 1754 1755 mapinfo->set_final_requested_base((char*)MetaspaceShared::requested_base_address()); 1756 mapinfo->set_header_crc(mapinfo->compute_header_crc()); 1757 mapinfo->write_header(); 1758 print_region_stats(mapinfo); 1759 mapinfo->close(); 1760 1761 if (log_is_enabled(Info, cds)) { 1762 ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()), 1763 int(_mc_region.used())); 1764 } 1765 1766 if (PrintSystemDictionaryAtExit) { 1767 SystemDictionary::print(); 1768 } 1769 1770 if (AllowArchivingWithJavaAgent) { 1771 warning("This archive was created with AllowArchivingWithJavaAgent. It should be used " 1772 "for testing purposes only and should not be used in a production environment"); 1773 } 1774 1775 // There may be other pending VM operations that operate on the InstanceKlasses, 1776 // which will fail because InstanceKlasses::remove_unshareable_info() 1777 // has been called. Forget these operations and exit the VM directly. 
1778 vm_direct_exit(0);
1779 }
1780
1781 void VM_PopulateDumpSharedSpace::print_region_stats(FileMapInfo *map_info) {
1782 // Print statistics of all the regions
1783 const size_t bitmap_used = map_info->space_at(MetaspaceShared::bm)->used();
1784 const size_t bitmap_reserved = map_info->space_at(MetaspaceShared::bm)->used_aligned();
1785 const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
1786 _mc_region.reserved() +
1787 bitmap_reserved +
1788 _total_closed_archive_region_size +
1789 _total_open_archive_region_size;
1790 const size_t total_bytes = _ro_region.used() + _rw_region.used() +
1791 _mc_region.used() +
1792 bitmap_used +
1793 _total_closed_archive_region_size +
1794 _total_open_archive_region_size;
1795 const double total_u_perc = percent_of(total_bytes, total_reserved);
1796
1797 _mc_region.print(total_reserved);
1798 _rw_region.print(total_reserved);
1799 _ro_region.print(total_reserved);
1800 print_bitmap_region_stats(bitmap_used, total_reserved);
1801 print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved);
1802 print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);
1803
1804 log_debug(cds)("total : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
1805 total_bytes, total_reserved, total_u_perc);
1806 }
1807
1808 void VM_PopulateDumpSharedSpace::print_bitmap_region_stats(size_t size, size_t total_size) {
1809 log_debug(cds)("bm space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used]",
1810 size, size/double(total_size)*100.0, size);
1811 }
1812
1813 void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
1814 const char *name, size_t total_size) {
1815 int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
1816 for (int i = 0; i < arr_len; i++) {
1817 char* start = (char*)heap_mem->at(i).start();
1818 size_t size = heap_mem->at(i).byte_size();
1819 char* top = start + size;
1820 log_debug(cds)("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
1821 name, i, size, size/double(total_size)*100.0, size, p2i(start));
1822
1823 }
1824 }
1825
1826 void MetaspaceShared::write_core_archive_regions(FileMapInfo* mapinfo,
1827 GrowableArray<ArchiveHeapOopmapInfo>* closed_oopmaps,
1828 GrowableArray<ArchiveHeapOopmapInfo>* open_oopmaps) {
1829 // Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with
1830 // MetaspaceShared::n_regions (internal to hotspot).
1831 assert(NUM_CDS_REGIONS == MetaspaceShared::n_regions, "sanity");
1832
1833 // mc contains the trampoline code for method entries, which are patched at run time,
1834 // so it needs to be read/write.
1835 write_region(mapinfo, mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
1836 write_region(mapinfo, rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
1837 write_region(mapinfo, ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
1838 mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), closed_oopmaps, open_oopmaps);
1839 }
1840
1841 void MetaspaceShared::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec) {
1842 mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
1843 }
1844
1845 // Update a Java object to point its Klass* to the new location after
1846 // the shared archive has been compacted.
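// For example (sketch only): after copy_and_compact(), an archived heap
// object's header still points to the original Klass outside the archive;
// relocate_klass_ptr() swaps in the compacted copy:
//
//   oop obj = ...;                               // some archived heap object
//   // before: obj->klass() is the original Klass* in regular metaspace
//   MetaspaceShared::relocate_klass_ptr(obj);
//   // after:  obj->klass() == ArchiveCompactor::get_relocated_klass(original)
//
// The lookup itself is a single probe of _new_loc_table (old address -> new
// address), which ArchiveCompactor::allocate() filled in once per object.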
1847 void MetaspaceShared::relocate_klass_ptr(oop o) { 1848 assert(DumpSharedSpaces, "sanity"); 1849 Klass* k = ArchiveCompactor::get_relocated_klass(o->klass()); 1850 o->set_klass(k); 1851 } 1852 1853 Klass* MetaspaceShared::get_relocated_klass(Klass *k, bool is_final) { 1854 assert(DumpSharedSpaces, "sanity"); 1855 k = ArchiveCompactor::get_relocated_klass(k); 1856 if (is_final) { 1857 k = (Klass*)(address(k) + final_delta()); 1858 } 1859 return k; 1860 } 1861 1862 Symbol* MetaspaceShared::get_relocated_symbol(Symbol* orig_symbol) { 1863 return ArchiveCompactor::get_relocated_symbol(orig_symbol); 1864 } 1865 1866 class LinkSharedClassesClosure : public KlassClosure { 1867 Thread* THREAD; 1868 bool _made_progress; 1869 public: 1870 LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {} 1871 1872 void reset() { _made_progress = false; } 1873 bool made_progress() const { return _made_progress; } 1874 1875 void do_klass(Klass* k) { 1876 if (k->is_instance_klass()) { 1877 InstanceKlass* ik = InstanceKlass::cast(k); 1878 // For dynamic CDS dump, only link classes loaded by the builtin class loaders. 1879 bool do_linking = DumpSharedSpaces ? true : !ik->is_shared_unregistered_class(); 1880 if (do_linking) { 1881 // Link the class to cause the bytecodes to be rewritten and the 1882 // cpcache to be created. Class verification is done according 1883 // to -Xverify setting. 1884 _made_progress |= MetaspaceShared::try_link_class(ik, THREAD); 1885 guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class"); 1886 1887 if (DumpSharedSpaces) { 1888 // The following function is used to resolve all Strings in the statically 1889 // dumped classes to archive all the Strings. The archive heap is not supported 1890 // for the dynamic archive. 1891 ik->constants()->resolve_class_constants(THREAD); 1892 } 1893 } 1894 } 1895 } 1896 }; 1897 1898 void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) { 1899 // We need to iterate because verification may cause additional classes 1900 // to be loaded. 1901 LinkSharedClassesClosure link_closure(THREAD); 1902 do { 1903 link_closure.reset(); 1904 ClassLoaderDataGraph::unlocked_loaded_classes_do(&link_closure); 1905 guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class"); 1906 } while (link_closure.made_progress()); 1907 } 1908 1909 void MetaspaceShared::prepare_for_dumping() { 1910 Arguments::check_unsupported_dumping_properties(); 1911 ClassLoader::initialize_shared_path(); 1912 } 1913 1914 // Preload classes from a list, populate the shared spaces and dump to a 1915 // file. 1916 void MetaspaceShared::preload_and_dump(TRAPS) { 1917 { TraceTime timer("Dump Shared Spaces", TRACETIME_LOG(Info, startuptime)); 1918 ResourceMark rm(THREAD); 1919 char class_list_path_str[JVM_MAXPATHLEN]; 1920 // Preload classes to be shared. 
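// Sketch of the default-path computation below (the paths are examples only).
// On Linux, os::jvm_path() might return:
//
//   /opt/jdk/lib/server/libjvm.so
//
// The loop strips the last three components, yielding /opt/jdk; since that
// does not end in "lib", "/lib" is appended back, and the default becomes:
//
//   /opt/jdk/lib/classlist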
1921 const char* class_list_path; 1922 if (SharedClassListFile == NULL) { 1923 // Construct the path to the class list (in jre/lib) 1924 // Walk up two directories from the location of the VM and 1925 // optionally tack on "lib" (depending on platform) 1926 os::jvm_path(class_list_path_str, sizeof(class_list_path_str)); 1927 for (int i = 0; i < 3; i++) { 1928 char *end = strrchr(class_list_path_str, *os::file_separator()); 1929 if (end != NULL) *end = '\0'; 1930 } 1931 int class_list_path_len = (int)strlen(class_list_path_str); 1932 if (class_list_path_len >= 3) { 1933 if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) { 1934 if (class_list_path_len < JVM_MAXPATHLEN - 4) { 1935 jio_snprintf(class_list_path_str + class_list_path_len, 1936 sizeof(class_list_path_str) - class_list_path_len, 1937 "%slib", os::file_separator()); 1938 class_list_path_len += 4; 1939 } 1940 } 1941 } 1942 if (class_list_path_len < JVM_MAXPATHLEN - 10) { 1943 jio_snprintf(class_list_path_str + class_list_path_len, 1944 sizeof(class_list_path_str) - class_list_path_len, 1945 "%sclasslist", os::file_separator()); 1946 } 1947 class_list_path = class_list_path_str; 1948 } else { 1949 class_list_path = SharedClassListFile; 1950 } 1951 1952 log_info(cds)("Loading classes to share ..."); 1953 _has_error_classes = false; 1954 int class_count = preload_classes(class_list_path, THREAD); 1955 if (ExtraSharedClassListFile) { 1956 class_count += preload_classes(ExtraSharedClassListFile, THREAD); 1957 } 1958 log_info(cds)("Loading classes to share: done."); 1959 1960 log_info(cds)("Shared spaces: preloaded %d classes", class_count); 1961 1962 if (SharedArchiveConfigFile) { 1963 log_info(cds)("Reading extra data from %s ...", SharedArchiveConfigFile); 1964 read_extra_data(SharedArchiveConfigFile, THREAD); 1965 } 1966 log_info(cds)("Reading extra data: done."); 1967 1968 HeapShared::init_subgraph_entry_fields(THREAD); 1969 1970 // Rewrite and link classes 1971 log_info(cds)("Rewriting and linking classes ..."); 1972 1973 // Link any classes which got missed. This would happen if we have loaded classes that 1974 // were not explicitly specified in the classlist. E.g., if an interface implemented by class K 1975 // fails verification, all other interfaces that were not specified in the classlist but 1976 // are implemented by K are not verified. 1977 link_and_cleanup_shared_classes(CATCH); 1978 log_info(cds)("Rewriting and linking classes: done"); 1979 1980 #if INCLUDE_CDS_JAVA_HEAP 1981 if (use_full_module_graph()) { 1982 HeapShared::reset_archived_object_states(THREAD); 1983 } 1984 #endif 1985 1986 VM_PopulateDumpSharedSpace op; 1987 MutexLocker ml(THREAD, HeapShared::is_heap_object_archiving_allowed() ? 
1988 Heap_lock : NULL); // needed by HeapShared::run_full_gc_in_vm_thread()
1989 VMThread::execute(&op);
1990 }
1991 }
1992
1993
1994 int MetaspaceShared::preload_classes(const char* class_list_path, TRAPS) {
1995 ClassListParser parser(class_list_path);
1996 int class_count = 0;
1997
1998 while (parser.parse_one_line()) {
1999 Klass* klass = parser.load_current_class(THREAD);
2000 if (HAS_PENDING_EXCEPTION) {
2001 if (klass == NULL &&
2002 (PENDING_EXCEPTION->klass()->name() == vmSymbols::java_lang_ClassNotFoundException())) {
2003 // print a warning only when the pending exception is class not found
2004 log_warning(cds)("Preload Warning: Cannot find %s", parser.current_class_name());
2005 }
2006 CLEAR_PENDING_EXCEPTION;
2007 }
2008 if (klass != NULL) {
2009 if (log_is_enabled(Trace, cds)) {
2010 ResourceMark rm(THREAD);
2011 log_trace(cds)("Shared spaces preloaded: %s", klass->external_name());
2012 }
2013
2014 if (klass->is_instance_klass()) {
2015 InstanceKlass* ik = InstanceKlass::cast(klass);
2016
2017 // Link the class to cause the bytecodes to be rewritten and the
2018 // cpcache to be created. The linking is done as soon as classes
2019 // are loaded in order that the related data structures (klass and
2020 // cpCache) are located together.
2021 try_link_class(ik, THREAD);
2022 guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
2023 }
2024
2025 class_count++;
2026 }
2027 }
2028
2029 return class_count;
2030 }
2031
2032 // Returns true if the class's status has changed
2033 bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
2034 Arguments::assert_is_dumping_archive();
2035 if (ik->init_state() < InstanceKlass::linked &&
2036 !SystemDictionaryShared::has_class_failed_verification(ik)) {
2037 bool saved = BytecodeVerificationLocal;
2038 if (ik->is_shared_unregistered_class() && ik->class_loader() == NULL) {
2039 // The verification decision is based on BytecodeVerificationRemote
2040 // for non-system classes. Since we are using the NULL classloader
2041 // to load non-system classes for customized class loaders during dumping,
2042 // we need to temporarily change BytecodeVerificationLocal to be the same as
2043 // BytecodeVerificationRemote. Note this can cause the parent system
2044 // classes to also be verified. The extra overhead is acceptable during
2045 // dumping.
2046 BytecodeVerificationLocal = BytecodeVerificationRemote;
2047 }
2048 ik->link_class(THREAD);
2049 if (HAS_PENDING_EXCEPTION) {
2050 ResourceMark rm(THREAD);
2051 log_warning(cds)("Preload Warning: Verification failed for %s",
2052 ik->external_name());
2053 CLEAR_PENDING_EXCEPTION;
2054 SystemDictionaryShared::set_class_has_failed_verification(ik);
2055 _has_error_classes = true;
2056 }
2057 BytecodeVerificationLocal = saved;
2058 return true;
2059 } else {
2060 return false;
2061 }
2062 }
2063
2064 #if INCLUDE_CDS_JAVA_HEAP
2065 void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
2066 // The closed and open archive heap spaces each have at most two regions.
2067 // See FileMapInfo::write_archive_heap_regions() for details.
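// Illustrative sketch (comments only): each archived heap region is paired
// with an "oopmap" -- a bitmap with one bit per narrow-oop-sized word, set
// where the region embeds an oop that must be patched if the archive is
// mapped at a different address (see dump_archive_heap_oopmaps() below):
//
//   region words : [header][oop][int][oop][oop][pad] ...
//   oopmap bits  :    0      1    0    1    1    0   ...
//
// At runtime only the set bits are visited, so the patching cost scales with
// the number of embedded oops rather than with the region size.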
2068 _closed_archive_heap_regions = new GrowableArray<MemRegion>(2);
2069 _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
2070 HeapShared::archive_java_heap_objects(_closed_archive_heap_regions,
2071 _open_archive_heap_regions);
2072 ArchiveCompactor::OtherROAllocMark mark;
2073 HeapShared::write_subgraph_info_table();
2074 }
2075
2076 void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps() {
2077 if (HeapShared::is_heap_object_archiving_allowed()) {
2078 _closed_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
2079 dump_archive_heap_oopmaps(_closed_archive_heap_regions, _closed_archive_heap_oopmaps);
2080
2081 _open_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
2082 dump_archive_heap_oopmaps(_open_archive_heap_regions, _open_archive_heap_oopmaps);
2083 }
2084 }
2085
2086 void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
2087 GrowableArray<ArchiveHeapOopmapInfo>* oopmaps) {
2088 for (int i=0; i<regions->length(); i++) {
2089 ResourceBitMap oopmap = HeapShared::calculate_oopmap(regions->at(i));
2090 size_t size_in_bits = oopmap.size();
2091 size_t size_in_bytes = oopmap.size_in_bytes();
2092 uintptr_t* buffer = (uintptr_t*)NEW_C_HEAP_ARRAY(char, size_in_bytes, mtInternal);
2093 oopmap.write_to(buffer, size_in_bytes);
2094 log_info(cds, heap)("Oopmap = " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for heap region "
2095 INTPTR_FORMAT " (" SIZE_FORMAT_W(8) " bytes)",
2096 p2i(buffer), size_in_bytes,
2097 p2i(regions->at(i).start()), regions->at(i).byte_size());
2098
2099 ArchiveHeapOopmapInfo info;
2100 info._oopmap = (address)buffer;
2101 info._oopmap_size_in_bits = size_in_bits;
2102 info._oopmap_size_in_bytes = size_in_bytes;
2103 oopmaps->append(info);
2104 }
2105 }
2106 #endif // INCLUDE_CDS_JAVA_HEAP
2107
2108 void ReadClosure::do_ptr(void** p) {
2109 assert(*p == NULL, "initializing previously initialized pointer.");
2110 intptr_t obj = nextPtr();
2111 assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
2112 "hit tag while initializing ptrs.");
2113 *p = (void*)obj;
2114 }
2115
2116 void ReadClosure::do_u4(u4* p) {
2117 intptr_t obj = nextPtr();
2118 *p = (u4)(uintx(obj));
2119 }
2120
2121 void ReadClosure::do_bool(bool* p) {
2122 intptr_t obj = nextPtr();
2123 *p = (bool)(uintx(obj));
2124 }
2125
2126 void ReadClosure::do_tag(int tag) {
2127 int old_tag;
2128 old_tag = (int)(intptr_t)nextPtr();
2129 // do_int(&old_tag);
2130 assert(tag == old_tag, "old tag doesn't match");
2131 FileMapInfo::assert_mark(tag == old_tag);
2132 }
2133
2134 void ReadClosure::do_oop(oop *p) {
2135 narrowOop o = (narrowOop)nextPtr();
2136 if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) {
2137 *p = NULL;
2138 } else {
2139 assert(HeapShared::is_heap_object_archiving_allowed(),
2140 "Archived heap object is not allowed");
2141 assert(HeapShared::open_archive_heap_region_mapped(),
2142 "Open archive heap region is not mapped");
2143 *p = HeapShared::decode_from_archive(o);
2144 }
2145 }
2146
2147 void ReadClosure::do_region(u_char* start, size_t size) {
2148 assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
2149 assert(size % sizeof(intptr_t) == 0, "bad size");
2150 do_tag((int)size);
2151 while (size > 0) {
2152 *(intptr_t*)start = nextPtr();
2153 start += sizeof(intptr_t);
2154 size -= sizeof(intptr_t);
2155 }
2156 }
2157
2158 void MetaspaceShared::set_shared_metaspace_range(void* base, void *static_top, void* top) {
2159 assert(base <= static_top && static_top <= top, "must be");
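// The three addresses delimit the static and the (optional) dynamic archive
// within one contiguous range, which is what lets is_shared_dynamic() below
// answer with two plain pointer comparisons:
//
//   base              static_top                 top
//    |-- static archive --|--- dynamic archive ---|
//
// With no dynamic archive mapped, static_top == top and the second interval
// is empty.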
2160 _shared_metaspace_static_top = static_top; 2161 MetaspaceObj::set_shared_metaspace_range(base, top); 2162 } 2163 2164 // Return true if given address is in the misc data region 2165 bool MetaspaceShared::is_in_shared_region(const void* p, int idx) { 2166 return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx); 2167 } 2168 2169 bool MetaspaceShared::is_in_trampoline_frame(address addr) { 2170 if (UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc)) { 2171 return true; 2172 } 2173 return false; 2174 } 2175 2176 bool MetaspaceShared::is_shared_dynamic(void* p) { 2177 if ((p < MetaspaceObj::shared_metaspace_top()) && 2178 (p >= _shared_metaspace_static_top)) { 2179 return true; 2180 } else { 2181 return false; 2182 } 2183 } 2184 2185 void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() { 2186 assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled"); 2187 MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE; 2188 2189 FileMapInfo* static_mapinfo = open_static_archive(); 2190 FileMapInfo* dynamic_mapinfo = NULL; 2191 2192 if (static_mapinfo != NULL) { 2193 dynamic_mapinfo = open_dynamic_archive(); 2194 2195 // First try to map at the requested address 2196 result = map_archives(static_mapinfo, dynamic_mapinfo, true); 2197 if (result == MAP_ARCHIVE_MMAP_FAILURE) { 2198 // Mapping has failed (probably due to ASLR). Let's map at an address chosen 2199 // by the OS. 2200 log_info(cds)("Try to map archive(s) at an alternative address"); 2201 result = map_archives(static_mapinfo, dynamic_mapinfo, false); 2202 } 2203 } 2204 2205 if (result == MAP_ARCHIVE_SUCCESS) { 2206 bool dynamic_mapped = (dynamic_mapinfo != NULL && dynamic_mapinfo->is_mapped()); 2207 char* cds_base = static_mapinfo->mapped_base(); 2208 char* cds_end = dynamic_mapped ? dynamic_mapinfo->mapped_end() : static_mapinfo->mapped_end(); 2209 set_shared_metaspace_range(cds_base, static_mapinfo->mapped_end(), cds_end); 2210 _relocation_delta = static_mapinfo->relocation_delta(); 2211 if (dynamic_mapped) { 2212 FileMapInfo::set_shared_path_table(dynamic_mapinfo); 2213 } else { 2214 FileMapInfo::set_shared_path_table(static_mapinfo); 2215 } 2216 _requested_base_address = static_mapinfo->requested_base_address(); 2217 } else { 2218 set_shared_metaspace_range(NULL, NULL, NULL); 2219 UseSharedSpaces = false; 2220 FileMapInfo::fail_continue("Unable to map shared spaces"); 2221 if (PrintSharedArchiveAndExit) { 2222 vm_exit_during_initialization("Unable to use shared archive."); 2223 } 2224 } 2225 2226 if (static_mapinfo != NULL && !static_mapinfo->is_mapped()) { 2227 delete static_mapinfo; 2228 } 2229 if (dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped()) { 2230 delete dynamic_mapinfo; 2231 } 2232 } 2233 2234 FileMapInfo* MetaspaceShared::open_static_archive() { 2235 FileMapInfo* mapinfo = new FileMapInfo(true); 2236 if (!mapinfo->initialize()) { 2237 delete(mapinfo); 2238 return NULL; 2239 } 2240 return mapinfo; 2241 } 2242 2243 FileMapInfo* MetaspaceShared::open_dynamic_archive() { 2244 if (DynamicDumpSharedSpaces) { 2245 return NULL; 2246 } 2247 if (Arguments::GetSharedDynamicArchivePath() == NULL) { 2248 return NULL; 2249 } 2250 2251 FileMapInfo* mapinfo = new FileMapInfo(false); 2252 if (!mapinfo->initialize()) { 2253 delete(mapinfo); 2254 return NULL; 2255 } 2256 return mapinfo; 2257 } 2258 2259 // use_requested_addr: 2260 // true = map at FileMapHeader::_requested_base_address 2261 // false = map at an alternative address picked by OS. 
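// Sketch of the calling pattern (see initialize_runtime_shared_and_meta_spaces()
// above; static_mi/dynamic_mi are placeholder names): the first attempt uses
// the requested address, and on MAP_ARCHIVE_MMAP_FAILURE the caller retries at
// an OS-chosen address and pays the pointer-relocation cost instead:
//
//   MapArchiveResult r = map_archives(static_mi, dynamic_mi, /*use_requested_addr=*/true);
//   if (r == MAP_ARCHIVE_MMAP_FAILURE) {  // e.g. ASLR already took the range
//     r = map_archives(static_mi, dynamic_mi, /*use_requested_addr=*/false);
//   }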
2262 MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, FileMapInfo* dynamic_mapinfo,
2263 bool use_requested_addr) {
2264 if (use_requested_addr && static_mapinfo->requested_base_address() == NULL) {
2265 log_info(cds)("Archive(s) were created with -XX:SharedBaseAddress=0. Always map at os-selected address.");
2266 return MAP_ARCHIVE_MMAP_FAILURE;
2267 }
2268
2269 PRODUCT_ONLY(if (ArchiveRelocationMode == 1 && use_requested_addr) {
2270 // For product build only -- this is for benchmarking the cost of doing relocation.
2271 // For debug builds, the check is done below, after reserving the space, for better test coverage
2272 // (see comment below).
2273 log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address");
2274 return MAP_ARCHIVE_MMAP_FAILURE;
2275 });
2276
2277 if (ArchiveRelocationMode == 2 && !use_requested_addr) {
2278 log_info(cds)("ArchiveRelocationMode == 2: never map archive(s) at an alternative address");
2279 return MAP_ARCHIVE_MMAP_FAILURE;
2280 }
2281
2282 if (dynamic_mapinfo != NULL) {
2283 // Ensure that the OS won't be able to allocate new memory spaces between the two
2284 // archives, or else it would mess up the simple comparison in MetaspaceObj::is_shared().
2285 assert(static_mapinfo->mapping_end_offset() == dynamic_mapinfo->mapping_base_offset(), "no gap");
2286 }
2287
2288 ReservedSpace archive_space_rs, class_space_rs;
2289 MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
2290 char* mapped_base_address = reserve_address_space_for_archives(static_mapinfo, dynamic_mapinfo,
2291 use_requested_addr, archive_space_rs,
2292 class_space_rs);
2293 if (mapped_base_address == NULL) {
2294 result = MAP_ARCHIVE_MMAP_FAILURE;
2295 log_debug(cds)("Failed to reserve spaces (use_requested_addr=%u)", (unsigned)use_requested_addr);
2296 } else {
2297
2298 #ifdef ASSERT
2299 // Some sanity checks after reserving address spaces for archives
2300 // and class space.
2301 assert(archive_space_rs.is_reserved(), "Sanity");
2302 if (Metaspace::using_class_space()) {
2303 // Class space must closely follow the archive space. Both spaces
2304 // must be aligned correctly.
2305 assert(class_space_rs.is_reserved(),
2306 "A class space should have been reserved");
2307 assert(class_space_rs.base() >= archive_space_rs.end(),
2308 "class space should follow the cds archive space");
2309 assert(is_aligned(archive_space_rs.base(),
2310 MetaspaceShared::reserved_space_alignment()),
2311 "Archive space misaligned");
2312 assert(is_aligned(class_space_rs.base(),
2313 Metaspace::reserve_alignment()),
2314 "class space misaligned");
2315 }
2316 #endif // ASSERT
2317
2318 log_debug(cds)("Reserved archive_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
2319 p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size());
2320 log_debug(cds)("Reserved class_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
2321 p2i(class_space_rs.base()), p2i(class_space_rs.end()), class_space_rs.size());
2322
2323 if (MetaspaceShared::use_windows_memory_mapping()) {
2324 // We have now reserved address space for the archives, and will map the
2325 // archive files into this space.
2326 //
2327 // Special handling for Windows: on Windows we cannot map a file view
2328 // into an existing memory mapping. So, we unmap the address range we
2329 // just reserved again, which will make it available for mapping the
2330 // archives.
2331 // Reserving this range has not been for naught however since it makes 2332 // us reasonably sure the address range is available. 2333 // 2334 // But still it may fail, since between unmapping the range and mapping 2335 // in the archive someone else may grab the address space. Therefore 2336 // there is a fallback in FileMap::map_region() where we just read in 2337 // the archive files sequentially instead of mapping it in. We couple 2338 // this with use_requested_addr, since we're going to patch all the 2339 // pointers anyway so there's no benefit to mmap. 2340 if (use_requested_addr) { 2341 log_info(cds)("Windows mmap workaround: releasing archive space."); 2342 archive_space_rs.release(); 2343 } 2344 } 2345 MapArchiveResult static_result = map_archive(static_mapinfo, mapped_base_address, archive_space_rs); 2346 MapArchiveResult dynamic_result = (static_result == MAP_ARCHIVE_SUCCESS) ? 2347 map_archive(dynamic_mapinfo, mapped_base_address, archive_space_rs) : MAP_ARCHIVE_OTHER_FAILURE; 2348 2349 DEBUG_ONLY(if (ArchiveRelocationMode == 1 && use_requested_addr) { 2350 // This is for simulating mmap failures at the requested address. In 2351 // debug builds, we do it here (after all archives have possibly been 2352 // mapped), so we can thoroughly test the code for failure handling 2353 // (releasing all allocated resource, etc). 2354 log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address"); 2355 if (static_result == MAP_ARCHIVE_SUCCESS) { 2356 static_result = MAP_ARCHIVE_MMAP_FAILURE; 2357 } 2358 if (dynamic_result == MAP_ARCHIVE_SUCCESS) { 2359 dynamic_result = MAP_ARCHIVE_MMAP_FAILURE; 2360 } 2361 }); 2362 2363 if (static_result == MAP_ARCHIVE_SUCCESS) { 2364 if (dynamic_result == MAP_ARCHIVE_SUCCESS) { 2365 result = MAP_ARCHIVE_SUCCESS; 2366 } else if (dynamic_result == MAP_ARCHIVE_OTHER_FAILURE) { 2367 assert(dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped(), "must have failed"); 2368 // No need to retry mapping the dynamic archive again, as it will never succeed 2369 // (bad file, etc) -- just keep the base archive. 2370 log_warning(cds, dynamic)("Unable to use shared archive. The top archive failed to load: %s", 2371 dynamic_mapinfo->full_path()); 2372 result = MAP_ARCHIVE_SUCCESS; 2373 // TODO, we can give the unused space for the dynamic archive to class_space_rs, but there's no 2374 // easy API to do that right now. 2375 } else { 2376 result = MAP_ARCHIVE_MMAP_FAILURE; 2377 } 2378 } else if (static_result == MAP_ARCHIVE_OTHER_FAILURE) { 2379 result = MAP_ARCHIVE_OTHER_FAILURE; 2380 } else { 2381 result = MAP_ARCHIVE_MMAP_FAILURE; 2382 } 2383 } 2384 2385 if (result == MAP_ARCHIVE_SUCCESS) { 2386 SharedBaseAddress = (size_t)mapped_base_address; 2387 LP64_ONLY({ 2388 if (Metaspace::using_class_space()) { 2389 // Set up ccs in metaspace. 2390 Metaspace::initialize_class_space(class_space_rs); 2391 2392 // Set up compressed Klass pointer encoding: the encoding range must 2393 // cover both archive and class space. 2394 address cds_base = (address)static_mapinfo->mapped_base(); 2395 address ccs_end = (address)class_space_rs.end(); 2396 CompressedKlassPointers::initialize(cds_base, ccs_end - cds_base); 2397 2398 // map_heap_regions() compares the current narrow oop and klass encodings 2399 // with the archived ones, so it must be done after all encodings are determined. 2400 static_mapinfo->map_heap_regions(); 2401 } 2402 }); 2403 log_info(cds)("optimized module handling: %s", MetaspaceShared::use_optimized_module_handling() ? 
"enabled" : "disabled"); 2404 log_info(cds)("full module graph: %s", MetaspaceShared::use_full_module_graph() ? "enabled" : "disabled"); 2405 } else { 2406 unmap_archive(static_mapinfo); 2407 unmap_archive(dynamic_mapinfo); 2408 release_reserved_spaces(archive_space_rs, class_space_rs); 2409 } 2410 2411 return result; 2412 } 2413 2414 2415 // This will reserve two address spaces suitable to house Klass structures, one 2416 // for the cds archives (static archive and optionally dynamic archive) and 2417 // optionally one move for ccs. 2418 // 2419 // Since both spaces must fall within the compressed class pointer encoding 2420 // range, they are allocated close to each other. 2421 // 2422 // Space for archives will be reserved first, followed by a potential gap, 2423 // followed by the space for ccs: 2424 // 2425 // +-- Base address A B End 2426 // | | | | 2427 // v v v v 2428 // +-------------+--------------+ +----------------------+ 2429 // | static arc | [dyn. arch] | [gap] | compr. class space | 2430 // +-------------+--------------+ +----------------------+ 2431 // 2432 // (The gap may result from different alignment requirements between metaspace 2433 // and CDS) 2434 // 2435 // If UseCompressedClassPointers is disabled, only one address space will be 2436 // reserved: 2437 // 2438 // +-- Base address End 2439 // | | 2440 // v v 2441 // +-------------+--------------+ 2442 // | static arc | [dyn. arch] | 2443 // +-------------+--------------+ 2444 // 2445 // Base address: If use_archive_base_addr address is true, the Base address is 2446 // determined by the address stored in the static archive. If 2447 // use_archive_base_addr address is false, this base address is determined 2448 // by the platform. 2449 // 2450 // If UseCompressedClassPointers=1, the range encompassing both spaces will be 2451 // suitable to en/decode narrow Klass pointers: the base will be valid for 2452 // encoding, the range [Base, End) not surpass KlassEncodingMetaspaceMax. 2453 // 2454 // Return: 2455 // 2456 // - On success: 2457 // - archive_space_rs will be reserved and large enough to host static and 2458 // if needed dynamic archive: [Base, A). 2459 // archive_space_rs.base and size will be aligned to CDS reserve 2460 // granularity. 2461 // - class_space_rs: If UseCompressedClassPointers=1, class_space_rs will 2462 // be reserved. Its start address will be aligned to metaspace reserve 2463 // alignment, which may differ from CDS alignment. It will follow the cds 2464 // archive space, close enough such that narrow class pointer encoding 2465 // covers both spaces. 2466 // If UseCompressedClassPointers=0, class_space_rs remains unreserved. 2467 // - On error: NULL is returned and the spaces remain unreserved. 2468 char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_mapinfo, 2469 FileMapInfo* dynamic_mapinfo, 2470 bool use_archive_base_addr, 2471 ReservedSpace& archive_space_rs, 2472 ReservedSpace& class_space_rs) { 2473 2474 address const base_address = (address) (use_archive_base_addr ? static_mapinfo->requested_base_address() : NULL); 2475 const size_t archive_space_alignment = MetaspaceShared::reserved_space_alignment(); 2476 2477 // Size and requested location of the archive_space_rs (for both static and dynamic archives) 2478 assert(static_mapinfo->mapping_base_offset() == 0, "Must be"); 2479 size_t archive_end_offset = (dynamic_mapinfo == NULL) ? 
static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset();
2480 size_t archive_space_size = align_up(archive_end_offset, archive_space_alignment);
2481
2482 // If a base address is given, it must have valid alignment and be suitable as encoding base.
2483 if (base_address != NULL) {
2484 assert(is_aligned(base_address, archive_space_alignment),
2485 "Archive base address invalid: " PTR_FORMAT ".", p2i(base_address));
2486 if (Metaspace::using_class_space()) {
2487 assert(CompressedKlassPointers::is_valid_base(base_address),
2488 "Archive base address invalid: " PTR_FORMAT ".", p2i(base_address));
2489 }
2490 }
2491
2492 if (!Metaspace::using_class_space()) {
2493 // Get the simple case out of the way first:
2494 // no compressed class space, simple allocation.
2495 archive_space_rs = ReservedSpace(archive_space_size, archive_space_alignment,
2496 false /* bool large */, (char*)base_address);
2497 if (archive_space_rs.is_reserved()) {
2498 assert(base_address == NULL ||
2499 (address)archive_space_rs.base() == base_address, "Sanity");
2500 // Register archive space with NMT.
2501 MemTracker::record_virtual_memory_type(archive_space_rs.base(), mtClassShared);
2502 return archive_space_rs.base();
2503 }
2504 return NULL;
2505 }
2506
2507 #ifdef _LP64
2508
2509 // Complex case: two spaces adjacent to each other, both to be addressable
2510 // with narrow class pointers.
2511 // We reserve the whole range spanning both spaces, then split that range up.
2512
2513 const size_t class_space_alignment = Metaspace::reserve_alignment();
2514
2515 // To simplify matters, let's assume that metaspace alignment will always be
2516 // equal to or a multiple of archive alignment.
2517 assert(is_power_of_2(class_space_alignment) &&
2518 is_power_of_2(archive_space_alignment) &&
2519 class_space_alignment >= archive_space_alignment,
2520 "Sanity");
2521
2522 const size_t class_space_size = CompressedClassSpaceSize;
2523 assert(CompressedClassSpaceSize > 0 &&
2524 is_aligned(CompressedClassSpaceSize, class_space_alignment),
2525 "CompressedClassSpaceSize malformed: "
2526 SIZE_FORMAT, CompressedClassSpaceSize);
2527
2528 const size_t ccs_begin_offset = align_up(base_address + archive_space_size,
2529 class_space_alignment) - base_address;
2530 const size_t gap_size = ccs_begin_offset - archive_space_size;
2531
2532 const size_t total_range_size =
2533 align_up(archive_space_size + gap_size + class_space_size,
2534 os::vm_allocation_granularity());
2535
2536 ReservedSpace total_rs;
2537 if (base_address != NULL) {
2538 // Reserve at the given archive base address, or not at all.
2539 total_rs = ReservedSpace(total_range_size, archive_space_alignment,
2540 false /* bool large */, (char*) base_address);
2541 } else {
2542 // Reserve at any address, but leave it up to the platform to choose a good one.
2543 total_rs = Metaspace::reserve_address_space_for_compressed_classes(total_range_size);
2544 }
2545
2546 if (!total_rs.is_reserved()) {
2547 return NULL;
2548 }
2549
2550 // Paranoid checks:
2551 assert(base_address == NULL || (address)total_rs.base() == base_address,
2552 "Sanity (" PTR_FORMAT " vs " PTR_FORMAT ")", p2i(base_address), p2i(total_rs.base()));
2553 assert(is_aligned(total_rs.base(), archive_space_alignment), "Sanity");
2554 assert(total_rs.size() == total_range_size, "Sanity");
2555 assert(CompressedKlassPointers::is_valid_base((address)total_rs.base()), "Sanity");
2556
2557 // Now split up the space into ccs and cds archive.
For simplicity, just leave 2558 // the gap reserved at the end of the archive space. 2559 archive_space_rs = total_rs.first_part(ccs_begin_offset, 2560 (size_t)os::vm_allocation_granularity(), 2561 /*split=*/true); 2562 class_space_rs = total_rs.last_part(ccs_begin_offset); 2563 2564 assert(is_aligned(archive_space_rs.base(), archive_space_alignment), "Sanity"); 2565 assert(is_aligned(archive_space_rs.size(), archive_space_alignment), "Sanity"); 2566 assert(is_aligned(class_space_rs.base(), class_space_alignment), "Sanity"); 2567 assert(is_aligned(class_space_rs.size(), class_space_alignment), "Sanity"); 2568 2569 // NMT: fix up the space tags 2570 MemTracker::record_virtual_memory_type(archive_space_rs.base(), mtClassShared); 2571 MemTracker::record_virtual_memory_type(class_space_rs.base(), mtClass); 2572 2573 return archive_space_rs.base(); 2574 2575 #else 2576 ShouldNotReachHere(); 2577 return NULL; 2578 #endif 2579 2580 } 2581 2582 void MetaspaceShared::release_reserved_spaces(ReservedSpace& archive_space_rs, 2583 ReservedSpace& class_space_rs) { 2584 if (archive_space_rs.is_reserved()) { 2585 log_debug(cds)("Released shared space (archive) " INTPTR_FORMAT, p2i(archive_space_rs.base())); 2586 archive_space_rs.release(); 2587 } 2588 if (class_space_rs.is_reserved()) { 2589 log_debug(cds)("Released shared space (classes) " INTPTR_FORMAT, p2i(class_space_rs.base())); 2590 class_space_rs.release(); 2591 } 2592 } 2593 2594 static int archive_regions[] = {MetaspaceShared::mc, 2595 MetaspaceShared::rw, 2596 MetaspaceShared::ro}; 2597 static int archive_regions_count = 3; 2598 2599 MapArchiveResult MetaspaceShared::map_archive(FileMapInfo* mapinfo, char* mapped_base_address, ReservedSpace rs) { 2600 assert(UseSharedSpaces, "must be runtime"); 2601 if (mapinfo == NULL) { 2602 return MAP_ARCHIVE_SUCCESS; // The dynamic archive has not been specified. No error has happened -- trivially succeeded. 2603 } 2604 2605 mapinfo->set_is_mapped(false); 2606 2607 if (mapinfo->alignment() != (size_t)os::vm_allocation_granularity()) { 2608 log_error(cds)("Unable to map CDS archive -- os::vm_allocation_granularity() expected: " SIZE_FORMAT 2609 " actual: %d", mapinfo->alignment(), os::vm_allocation_granularity()); 2610 return MAP_ARCHIVE_OTHER_FAILURE; 2611 } 2612 2613 MapArchiveResult result = 2614 mapinfo->map_regions(archive_regions, archive_regions_count, mapped_base_address, rs); 2615 2616 if (result != MAP_ARCHIVE_SUCCESS) { 2617 unmap_archive(mapinfo); 2618 return result; 2619 } 2620 2621 if (!mapinfo->validate_shared_path_table()) { 2622 unmap_archive(mapinfo); 2623 return MAP_ARCHIVE_OTHER_FAILURE; 2624 } 2625 2626 mapinfo->set_is_mapped(true); 2627 return MAP_ARCHIVE_SUCCESS; 2628 } 2629 2630 void MetaspaceShared::unmap_archive(FileMapInfo* mapinfo) { 2631 assert(UseSharedSpaces, "must be runtime"); 2632 if (mapinfo != NULL) { 2633 mapinfo->unmap_regions(archive_regions, archive_regions_count); 2634 mapinfo->set_is_mapped(false); 2635 } 2636 } 2637 2638 // Read the miscellaneous data from the shared file, and 2639 // serialize it out to its various destinations. 
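// The handshake with the dump side is a single pointer: the start of the
// serialized stream (returned by dump_read_only_tables()) was recorded in the
// header via mapinfo->set_serialized_data(), and initialize_shared_spaces()
// below reads it back. Because the ro region is mapped (and, if needed,
// relocated) as a whole, that stored pointer is directly usable in the
// current process; nothing is re-parsed:
//
//   dump time: mapinfo->set_serialized_data(serialized_data);      // in doit()
//   run time:  char* buffer = static_mapinfo->serialized_data();   // same data, mapped address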
2640 2641 void MetaspaceShared::initialize_shared_spaces() { 2642 FileMapInfo *static_mapinfo = FileMapInfo::current_info(); 2643 _i2i_entry_code_buffers = static_mapinfo->i2i_entry_code_buffers(); 2644 _i2i_entry_code_buffers_size = static_mapinfo->i2i_entry_code_buffers_size(); 2645 char* buffer = static_mapinfo->cloned_vtables(); 2646 clone_cpp_vtables((intptr_t*)buffer); 2647 2648 // Verify various attributes of the archive, plus initialize the 2649 // shared string/symbol tables 2650 buffer = static_mapinfo->serialized_data(); 2651 intptr_t* array = (intptr_t*)buffer; 2652 ReadClosure rc(&array); 2653 serialize(&rc); 2654 2655 // Initialize the run-time symbol table. 2656 SymbolTable::create_table(); 2657 2658 static_mapinfo->patch_archived_heap_embedded_pointers(); 2659 2660 // Close the mapinfo file 2661 static_mapinfo->close(); 2662 2663 static_mapinfo->unmap_region(MetaspaceShared::bm); 2664 2665 FileMapInfo *dynamic_mapinfo = FileMapInfo::dynamic_info(); 2666 if (dynamic_mapinfo != NULL) { 2667 intptr_t* buffer = (intptr_t*)dynamic_mapinfo->serialized_data(); 2668 ReadClosure rc(&buffer); 2669 SymbolTable::serialize_shared_table_header(&rc, false); 2670 SystemDictionaryShared::serialize_dictionary_headers(&rc, false); 2671 dynamic_mapinfo->close(); 2672 } 2673 2674 if (PrintSharedArchiveAndExit) { 2675 if (PrintSharedDictionary) { 2676 tty->print_cr("\nShared classes:\n"); 2677 SystemDictionaryShared::print_on(tty); 2678 } 2679 if (FileMapInfo::current_info() == NULL || _archive_loading_failed) { 2680 tty->print_cr("archive is invalid"); 2681 vm_exit(1); 2682 } else { 2683 tty->print_cr("archive is valid"); 2684 vm_exit(0); 2685 } 2686 } 2687 } 2688 2689 // JVM/TI RedefineClasses() support: 2690 bool MetaspaceShared::remap_shared_readonly_as_readwrite() { 2691 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 2692 2693 if (UseSharedSpaces) { 2694 // remap the shared readonly space to shared readwrite, private 2695 FileMapInfo* mapinfo = FileMapInfo::current_info(); 2696 if (!mapinfo->remap_shared_readonly_as_readwrite()) { 2697 return false; 2698 } 2699 if (FileMapInfo::dynamic_info() != NULL) { 2700 mapinfo = FileMapInfo::dynamic_info(); 2701 if (!mapinfo->remap_shared_readonly_as_readwrite()) { 2702 return false; 2703 } 2704 } 2705 _remapped_readwrite = true; 2706 } 2707 return true; 2708 } 2709 2710 void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) { 2711 // This is highly unlikely to happen on 64-bits because we have reserved a 4GB space. 2712 // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes 2713 // or so. 2714 _mc_region.print_out_of_space_msg(name, needed_bytes); 2715 _rw_region.print_out_of_space_msg(name, needed_bytes); 2716 _ro_region.print_out_of_space_msg(name, needed_bytes); 2717 2718 vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name), 2719 "Please reduce the number of shared classes."); 2720 } 2721 2722 // This is used to relocate the pointers so that the base archive can be mapped at 2723 // MetaspaceShared::requested_base_address() without runtime relocation. 2724 intx MetaspaceShared::final_delta() { 2725 return intx(MetaspaceShared::requested_base_address()) // We want the base archive to be mapped to here at runtime 2726 - intx(SharedBaseAddress); // .. 
but the base archive was mapped here at dump time
2727 }
2728
2729 bool MetaspaceShared::use_full_module_graph() {
2730 return _use_optimized_module_handling && _use_full_module_graph &&
2731 (UseSharedSpaces || DumpSharedSpaces) && HeapShared::is_heap_object_archiving_allowed();
2732 }
2733
2734 void MetaspaceShared::print_on(outputStream* st) {
2735 if (UseSharedSpaces || DumpSharedSpaces) {
2736 st->print("CDS archive(s) mapped at: ");
2737 address base;
2738 address top;
2739 if (UseSharedSpaces) { // Runtime
2740 base = (address)MetaspaceObj::shared_metaspace_base();
2741 address static_top = (address)_shared_metaspace_static_top;
2742 top = (address)MetaspaceObj::shared_metaspace_top();
2743 st->print("[" PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "), ", p2i(base), p2i(static_top), p2i(top));
2744 } else if (DumpSharedSpaces) { // Dump Time
2745 base = (address)_shared_rs.base();
2746 top = (address)_shared_rs.end();
2747 st->print("[" PTR_FORMAT "-" PTR_FORMAT "), ", p2i(base), p2i(top));
2748 }
2749 st->print("size " SIZE_FORMAT ", ", top - base);
2750 st->print("SharedBaseAddress: " PTR_FORMAT ", ArchiveRelocationMode: %d.", SharedBaseAddress, (int)ArchiveRelocationMode);
2751 } else {
2752 st->print("CDS disabled.");
2753 }
2754 st->cr();
2755 }
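// ---------------------------------------------------------------------------
// Typical invocations that exercise the code above (file names are examples):
//
//   Create a static archive (MetaspaceShared::preload_and_dump ->
//   VM_PopulateDumpSharedSpace::doit):
//     java -Xshare:dump -XX:SharedClassListFile=app.classlist \
//          -XX:SharedArchiveFile=app.jsa -cp app.jar
//
//   Run with the archive (initialize_runtime_shared_and_meta_spaces ->
//   map_archives):
//     java -Xshare:on -XX:SharedArchiveFile=app.jsa -cp app.jar Main
//
//   -Xlog:cds=debug prints the region, reservation and relocation decisions
//   logged throughout this file.
//
// Below is a self-contained model (kept out of the build with #if 0) of the
// bitmap-guided delta relocation performed by
// relocate_to_requested_base_address() and SharedDataRelocator. All names are
// local to the sketch, not HotSpot API.
#if 0
#include <cassert>
#include <cstdint>
#include <vector>

// One word per slot; is_ptr marks the slots that hold addresses (the "ptrmap").
struct MiniArchive {
  std::vector<intptr_t> words;
  std::vector<bool>     is_ptr;
};

// The entire relocation step: shift every marked, non-null pointer by delta,
// checking that the old value pointed into the dumped range.
static void relocate(MiniArchive& a, intptr_t old_base, intptr_t old_end, intptr_t delta) {
  for (size_t i = 0; i < a.words.size(); i++) {
    if (a.is_ptr[i] && a.words[i] != 0) {
      assert(a.words[i] >= old_base && a.words[i] < old_end);
      a.words[i] += delta;
    }
  }
}

int main() {
  MiniArchive a;
  a.words  = { 0x1000, 42, 0x1008, 0 };           // two pointers, an int, a NULL slot
  a.is_ptr = { true, false, true, true };
  relocate(a, 0x1000, 0x2000, /*delta=*/0x7000);  // "map" the 0x1000 range at 0x8000
  assert(a.words[0] == 0x8000 && a.words[2] == 0x8008 && a.words[3] == 0);
  return 0;
}
#endif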