/*
 * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/classListParser.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/loaderConstraints.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/placeholders.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodes.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "memory/archiveUtils.inline.hpp"
#include "memory/dynamicArchive.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/signature.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/ostream.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/hashtable.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

ReservedSpace MetaspaceShared::_shared_rs;
VirtualSpace MetaspaceShared::_shared_vs;
ReservedSpace MetaspaceShared::_symbol_rs;
VirtualSpace MetaspaceShared::_symbol_vs;
MetaspaceSharedStats MetaspaceShared::_stats;
bool MetaspaceShared::_has_error_classes;
bool MetaspaceShared::_archive_loading_failed = false;
bool MetaspaceShared::_remapped_readwrite = false;
address MetaspaceShared::_i2i_entry_code_buffers = NULL;
size_t MetaspaceShared::_i2i_entry_code_buffers_size = 0;
void* MetaspaceShared::_shared_metaspace_static_top = NULL;
intx MetaspaceShared::_relocation_delta;
char* MetaspaceShared::_requested_base_address;

// The CDS archive is divided into the following regions:
//     mc  - misc code (the method entry trampolines, c++ vtables)
//     rw  - read-write metadata
//     ro  - read-only metadata and read-only tables
//
//     ca0 - closed archive heap space #0
//     ca1 - closed archive heap space #1 (may be empty)
//     oa0 - open archive heap space #0
//     oa1 - open archive heap space #1 (may be empty)
//
// The mc, rw, and ro regions are linearly allocated, starting from
// SharedBaseAddress, in the order of mc->rw->ro. The sizes of these 3 regions
// are page-aligned, and there's no gap between any consecutive regions.
//
// These 3 regions are populated in the following steps:
// [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
//     temporarily allocated outside of the shared regions. Only the method entry
//     trampolines are written into the mc region.
// [2] C++ vtables are copied into the mc region.
// [3] ArchiveCompactor copies RW metadata into the rw region.
// [4] ArchiveCompactor copies RO metadata into the ro region.
// [5] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
//     are copied into the ro region as read-only tables.
//
// The ca0/ca1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
// Their layout is independent of the mc/rw/ro regions.
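
// Worked example (assumed defaults and hypothetical sizes, not part of the
// original file): with SharedBaseAddress == 0x800000000, a 1 MB mc region and
// a 3 MB rw region would be laid out back to back as
//     mc: [0x800000000, 0x800100000)
//     rw: [0x800100000, 0x800400000)
//     ro: [0x800400000, ...)
// with each boundary page-aligned by DumpRegion::pack() below.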

char* DumpRegion::expand_top_to(char* newtop) {
  assert(is_allocatable(), "must be initialized and not packed");
  assert(newtop >= _top, "must not grow backwards");
  if (newtop > _end) {
    MetaspaceShared::report_out_of_space(_name, newtop - _top);
    ShouldNotReachHere();
  }

  if (_rs == MetaspaceShared::shared_rs()) {
    uintx delta;
    if (DynamicDumpSharedSpaces) {
      delta = DynamicArchive::object_delta_uintx(newtop);
    } else {
      delta = MetaspaceShared::object_delta_uintx(newtop);
    }
    if (delta > MAX_SHARED_DELTA) {
      // This is just a sanity check and should not appear in any real world usage. This
      // happens only if you allocate more than 2GB of shared objects and would require
      // millions of shared classes.
      vm_exit_during_initialization("Out of memory in the CDS archive",
                                    "Please reduce the number of shared classes.");
    }
  }

  MetaspaceShared::commit_to(_rs, _vs, newtop);
  _top = newtop;
  return _top;
}

char* DumpRegion::allocate(size_t num_bytes, size_t alignment) {
  char* p = (char*)align_up(_top, alignment);
  char* newtop = p + align_up(num_bytes, alignment);
  expand_top_to(newtop);
  memset(p, 0, newtop - p);
  return p;
}

void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
  assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
  intptr_t *p = (intptr_t*)_top;
  char* newtop = _top + sizeof(intptr_t);
  expand_top_to(newtop);
  *p = n;
  if (need_to_mark) {
    ArchivePtrMarker::mark_pointer(p);
  }
}

void DumpRegion::print(size_t total_bytes) const {
  log_debug(cds)("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
                 _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()),
                 p2i(_base + MetaspaceShared::final_delta()));
}

void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
  log_error(cds)("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
                 _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
  if (strcmp(_name, failing_region) == 0) {
    log_error(cds)(" required = %d", int(needed_bytes));
  }
}

void DumpRegion::init(ReservedSpace* rs, VirtualSpace* vs) {
  _rs = rs;
  _vs = vs;
  // Start with 0 committed bytes. The memory will be committed as needed by
  // MetaspaceShared::commit_to().
  if (!_vs->initialize(*_rs, 0)) {
    fatal("Unable to allocate memory for shared space");
  }
  _base = _top = _rs->base();
  _end = _rs->end();
}

void DumpRegion::pack(DumpRegion* next) {
  assert(!is_packed(), "sanity");
  _end = (char*)align_up(_top, MetaspaceShared::reserved_space_alignment());
  _is_packed = true;
  if (next != NULL) {
    next->_rs = _rs;
    next->_vs = _vs;
    next->_base = next->_top = this->_end;
    next->_end = _rs->end();
  }
}

static DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _symbol_region("symbols");
static size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;

void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space) {
  first_space->init(&_shared_rs, &_shared_vs);
}

DumpRegion* MetaspaceShared::misc_code_dump_space() {
  return &_mc_region;
}

DumpRegion* MetaspaceShared::read_write_dump_space() {
  return &_rw_region;
}

DumpRegion* MetaspaceShared::read_only_dump_space() {
  return &_ro_region;
}

void MetaspaceShared::pack_dump_space(DumpRegion* current, DumpRegion* next,
                                      ReservedSpace* rs) {
  current->pack(next);
}

char* MetaspaceShared::symbol_space_alloc(size_t num_bytes) {
  return _symbol_region.allocate(num_bytes);
}

char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
  return _mc_region.allocate(num_bytes);
}

char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
  return _ro_region.allocate(num_bytes);
}

size_t MetaspaceShared::reserved_space_alignment() { return os::vm_allocation_granularity(); }
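
// Illustrative sketch (hypothetical, not compiled): how the DumpRegion
// machinery above is typically driven during -Xshare:dump. The real call
// sites are in VM_PopulateDumpSharedSpace::doit() and
// ArchiveCompactor::copy_and_compact() later in this file.
#if 0
static void example_dump_region_usage() {
  // Carve out 16 zeroed, pointer-aligned bytes from the mc region; the
  // backing memory is committed lazily by MetaspaceShared::commit_to().
  char* p = _mc_region.allocate(16, sizeof(intptr_t));

  // Close the mc region: align its end to the reservation granularity and
  // hand the remaining reserved space to rw, so mc->rw->ro end up adjacent.
  _mc_region.pack(&_rw_region);
}
#endif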

static bool shared_base_valid(char* shared_base) {
#ifdef _LP64
  return CompressedKlassPointers::is_valid_base((address)shared_base);
#else
  return true;
#endif
}

static bool shared_base_too_high(char* shared_base, size_t cds_total) {
  if (SharedBaseAddress != 0 && shared_base < (char*)SharedBaseAddress) {
    // SharedBaseAddress is very high (e.g., 0xffffffffffffff00) so
    // align_up(SharedBaseAddress, MetaspaceShared::reserved_space_alignment()) has wrapped around.
    return true;
  }
  if (max_uintx - uintx(shared_base) < uintx(cds_total)) {
    // The end of the archive will wrap around.
    return true;
  }

  return false;
}

static char* compute_shared_base(size_t cds_total) {
  char* shared_base = (char*)align_up((char*)SharedBaseAddress, MetaspaceShared::reserved_space_alignment());
  const char* err = NULL;
  if (shared_base_too_high(shared_base, cds_total)) {
    err = "too high";
  } else if (!shared_base_valid(shared_base)) {
    err = "invalid for this platform";
  }
  if (err) {
    log_warning(cds)("SharedBaseAddress (" INTPTR_FORMAT ") is %s. Reverted to " INTPTR_FORMAT,
                     p2i((void*)SharedBaseAddress), err,
                     p2i((void*)Arguments::default_SharedBaseAddress()));
    SharedBaseAddress = Arguments::default_SharedBaseAddress();
    shared_base = (char*)align_up((char*)SharedBaseAddress, MetaspaceShared::reserved_space_alignment());
  }
  assert(!shared_base_too_high(shared_base, cds_total) && shared_base_valid(shared_base), "Sanity");
  return shared_base;
}

void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
  assert(DumpSharedSpaces, "should be called for dump time only");

  const size_t reserve_alignment = MetaspaceShared::reserved_space_alignment();

#ifdef _LP64
  // On 64-bit VM we reserve a 4G range and, if UseCompressedClassPointers=1,
  // will use that to house both the archives and the ccs. See below for
  // details.
  const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
  const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
#else
  // We don't support archives larger than 256MB on 32-bit due to limited
  // virtual address space.
  size_t cds_total = align_down(256*M, reserve_alignment);
#endif

  char* shared_base = compute_shared_base(cds_total);
  _requested_base_address = shared_base;

  // Whether to use SharedBaseAddress as attach address.
  bool use_requested_base = true;

  if (shared_base == NULL) {
    use_requested_base = false;
  }

  if (ArchiveRelocationMode == 1) {
    log_info(cds)("ArchiveRelocationMode == 1: always allocate class space at an alternative address");
    use_requested_base = false;
  }

  // First try to reserve the space at the specified SharedBaseAddress.
  assert(!_shared_rs.is_reserved(), "must be");
  if (use_requested_base) {
    _shared_rs = ReservedSpace(cds_total, reserve_alignment,
                               false /* large */, (char*)shared_base);
    if (_shared_rs.is_reserved()) {
      assert(_shared_rs.base() == shared_base, "should match");
    } else {
      log_info(cds)("dumptime space reservation: failed to map at "
                    "SharedBaseAddress " PTR_FORMAT, p2i(shared_base));
    }
  }
  if (!_shared_rs.is_reserved()) {
    // Get a reserved space anywhere if attaching at the SharedBaseAddress
    // fails:
    if (UseCompressedClassPointers) {
      // If we need to reserve class space as well, let the platform handle
      // the reservation.
      LP64_ONLY(_shared_rs =
                Metaspace::reserve_address_space_for_compressed_classes(cds_total);)
      NOT_LP64(ShouldNotReachHere();)
    } else {
      // anywhere is fine.
      _shared_rs = ReservedSpace(cds_total, reserve_alignment,
                                 false /* large */, (char*)NULL);
    }
  }

  if (!_shared_rs.is_reserved()) {
    vm_exit_during_initialization("Unable to reserve memory for shared space",
                                  err_msg(SIZE_FORMAT " bytes.", cds_total));
  }

#ifdef _LP64

  if (UseCompressedClassPointers) {

    assert(CompressedKlassPointers::is_valid_base((address)_shared_rs.base()), "Sanity");

    // On 64-bit VM, if UseCompressedClassPointers=1, the compressed class space
    // must be allocated near the cds such that the compressed Klass pointer
    // encoding can be used to en/decode pointers from both cds and ccs. Since
    // Metaspace cannot do this (it knows nothing about cds), we do it for
    // Metaspace here and pass it the space to use for ccs.
    //
    // We do this by reserving space for the ccs behind the archives. Note
    // however that ccs follows a different alignment
    // (Metaspace::reserve_alignment), so there may be a gap between ccs and
    // cds.
    // We use a similar layout at runtime, see reserve_address_space_for_archives().
    //
    //                              +-- SharedBaseAddress (default = 0x800000000)
    //                              v
    // +-..---------+---------+ ... +----+----+----+--------+-----------------+
    // |    Heap    | Archive |     | MC | RW | RO | [gap]  |   class space   |
    // +-..---------+---------+ ... +----+----+----+--------+-----------------+
    // |<--  MaxHeapSize  --->|     |<-- UnscaledClassSpaceMax = 4GB -------->|
    //
    // Note: ccs must follow the archives, and the archives must start at the
    // encoding base. However, the exact placement of ccs does not matter as
    // long as it resides in the encoding range of CompressedKlassPointers
    // and comes after the archive.
    //
    // We do this by splitting up the allocated 4G into 3G of archive space,
    // followed by 1G for the ccs:
    // + The upper 1 GB is used as the "temporary compressed class space"
    //   -- preload_classes() will store Klasses into this space.
    // + The lower 3 GB is used for the archive -- when preload_classes()
    //   is done, ArchiveCompactor will copy the class metadata into this
    //   space, first the RW parts, then the RO parts.

    // Starting address of ccs must be aligned to Metaspace::reserve_alignment()...
    size_t class_space_size = align_down(_shared_rs.size() / 4, Metaspace::reserve_alignment());
    address class_space_start = (address)align_down(_shared_rs.end() - class_space_size, Metaspace::reserve_alignment());
    size_t archive_size = class_space_start - (address)_shared_rs.base();

    ReservedSpace tmp_class_space = _shared_rs.last_part(archive_size);
    _shared_rs = _shared_rs.first_part(archive_size);

    // ... as does the size of ccs.
    tmp_class_space = tmp_class_space.first_part(class_space_size);
    CompressedClassSpaceSize = class_space_size;

    // Let Metaspace initialize ccs
    Metaspace::initialize_class_space(tmp_class_space);

    // and set up CompressedKlassPointers encoding.
    CompressedKlassPointers::initialize((address)_shared_rs.base(), cds_total);

    log_info(cds)("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
                  p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());

    log_info(cds)("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                  CompressedClassSpaceSize, p2i(tmp_class_space.base()));

    assert(_shared_rs.end() == tmp_class_space.base() &&
           is_aligned(_shared_rs.base(), MetaspaceShared::reserved_space_alignment()) &&
           is_aligned(tmp_class_space.base(), Metaspace::reserve_alignment()) &&
           is_aligned(tmp_class_space.size(), Metaspace::reserve_alignment()), "Sanity");
  }

#endif

  init_shared_dump_space(&_mc_region);
  SharedBaseAddress = (size_t)_shared_rs.base();
  log_info(cds)("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                _shared_rs.size(), p2i(_shared_rs.base()));

  // We don't want any valid object to be at the very bottom of the archive.
  // See ArchivePtrMarker::mark_pointer().
  MetaspaceShared::misc_code_space_alloc(16);

  size_t symbol_rs_size = LP64_ONLY(3 * G) NOT_LP64(128 * M);
  _symbol_rs = ReservedSpace(symbol_rs_size);
  if (!_symbol_rs.is_reserved()) {
    vm_exit_during_initialization("Unable to reserve memory for symbols",
                                  err_msg(SIZE_FORMAT " bytes.", symbol_rs_size));
  }
  _symbol_region.init(&_symbol_rs, &_symbol_vs);
}

// Called by universe_post_init()
void MetaspaceShared::post_initialize(TRAPS) {
  if (UseSharedSpaces) {
    int size = FileMapInfo::get_number_of_shared_paths();
    if (size > 0) {
      SystemDictionaryShared::allocate_shared_data_arrays(size, THREAD);
      if (!DynamicDumpSharedSpaces) {
        FileMapInfo* info;
        if (FileMapInfo::dynamic_info() == NULL) {
          info = FileMapInfo::current_info();
        } else {
          info = FileMapInfo::dynamic_info();
        }
        ClassLoaderExt::init_paths_start_index(info->app_class_paths_start_index());
        ClassLoaderExt::init_app_module_paths_start_index(info->app_module_paths_start_index());
      }
    }
  }
}

static GrowableArray<Handle>* _extra_interned_strings = NULL;

void MetaspaceShared::read_extra_data(const char* filename, TRAPS) {
  _extra_interned_strings = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10000, true);

  HashtableTextDump reader(filename);
  reader.check_version("VERSION: 1.0");

  while (reader.remain() > 0) {
    int utf8_length;
    int prefix_type = reader.scan_prefix(&utf8_length);
    ResourceMark rm(THREAD);
    if (utf8_length == 0x7fffffff) {
      // buf_len would overflow a 32-bit int.
      vm_exit_during_initialization(err_msg("string length too large: %d", utf8_length));
    }
    int buf_len = utf8_length+1;
    char* utf8_buffer = NEW_RESOURCE_ARRAY(char, buf_len);
    reader.get_utf8(utf8_buffer, utf8_length);
    utf8_buffer[utf8_length] = '\0';

    if (prefix_type == HashtableTextDump::SymbolPrefix) {
      SymbolTable::new_permanent_symbol(utf8_buffer);
    } else {
      assert(prefix_type == HashtableTextDump::StringPrefix, "Sanity");
      oop s = StringTable::intern(utf8_buffer, THREAD);

      if (HAS_PENDING_EXCEPTION) {
        log_warning(cds, heap)("[line %d] extra interned string allocation failed; size too large: %d",
                               reader.last_line_no(), utf8_length);
        CLEAR_PENDING_EXCEPTION;
      } else {
#if INCLUDE_G1GC
        if (UseG1GC) {
          typeArrayOop body = java_lang_String::value(s);
          const HeapRegion* hr = G1CollectedHeap::heap()->heap_region_containing(body);
          if (hr->is_humongous()) {
            // Don't keep it alive, so it will be GC'ed before we dump the strings, in order
            // to maximize free heap space and minimize fragmentation.
            log_warning(cds, heap)("[line %d] extra interned string ignored; size too large: %d",
                                   reader.last_line_no(), utf8_length);
            continue;
          }
        }
#endif
        // Interned strings are GC'ed if there are no references to them, so let's
        // add a reference to keep this string alive.
        assert(s != NULL, "must succeed");
        Handle h(THREAD, s);
        _extra_interned_strings->append(h);
      }
    }
  }
}

void MetaspaceShared::commit_to(ReservedSpace* rs, VirtualSpace* vs, char* newtop) {
  Arguments::assert_is_dumping_archive();
  char* base = rs->base();
  size_t need_committed_size = newtop - base;
  size_t has_committed_size = vs->committed_size();
  if (need_committed_size < has_committed_size) {
    return;
  }

  size_t min_bytes = need_committed_size - has_committed_size;
  size_t preferred_bytes = 1 * M;
  size_t uncommitted = vs->reserved_size() - has_committed_size;

  size_t commit = MAX2(min_bytes, preferred_bytes);
  commit = MIN2(commit, uncommitted);
  assert(commit <= uncommitted, "sanity");

  bool result = vs->expand_by(commit, false);
  if (rs == &_shared_rs) {
    ArchivePtrMarker::expand_ptr_end((address*)vs->high());
  }

  if (!result) {
    vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
                                          need_committed_size));
  }

  assert(rs == &_shared_rs || rs == &_symbol_rs, "must be");
  const char* which = (rs == &_shared_rs) ? "shared" : "symbol";
  log_debug(cds)("Expanding %s spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
                 which, commit, vs->actual_committed_size(), vs->high());
}

void MetaspaceShared::initialize_ptr_marker(CHeapBitMap* ptrmap) {
  ArchivePtrMarker::initialize(ptrmap, (address*)_shared_vs.low(), (address*)_shared_vs.high());
}

// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.

void MetaspaceShared::serialize(SerializeClosure* soc) {
  int tag = 0;
  soc->do_tag(--tag);

  // Verify the sizes of various metadata in the system.
  soc->do_tag(sizeof(Method));
  soc->do_tag(sizeof(ConstMethod));
  soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(ConstantPool));
  soc->do_tag(sizeof(ConstantPoolCache));
  soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
  soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(Symbol));

  // Dump/restore miscellaneous metadata.
  JavaClasses::serialize_offsets(soc);
  Universe::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore references to commonly used names and signatures.
  vmSymbols::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore the symbol/string/subgraph_info tables
  SymbolTable::serialize_shared_table_header(soc);
  StringTable::serialize_shared_table_header(soc);
  HeapShared::serialize_subgraph_info_table_header(soc);
  SystemDictionaryShared::serialize_dictionary_headers(soc);

  InstanceMirrorKlass::serialize_offsets(soc);

  // Dump/restore well known classes (pointers)
  SystemDictionaryShared::serialize_well_known_klasses(soc);
  soc->do_tag(--tag);

  serialize_cloned_cpp_vtptrs(soc);
  soc->do_tag(--tag);

  soc->do_tag(666);
}

address MetaspaceShared::i2i_entry_code_buffers(size_t total_size) {
  if (DumpSharedSpaces) {
    if (_i2i_entry_code_buffers == NULL) {
      _i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
      _i2i_entry_code_buffers_size = total_size;
    }
  } else if (UseSharedSpaces) {
    assert(_i2i_entry_code_buffers != NULL, "must already be initialized");
  } else {
    return NULL;
  }

  assert(_i2i_entry_code_buffers_size == total_size, "must not change");
  return _i2i_entry_code_buffers;
}

uintx MetaspaceShared::object_delta_uintx(void* obj) {
  Arguments::assert_is_dumping_archive();
  if (DumpSharedSpaces) {
    assert(shared_rs()->contains(obj), "must be");
  } else {
    assert(is_in_shared_metaspace(obj) || DynamicArchive::is_in_target_space(obj), "must be");
  }
  address base_address = address(SharedBaseAddress);
  uintx deltax = address(obj) - base_address;
  return deltax;
}
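
// Worked example (hypothetical addresses): if SharedBaseAddress == 0x800000000
// and an archived object sits at 0x800001000, object_delta_uintx() returns
// 0x1000. Archived pointers can therefore be encoded as small non-negative
// offsets from the shared base; DumpRegion::expand_top_to() rejects any delta
// larger than MAX_SHARED_DELTA.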

// Global object for holding classes that have been loaded. Since this
// is run at a safepoint just before exit, this is the entire set of classes.
static GrowableArray<Klass*>* _global_klass_objects;

static int global_klass_compare(Klass** a, Klass **b) {
  return a[0]->name()->fast_compare(b[0]->name());
}

GrowableArray<Klass*>* MetaspaceShared::collected_klasses() {
  return _global_klass_objects;
}

static void collect_array_classes(Klass* k) {
  _global_klass_objects->append_if_missing(k);
  if (k->is_array_klass()) {
    // Add in the array classes too
    ArrayKlass* ak = ArrayKlass::cast(k);
    Klass* h = ak->higher_dimension();
    if (h != NULL) {
      h->array_klasses_do(collect_array_classes);
    }
  }
}

class CollectClassesClosure : public KlassClosure {
  void do_klass(Klass* k) {
    if (k->is_instance_klass() &&
        SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(k))) {
      // Don't add to the _global_klass_objects
    } else {
      _global_klass_objects->append_if_missing(k);
    }
    if (k->is_array_klass()) {
      // Add in the array classes too
      ArrayKlass* ak = ArrayKlass::cast(k);
      Klass* h = ak->higher_dimension();
      if (h != NULL) {
        h->array_klasses_do(collect_array_classes);
      }
    }
  }
};

static void remove_unshareable_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
      // on their array classes.
      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
      k->remove_unshareable_info();
    }
  }
}

static void remove_java_mirror_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_java_mirror
      // on their array classes.
      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
      k->remove_java_mirror();
    }
  }
}

static void clear_basic_type_mirrors() {
  assert(!HeapShared::is_heap_object_archiving_allowed(), "Sanity");
  Universe::set_int_mirror(NULL);
  Universe::set_float_mirror(NULL);
  Universe::set_double_mirror(NULL);
  Universe::set_byte_mirror(NULL);
  Universe::set_bool_mirror(NULL);
  Universe::set_char_mirror(NULL);
  Universe::set_long_mirror(NULL);
  Universe::set_short_mirror(NULL);
  Universe::set_void_mirror(NULL);
}

static void rewrite_nofast_bytecode(const methodHandle& method) {
  BytecodeStream bcs(method);
  while (!bcs.is_last_bytecode()) {
    Bytecodes::Code opcode = bcs.next();
    switch (opcode) {
    case Bytecodes::_getfield: *bcs.bcp() = Bytecodes::_nofast_getfield; break;
    case Bytecodes::_putfield: *bcs.bcp() = Bytecodes::_nofast_putfield; break;
    case Bytecodes::_aload_0:  *bcs.bcp() = Bytecodes::_nofast_aload_0;  break;
    case Bytecodes::_iload: {
      if (!bcs.is_wide()) {
        *bcs.bcp() = Bytecodes::_nofast_iload;
      }
      break;
    }
    default: break;
    }
  }
}
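
// Illustrative sketch (not compiled): the effect of rewrite_nofast_bytecode()
// on a trivial getter. The _nofast_* variants behave the same way in the
// interpreter but are never rewritten again at run time, so the archived
// ConstMethod stays byte-for-byte identical across runs:
//
//     before dump:             after rewriting:
//       aload_0                  _nofast_aload_0
//       getfield #2              _nofast_getfield #2
//       ireturn                  ireturn (unchanged)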

// Walk all methods in the class list to ensure that they won't be modified at
// run time. This includes:
// [1] Rewrite all bytecodes as needed, so that the ConstMethod* will not be modified
//     at run time by RewriteBytecodes/RewriteFrequentPairs
// [2] Assign a fingerprint, so one doesn't need to be assigned at run-time.
static void rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread) {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(thread, ik);
    }
  }
}

void MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread, InstanceKlass* ik) {
  for (int i = 0; i < ik->methods()->length(); i++) {
    methodHandle m(thread, ik->methods()->at(i));
    rewrite_nofast_bytecode(m);
    Fingerprinter fp(m);
    // The side effect of this call sets method's fingerprint field.
    fp.fingerprint();
  }
}

// Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
// (In GCC this is the field <Type>::_vptr, i.e., first word in the object.)
//
// Addresses of the vtables and the methods may be different across JVM runs,
// if libjvm.so is dynamically loaded at a different base address.
//
// To ensure that the Metadata objects in the CDS archive always have the correct vtable:
//
// + at dump time:  we redirect the _vptr to point to our own vtables inside
//                  the CDS image
// + at run time:   we clone the actual contents of the vtables from libjvm.so
//                  into our own tables.

// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
#define CPP_VTABLE_PATCH_TYPES_DO(f) \
  f(ConstantPool) \
  f(InstanceKlass) \
  f(InstanceClassLoaderKlass) \
  f(InstanceMirrorKlass) \
  f(InstanceRefKlass) \
  f(Method) \
  f(ObjArrayKlass) \
  f(TypeArrayKlass)

class CppVtableInfo {
  intptr_t _vtable_size;
  intptr_t _cloned_vtable[1];
public:
  static int num_slots(int vtable_size) {
    return 1 + vtable_size; // Need to add the space occupied by _vtable_size;
  }
  int vtable_size()           { return int(uintx(_vtable_size)); }
  void set_vtable_size(int n) { _vtable_size = intptr_t(n); }
  intptr_t* cloned_vtable()   { return &_cloned_vtable[0]; }
  void zero()                 { memset(_cloned_vtable, 0, sizeof(intptr_t) * vtable_size()); }
  // Returns the address of the next CppVtableInfo that can be placed immediately after this CppVtableInfo
  static size_t byte_size(int vtable_size) {
    CppVtableInfo i;
    return pointer_delta(&i._cloned_vtable[vtable_size], &i, sizeof(u1));
  }
};
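
// Worked example (assumed 64-bit layout): for a type whose vtable has n == 5
// entries, CppVtableInfo::num_slots(5) == 6 and CppVtableInfo::byte_size(5)
// == 6 * sizeof(intptr_t) == 48 bytes -- one slot for _vtable_size followed
// by the 5 cloned vtable entries.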

template <class T> class CppVtableCloner : public T {
  static intptr_t* vtable_of(Metadata& m) {
    return *((intptr_t**)&m);
  }
  static CppVtableInfo* _info;

  static int get_vtable_length(const char* name);

public:
  // Allocate and initialize the C++ vtable, starting from top, but do not go past end.
  static intptr_t* allocate(const char* name);

  // Clone the vtable to ...
  static intptr_t* clone_vtable(const char* name, CppVtableInfo* info);

  static void zero_vtable_clone() {
    assert(DumpSharedSpaces, "dump-time only");
    _info->zero();
  }

  static bool is_valid_shared_object(const T* obj) {
    intptr_t* vptr = *(intptr_t**)obj;
    return vptr == _info->cloned_vtable();
  }
};

template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;

template <class T>
intptr_t* CppVtableCloner<T>::allocate(const char* name) {
  assert(is_aligned(_mc_region.top(), sizeof(intptr_t)), "bad alignment");
  int n = get_vtable_length(name);
  _info = (CppVtableInfo*)_mc_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
  _info->set_vtable_size(n);

  intptr_t* p = clone_vtable(name, _info);
  assert((char*)p == _mc_region.top(), "must be");

  return _info->cloned_vtable();
}

template <class T>
intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) {
  if (!DumpSharedSpaces) {
    assert(_info == 0, "_info is initialized only at dump time");
    _info = info; // Remember it -- it will be used by MetaspaceShared::is_valid_shared_method()
  }
  T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
  int n = info->vtable_size();
  intptr_t* srcvtable = vtable_of(tmp);
  intptr_t* dstvtable = info->cloned_vtable();

  // We already checked (and, if necessary, adjusted n) when the vtables were allocated, so we are
  // safe to do memcpy.
  log_debug(cds, vtables)("Copying %3d vtable entries for %s", n, name);
  memcpy(dstvtable, srcvtable, sizeof(intptr_t) * n);
  return dstvtable + n;
}

// To determine the size of the vtable for each type, we use the following
// trick by declaring 2 subclasses:
//
//   class CppVtableTesterA: public InstanceKlass {virtual int   last_virtual_method() {return 1;}    };
//   class CppVtableTesterB: public InstanceKlass {virtual void* last_virtual_method() {return NULL;} };
//
// CppVtableTesterA and CppVtableTesterB's vtables have the following properties:
// - Their size (N+1) is exactly one more than the size of InstanceKlass's vtable (N)
// - The first N entries are exactly the same as in InstanceKlass's vtable.
// - Their last entry is different.
//
// So to determine the value of N, we just walk CppVtableTesterA and CppVtableTesterB's tables
// and find the first entry that's different.
//
// This works on all C++ compilers supported by Oracle, but you may need to tweak it for more
// esoteric compilers.

template <class T> class CppVtableTesterB: public T {
public:
  virtual int last_virtual_method() {return 1;}
};

template <class T> class CppVtableTesterA : public T {
public:
  virtual void* last_virtual_method() {
    // Make this different than CppVtableTesterB::last_virtual_method so the C++
    // compiler/linker won't alias the two functions.
    return NULL;
  }
};
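
// Worked example (assumed layout): if T's vtable has N entries (slots
// 0 .. N-1), the two testers share slots 0 .. N-1 and first differ at slot N,
// where their distinct last_virtual_method() entries land. The scan in
// get_vtable_length() below therefore stops at slot N and returns N, the size
// of T's own vtable. Slot 0 is never compared because it may hold RTTI data.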

template <class T>
int CppVtableCloner<T>::get_vtable_length(const char* name) {
  CppVtableTesterA<T> a;
  CppVtableTesterB<T> b;

  intptr_t* avtable = vtable_of(a);
  intptr_t* bvtable = vtable_of(b);

  // Start at slot 1, because slot 0 may be RTTI (on Solaris/Sparc)
  int vtable_len = 1;
  for (; ; vtable_len++) {
    if (avtable[vtable_len] != bvtable[vtable_len]) {
      break;
    }
  }
  log_debug(cds, vtables)("Found %3d vtable entries for %s", vtable_len, name);

  return vtable_len;
}

#define ALLOC_CPP_VTABLE_CLONE(c) \
  _cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner<c>::allocate(#c); \
  ArchivePtrMarker::mark_pointer(&_cloned_cpp_vtptrs[c##_Kind]);

#define CLONE_CPP_VTABLE(c) \
  p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);

#define ZERO_CPP_VTABLE(c) \
  CppVtableCloner<c>::zero_vtable_clone();

//------------------------------ for DynamicDumpSharedSpaces - start
#define DECLARE_CLONED_VTABLE_KIND(c) c ## _Kind,

enum {
  // E.g., ConstantPool_Kind == 0, InstanceKlass_Kind == 1, etc.
  CPP_VTABLE_PATCH_TYPES_DO(DECLARE_CLONED_VTABLE_KIND)
  _num_cloned_vtable_kinds
};

// This is the index of all the cloned vtables. E.g., for
//     ConstantPool* cp = ....; // an archived constant pool
//     InstanceKlass* ik = ....;// an archived class
// the following holds true:
//     _cloned_cpp_vtptrs[ConstantPool_Kind]  == ((intptr_t**)cp)[0]
//     _cloned_cpp_vtptrs[InstanceKlass_Kind] == ((intptr_t**)ik)[0]
static intptr_t** _cloned_cpp_vtptrs = NULL;

void MetaspaceShared::allocate_cloned_cpp_vtptrs() {
  assert(DumpSharedSpaces, "must");
  size_t vtptrs_bytes = _num_cloned_vtable_kinds * sizeof(intptr_t*);
  _cloned_cpp_vtptrs = (intptr_t**)_mc_region.allocate(vtptrs_bytes, sizeof(intptr_t*));
}

void MetaspaceShared::serialize_cloned_cpp_vtptrs(SerializeClosure* soc) {
  soc->do_ptr((void**)&_cloned_cpp_vtptrs);
}

intptr_t* MetaspaceShared::fix_cpp_vtable_for_dynamic_archive(MetaspaceObj::Type msotype, address obj) {
  Arguments::assert_is_dumping_archive();
  int kind = -1;
  switch (msotype) {
  case MetaspaceObj::SymbolType:
  case MetaspaceObj::TypeArrayU1Type:
  case MetaspaceObj::TypeArrayU2Type:
  case MetaspaceObj::TypeArrayU4Type:
  case MetaspaceObj::TypeArrayU8Type:
  case MetaspaceObj::TypeArrayOtherType:
  case MetaspaceObj::ConstMethodType:
  case MetaspaceObj::ConstantPoolCacheType:
  case MetaspaceObj::AnnotationsType:
  case MetaspaceObj::MethodCountersType:
  case MetaspaceObj::RecordComponentType:
    // These have no vtables.
    break;
  case MetaspaceObj::ClassType:
    {
      Klass* k = (Klass*)obj;
      assert(k->is_klass(), "must be");
      if (k->is_instance_klass()) {
        InstanceKlass* ik = InstanceKlass::cast(k);
        if (ik->is_class_loader_instance_klass()) {
          kind = InstanceClassLoaderKlass_Kind;
        } else if (ik->is_reference_instance_klass()) {
          kind = InstanceRefKlass_Kind;
        } else if (ik->is_mirror_instance_klass()) {
          kind = InstanceMirrorKlass_Kind;
        } else {
          kind = InstanceKlass_Kind;
        }
      } else if (k->is_typeArray_klass()) {
        kind = TypeArrayKlass_Kind;
      } else {
        assert(k->is_objArray_klass(), "must be");
        kind = ObjArrayKlass_Kind;
      }
    }
    break;

  case MetaspaceObj::MethodType:
    {
      Method* m = (Method*)obj;
      assert(m->is_method(), "must be");
      kind = Method_Kind;
    }
    break;

  case MetaspaceObj::MethodDataType:
    // We don't archive MethodData <-- should have been removed in remove_unshareable_info
    ShouldNotReachHere();
    break;

  case MetaspaceObj::ConstantPoolType:
    {
      ConstantPool *cp = (ConstantPool*)obj;
      assert(cp->is_constantPool(), "must be");
      kind = ConstantPool_Kind;
    }
    break;

  default:
    ShouldNotReachHere();
  }

  if (kind >= 0) {
    assert(kind < _num_cloned_vtable_kinds, "must be");
    return _cloned_cpp_vtptrs[kind];
  } else {
    return NULL;
  }
}

//------------------------------ for DynamicDumpSharedSpaces - end

// This can be called at both dump time and run time:
// - clone the contents of the c++ vtables into the space
//   allocated by allocate_cpp_vtable_clones()
void MetaspaceShared::clone_cpp_vtables(intptr_t* p) {
  assert(DumpSharedSpaces || UseSharedSpaces, "sanity");
  CPP_VTABLE_PATCH_TYPES_DO(CLONE_CPP_VTABLE);
}

void MetaspaceShared::zero_cpp_vtable_clones_for_writing() {
  assert(DumpSharedSpaces, "dump-time only");
  CPP_VTABLE_PATCH_TYPES_DO(ZERO_CPP_VTABLE);
}
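
// Usage sketch (hypothetical call site, not compiled): at run time the pointer
// to the vtable blob saved by allocate_cpp_vtable_clones() below is read back
// from the mapped archive and passed to clone_cpp_vtables(), which re-fills
// every CppVtableInfo with the vtable contents of the currently loaded
// libjvm.so:
#if 0
  intptr_t* vtable_blob = ...; // as restored from the archive header
  MetaspaceShared::clone_cpp_vtables(vtable_blob);
#endif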

// Allocate and initialize the C++ vtables, starting from top, but do not go past end.
char* MetaspaceShared::allocate_cpp_vtable_clones() {
  char* cloned_vtables = _mc_region.top(); // This is the beginning of all the cloned vtables

  assert(DumpSharedSpaces, "dump-time only");
  // Layout (each slot is an intptr_t):
  //   [number of slots in the first vtable = n1]
  //   [ <n1> slots for the first vtable]
  //   [number of slots in the second vtable = n2]
  //   [ <n2> slots for the second vtable]
  //   ...
  // The order of the vtables is the same as the CPP_VTABLE_PATCH_TYPES_DO macro.
  CPP_VTABLE_PATCH_TYPES_DO(ALLOC_CPP_VTABLE_CLONE);

  return cloned_vtables;
}

bool MetaspaceShared::is_valid_shared_method(const Method* m) {
  assert(is_in_shared_metaspace(m), "must be");
  return CppVtableCloner<Method>::is_valid_shared_object(m);
}

void WriteClosure::do_oop(oop* o) {
  if (*o == NULL) {
    _dump_region->append_intptr_t(0);
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archiving heap object is not allowed");
    _dump_region->append_intptr_t(
      (intptr_t)CompressedOops::encode_not_null(*o));
  }
}

void WriteClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    _dump_region->append_intptr_t(*(intptr_t*)start, true);
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}
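
// Illustrative sketch (not compiled): WriteClosure emits a flat stream of
// intptr_t slots into the ro region. On a 64-bit VM, do_region(buf, 16)
// appends three slots -- a leading size tag followed by the raw words:
//     [16] [word 0 of buf] [word 1 of buf]
// The run-time reader verifies each tag while restoring the stream.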

// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public ResourceObj {
public:

  // Here's poor man's enum inheritance
#define SHAREDSPACE_OBJ_TYPES_DO(f) \
  METASPACE_OBJ_TYPES_DO(f) \
  f(SymbolHashentry) \
  f(SymbolBucket) \
  f(StringHashentry) \
  f(StringBucket) \
  f(Other)

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  static const char* type_name(Type type) {
    switch(type) {
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
    default:
      ShouldNotReachHere();
      return NULL;
    }
  }

public:
  enum { RO = 0, RW = 1 };

  int _counts[2][_number_of_types];
  int _bytes [2][_number_of_types];

  DumpAllocStats() {
    memset(_counts, 0, sizeof(_counts));
    memset(_bytes, 0, sizeof(_bytes));
  };

  void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
    int which = (read_only) ? RO : RW;
    _counts[which][type] ++;
    _bytes [which][type] += byte_size;
  }

  void record_other_type(int byte_size, bool read_only) {
    int which = (read_only) ? RO : RW;
    _bytes [which][OtherType] += byte_size;
  }
  void print_stats(int ro_all, int rw_all, int mc_all);
};

void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all) {
  // Calculate size of data that was not allocated by Metaspace::allocate()
  MetaspaceSharedStats *stats = MetaspaceShared::stats();

  // symbols
  _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
  _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;

  _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
  _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;

  // strings
  _counts[RO][StringHashentryType] = stats->string.hashentry_count;
  _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;

  _counts[RO][StringBucketType] = stats->string.bucket_count;
  _bytes [RO][StringBucketType] = stats->string.bucket_bytes;

  // TODO: count things like dictionary, vtable, etc
  _bytes[RW][OtherType] += mc_all;
  rw_all += mc_all; // mc is mapped Read/Write

  // prevent divide-by-zero
  if (ro_all < 1) {
    ro_all = 1;
  }
  if (rw_all < 1) {
    rw_all = 1;
  }

  int all_ro_count = 0;
  int all_ro_bytes = 0;
  int all_rw_count = 0;
  int all_rw_bytes = 0;

// To make fmt_stats be a syntactic constant (for format warnings), use #define.
#define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
  const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
  const char *hdr = "                        ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";

  LogMessage(cds) msg;

  msg.debug("Detailed metadata info (excluding st regions; rw stats include mc regions):");
  msg.debug("%s", hdr);
  msg.debug("%s", sep);
  for (int type = 0; type < int(_number_of_types); type ++) {
    const char *name = type_name((Type)type);
    int ro_count = _counts[RO][type];
    int ro_bytes = _bytes [RO][type];
    int rw_count = _counts[RW][type];
    int rw_bytes = _bytes [RW][type];
    int count = ro_count + rw_count;
    int bytes = ro_bytes + rw_bytes;

    double ro_perc = percent_of(ro_bytes, ro_all);
    double rw_perc = percent_of(rw_bytes, rw_all);
    double perc    = percent_of(bytes, ro_all + rw_all);

    msg.debug(fmt_stats, name,
              ro_count, ro_bytes, ro_perc,
              rw_count, rw_bytes, rw_perc,
              count, bytes, perc);

    all_ro_count += ro_count;
    all_ro_bytes += ro_bytes;
    all_rw_count += rw_count;
    all_rw_bytes += rw_bytes;
  }

  int all_count = all_ro_count + all_rw_count;
  int all_bytes = all_ro_bytes + all_rw_bytes;

  double all_ro_perc = percent_of(all_ro_bytes, ro_all);
  double all_rw_perc = percent_of(all_rw_bytes, rw_all);
  double all_perc    = percent_of(all_bytes, ro_all + rw_all);

  msg.debug("%s", sep);
  msg.debug(fmt_stats, "Total",
            all_ro_count, all_ro_bytes, all_ro_perc,
            all_rw_count, all_rw_bytes, all_rw_perc,
            all_count, all_bytes, all_perc);

  assert(all_ro_bytes == ro_all, "everything should have been counted");
  assert(all_rw_bytes == rw_all, "everything should have been counted");

#undef fmt_stats
}

// Populate the shared space.

class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  GrowableArray<MemRegion> *_closed_archive_heap_regions;
  GrowableArray<MemRegion> *_open_archive_heap_regions;

  GrowableArray<ArchiveHeapOopmapInfo> *_closed_archive_heap_oopmaps;
  GrowableArray<ArchiveHeapOopmapInfo> *_open_archive_heap_oopmaps;

  void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                 GrowableArray<ArchiveHeapOopmapInfo>* oopmaps);
  void dump_symbols();
  char* dump_read_only_tables();
  void print_class_stats();
  void print_region_stats(FileMapInfo* map_info);
  void print_bitmap_region_stats(size_t size, size_t total_size);
  void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                               const char *name, size_t total_size);
  void relocate_to_requested_base_address(CHeapBitMap* ptrmap);

public:

  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit(); // outline because gdb sucks
  bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace

class SortedSymbolClosure: public SymbolClosure {
  GrowableArray<Symbol*> _symbols;
  virtual void do_symbol(Symbol** sym) {
    assert((*sym)->is_permanent(), "archived symbols must be permanent");
    _symbols.append(*sym);
  }
  static int compare_symbols_by_address(Symbol** a, Symbol** b) {
    if (a[0] < b[0]) {
      return -1;
    } else if (a[0] == b[0]) {
      ResourceMark rm;
      log_warning(cds)("Duplicated symbol %s unexpected", (*a)->as_C_string());
      return 0;
    } else {
      return 1;
    }
  }

public:
  SortedSymbolClosure() {
    SymbolTable::symbols_do(this);
    _symbols.sort(compare_symbols_by_address);
  }
  GrowableArray<Symbol*>* get_sorted_symbols() {
    return &_symbols;
  }
};

// ArchiveCompactor --
//
// This class is the central piece of shared archive compaction -- all metaspace data are
// initially allocated outside of the shared regions. ArchiveCompactor copies the
// metaspace data into their final location in the shared regions.

class ArchiveCompactor : AllStatic {
  static const int INITIAL_TABLE_SIZE = 8087;
  static const int MAX_TABLE_SIZE     = 1000000;

  static DumpAllocStats* _alloc_stats;
  static SortedSymbolClosure* _ssc;

  typedef KVHashtable<address, address, mtInternal> RelocationTable;
  static RelocationTable* _new_loc_table;

public:
  static void initialize() {
    _alloc_stats = new (ResourceObj::C_HEAP, mtInternal) DumpAllocStats;
    _new_loc_table = new RelocationTable(INITIAL_TABLE_SIZE);
  }
  static DumpAllocStats* alloc_stats() {
    return _alloc_stats;
  }

  // Use this when you allocate space with MetaspaceShared::read_only_space_alloc()
  // outside of ArchiveCompactor::allocate(). These are usually for misc tables
  // that are allocated in the RO space.
  class OtherROAllocMark {
    char* _oldtop;
  public:
    OtherROAllocMark() {
      _oldtop = _ro_region.top();
    }
    ~OtherROAllocMark() {
      char* newtop = _ro_region.top();
      ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
    }
  };

  static void allocate(MetaspaceClosure::Ref* ref, bool read_only) {
    address obj = ref->obj();
    int bytes = ref->size() * BytesPerWord;
    char* p;
    size_t alignment = BytesPerWord;
    char* oldtop;
    char* newtop;

    if (read_only) {
      oldtop = _ro_region.top();
      p = _ro_region.allocate(bytes, alignment);
      newtop = _ro_region.top();
    } else {
      oldtop = _rw_region.top();
      if (ref->msotype() == MetaspaceObj::ClassType) {
        // Save a pointer immediately in front of an InstanceKlass, so
        // we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo*
        // without building another hashtable. See RunTimeSharedClassInfo::get_for()
        // in systemDictionaryShared.cpp.
        Klass* klass = (Klass*)obj;
        if (klass->is_instance_klass()) {
          SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
          _rw_region.allocate(sizeof(address), BytesPerWord);
        }
      }
      p = _rw_region.allocate(bytes, alignment);
      newtop = _rw_region.top();
    }
    memcpy(p, obj, bytes);

    intptr_t* cloned_vtable = MetaspaceShared::fix_cpp_vtable_for_dynamic_archive(ref->msotype(), (address)p);
    if (cloned_vtable != NULL) {
      *(address*)p = (address)cloned_vtable;
      ArchivePtrMarker::mark_pointer((address*)p);
    }

    assert(_new_loc_table->lookup(obj) == NULL, "each object can be relocated at most once");
    _new_loc_table->add(obj, (address)p);
    log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
    if (_new_loc_table->maybe_grow(MAX_TABLE_SIZE)) {
      log_info(cds, hashtables)("Expanded _new_loc_table to %d", _new_loc_table->table_size());
    }
    _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
  }

  static address get_new_loc(MetaspaceClosure::Ref* ref) {
    address* pp = _new_loc_table->lookup(ref->obj());
    assert(pp != NULL, "must be");
    return *pp;
  }

private:
  // Makes a shallow copy of visited MetaspaceObj's
  class ShallowCopier: public UniqueMetaspaceClosure {
    bool _read_only;
  public:
    ShallowCopier(bool read_only) : _read_only(read_only) {}

    virtual bool do_unique_ref(Ref* ref, bool read_only) {
      if (read_only == _read_only) {
        allocate(ref, read_only);
      }
      return true; // recurse into ref.obj()
    }
  };

  // Relocate embedded pointers within a MetaspaceObj's shallow copy
  class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
  public:
    virtual bool do_unique_ref(Ref* ref, bool read_only) {
      address new_loc = get_new_loc(ref);
      RefRelocator refer;
      ref->metaspace_pointers_do_at(&refer, new_loc);
      return true; // recurse into ref.obj()
    }
    virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) {
      assert(type == _method_entry_ref, "only special type allowed for now");
      address obj = ref->obj();
      address new_obj = get_new_loc(ref);
      size_t offset = pointer_delta(p, obj, sizeof(u1));
      intptr_t* new_p = (intptr_t*)(new_obj + offset);
      assert(*p == *new_p, "must be a copy");
      ArchivePtrMarker::mark_pointer((address*)new_p);
    }
  };

  // Relocate a reference to point to its shallow copy
  class RefRelocator: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        ref->update(get_new_loc(ref));
        ArchivePtrMarker::mark_pointer(ref->addr());
      }
      return false; // Do not recurse.
    }
  };

#ifdef ASSERT
  class IsRefInArchiveChecker: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        char* obj = (char*)ref->obj();
        assert(_ro_region.contains(obj) || _rw_region.contains(obj),
               "must be relocated to point to CDS archive");
      }
      return false; // Do not recurse.
    }
  };
#endif

public:
  static void copy_and_compact() {
    ResourceMark rm;
    SortedSymbolClosure the_ssc; // StackObj
    _ssc = &the_ssc;

    log_info(cds)("Scanning all metaspace objects ... ");
    {
      // allocate and shallow-copy RW objects, immediately following the MC region
      log_info(cds)("Allocating RW objects ... ");
      _mc_region.pack(&_rw_region);

      ResourceMark rm;
      ShallowCopier rw_copier(false);
      iterate_roots(&rw_copier);
    }
    {
      // allocate and shallow-copy RO objects, immediately following the RW region
      log_info(cds)("Allocating RO objects ... ");
      _rw_region.pack(&_ro_region);

      ResourceMark rm;
      ShallowCopier ro_copier(true);
      iterate_roots(&ro_copier);
    }
    {
      log_info(cds)("Relocating embedded pointers ... ");
      ResourceMark rm;
      ShallowCopyEmbeddedRefRelocator emb_reloc;
      iterate_roots(&emb_reloc);
    }
    {
      log_info(cds)("Relocating external roots ... ");
      ResourceMark rm;
      RefRelocator ext_reloc;
      iterate_roots(&ext_reloc);
    }
    {
      log_info(cds)("Fixing symbol identity hash ... ");
      os::init_random(0x12345678);
      GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols();
      for (int i = 0; i < symbols->length(); i++) {
        symbols->at(i)->update_identity_hash();
      }
    }
#ifdef ASSERT
    {
      log_info(cds)("Verifying external roots ... ");
      ResourceMark rm;
      IsRefInArchiveChecker checker;
      iterate_roots(&checker);
    }
#endif

    // cleanup
    _ssc = NULL;
  }

  // We must relocate the SystemDictionary::_well_known_klasses only after we have copied the
  // java objects in during dump_java_heap_objects(): during the object copy, we operate on
  // old objects which assert that their klass is the original klass.
  static void relocate_well_known_klasses() {
    {
      log_info(cds)("Relocating SystemDictionary::_well_known_klasses[] ... ");
      ResourceMark rm;
      RefRelocator ext_reloc;
      SystemDictionary::well_known_klasses_do(&ext_reloc);
    }
    // NOTE: after this point, we shouldn't have any globals that can reach the old
    // objects.

    // We cannot use any of the objects in the heap anymore (except for the
    // shared strings) because their headers no longer point to valid Klasses.
  }

  static void iterate_roots(MetaspaceClosure* it) {
    // To ensure deterministic contents in the archive, we just need to ensure that
    // we iterate the MetaspaceObjs in a deterministic order. It doesn't matter where
    // the MetaspaceObjs are located originally, as they are copied sequentially into
    // the archive during the iteration.
    //
    // The only issue here is that the symbol table and the system dictionaries may be
    // randomly ordered, so we copy the symbols and klasses into two arrays and sort
    // them deterministically.
    //
    // During -Xshare:dump, the order of Symbol creation is strictly determined by
    // the SharedClassListFile (class loading is done in a single thread and the JIT
    // is disabled). Also, Symbols are allocated in monotonically increasing addresses
    // (see Symbol::operator new(size_t, int)). So if we iterate the Symbols by
    // ascending address order, we ensure that all Symbols are copied into deterministic
    // locations in the archive.
    GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols();
    for (int i = 0; i < symbols->length(); i++) {
      it->push(symbols->adr_at(i));
    }
    if (_global_klass_objects != NULL) {
      // Need to fix up the pointers
      for (int i = 0; i < _global_klass_objects->length(); i++) {
        // NOTE -- this requires that the vtable is NOT yet patched, or else we are hosed.
        it->push(_global_klass_objects->adr_at(i));
      }
    }
    FileMapInfo::metaspace_pointers_do(it, false);
    SystemDictionaryShared::dumptime_classes_do(it);
    Universe::metaspace_pointers_do(it);
    SymbolTable::metaspace_pointers_do(it);
    vmSymbols::metaspace_pointers_do(it);

    it->finish();
  }

  static Klass* get_relocated_klass(Klass* orig_klass) {
    assert(DumpSharedSpaces, "dump time only");
    address* pp = _new_loc_table->lookup((address)orig_klass);
    assert(pp != NULL, "must be");
    Klass* klass = (Klass*)(*pp);
    assert(klass->is_klass(), "must be");
    return klass;
  }
};

DumpAllocStats* ArchiveCompactor::_alloc_stats;
SortedSymbolClosure* ArchiveCompactor::_ssc;
ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table;

void VM_PopulateDumpSharedSpace::dump_symbols() {
  log_info(cds)("Dumping symbol table ...");

  NOT_PRODUCT(SymbolTable::verify());
  SymbolTable::write_to_archive();
}

char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
  ArchiveCompactor::OtherROAllocMark mark;

  log_info(cds)("Removing java_mirror ... ");
  if (!HeapShared::is_heap_object_archiving_allowed()) {
    clear_basic_type_mirrors();
  }
  remove_java_mirror_in_classes();
  log_info(cds)("done. ");

  SystemDictionaryShared::write_to_archive();

  // Write the other data to the output array.
1566 char* start = _ro_region.top(); 1567 WriteClosure wc(&_ro_region); 1568 MetaspaceShared::serialize(&wc); 1569 1570 // Write the bitmaps for patching the archive heap regions 1571 _closed_archive_heap_oopmaps = NULL; 1572 _open_archive_heap_oopmaps = NULL; 1573 dump_archive_heap_oopmaps(); 1574 1575 return start; 1576 } 1577 1578 void VM_PopulateDumpSharedSpace::print_class_stats() { 1579 log_info(cds)("Number of classes %d", _global_klass_objects->length()); 1580 { 1581 int num_type_array = 0, num_obj_array = 0, num_inst = 0; 1582 for (int i = 0; i < _global_klass_objects->length(); i++) { 1583 Klass* k = _global_klass_objects->at(i); 1584 if (k->is_instance_klass()) { 1585 num_inst ++; 1586 } else if (k->is_objArray_klass()) { 1587 num_obj_array ++; 1588 } else { 1589 assert(k->is_typeArray_klass(), "sanity"); 1590 num_type_array ++; 1591 } 1592 } 1593 log_info(cds)(" instance classes = %5d", num_inst); 1594 log_info(cds)(" obj array classes = %5d", num_obj_array); 1595 log_info(cds)(" type array classes = %5d", num_type_array); 1596 } 1597 } 1598 1599 void VM_PopulateDumpSharedSpace::relocate_to_requested_base_address(CHeapBitMap* ptrmap) { 1600 intx addr_delta = MetaspaceShared::final_delta(); 1601 if (addr_delta == 0) { 1602 ArchivePtrMarker::compact((address)SharedBaseAddress, (address)_ro_region.top()); 1603 } else { 1604 // We are not able to reserve space at MetaspaceShared::requested_base_address() (due to ASLR). 1605 // This means that the current content of the archive is based on a random 1606 // address. Let's relocate all the pointers, so that it can be mapped to 1607 // MetaspaceShared::requested_base_address() without runtime relocation. 1608 // 1609 // Note: both the base and dynamic archive are written with 1610 // FileMapHeader::_requested_base_address == MetaspaceShared::requested_base_address() 1611 1612 // Patch all pointers that are marked by ptrmap within this region, 1613 // where we have just dumped all the metaspace data. 1614 address patch_base = (address)SharedBaseAddress; 1615 address patch_end = (address)_ro_region.top(); 1616 size_t size = patch_end - patch_base; 1617 1618 // the current value of the pointers to be patched must be within this 1619 // range (i.e., must point to valid metaspace objects) 1620 address valid_old_base = patch_base; 1621 address valid_old_end = patch_end; 1622 1623 // after patching, the pointers must point inside this range 1624 // (the requested location of the archive, as mapped at runtime). 1625 address valid_new_base = (address)MetaspaceShared::requested_base_address(); 1626 address valid_new_end = valid_new_base + size; 1627 1628 log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT " ] to " 1629 "[" INTPTR_FORMAT " - " INTPTR_FORMAT " ]", p2i(patch_base), p2i(patch_end), 1630 p2i(valid_new_base), p2i(valid_new_end)); 1631 1632 SharedDataRelocator<true> patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end, 1633 valid_new_base, valid_new_end, addr_delta, ptrmap); 1634 ptrmap->iterate(&patcher); 1635 ArchivePtrMarker::compact(patcher.max_non_null_offset()); 1636 } 1637 } 1638 1639 void VM_PopulateDumpSharedSpace::doit() { 1640 CHeapBitMap ptrmap; 1641 MetaspaceShared::initialize_ptr_marker(&ptrmap); 1642 1643 // We should no longer allocate anything from the metaspace, because: 1644 // 1645 // (1) Metaspace::allocate might trigger GC if we have run out of 1646 // committed metaspace, but we can't GC because we're running 1647 // in the VM thread.
1648 // (2) ArchiveCompactor needs to work with a stable set of MetaspaceObjs. 1649 Metaspace::freeze(); 1650 DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm); 1651 1652 Thread* THREAD = VMThread::vm_thread(); 1653 1654 FileMapInfo::check_nonempty_dir_in_shared_path_table(); 1655 1656 NOT_PRODUCT(SystemDictionary::verify();) 1657 // The following guarantee is meant to ensure that no loader constraints 1658 // exist yet, since the constraints table is not shared. This becomes 1659 // more important now that we don't re-initialize vtables/itables for 1660 // shared classes at runtime, where constraints were previously created. 1661 guarantee(SystemDictionary::constraints()->number_of_entries() == 0, 1662 "loader constraints are not saved"); 1663 guarantee(SystemDictionary::placeholders()->number_of_entries() == 0, 1664 "placeholders are not saved"); 1665 1666 // At this point, many classes have been loaded. 1667 // Gather the SystemDictionary classes into a global array and operate on 1668 // that, so we don't have to walk the SystemDictionary again. 1669 SystemDictionaryShared::check_excluded_classes(); 1670 _global_klass_objects = new GrowableArray<Klass*>(1000); 1671 CollectClassesClosure collect_classes; 1672 ClassLoaderDataGraph::loaded_classes_do(&collect_classes); 1673 _global_klass_objects->sort(global_klass_compare); 1674 1675 print_class_stats(); 1676 1677 // Ensure the ConstMethods won't be modified at run-time 1678 log_info(cds)("Updating ConstMethods ... "); 1679 rewrite_nofast_bytecodes_and_calculate_fingerprints(THREAD); 1680 log_info(cds)("done. "); 1681 1682 // Remove all references outside the metadata 1683 log_info(cds)("Removing unshareable information ... "); 1684 remove_unshareable_in_classes(); 1685 log_info(cds)("done. "); 1686 1687 MetaspaceShared::allocate_cloned_cpp_vtptrs(); 1688 char* cloned_vtables = _mc_region.top(); 1689 MetaspaceShared::allocate_cpp_vtable_clones(); 1690 1691 ArchiveCompactor::initialize(); 1692 ArchiveCompactor::copy_and_compact(); 1693 1694 dump_symbols(); 1695 1696 // Dump supported java heap objects 1697 _closed_archive_heap_regions = NULL; 1698 _open_archive_heap_regions = NULL; 1699 dump_java_heap_objects(); 1700 1701 ArchiveCompactor::relocate_well_known_klasses(); 1702 1703 char* serialized_data = dump_read_only_tables(); 1704 _ro_region.pack(); 1705 1706 // The vtable clones contain addresses of the current process. 1707 // We don't want to write these addresses into the archive. Same for i2i buffer. 1708 MetaspaceShared::zero_cpp_vtable_clones_for_writing(); 1709 memset(MetaspaceShared::i2i_entry_code_buffers(), 0, 1710 MetaspaceShared::i2i_entry_code_buffers_size()); 1711 1712 // relocate the data so that it can be mapped to MetaspaceShared::requested_base_address() 1713 // without runtime relocation. 1714 relocate_to_requested_base_address(&ptrmap); 1715 1716 // Create and write the archive file that maps the shared spaces.
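// The steps below populate the header, write out the mc/rw/ro core regions plus the bitmap region,
// then the closed/open archive heap regions, and finally store the CRC and write the header itself.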
1717 1718 FileMapInfo* mapinfo = new FileMapInfo(true); 1719 mapinfo->populate_header(os::vm_allocation_granularity()); 1720 mapinfo->set_serialized_data(serialized_data); 1721 mapinfo->set_cloned_vtables(cloned_vtables); 1722 mapinfo->set_i2i_entry_code_buffers(MetaspaceShared::i2i_entry_code_buffers(), 1723 MetaspaceShared::i2i_entry_code_buffers_size()); 1724 mapinfo->open_for_write(); 1725 MetaspaceShared::write_core_archive_regions(mapinfo, _closed_archive_heap_oopmaps, _open_archive_heap_oopmaps); 1726 _total_closed_archive_region_size = mapinfo->write_archive_heap_regions( 1727 _closed_archive_heap_regions, 1728 _closed_archive_heap_oopmaps, 1729 MetaspaceShared::first_closed_archive_heap_region, 1730 MetaspaceShared::max_closed_archive_heap_region); 1731 _total_open_archive_region_size = mapinfo->write_archive_heap_regions( 1732 _open_archive_heap_regions, 1733 _open_archive_heap_oopmaps, 1734 MetaspaceShared::first_open_archive_heap_region, 1735 MetaspaceShared::max_open_archive_heap_region); 1736 1737 mapinfo->set_final_requested_base((char*)MetaspaceShared::requested_base_address()); 1738 mapinfo->set_header_crc(mapinfo->compute_header_crc()); 1739 mapinfo->write_header(); 1740 print_region_stats(mapinfo); 1741 mapinfo->close(); 1742 1743 if (log_is_enabled(Info, cds)) { 1744 ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()), 1745 int(_mc_region.used())); 1746 } 1747 1748 if (PrintSystemDictionaryAtExit) { 1749 SystemDictionary::print(); 1750 } 1751 1752 if (AllowArchivingWithJavaAgent) { 1753 warning("This archive was created with AllowArchivingWithJavaAgent. It should be used " 1754 "for testing purposes only and should not be used in a production environment"); 1755 } 1756 1757 // There may be other pending VM operations that operate on the InstanceKlasses, 1758 // which will fail because InstanceKlass::remove_unshareable_info() 1759 // has been called. Forget these operations and exit the VM directly.
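// (vm_direct_exit() skips the normal VM shutdown sequence, so none of those pending
// operations will get a chance to run.)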
1760 vm_direct_exit(0); 1761 } 1762 1763 void VM_PopulateDumpSharedSpace::print_region_stats(FileMapInfo *map_info) { 1764 // Print statistics of all the regions 1765 const size_t bitmap_used = map_info->space_at(MetaspaceShared::bm)->used(); 1766 const size_t bitmap_reserved = map_info->space_at(MetaspaceShared::bm)->used_aligned(); 1767 const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() + 1768 _mc_region.reserved() + 1769 bitmap_reserved + 1770 _total_closed_archive_region_size + 1771 _total_open_archive_region_size; 1772 const size_t total_bytes = _ro_region.used() + _rw_region.used() + 1773 _mc_region.used() + 1774 bitmap_used + 1775 _total_closed_archive_region_size + 1776 _total_open_archive_region_size; 1777 const double total_u_perc = percent_of(total_bytes, total_reserved); 1778 1779 _mc_region.print(total_reserved); 1780 _rw_region.print(total_reserved); 1781 _ro_region.print(total_reserved); 1782 print_bitmap_region_stats(bitmap_used, total_reserved); 1783 print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved); 1784 print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved); 1785 1786 log_debug(cds)("total : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]", 1787 total_bytes, total_reserved, total_u_perc); 1788 } 1789 1790 void VM_PopulateDumpSharedSpace::print_bitmap_region_stats(size_t size, size_t total_size) { 1791 log_debug(cds)("bm space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used]", 1792 size, size/double(total_size)*100.0, size); 1793 } 1794 1795 void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem, 1796 const char *name, size_t total_size) { 1797 int arr_len = heap_mem == NULL ? 0 : heap_mem->length(); 1798 for (int i = 0; i < arr_len; i++) { 1799 char* start = (char*)heap_mem->at(i).start(); 1800 size_t size = heap_mem->at(i).byte_size(); 1801 char* top = start + size; 1802 log_debug(cds)("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT, 1803 name, i, size, size/double(total_size)*100.0, size, p2i(start)); 1804 1805 } 1806 } 1807 1808 void MetaspaceShared::write_core_archive_regions(FileMapInfo* mapinfo, 1809 GrowableArray<ArchiveHeapOopmapInfo>* closed_oopmaps, 1810 GrowableArray<ArchiveHeapOopmapInfo>* open_oopmaps) { 1811 // Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with 1812 // MetaspaceShared::n_regions (internal to hotspot). 1813 assert(NUM_CDS_REGIONS == MetaspaceShared::n_regions, "sanity"); 1814 1815 // mc contains the trampoline code for method entries, which are patched at run time, 1816 // so it needs to be read/write. 1817 write_region(mapinfo, mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true); 1818 write_region(mapinfo, rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false); 1819 write_region(mapinfo, ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false); 1820 mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), closed_oopmaps, open_oopmaps); 1821 } 1822 1823 void MetaspaceShared::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec) { 1824 mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec); 1825 } 1826 1827 // Update a Java object to point its Klass* to the new location after 1828 // shared archive has been compacted. 
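// For example, an archived String's header initially points to the original java.lang.String
// InstanceKlass; this rewrites it to point at the copy that now lives inside the archive, using
// ArchiveCompactor's relocation table.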
1829 void MetaspaceShared::relocate_klass_ptr(oop o) { 1830 assert(DumpSharedSpaces, "sanity"); 1831 Klass* k = ArchiveCompactor::get_relocated_klass(o->klass()); 1832 o->set_klass(k); 1833 } 1834 1835 Klass* MetaspaceShared::get_relocated_klass(Klass *k, bool is_final) { 1836 assert(DumpSharedSpaces, "sanity"); 1837 k = ArchiveCompactor::get_relocated_klass(k); 1838 if (is_final) { 1839 k = (Klass*)(address(k) + final_delta()); 1840 } 1841 return k; 1842 } 1843 1844 class LinkSharedClassesClosure : public KlassClosure { 1845 Thread* THREAD; 1846 bool _made_progress; 1847 public: 1848 LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {} 1849 1850 void reset() { _made_progress = false; } 1851 bool made_progress() const { return _made_progress; } 1852 1853 void do_klass(Klass* k) { 1854 if (k->is_instance_klass()) { 1855 InstanceKlass* ik = InstanceKlass::cast(k); 1856 // For dynamic CDS dump, only link classes loaded by the builtin class loaders. 1857 bool do_linking = DumpSharedSpaces ? true : !ik->is_shared_unregistered_class(); 1858 if (do_linking) { 1859 // Link the class to cause the bytecodes to be rewritten and the 1860 // cpcache to be created. Class verification is done according 1861 // to -Xverify setting. 1862 _made_progress |= MetaspaceShared::try_link_class(ik, THREAD); 1863 guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class"); 1864 1865 if (DumpSharedSpaces) { 1866 // The following call resolves all string constants in the statically 1867 // dumped classes, so that they can be archived. The archive heap is not supported 1868 // for the dynamic archive. 1869 ik->constants()->resolve_class_constants(THREAD); 1870 } 1871 } 1872 } 1873 } 1874 }; 1875 1876 void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) { 1877 // We need to iterate because verification may cause additional classes 1878 // to be loaded. 1879 LinkSharedClassesClosure link_closure(THREAD); 1880 do { 1881 link_closure.reset(); 1882 ClassLoaderDataGraph::unlocked_loaded_classes_do(&link_closure); 1883 guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class"); 1884 } while (link_closure.made_progress()); 1885 } 1886 1887 void MetaspaceShared::prepare_for_dumping() { 1888 Arguments::check_unsupported_dumping_properties(); 1889 ClassLoader::initialize_shared_path(); 1890 } 1891 1892 // Preload classes from a list, populate the shared spaces and dump to a 1893 // file. 1894 void MetaspaceShared::preload_and_dump(TRAPS) { 1895 { TraceTime timer("Dump Shared Spaces", TRACETIME_LOG(Info, startuptime)); 1896 ResourceMark rm(THREAD); 1897 char class_list_path_str[JVM_MAXPATHLEN]; 1898 // Preload classes to be shared.
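// If -XX:SharedClassListFile is not specified, the block below derives the default list that
// ships with the JDK (typically <JAVA_HOME>/lib/classlist) from the location of the JVM itself.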
1899 const char* class_list_path; 1900 if (SharedClassListFile == NULL) { 1901 // Construct the path to the default class list (in <JAVA_HOME>/lib) 1902 // Walk up two directories from the location of the VM and 1903 // optionally tack on "lib" (depending on platform) 1904 os::jvm_path(class_list_path_str, sizeof(class_list_path_str)); 1905 for (int i = 0; i < 3; i++) { 1906 char *end = strrchr(class_list_path_str, *os::file_separator()); 1907 if (end != NULL) *end = '\0'; 1908 } 1909 int class_list_path_len = (int)strlen(class_list_path_str); 1910 if (class_list_path_len >= 3) { 1911 if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) { 1912 if (class_list_path_len < JVM_MAXPATHLEN - 4) { 1913 jio_snprintf(class_list_path_str + class_list_path_len, 1914 sizeof(class_list_path_str) - class_list_path_len, 1915 "%slib", os::file_separator()); 1916 class_list_path_len += 4; 1917 } 1918 } 1919 } 1920 if (class_list_path_len < JVM_MAXPATHLEN - 10) { 1921 jio_snprintf(class_list_path_str + class_list_path_len, 1922 sizeof(class_list_path_str) - class_list_path_len, 1923 "%sclasslist", os::file_separator()); 1924 } 1925 class_list_path = class_list_path_str; 1926 } else { 1927 class_list_path = SharedClassListFile; 1928 } 1929 1930 log_info(cds)("Loading classes to share ..."); 1931 _has_error_classes = false; 1932 int class_count = preload_classes(class_list_path, THREAD); 1933 if (ExtraSharedClassListFile) { 1934 class_count += preload_classes(ExtraSharedClassListFile, THREAD); 1935 } 1936 log_info(cds)("Loading classes to share: done."); 1937 1938 log_info(cds)("Shared spaces: preloaded %d classes", class_count); 1939 1940 if (SharedArchiveConfigFile) { 1941 log_info(cds)("Reading extra data from %s ...", SharedArchiveConfigFile); 1942 read_extra_data(SharedArchiveConfigFile, THREAD); 1943 } 1944 log_info(cds)("Reading extra data: done."); 1945 1946 HeapShared::init_subgraph_entry_fields(THREAD); 1947 1948 // Rewrite and link classes 1949 log_info(cds)("Rewriting and linking classes ..."); 1950 1951 // Link any classes which got missed. This would happen if we have loaded classes that 1952 // were not explicitly specified in the classlist. E.g., if an interface implemented by class K 1953 // fails verification, all other interfaces that were not specified in the classlist but 1954 // are implemented by K are not verified. 1955 link_and_cleanup_shared_classes(CATCH); 1956 log_info(cds)("Rewriting and linking classes: done"); 1957 1958 if (HeapShared::is_heap_object_archiving_allowed()) { 1959 // Avoid fragmentation while archiving heap objects.
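// Forcing a full collection with soft references cleared compacts the Java heap, so the
// objects archived by the VM operation below come from a dense, stable layout.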
1960 Universe::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(true); 1961 Universe::heap()->collect(GCCause::_archive_time_gc); 1962 Universe::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(false); 1963 } 1964 1965 VM_PopulateDumpSharedSpace op; 1966 VMThread::execute(&op); 1967 } 1968 } 1969 1970 1971 int MetaspaceShared::preload_classes(const char* class_list_path, TRAPS) { 1972 ClassListParser parser(class_list_path); 1973 int class_count = 0; 1974 1975 while (parser.parse_one_line()) { 1976 Klass* klass = parser.load_current_class(THREAD); 1977 if (HAS_PENDING_EXCEPTION) { 1978 if (klass == NULL && 1979 (PENDING_EXCEPTION->klass()->name() == vmSymbols::java_lang_ClassNotFoundException())) { 1980 // print a warning only when the pending exception is class not found 1981 log_warning(cds)("Preload Warning: Cannot find %s", parser.current_class_name()); 1982 } 1983 CLEAR_PENDING_EXCEPTION; 1984 } 1985 if (klass != NULL) { 1986 if (log_is_enabled(Trace, cds)) { 1987 ResourceMark rm(THREAD); 1988 log_trace(cds)("Shared spaces preloaded: %s", klass->external_name()); 1989 } 1990 1991 if (klass->is_instance_klass()) { 1992 InstanceKlass* ik = InstanceKlass::cast(klass); 1993 1994 // Link the class to cause the bytecodes to be rewritten and the 1995 // cpcache to be created. The linking is done as soon as classes 1996 // are loaded so that the related data structures (klass and 1997 // cpCache) are located close together. 1998 try_link_class(ik, THREAD); 1999 guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class"); 2000 } 2001 2002 class_count++; 2003 } 2004 } 2005 2006 return class_count; 2007 } 2008 2009 // Returns true if the class's status has changed 2010 bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) { 2011 Arguments::assert_is_dumping_archive(); 2012 if (ik->init_state() < InstanceKlass::linked && 2013 !SystemDictionaryShared::has_class_failed_verification(ik)) { 2014 bool saved = BytecodeVerificationLocal; 2015 if (ik->is_shared_unregistered_class() && ik->class_loader() == NULL) { 2016 // The verification decision is based on BytecodeVerificationRemote 2017 // for non-system classes. Since we are using the NULL classloader 2018 // to load non-system classes for customized class loaders during dumping, 2019 // we need to temporarily change BytecodeVerificationLocal to be the same as 2020 // BytecodeVerificationRemote. Note that this can cause the parent system 2021 // classes to also be verified. The extra overhead is acceptable during 2022 // dumping. 2023 BytecodeVerificationLocal = BytecodeVerificationRemote; 2024 } 2025 ik->link_class(THREAD); 2026 if (HAS_PENDING_EXCEPTION) { 2027 ResourceMark rm(THREAD); 2028 log_warning(cds)("Preload Warning: Verification failed for %s", 2029 ik->external_name()); 2030 CLEAR_PENDING_EXCEPTION; 2031 SystemDictionaryShared::set_class_has_failed_verification(ik); 2032 _has_error_classes = true; 2033 } 2034 BytecodeVerificationLocal = saved; 2035 return true; 2036 } else { 2037 return false; 2038 } 2039 } 2040 2041 #if INCLUDE_CDS_JAVA_HEAP 2042 void VM_PopulateDumpSharedSpace::dump_java_heap_objects() { 2043 // The closed and open archive heap spaces each have a maximum of two regions. 2044 // See FileMapInfo::write_archive_heap_regions() for details.
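// (Closed regions hold objects that are never modified at runtime, such as archived interned
// strings; open regions hold objects whose fields may still be written after the archive is mapped.)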
2045 _closed_archive_heap_regions = new GrowableArray<MemRegion>(2); 2046 _open_archive_heap_regions = new GrowableArray<MemRegion>(2); 2047 HeapShared::archive_java_heap_objects(_closed_archive_heap_regions, 2048 _open_archive_heap_regions); 2049 ArchiveCompactor::OtherROAllocMark mark; 2050 HeapShared::write_subgraph_info_table(); 2051 } 2052 2053 void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps() { 2054 if (HeapShared::is_heap_object_archiving_allowed()) { 2055 _closed_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2); 2056 dump_archive_heap_oopmaps(_closed_archive_heap_regions, _closed_archive_heap_oopmaps); 2057 2058 _open_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2); 2059 dump_archive_heap_oopmaps(_open_archive_heap_regions, _open_archive_heap_oopmaps); 2060 } 2061 } 2062 2063 void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions, 2064 GrowableArray<ArchiveHeapOopmapInfo>* oopmaps) { 2065 for (int i=0; i<regions->length(); i++) { 2066 ResourceBitMap oopmap = HeapShared::calculate_oopmap(regions->at(i)); 2067 size_t size_in_bits = oopmap.size(); 2068 size_t size_in_bytes = oopmap.size_in_bytes(); 2069 uintptr_t* buffer = (uintptr_t*)NEW_C_HEAP_ARRAY(char, size_in_bytes, mtInternal); 2070 oopmap.write_to(buffer, size_in_bytes); 2071 log_info(cds, heap)("Oopmap = " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for heap region " 2072 INTPTR_FORMAT " (" SIZE_FORMAT_W(8) " bytes)", 2073 p2i(buffer), size_in_bytes, 2074 p2i(regions->at(i).start()), regions->at(i).byte_size()); 2075 2076 ArchiveHeapOopmapInfo info; 2077 info._oopmap = (address)buffer; 2078 info._oopmap_size_in_bits = size_in_bits; 2079 info._oopmap_size_in_bytes = size_in_bytes; 2080 oopmaps->append(info); 2081 } 2082 } 2083 #endif // INCLUDE_CDS_JAVA_HEAP 2084 2085 void ReadClosure::do_ptr(void** p) { 2086 assert(*p == NULL, "initializing previously initialized pointer."); 2087 intptr_t obj = nextPtr(); 2088 assert((intptr_t)obj >= 0 || (intptr_t)obj < -100, 2089 "hit tag while initializing ptrs."); 2090 *p = (void*)obj; 2091 } 2092 2093 void ReadClosure::do_u4(u4* p) { 2094 intptr_t obj = nextPtr(); 2095 *p = (u4)(uintx(obj)); 2096 } 2097 2098 void ReadClosure::do_bool(bool* p) { 2099 intptr_t obj = nextPtr(); 2100 *p = (bool)(uintx(obj)); 2101 } 2102 2103 void ReadClosure::do_tag(int tag) { 2104 int old_tag; 2105 old_tag = (int)(intptr_t)nextPtr(); 2106 // do_int(&old_tag); 2107 assert(tag == old_tag, "old tag doesn't match"); 2108 FileMapInfo::assert_mark(tag == old_tag); 2109 } 2110 2111 void ReadClosure::do_oop(oop *p) { 2112 narrowOop o = (narrowOop)nextPtr(); 2113 if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) { 2114 *p = NULL; 2115 } else { 2116 assert(HeapShared::is_heap_object_archiving_allowed(), 2117 "Archived heap object is not allowed"); 2118 assert(HeapShared::open_archive_heap_region_mapped(), 2119 "Open archive heap region is not mapped"); 2120 *p = HeapShared::decode_from_archive(o); 2121 } 2122 } 2123 2124 void ReadClosure::do_region(u_char* start, size_t size) { 2125 assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment"); 2126 assert(size % sizeof(intptr_t) == 0, "bad size"); 2127 do_tag((int)size); 2128 while (size > 0) { 2129 *(intptr_t*)start = nextPtr(); 2130 start += sizeof(intptr_t); 2131 size -= sizeof(intptr_t); 2132 } 2133 } 2134 2135 void MetaspaceShared::set_shared_metaspace_range(void* base, void *static_top, void* top) { 2136 assert(base <= static_top && static_top <= top, "must be");
2137 _shared_metaspace_static_top = static_top; 2138 MetaspaceObj::set_shared_metaspace_range(base, top); 2139 } 2140 2141 // Return true if the given address is within the shared region of the given index 2142 bool MetaspaceShared::is_in_shared_region(const void* p, int idx) { 2143 return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx); 2144 } 2145 2146 bool MetaspaceShared::is_in_trampoline_frame(address addr) { 2147 if (UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc)) { 2148 return true; 2149 } 2150 return false; 2151 } 2152 2153 bool MetaspaceShared::is_shared_dynamic(void* p) { 2154 if ((p < MetaspaceObj::shared_metaspace_top()) && 2155 (p >= _shared_metaspace_static_top)) { 2156 return true; 2157 } else { 2158 return false; 2159 } 2160 } 2161 2162 void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() { 2163 assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled"); 2164 MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE; 2165 2166 FileMapInfo* static_mapinfo = open_static_archive(); 2167 FileMapInfo* dynamic_mapinfo = NULL; 2168 2169 if (static_mapinfo != NULL) { 2170 dynamic_mapinfo = open_dynamic_archive(); 2171 2172 // First try to map at the requested address 2173 result = map_archives(static_mapinfo, dynamic_mapinfo, true); 2174 if (result == MAP_ARCHIVE_MMAP_FAILURE) { 2175 // Mapping has failed (probably due to ASLR). Let's map at an address chosen 2176 // by the OS. 2177 log_info(cds)("Try to map archive(s) at an alternative address"); 2178 result = map_archives(static_mapinfo, dynamic_mapinfo, false); 2179 } 2180 } 2181 2182 if (result == MAP_ARCHIVE_SUCCESS) { 2183 bool dynamic_mapped = (dynamic_mapinfo != NULL && dynamic_mapinfo->is_mapped()); 2184 char* cds_base = static_mapinfo->mapped_base(); 2185 char* cds_end = dynamic_mapped ? dynamic_mapinfo->mapped_end() : static_mapinfo->mapped_end(); 2186 set_shared_metaspace_range(cds_base, static_mapinfo->mapped_end(), cds_end); 2187 _relocation_delta = static_mapinfo->relocation_delta(); 2188 if (dynamic_mapped) { 2189 FileMapInfo::set_shared_path_table(dynamic_mapinfo); 2190 } else { 2191 FileMapInfo::set_shared_path_table(static_mapinfo); 2192 } 2193 _requested_base_address = static_mapinfo->requested_base_address(); 2194 } else { 2195 set_shared_metaspace_range(NULL, NULL, NULL); 2196 UseSharedSpaces = false; 2197 FileMapInfo::fail_continue("Unable to map shared spaces"); 2198 if (PrintSharedArchiveAndExit) { 2199 vm_exit_during_initialization("Unable to use shared archive."); 2200 } 2201 } 2202 2203 if (static_mapinfo != NULL && !static_mapinfo->is_mapped()) { 2204 delete static_mapinfo; 2205 } 2206 if (dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped()) { 2207 delete dynamic_mapinfo; 2208 } 2209 } 2210 2211 FileMapInfo* MetaspaceShared::open_static_archive() { 2212 FileMapInfo* mapinfo = new FileMapInfo(true); 2213 if (!mapinfo->initialize()) { 2214 delete(mapinfo); 2215 return NULL; 2216 } 2217 return mapinfo; 2218 } 2219 2220 FileMapInfo* MetaspaceShared::open_dynamic_archive() { 2221 if (DynamicDumpSharedSpaces) { 2222 return NULL; 2223 } 2224 if (Arguments::GetSharedDynamicArchivePath() == NULL) { 2225 return NULL; 2226 } 2227 2228 FileMapInfo* mapinfo = new FileMapInfo(false); 2229 if (!mapinfo->initialize()) { 2230 delete(mapinfo); 2231 return NULL; 2232 } 2233 return mapinfo; 2234 } 2235 2236 // use_requested_addr: 2237 // true = map at FileMapHeader::_requested_base_address 2238 // false = map at an alternative address picked by OS.
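// initialize_runtime_shared_and_meta_spaces() above calls this function up to twice: first with
// use_requested_addr == true, then, on MAP_ARCHIVE_MMAP_FAILURE, again at an os-chosen address.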
2239 MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, FileMapInfo* dynamic_mapinfo, 2240 bool use_requested_addr) { 2241 if (use_requested_addr && static_mapinfo->requested_base_address() == NULL) { 2242 log_info(cds)("Archive(s) were created with -XX:SharedBaseAddress=0. Always map at os-selected address."); 2243 return MAP_ARCHIVE_MMAP_FAILURE; 2244 } 2245 2246 PRODUCT_ONLY(if (ArchiveRelocationMode == 1 && use_requested_addr) { 2247 // For product build only -- this is for benchmarking the cost of doing relocation. 2248 // For debug builds, the check is done below, after reserving the space, for better test coverage 2249 // (see comment below). 2250 log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address"); 2251 return MAP_ARCHIVE_MMAP_FAILURE; 2252 }); 2253 2254 if (ArchiveRelocationMode == 2 && !use_requested_addr) { 2255 log_info(cds)("ArchiveRelocationMode == 2: never map archive(s) at an alternative address"); 2256 return MAP_ARCHIVE_MMAP_FAILURE; 2257 }; 2258 2259 if (dynamic_mapinfo != NULL) { 2260 // Ensure that the OS won't be able to allocate new memory spaces between the two 2261 // archives, or else it would mess up the simple comparison in MetaspaceObj::is_shared(). 2262 assert(static_mapinfo->mapping_end_offset() == dynamic_mapinfo->mapping_base_offset(), "no gap"); 2263 } 2264 2265 ReservedSpace archive_space_rs, class_space_rs; 2266 MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE; 2267 char* mapped_base_address = reserve_address_space_for_archives(static_mapinfo, dynamic_mapinfo, 2268 use_requested_addr, archive_space_rs, 2269 class_space_rs); 2270 if (mapped_base_address == NULL) { 2271 result = MAP_ARCHIVE_MMAP_FAILURE; 2272 } else { 2273 2274 #ifdef ASSERT 2275 // Some sanity checks after reserving address spaces for archives 2276 // and class space. 2277 assert(archive_space_rs.is_reserved(), "Sanity"); 2278 if (Metaspace::using_class_space()) { 2279 // Class space must closely follow the archive space. Both spaces 2280 // must be aligned correctly. 2281 assert(class_space_rs.is_reserved(), 2282 "A class space should have been reserved"); 2283 assert(class_space_rs.base() >= archive_space_rs.end(), 2284 "class space should follow the cds archive space"); 2285 assert(is_aligned(archive_space_rs.base(), 2286 MetaspaceShared::reserved_space_alignment()), 2287 "Archive space misaligned"); 2288 assert(is_aligned(class_space_rs.base(), 2289 Metaspace::reserve_alignment()), 2290 "class space misaligned"); 2291 } 2292 #endif // ASSERT 2293 2294 log_debug(cds)("Reserved archive_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes", 2295 p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size()); 2296 log_debug(cds)("Reserved class_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes", 2297 p2i(class_space_rs.base()), p2i(class_space_rs.end()), class_space_rs.size()); 2298 2299 if (MetaspaceShared::use_windows_memory_mapping()) { 2300 // We have now reserved address space for the archives, and will map 2301 // the archive files into this space. 2302 // 2303 // Special handling for Windows: on Windows we cannot map a file view 2304 // into an existing memory mapping. So, we unmap the address range we 2305 // just reserved again, which will make it available for mapping the 2306 // archives. 2307 // Reserving this range has not been for naught, however, since it makes 2308 // us reasonably sure the address range is available.
2309 // 2310 // But still it may fail, since between unmapping the range and mapping 2311 // in the archive someone else may grab the address space. Therefore 2312 // there is a fallback in FileMap::map_region() where we just read in 2313 // the archive files sequentially instead of mapping them in. We couple 2314 // this with use_requested_addr, since we're going to patch all the 2315 // pointers anyway so there's no benefit to mmap. 2316 if (use_requested_addr) { 2317 log_info(cds)("Windows mmap workaround: releasing archive space."); 2318 archive_space_rs.release(); 2319 } 2320 } 2321 MapArchiveResult static_result = map_archive(static_mapinfo, mapped_base_address, archive_space_rs); 2322 MapArchiveResult dynamic_result = (static_result == MAP_ARCHIVE_SUCCESS) ? 2323 map_archive(dynamic_mapinfo, mapped_base_address, archive_space_rs) : MAP_ARCHIVE_OTHER_FAILURE; 2324 2325 DEBUG_ONLY(if (ArchiveRelocationMode == 1 && use_requested_addr) { 2326 // This is for simulating mmap failures at the requested address. In 2327 // debug builds, we do it here (after all archives have possibly been 2328 // mapped), so we can thoroughly test the code for failure handling 2329 // (releasing all allocated resources, etc.). 2330 log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address"); 2331 if (static_result == MAP_ARCHIVE_SUCCESS) { 2332 static_result = MAP_ARCHIVE_MMAP_FAILURE; 2333 } 2334 if (dynamic_result == MAP_ARCHIVE_SUCCESS) { 2335 dynamic_result = MAP_ARCHIVE_MMAP_FAILURE; 2336 } 2337 }); 2338 2339 if (static_result == MAP_ARCHIVE_SUCCESS) { 2340 if (dynamic_result == MAP_ARCHIVE_SUCCESS) { 2341 result = MAP_ARCHIVE_SUCCESS; 2342 } else if (dynamic_result == MAP_ARCHIVE_OTHER_FAILURE) { 2343 assert(dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped(), "must have failed"); 2344 // No need to retry mapping the dynamic archive, as it will never succeed 2345 // (bad file, etc) -- just keep the base archive. 2346 log_warning(cds, dynamic)("Unable to use shared archive. The top archive failed to load: %s", 2347 dynamic_mapinfo->full_path()); 2348 result = MAP_ARCHIVE_SUCCESS; 2349 // TODO, we can give the unused space for the dynamic archive to class_space_rs, but there's no 2350 // easy API to do that right now. 2351 } else { 2352 result = MAP_ARCHIVE_MMAP_FAILURE; 2353 } 2354 } else if (static_result == MAP_ARCHIVE_OTHER_FAILURE) { 2355 result = MAP_ARCHIVE_OTHER_FAILURE; 2356 } else { 2357 result = MAP_ARCHIVE_MMAP_FAILURE; 2358 } 2359 } 2360 2361 if (result == MAP_ARCHIVE_SUCCESS) { 2362 SharedBaseAddress = (size_t)mapped_base_address; 2363 LP64_ONLY({ 2364 if (Metaspace::using_class_space()) { 2365 // Set up ccs in metaspace. 2366 Metaspace::initialize_class_space(class_space_rs); 2367 2368 // Set up compressed Klass pointer encoding: the encoding range must 2369 // cover both archive and class space. 2370 address cds_base = (address)static_mapinfo->mapped_base(); 2371 address ccs_end = (address)class_space_rs.end(); 2372 CompressedKlassPointers::initialize(cds_base, ccs_end - cds_base); 2373 2374 // map_heap_regions() compares the current narrow oop and klass encodings 2375 // with the archived ones, so it must be done after all encodings are determined.
2376 static_mapinfo->map_heap_regions(); 2377 } 2378 }); 2379 } else { 2380 unmap_archive(static_mapinfo); 2381 unmap_archive(dynamic_mapinfo); 2382 release_reserved_spaces(archive_space_rs, class_space_rs); 2383 } 2384 2385 return result; 2386 } 2387 2388 2389 // This will reserve two address spaces suitable to house Klass structures, one 2390 // for the cds archives (static archive and optionally dynamic archive) and 2391 // optionally one more for ccs. 2392 // 2393 // Since both spaces must fall within the compressed class pointer encoding 2394 // range, they are allocated close to each other. 2395 // 2396 // Space for archives will be reserved first, followed by a potential gap, 2397 // followed by the space for ccs: 2398 // 2399 // +-- Base address A B End 2400 // | | | | 2401 // v v v v 2402 // +-------------+--------------+ +----------------------+ 2403 // | static arc | [dyn. arch] | [gap] | compr. class space | 2404 // +-------------+--------------+ +----------------------+ 2405 // 2406 // (The gap may result from different alignment requirements between metaspace 2407 // and CDS) 2408 // 2409 // If UseCompressedClassPointers is disabled, only one address space will be 2410 // reserved: 2411 // 2412 // +-- Base address End 2413 // | | 2414 // v v 2415 // +-------------+--------------+ 2416 // | static arc | [dyn. arch] | 2417 // +-------------+--------------+ 2418 // 2419 // Base address: If use_archive_base_addr is true, the Base address is 2420 // determined by the address stored in the static archive. If 2421 // use_archive_base_addr is false, this base address is determined 2422 // by the platform. 2423 // 2424 // If UseCompressedClassPointers=1, the range encompassing both spaces will be 2425 // suitable to en/decode narrow Klass pointers: the base will be valid for 2426 // encoding, and the range [Base, End) must not surpass KlassEncodingMetaspaceMax. 2427 // 2428 // Return: 2429 // 2430 // - On success: 2431 // - archive_space_rs will be reserved and large enough to host the static and, 2432 // if needed, the dynamic archive: [Base, A). 2433 // archive_space_rs.base and size will be aligned to CDS reserve 2434 // granularity. 2435 // - class_space_rs: If UseCompressedClassPointers=1, class_space_rs will 2436 // be reserved. Its start address will be aligned to metaspace reserve 2437 // alignment, which may differ from CDS alignment. It will follow the cds 2438 // archive space, close enough such that narrow class pointer encoding 2439 // covers both spaces. 2440 // If UseCompressedClassPointers=0, class_space_rs remains unreserved. 2441 // - On error: NULL is returned and the spaces remain unreserved. 2442 char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_mapinfo, 2443 FileMapInfo* dynamic_mapinfo, 2444 bool use_archive_base_addr, 2445 ReservedSpace& archive_space_rs, 2446 ReservedSpace& class_space_rs) { 2447 2448 address const base_address = (address) (use_archive_base_addr ? static_mapinfo->requested_base_address() : NULL); 2449 const size_t archive_space_alignment = MetaspaceShared::reserved_space_alignment(); 2450 2451 // Size and requested location of the archive_space_rs (for both static and dynamic archives) 2452 assert(static_mapinfo->mapping_base_offset() == 0, "Must be"); 2453 size_t archive_end_offset = (dynamic_mapinfo == NULL) ?
static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset(); 2454 size_t archive_space_size = align_up(archive_end_offset, archive_space_alignment); 2455 2456 // If a base address is given, it must have valid alignment and be suitable as encoding base. 2457 if (base_address != NULL) { 2458 assert(is_aligned(base_address, archive_space_alignment), 2459 "Archive base address invalid: " PTR_FORMAT ".", p2i(base_address)); 2460 if (Metaspace::using_class_space()) { 2461 assert(CompressedKlassPointers::is_valid_base(base_address), 2462 "Archive base address invalid: " PTR_FORMAT ".", p2i(base_address)); 2463 } 2464 } 2465 2466 if (!Metaspace::using_class_space()) { 2467 // Get the simple case out of the way first: 2468 // no compressed class space, simple allocation. 2469 archive_space_rs = ReservedSpace(archive_space_size, archive_space_alignment, 2470 false /* bool large */, (char*)base_address); 2471 if (archive_space_rs.is_reserved()) { 2472 assert(base_address == NULL || 2473 (address)archive_space_rs.base() == base_address, "Sanity"); 2474 return archive_space_rs.base(); 2475 } 2476 return NULL; 2477 } 2478 2479 #ifdef _LP64 2480 2481 // Complex case: two spaces adjacent to each other, both to be addressable 2482 // with narrow class pointers. 2483 // We reserve the whole range spanning both spaces, then split that range up. 2484 2485 const size_t class_space_alignment = Metaspace::reserve_alignment(); 2486 2487 // To simplify matters, let's assume that metaspace alignment will always be 2488 // equal to or a multiple of the archive alignment. 2489 assert(is_power_of_2(class_space_alignment) && 2490 is_power_of_2(archive_space_alignment) && 2491 class_space_alignment >= archive_space_alignment, 2492 "Sanity"); 2493 2494 const size_t class_space_size = CompressedClassSpaceSize; 2495 assert(CompressedClassSpaceSize > 0 && 2496 is_aligned(CompressedClassSpaceSize, class_space_alignment), 2497 "CompressedClassSpaceSize malformed: " 2498 SIZE_FORMAT, CompressedClassSpaceSize); 2499 2500 const size_t ccs_begin_offset = align_up(archive_space_size, 2501 class_space_alignment); 2502 const size_t gap_size = ccs_begin_offset - archive_space_size; 2503 2504 const size_t total_range_size = 2505 align_up(archive_space_size + gap_size + class_space_size, 2506 os::vm_allocation_granularity()); 2507 2508 ReservedSpace total_rs; 2509 if (base_address != NULL) { 2510 // Reserve at the given archive base address, or not at all. 2511 total_rs = ReservedSpace(total_range_size, archive_space_alignment, 2512 false /* bool large */, (char*) base_address); 2513 } else { 2514 // Reserve at any address, but leave it up to the platform to choose a good one. 2515 total_rs = Metaspace::reserve_address_space_for_compressed_classes(total_range_size); 2516 } 2517 2518 if (!total_rs.is_reserved()) { 2519 return NULL; 2520 } 2521 2522 // Paranoid checks: 2523 assert(base_address == NULL || (address)total_rs.base() == base_address, 2524 "Sanity (" PTR_FORMAT " vs " PTR_FORMAT ")", p2i(base_address), p2i(total_rs.base())); 2525 assert(is_aligned(total_rs.base(), archive_space_alignment), "Sanity"); 2526 assert(total_rs.size() == total_range_size, "Sanity"); 2527 assert(CompressedKlassPointers::is_valid_base((address)total_rs.base()), "Sanity"); 2528 2529 // Now split up the space into ccs and cds archive. For simplicity, just leave 2530 // the gap reserved at the end of the archive space.
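// (Illustrative numbers only: if archive_space_size were 36M and class_space_alignment 16M,
// ccs_begin_offset would round up to 48M, leaving a 12M gap reserved at the end of the
// archive space.)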
2531 archive_space_rs = total_rs.first_part(ccs_begin_offset, 2532 (size_t)os::vm_allocation_granularity(), 2533 /*split=*/true); 2534 class_space_rs = total_rs.last_part(ccs_begin_offset); 2535 2536 assert(is_aligned(archive_space_rs.base(), archive_space_alignment), "Sanity"); 2537 assert(is_aligned(archive_space_rs.size(), archive_space_alignment), "Sanity"); 2538 assert(is_aligned(class_space_rs.base(), class_space_alignment), "Sanity"); 2539 assert(is_aligned(class_space_rs.size(), class_space_alignment), "Sanity"); 2540 2541 return archive_space_rs.base(); 2542 2543 #else 2544 ShouldNotReachHere(); 2545 return NULL; 2546 #endif 2547 2548 } 2549 2550 void MetaspaceShared::release_reserved_spaces(ReservedSpace& archive_space_rs, 2551 ReservedSpace& class_space_rs) { 2552 if (archive_space_rs.is_reserved()) { 2553 log_debug(cds)("Released shared space (archive) " INTPTR_FORMAT, p2i(archive_space_rs.base())); 2554 archive_space_rs.release(); 2555 } 2556 if (class_space_rs.is_reserved()) { 2557 log_debug(cds)("Released shared space (classes) " INTPTR_FORMAT, p2i(class_space_rs.base())); 2558 class_space_rs.release(); 2559 } 2560 } 2561 2562 static int archive_regions[] = {MetaspaceShared::mc, 2563 MetaspaceShared::rw, 2564 MetaspaceShared::ro}; 2565 static int archive_regions_count = 3; 2566 2567 MapArchiveResult MetaspaceShared::map_archive(FileMapInfo* mapinfo, char* mapped_base_address, ReservedSpace rs) { 2568 assert(UseSharedSpaces, "must be runtime"); 2569 if (mapinfo == NULL) { 2570 return MAP_ARCHIVE_SUCCESS; // The dynamic archive has not been specified. No error has happened -- trivially succeeded. 2571 } 2572 2573 mapinfo->set_is_mapped(false); 2574 2575 if (mapinfo->alignment() != (size_t)os::vm_allocation_granularity()) { 2576 log_error(cds)("Unable to map CDS archive -- os::vm_allocation_granularity() expected: " SIZE_FORMAT 2577 " actual: %d", mapinfo->alignment(), os::vm_allocation_granularity()); 2578 return MAP_ARCHIVE_OTHER_FAILURE; 2579 } 2580 2581 MapArchiveResult result = 2582 mapinfo->map_regions(archive_regions, archive_regions_count, mapped_base_address, rs); 2583 2584 if (result != MAP_ARCHIVE_SUCCESS) { 2585 unmap_archive(mapinfo); 2586 return result; 2587 } 2588 2589 if (mapinfo->is_static()) { 2590 if (!mapinfo->validate_shared_path_table()) { 2591 unmap_archive(mapinfo); 2592 return MAP_ARCHIVE_OTHER_FAILURE; 2593 } 2594 } else { 2595 if (!DynamicArchive::validate(mapinfo)) { 2596 unmap_archive(mapinfo); 2597 return MAP_ARCHIVE_OTHER_FAILURE; 2598 } 2599 } 2600 2601 mapinfo->set_is_mapped(true); 2602 return MAP_ARCHIVE_SUCCESS; 2603 } 2604 2605 void MetaspaceShared::unmap_archive(FileMapInfo* mapinfo) { 2606 assert(UseSharedSpaces, "must be runtime"); 2607 if (mapinfo != NULL) { 2608 mapinfo->unmap_regions(archive_regions, archive_regions_count); 2609 mapinfo->set_is_mapped(false); 2610 } 2611 } 2612 2613 // Read the miscellaneous data from the shared file, and 2614 // serialize it out to its various destinations. 
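// This is the runtime mirror of VM_PopulateDumpSharedSpace::dump_read_only_tables(): the same
// serialize() routine runs again, but with a ReadClosure that fills in each destination from
// the mapped ro region instead of writing it out.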
2615 2616 void MetaspaceShared::initialize_shared_spaces() { 2617 FileMapInfo *static_mapinfo = FileMapInfo::current_info(); 2618 _i2i_entry_code_buffers = static_mapinfo->i2i_entry_code_buffers(); 2619 _i2i_entry_code_buffers_size = static_mapinfo->i2i_entry_code_buffers_size(); 2620 char* buffer = static_mapinfo->cloned_vtables(); 2621 clone_cpp_vtables((intptr_t*)buffer); 2622 2623 // Verify various attributes of the archive, plus initialize the 2624 // shared string/symbol tables 2625 buffer = static_mapinfo->serialized_data(); 2626 intptr_t* array = (intptr_t*)buffer; 2627 ReadClosure rc(&array); 2628 serialize(&rc); 2629 2630 // Initialize the run-time symbol table. 2631 SymbolTable::create_table(); 2632 2633 static_mapinfo->patch_archived_heap_embedded_pointers(); 2634 2635 // Close the mapinfo file 2636 static_mapinfo->close(); 2637 2638 static_mapinfo->unmap_region(MetaspaceShared::bm); 2639 2640 FileMapInfo *dynamic_mapinfo = FileMapInfo::dynamic_info(); 2641 if (dynamic_mapinfo != NULL) { 2642 intptr_t* buffer = (intptr_t*)dynamic_mapinfo->serialized_data(); 2643 ReadClosure rc(&buffer); 2644 SymbolTable::serialize_shared_table_header(&rc, false); 2645 SystemDictionaryShared::serialize_dictionary_headers(&rc, false); 2646 dynamic_mapinfo->close(); 2647 } 2648 2649 if (PrintSharedArchiveAndExit) { 2650 if (PrintSharedDictionary) { 2651 tty->print_cr("\nShared classes:\n"); 2652 SystemDictionaryShared::print_on(tty); 2653 } 2654 if (FileMapInfo::current_info() == NULL || _archive_loading_failed) { 2655 tty->print_cr("archive is invalid"); 2656 vm_exit(1); 2657 } else { 2658 tty->print_cr("archive is valid"); 2659 vm_exit(0); 2660 } 2661 } 2662 } 2663 2664 // JVM/TI RedefineClasses() support: 2665 bool MetaspaceShared::remap_shared_readonly_as_readwrite() { 2666 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 2667 2668 if (UseSharedSpaces) { 2669 // remap the shared readonly space to shared readwrite, private 2670 FileMapInfo* mapinfo = FileMapInfo::current_info(); 2671 if (!mapinfo->remap_shared_readonly_as_readwrite()) { 2672 return false; 2673 } 2674 if (FileMapInfo::dynamic_info() != NULL) { 2675 mapinfo = FileMapInfo::dynamic_info(); 2676 if (!mapinfo->remap_shared_readonly_as_readwrite()) { 2677 return false; 2678 } 2679 } 2680 _remapped_readwrite = true; 2681 } 2682 return true; 2683 } 2684 2685 void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) { 2686 // This is highly unlikely to happen on 64-bits because we have reserved a 4GB space. 2687 // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes 2688 // or so. 2689 _mc_region.print_out_of_space_msg(name, needed_bytes); 2690 _rw_region.print_out_of_space_msg(name, needed_bytes); 2691 _ro_region.print_out_of_space_msg(name, needed_bytes); 2692 2693 vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name), 2694 "Please reduce the number of shared classes."); 2695 } 2696 2697 // This is used to relocate the pointers so that the base archive can be mapped at 2698 // MetaspaceShared::requested_base_address() without runtime relocation. 2699 intx MetaspaceShared::final_delta() { 2700 return intx(MetaspaceShared::requested_base_address()) // We want the base archive to be mapped to here at runtime 2701 - intx(SharedBaseAddress); // .. 
but the base archive is mapped here at dump time 2702 } 2703 2704 void MetaspaceShared::print_on(outputStream* st) { 2705 if (UseSharedSpaces || DumpSharedSpaces) { 2706 st->print("CDS archive(s) mapped at: "); 2707 address base; 2708 address top; 2709 if (UseSharedSpaces) { // Runtime 2710 base = (address)MetaspaceObj::shared_metaspace_base(); 2711 address static_top = (address)_shared_metaspace_static_top; 2712 top = (address)MetaspaceObj::shared_metaspace_top(); 2713 st->print("[" PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "), ", p2i(base), p2i(static_top), p2i(top)); 2714 } else if (DumpSharedSpaces) { // Dump Time 2715 base = (address)_shared_rs.base(); 2716 top = (address)_shared_rs.end(); 2717 st->print("[" PTR_FORMAT "-" PTR_FORMAT "), ", p2i(base), p2i(top)); 2718 } 2719 st->print("size " SIZE_FORMAT ", ", top - base); 2720 st->print("SharedBaseAddress: " PTR_FORMAT ", ArchiveRelocationMode: %d.", SharedBaseAddress, (int)ArchiveRelocationMode); 2721 } else { 2722 st->print("CDS disabled."); 2723 } 2724 st->cr(); 2725 }