/*
 * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/classListParser.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/loaderConstraints.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/placeholders.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodes.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "memory/archiveUtils.hpp"
#include "memory/dynamicArchive.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/signature.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/hashtable.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

ReservedSpace MetaspaceShared::_shared_rs;
VirtualSpace MetaspaceShared::_shared_vs;
MetaspaceSharedStats MetaspaceShared::_stats;
bool MetaspaceShared::_has_error_classes;
bool MetaspaceShared::_archive_loading_failed = false;
bool MetaspaceShared::_remapped_readwrite = false;
address MetaspaceShared::_i2i_entry_code_buffers = NULL;
size_t MetaspaceShared::_i2i_entry_code_buffers_size = 0;
void* MetaspaceShared::_shared_metaspace_static_top = NULL;
intx MetaspaceShared::_mapping_delta;

// The CDS archive is divided into the following regions:
//     mc  - misc code (the method entry trampolines)
//     rw  - read-write metadata
//     ro  - read-only metadata and read-only tables
//     md  - misc data (the c++ vtables)
//
//     ca0 - closed archive heap space #0
//     ca1 - closed archive heap space #1 (may be empty)
//     oa0 - open archive heap space #0
//     oa1 - open archive heap space #1 (may be empty)
//
// The mc, rw, ro, and md regions are linearly allocated, starting from
// SharedBaseAddress, in the order of mc->rw->ro->md. The sizes of these 4
// regions are page-aligned, and there's no gap between any consecutive regions.
//
// These 4 regions are populated in the following steps:
// [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
//     temporarily allocated outside of the shared regions. Only the method entry
//     trampolines are written into the mc region.
// [2] ArchiveCompactor copies RW metadata into the rw region.
// [3] ArchiveCompactor copies RO metadata into the ro region.
// [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
//     are copied into the ro region as read-only tables.
// [5] C++ vtables are copied into the md region.
//
// The ca0/ca1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
// Their layout is independent of the other 4 regions.
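
// Illustration -- a minimal, self-contained sketch (toy types, not the real
// DumpRegion/ReservedSpace API) of the bump-pointer allocation and
// back-to-back packing described above: each region starts exactly where the
// previous one ended, so mc->rw->ro->md end up contiguous.
#if 0
#include <cassert>
#include <cstddef>

struct ToyRegion {
  char* _base;
  char* _top;
  char* _end;
  void init(char* base, char* end) { _base = _top = base; _end = end; }
  char* allocate(size_t n) {
    assert(_top + n <= _end && "out of space");
    char* p = _top;
    _top += n;
    return p;
  }
  // Seal this region and start the next one right at our current top.
  // (The real DumpRegion::pack() also aligns _end up to the reserve alignment.)
  void pack(ToyRegion* next, char* reserved_end) {
    _end = _top;
    if (next != nullptr) next->init(_end, reserved_end);
  }
};

void toy_layout(char* reserved_base, char* reserved_end) {
  ToyRegion mc, rw, ro, md;
  mc.init(reserved_base, reserved_end);
  mc.allocate(64);            // method entry trampolines would go here
  mc.pack(&rw, reserved_end);
  rw.allocate(128);           // read-write metadata
  rw.pack(&ro, reserved_end);
  ro.allocate(256);           // read-only metadata and tables
  ro.pack(&md, reserved_end);
  md.allocate(32);            // cloned C++ vtables
}
#endif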
char* DumpRegion::expand_top_to(char* newtop) {
  assert(is_allocatable(), "must be initialized and not packed");
  assert(newtop >= _top, "must not grow backwards");
  if (newtop > _end) {
    MetaspaceShared::report_out_of_space(_name, newtop - _top);
    ShouldNotReachHere();
  }
  uintx delta;
  if (DynamicDumpSharedSpaces) {
    delta = DynamicArchive::object_delta_uintx(newtop);
  } else {
    delta = MetaspaceShared::object_delta_uintx(newtop);
  }
  if (delta > MAX_SHARED_DELTA) {
    // This is just a sanity check and should not happen in any real-world usage:
    // you would have to allocate more than 2GB of shared objects, which would
    // require millions of shared classes.
    vm_exit_during_initialization("Out of memory in the CDS archive",
                                  "Please reduce the number of shared classes.");
  }

  MetaspaceShared::commit_shared_space_to(newtop);
  _top = newtop;
  return _top;
}

char* DumpRegion::allocate(size_t num_bytes, size_t alignment) {
  char* p = (char*)align_up(_top, alignment);
  char* newtop = p + align_up(num_bytes, alignment);
  expand_top_to(newtop);
  memset(p, 0, newtop - p);
  return p;
}

void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
  assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
  intptr_t *p = (intptr_t*)_top;
  char* newtop = _top + sizeof(intptr_t);
  expand_top_to(newtop);
  *p = n;
  if (need_to_mark) {
    ArchivePtrMarker::mark_pointer(p);
  }
}

void DumpRegion::print(size_t total_bytes) const {
  tty->print_cr("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
                _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()),
                p2i(_base + MetaspaceShared::final_delta()));
}

void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
  tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
             _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
  if (strcmp(_name, failing_region) == 0) {
    tty->print_cr(" required = %d", int(needed_bytes));
  } else {
    tty->cr();
  }
}

void DumpRegion::pack(DumpRegion* next) {
  assert(!is_packed(), "sanity");
  _end = (char*)align_up(_top, Metaspace::reserve_alignment());
  _is_packed = true;
  if (next != NULL) {
    next->_base = next->_top = this->_end;
    next->_end = MetaspaceShared::shared_rs()->end();
  }
}

static DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md");
static size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;

void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space, address first_space_bottom) {
  // Start with 0 committed bytes. The memory will be committed as needed by
  // MetaspaceShared::commit_shared_space_to().
  if (!_shared_vs.initialize(_shared_rs, 0)) {
    fatal("Unable to allocate memory for shared space");
  }
  first_space->init(&_shared_rs, (char*)first_space_bottom);
}

DumpRegion* MetaspaceShared::misc_code_dump_space() {
  return &_mc_region;
}

DumpRegion* MetaspaceShared::read_write_dump_space() {
  return &_rw_region;
}

DumpRegion* MetaspaceShared::read_only_dump_space() {
  return &_ro_region;
}

void MetaspaceShared::pack_dump_space(DumpRegion* current, DumpRegion* next,
                                      ReservedSpace* rs) {
  current->pack(next);
}

char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
  return _mc_region.allocate(num_bytes);
}

char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
  return _ro_region.allocate(num_bytes);
}

// When reserving an address range using ReservedSpace, we need an alignment that satisfies both:
// os::vm_allocation_granularity() -- so that we can sub-divide this range into multiple mmap regions,
//     while keeping the first range at offset 0 of this range.
// Metaspace::reserve_alignment() -- so we can pass the region to
//     Metaspace::allocate_metaspace_compressed_klass_ptrs.
size_t MetaspaceShared::reserved_space_alignment() {
  size_t os_align = os::vm_allocation_granularity();
  size_t ms_align = Metaspace::reserve_alignment();
  if (os_align >= ms_align) {
    assert(os_align % ms_align == 0, "must be a multiple");
    return os_align;
  } else {
    assert(ms_align % os_align == 0, "must be a multiple");
    return ms_align;
  }
}

ReservedSpace MetaspaceShared::reserve_shared_space(size_t size, char* requested_address) {
  bool large_pages = false; // Don't use large pages for the CDS archive.
  assert(is_aligned(requested_address, reserved_space_alignment()), "must be");
  return ReservedSpace(size, reserved_space_alignment(), large_pages, requested_address);
}
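
// Illustration -- a standalone sketch (toy struct, not the real ReservedSpace
// API; 64-bit sizes assumed) of the split performed in
// initialize_dumptime_shared_and_meta_spaces() below: one 4GB reservation is
// divided into a lower 3GB part for the archive regions and an upper 1GB
// part used as the temporary compressed class space.
#if 0
#include <cassert>
#include <cstddef>

struct ToyRange {
  char*  _base;
  size_t _size;
  ToyRange first_part(size_t split) { return ToyRange{_base, split}; }
  ToyRange last_part(size_t split)  { return ToyRange{_base + split, _size - split}; }
};

void toy_split(char* base) {
  const size_t G = size_t(1024) * 1024 * 1024;
  ToyRange total{base, 4 * G};
  size_t max_archive_size = 3 * G;                     // ~ cds_total * 3 / 4
  ToyRange archive     = total.first_part(max_archive_size);
  ToyRange class_space = total.last_part(max_archive_size);
  assert(archive._base + archive._size == class_space._base);
  assert(class_space._size == 1 * G);
}
#endif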
void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
  assert(DumpSharedSpaces, "should be called for dump time only");
  const size_t reserve_alignment = reserved_space_alignment();
  char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);

#ifdef _LP64
  // On 64-bit VM, the heap and class space layout will be the same as if
  // you're running in -Xshare:on mode:
  //
  //                         +-- SharedBaseAddress (default = 0x800000000)
  //                         v
  // +-..---------+---------+ ... +----+----+----+----+---------------+
  // |    Heap    | Archive |     | MC | RW | RO | MD |  class space  |
  // +-..---------+---------+ ... +----+----+----+----+---------------+
  // |<--   MaxHeapSize  -->|     |<-- UnscaledClassSpaceMax = 4GB -->|
  //
  const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
  const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
#else
  // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
  size_t cds_total = align_down(256*M, reserve_alignment);
#endif

  bool use_requested_base = true;
  DEBUG_ONLY(
    if (SharedBaseAddress == 0) {
      log_info(cds)("SharedBaseAddress == 0: always allocate class space at an alternative address");
      use_requested_base = false;
    })

  // First try to reserve the space at the specified SharedBaseAddress.
  assert(!_shared_rs.is_reserved(), "must be");
  if (use_requested_base) {
    _shared_rs = reserve_shared_space(cds_total, shared_base);
  }
  if (_shared_rs.is_reserved()) {
    assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
  } else {
    // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
    _shared_rs = reserve_shared_space(cds_total);
  }
  if (!_shared_rs.is_reserved()) {
    vm_exit_during_initialization("Unable to reserve memory for shared space",
                                  err_msg(SIZE_FORMAT " bytes.", cds_total));
  }

#ifdef _LP64
  // During dump time, we allocate 4GB (UnscaledClassSpaceMax) of space and split it up:
  // + The upper 1 GB is used as the "temporary compressed class space" -- preload_classes()
  //   will store Klasses into this space.
  // + The lower 3 GB is used for the archive -- when preload_classes() is done,
  //   ArchiveCompactor will copy the class metadata into this space, first the RW parts,
  //   then the RO parts.

  assert(UseCompressedOops && UseCompressedClassPointers,
         "UseCompressedOops and UseCompressedClassPointers must be set");

  size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment);
  ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
  CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment);
  _shared_rs = _shared_rs.first_part(max_archive_size);

  // Set up compressed class pointers.
  CompressedKlassPointers::set_base((address)_shared_rs.base());
  // Set narrow_klass_shift to be LogKlassAlignmentInBytes. This is consistent
  // with AOT.
  CompressedKlassPointers::set_shift(LogKlassAlignmentInBytes);
  // Set the range of klass addresses to 4GB.
  CompressedKlassPointers::set_range(cds_total);

  Metaspace::initialize_class_space(tmp_class_space);
  log_info(cds)("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
                p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());

  log_info(cds)("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                CompressedClassSpaceSize, p2i(tmp_class_space.base()));
#endif

  init_shared_dump_space(&_mc_region);
  SharedBaseAddress = (size_t)_shared_rs.base();
  tty->print_cr("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                _shared_rs.size(), p2i(_shared_rs.base()));
}

// Called by universe_post_init()
void MetaspaceShared::post_initialize(TRAPS) {
  if (UseSharedSpaces) {
    int size = FileMapInfo::get_number_of_shared_paths();
    if (size > 0) {
      SystemDictionaryShared::allocate_shared_data_arrays(size, THREAD);
      if (!DynamicDumpSharedSpaces) {
        FileMapInfo* info;
        if (FileMapInfo::dynamic_info() == NULL) {
          info = FileMapInfo::current_info();
        } else {
          info = FileMapInfo::dynamic_info();
        }
        ClassLoaderExt::init_paths_start_index(info->app_class_paths_start_index());
        ClassLoaderExt::init_app_module_paths_start_index(info->app_module_paths_start_index());
      }
    }
  }
}

static GrowableArray<Handle>* _extra_interned_strings = NULL;
void MetaspaceShared::read_extra_data(const char* filename, TRAPS) {
  _extra_interned_strings = new (ResourceObj::C_HEAP, mtInternal)GrowableArray<Handle>(10000, true);

  HashtableTextDump reader(filename);
  reader.check_version("VERSION: 1.0");

  while (reader.remain() > 0) {
    int utf8_length;
    int prefix_type = reader.scan_prefix(&utf8_length);
    ResourceMark rm(THREAD);
    if (utf8_length == 0x7fffffff) {
      // buf_len would overflow a 32-bit int.
      vm_exit_during_initialization(err_msg("string length too large: %d", utf8_length));
    }
    int buf_len = utf8_length+1;
    char* utf8_buffer = NEW_RESOURCE_ARRAY(char, buf_len);
    reader.get_utf8(utf8_buffer, utf8_length);
    utf8_buffer[utf8_length] = '\0';

    if (prefix_type == HashtableTextDump::SymbolPrefix) {
      SymbolTable::new_permanent_symbol(utf8_buffer);
    } else {
      assert(prefix_type == HashtableTextDump::StringPrefix, "Sanity");
      oop s = StringTable::intern(utf8_buffer, THREAD);

      if (HAS_PENDING_EXCEPTION) {
        log_warning(cds, heap)("[line %d] extra interned string allocation failed; size too large: %d",
                               reader.last_line_no(), utf8_length);
        CLEAR_PENDING_EXCEPTION;
      } else {
#if INCLUDE_G1GC
        if (UseG1GC) {
          typeArrayOop body = java_lang_String::value(s);
          const HeapRegion* hr = G1CollectedHeap::heap()->heap_region_containing(body);
          if (hr->is_humongous()) {
            // Don't keep it alive, so it will be GC'ed before we dump the strings, in order
            // to maximize free heap space and minimize fragmentation.
            log_warning(cds, heap)("[line %d] extra interned string ignored; size too large: %d",
                                   reader.last_line_no(), utf8_length);
            continue;
          }
        }
#endif
        // Interned strings are GC'ed if there are no references to them, so
        // add a reference to keep this string alive.
        assert(s != NULL, "must succeed");
        Handle h(THREAD, s);
        _extra_interned_strings->append(h);
      }
    }
  }
}

void MetaspaceShared::commit_shared_space_to(char* newtop) {
  Arguments::assert_is_dumping_archive();
  char* base = _shared_rs.base();
  size_t need_committed_size = newtop - base;
  size_t has_committed_size = _shared_vs.committed_size();
  if (need_committed_size < has_committed_size) {
    return;
  }

  size_t min_bytes = need_committed_size - has_committed_size;
  size_t preferred_bytes = 1 * M;
  size_t uncommitted = _shared_vs.reserved_size() - has_committed_size;

  size_t commit = MAX2(min_bytes, preferred_bytes);
  commit = MIN2(commit, uncommitted);
  assert(commit <= uncommitted, "sanity");

  bool result = _shared_vs.expand_by(commit, false);
  ArchivePtrMarker::expand_ptr_end((address*)_shared_vs.high());

  if (!result) {
    vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
                                          need_committed_size));
  }

  log_info(cds)("Expanding shared spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
                commit, _shared_vs.actual_committed_size(), _shared_vs.high());
}

void MetaspaceShared::initialize_ptr_marker(CHeapBitMap* ptrmap) {
  ArchivePtrMarker::initialize(ptrmap, (address*)_shared_vs.low(), (address*)_shared_vs.high());
}
// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.

void MetaspaceShared::serialize(SerializeClosure* soc) {
  int tag = 0;
  soc->do_tag(--tag);

  // Verify the sizes of various metadata in the system.
  soc->do_tag(sizeof(Method));
  soc->do_tag(sizeof(ConstMethod));
  soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(ConstantPool));
  soc->do_tag(sizeof(ConstantPoolCache));
  soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
  soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(Symbol));

  // Dump/restore miscellaneous metadata.
  JavaClasses::serialize_offsets(soc);
  Universe::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore references to commonly used names and signatures.
  vmSymbols::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore the symbol/string/subgraph_info tables
  SymbolTable::serialize_shared_table_header(soc);
  StringTable::serialize_shared_table_header(soc);
  HeapShared::serialize_subgraph_info_table_header(soc);
  SystemDictionaryShared::serialize_dictionary_headers(soc);

  InstanceMirrorKlass::serialize_offsets(soc);
  soc->do_tag(--tag);

  serialize_cloned_cpp_vtptrs(soc);
  soc->do_tag(--tag);

  soc->do_tag(666);
}

address MetaspaceShared::i2i_entry_code_buffers(size_t total_size) {
  if (DumpSharedSpaces) {
    if (_i2i_entry_code_buffers == NULL) {
      _i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
      _i2i_entry_code_buffers_size = total_size;
    }
  } else if (UseSharedSpaces) {
    assert(_i2i_entry_code_buffers != NULL, "must already be initialized");
  } else {
    return NULL;
  }

  assert(_i2i_entry_code_buffers_size == total_size, "must not change");
  return _i2i_entry_code_buffers;
}

uintx MetaspaceShared::object_delta_uintx(void* obj) {
  Arguments::assert_is_dumping_archive();
  if (DumpSharedSpaces) {
    assert(shared_rs()->contains(obj), "must be");
  } else {
    assert(is_in_shared_metaspace(obj) || DynamicArchive::is_in_target_space(obj), "must be");
  }
  address base_address = address(SharedBaseAddress);
  uintx deltax = address(obj) - base_address;
  return deltax;
}
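
// Illustration -- a minimal standalone sketch of the base-plus-delta encoding
// that object_delta_uintx() above enables: archived pointers are stored as
// offsets from SharedBaseAddress, so they can be rebased no matter where the
// archive ends up mapped at runtime. (Toy code, hypothetical names.)
#if 0
#include <cassert>
#include <cstdint>

uintptr_t toy_encode(void* obj, uintptr_t dump_base) {
  return (uintptr_t)obj - dump_base;           // the delta, as in object_delta_uintx()
}

void* toy_decode(uintptr_t delta, uintptr_t runtime_base) {
  return (void*)(runtime_base + delta);        // rebased at the actual map address
}

void toy_example() {
  char archive[64];
  uintptr_t dump_base = (uintptr_t)archive;
  void* obj = archive + 16;
  uintptr_t delta = toy_encode(obj, dump_base);
  assert(toy_decode(delta, dump_base) == obj); // same delta works for any base
}
#endif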
// Global object for holding classes that have been loaded. Since this
// is run at a safepoint just before exit, this is the entire set of classes.
static GrowableArray<Klass*>* _global_klass_objects;

GrowableArray<Klass*>* MetaspaceShared::collected_klasses() {
  return _global_klass_objects;
}

static void collect_array_classes(Klass* k) {
  _global_klass_objects->append_if_missing(k);
  if (k->is_array_klass()) {
    // Add in the array classes too
    ArrayKlass* ak = ArrayKlass::cast(k);
    Klass* h = ak->higher_dimension();
    if (h != NULL) {
      h->array_klasses_do(collect_array_classes);
    }
  }
}

class CollectClassesClosure : public KlassClosure {
  void do_klass(Klass* k) {
    if (k->is_instance_klass() &&
        SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(k))) {
      // Don't add to the _global_klass_objects
    } else {
      _global_klass_objects->append_if_missing(k);
    }
    if (k->is_array_klass()) {
      // Add in the array classes too
      ArrayKlass* ak = ArrayKlass::cast(k);
      Klass* h = ak->higher_dimension();
      if (h != NULL) {
        h->array_klasses_do(collect_array_classes);
      }
    }
  }
};

static void remove_unshareable_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
      // on their array classes.
      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
      k->remove_unshareable_info();
    }
  }
}

static void remove_java_mirror_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
      // on their array classes.
      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
      k->remove_java_mirror();
    }
  }
}

static void clear_basic_type_mirrors() {
  assert(!HeapShared::is_heap_object_archiving_allowed(), "Sanity");
  Universe::set_int_mirror(NULL);
  Universe::set_float_mirror(NULL);
  Universe::set_double_mirror(NULL);
  Universe::set_byte_mirror(NULL);
  Universe::set_bool_mirror(NULL);
  Universe::set_char_mirror(NULL);
  Universe::set_long_mirror(NULL);
  Universe::set_short_mirror(NULL);
  Universe::set_void_mirror(NULL);
}

static void rewrite_nofast_bytecode(Method* method) {
  BytecodeStream bcs(method);
  while (!bcs.is_last_bytecode()) {
    Bytecodes::Code opcode = bcs.next();
    switch (opcode) {
    case Bytecodes::_getfield: *bcs.bcp() = Bytecodes::_nofast_getfield; break;
    case Bytecodes::_putfield: *bcs.bcp() = Bytecodes::_nofast_putfield; break;
    case Bytecodes::_aload_0:  *bcs.bcp() = Bytecodes::_nofast_aload_0;  break;
    case Bytecodes::_iload: {
      if (!bcs.is_wide()) {
        *bcs.bcp() = Bytecodes::_nofast_iload;
      }
      break;
    }
    default: break;
    }
  }
}

// Walk all methods in the class list to ensure that they won't be modified at
// run time. This includes:
// [1] Rewrite all bytecodes as needed, so that the ConstMethod* will not be modified
//     at run time by RewriteBytecodes/RewriteFrequentPairs
// [2] Assign a fingerprint, so one doesn't need to be assigned at run-time.
static void rewrite_nofast_bytecodes_and_calculate_fingerprints() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(ik);
    }
  }
}

void MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(InstanceKlass* ik) {
  for (int i = 0; i < ik->methods()->length(); i++) {
    Method* m = ik->methods()->at(i);
    rewrite_nofast_bytecode(m);
    Fingerprinter fp(m);
    // The side effect of this call sets the method's fingerprint field.
    fp.fingerprint();
  }
}

// Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
// (In GCC this is the field <Type>::_vptr, i.e., the first word in the object.)
//
// Addresses of the vtables and the methods may be different across JVM runs,
// if libjvm.so is dynamically loaded at a different base address.
//
// To ensure that the Metadata objects in the CDS archive always have the correct vtable:
//
// + at dump time:  we redirect the _vptr to point to our own vtables inside
//                  the CDS image
// + at run time:   we clone the actual contents of the vtables from libjvm.so
//                  into our own tables.

// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
#define CPP_VTABLE_PATCH_TYPES_DO(f) \
  f(ConstantPool) \
  f(InstanceKlass) \
  f(InstanceClassLoaderKlass) \
  f(InstanceMirrorKlass) \
  f(InstanceRefKlass) \
  f(Method) \
  f(ObjArrayKlass) \
  f(TypeArrayKlass)
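
// Illustration -- a toy sketch of the two steps described above: copy a
// vtable's contents, then redirect an object's hidden vptr at the copy.
// Like the real code, this leans on the compiler-specific layout assumption
// that the vptr is the first word of a polymorphic object, and the number of
// entries copied here is ABI-dependent (hypothetical toy class).
#if 0
#include <cassert>
#include <cstdint>
#include <cstring>

struct Toy {
  virtual int id() { return 42; }
  virtual ~Toy() {}
};

void toy_vptr_redirect() {
  Toy t;
  intptr_t* vtable = *(intptr_t**)&t;     // read the vptr (first word of t)
  intptr_t cloned[3];
  memcpy(cloned, vtable, sizeof(cloned)); // "clone" the leading vtable entries
  *(intptr_t**)&t = cloned;               // redirect the vptr to the clone
  Toy* p = &t;
  assert(p->id() == 42);                  // virtual dispatch still works
}
#endif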
class CppVtableInfo {
  intptr_t _vtable_size;
  intptr_t _cloned_vtable[1];
public:
  static int num_slots(int vtable_size) {
    return 1 + vtable_size; // Need to add the space occupied by _vtable_size
  }
  int vtable_size()           { return int(uintx(_vtable_size)); }
  void set_vtable_size(int n) { _vtable_size = intptr_t(n); }
  intptr_t* cloned_vtable()   { return &_cloned_vtable[0]; }
  void zero()                 { memset(_cloned_vtable, 0, sizeof(intptr_t) * vtable_size()); }
  // Returns the address of the next CppVtableInfo that can be placed immediately after this CppVtableInfo
  static size_t byte_size(int vtable_size) {
    CppVtableInfo i;
    return pointer_delta(&i._cloned_vtable[vtable_size], &i, sizeof(u1));
  }
};

template <class T> class CppVtableCloner : public T {
  static intptr_t* vtable_of(Metadata& m) {
    return *((intptr_t**)&m);
  }
  static CppVtableInfo* _info;

  static int get_vtable_length(const char* name);

public:
  // Allocate and initialize the C++ vtable, starting from top, but do not go past end.
  static intptr_t* allocate(const char* name);

  // Clone the vtable to ...
  static intptr_t* clone_vtable(const char* name, CppVtableInfo* info);

  static void zero_vtable_clone() {
    assert(DumpSharedSpaces, "dump-time only");
    _info->zero();
  }

  // Switch the vtable pointer to point to the cloned vtable.
  static void patch(Metadata* obj) {
    assert(DumpSharedSpaces, "dump-time only");
    assert(MetaspaceShared::is_in_output_space(obj), "must be");
    *(void**)obj = (void*)(_info->cloned_vtable());
    ArchivePtrMarker::mark_pointer(obj);
  }

  static bool is_valid_shared_object(const T* obj) {
    intptr_t* vptr = *(intptr_t**)obj;
    return vptr == _info->cloned_vtable();
  }
};

template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;

template <class T>
intptr_t* CppVtableCloner<T>::allocate(const char* name) {
  assert(is_aligned(_md_region.top(), sizeof(intptr_t)), "bad alignment");
  int n = get_vtable_length(name);
  _info = (CppVtableInfo*)_md_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
  _info->set_vtable_size(n);

  intptr_t* p = clone_vtable(name, _info);
  assert((char*)p == _md_region.top(), "must be");

  return _info->cloned_vtable();
}

template <class T>
intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) {
  if (!DumpSharedSpaces) {
    assert(_info == 0, "_info is initialized only at dump time");
    _info = info; // Remember it -- it will be used by MetaspaceShared::is_valid_shared_method()
  }
  T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
  int n = info->vtable_size();
  intptr_t* srcvtable = vtable_of(tmp);
  intptr_t* dstvtable = info->cloned_vtable();

  // We already checked (and, if necessary, adjusted n) when the vtables were allocated, so we are
  // safe to do memcpy.
  log_debug(cds, vtables)("Copying %3d vtable entries for %s", n, name);
  memcpy(dstvtable, srcvtable, sizeof(intptr_t) * n);
  return dstvtable + n;
}

// To determine the size of the vtable for each type, we use the following
// trick by declaring 2 subclasses:
//
//   class CppVtableTesterB: public InstanceKlass {virtual int   last_virtual_method() {return 1;}    };
//   class CppVtableTesterA: public InstanceKlass {virtual void* last_virtual_method() {return NULL;} };
//
// CppVtableTesterA and CppVtableTesterB's vtables have the following properties:
// - Their size (N+1) is exactly one more than the size of InstanceKlass's vtable (N)
// - The first N entries are exactly the same as in InstanceKlass's vtable.
// - Their last entry is different.
//
// So to determine the value of N, we just walk CppVtableTesterA and CppVtableTesterB's tables
// and find the first entry that's different.
//
// This works on all C++ compilers supported by Oracle, but you may need to tweak it for more
// esoteric compilers.

template <class T> class CppVtableTesterB: public T {
public:
  virtual int last_virtual_method() {return 1;}
};

template <class T> class CppVtableTesterA : public T {
public:
  virtual void* last_virtual_method() {
    // Make this different than CppVtableTesterB::last_virtual_method so the C++
    // compiler/linker won't alias the two functions.
    return NULL;
  }
};
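
// Illustration -- the tester trick above, applied to a toy base class as a
// standalone sketch: the two subclasses share the base's N vtable slots and
// differ only in the slot they append, so scanning for the first mismatch
// yields N. (Same compiler-layout caveats as the real code.)
#if 0
#include <cstdint>

struct ToyBase {
  virtual void f() {}
  virtual void g() {}
  virtual ~ToyBase() {}
};
struct ToyTesterA : ToyBase { virtual void* last_virtual_method() { return nullptr; } };
struct ToyTesterB : ToyBase { virtual int   last_virtual_method() { return 1; } };

int toy_vtable_length() {
  ToyTesterA a;
  ToyTesterB b;
  intptr_t* avtable = *(intptr_t**)&a;
  intptr_t* bvtable = *(intptr_t**)&b;
  int len = 1;   // slot 0 may be RTTI-related on some ABIs, as noted above
  while (avtable[len] == bvtable[len]) {
    len++;
  }
  return len;    // == number of vtable slots contributed by ToyBase
}
#endif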
template <class T>
int CppVtableCloner<T>::get_vtable_length(const char* name) {
  CppVtableTesterA<T> a;
  CppVtableTesterB<T> b;

  intptr_t* avtable = vtable_of(a);
  intptr_t* bvtable = vtable_of(b);

  // Start at slot 1, because slot 0 may be RTTI (on Solaris/Sparc)
  int vtable_len = 1;
  for (; ; vtable_len++) {
    if (avtable[vtable_len] != bvtable[vtable_len]) {
      break;
    }
  }
  log_debug(cds, vtables)("Found %3d vtable entries for %s", vtable_len, name);

  return vtable_len;
}

#define ALLOC_CPP_VTABLE_CLONE(c) \
  _cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner<c>::allocate(#c); \
  ArchivePtrMarker::mark_pointer(&_cloned_cpp_vtptrs[c##_Kind]);

#define CLONE_CPP_VTABLE(c) \
  p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);

#define ZERO_CPP_VTABLE(c) \
  CppVtableCloner<c>::zero_vtable_clone();

//------------------------------ for DynamicDumpSharedSpaces - start
#define DECLARE_CLONED_VTABLE_KIND(c) c ## _Kind,

enum {
  CPP_VTABLE_PATCH_TYPES_DO(DECLARE_CLONED_VTABLE_KIND)
  _num_cloned_vtable_kinds
};

static intptr_t** _cloned_cpp_vtptrs = NULL;

void MetaspaceShared::serialize_cloned_cpp_vtptrs(SerializeClosure* soc) {
  soc->do_ptr((void**)&_cloned_cpp_vtptrs);
}

intptr_t* MetaspaceShared::fix_cpp_vtable_for_dynamic_archive(MetaspaceObj::Type msotype, address obj) {
  assert(DynamicDumpSharedSpaces, "must");
  int kind = -1;
  switch (msotype) {
  case MetaspaceObj::SymbolType:
  case MetaspaceObj::TypeArrayU1Type:
  case MetaspaceObj::TypeArrayU2Type:
  case MetaspaceObj::TypeArrayU4Type:
  case MetaspaceObj::TypeArrayU8Type:
  case MetaspaceObj::TypeArrayOtherType:
  case MetaspaceObj::ConstMethodType:
  case MetaspaceObj::ConstantPoolCacheType:
  case MetaspaceObj::AnnotationsType:
  case MetaspaceObj::MethodCountersType:
    // These have no vtables.
    break;
  case MetaspaceObj::ClassType:
    {
      Klass* k = (Klass*)obj;
      assert(k->is_klass(), "must be");
      if (k->is_instance_klass()) {
        kind = InstanceKlass_Kind;
      } else {
        assert(k->is_objArray_klass(),
               "We shouldn't archive any other klasses in DynamicDumpSharedSpaces");
        kind = ObjArrayKlass_Kind;
      }
    }
    break;

  case MetaspaceObj::MethodType:
    {
      Method* m = (Method*)obj;
      assert(m->is_method(), "must be");
      kind = Method_Kind;
    }
    break;

  case MetaspaceObj::MethodDataType:
    // We don't archive MethodData <-- it should have been removed in remove_unshareable_info
    ShouldNotReachHere();
    break;

  case MetaspaceObj::ConstantPoolType:
    {
      ConstantPool *cp = (ConstantPool*)obj;
      assert(cp->is_constantPool(), "must be");
      kind = ConstantPool_Kind;
    }
    break;

  default:
    ShouldNotReachHere();
  }

  if (kind >= 0) {
    assert(kind < _num_cloned_vtable_kinds, "must be");
    return _cloned_cpp_vtptrs[kind];
  } else {
    return NULL;
  }
}

//------------------------------ for DynamicDumpSharedSpaces - end

// This can be called at both dump time and run time.
intptr_t* MetaspaceShared::clone_cpp_vtables(intptr_t* p) {
  assert(DumpSharedSpaces || UseSharedSpaces, "sanity");
  CPP_VTABLE_PATCH_TYPES_DO(CLONE_CPP_VTABLE);
  return p;
}

void MetaspaceShared::zero_cpp_vtable_clones_for_writing() {
  assert(DumpSharedSpaces, "dump-time only");
  CPP_VTABLE_PATCH_TYPES_DO(ZERO_CPP_VTABLE);
}

// Allocate and initialize the C++ vtables, starting from top, but do not go past end.
void MetaspaceShared::allocate_cpp_vtable_clones() {
  assert(DumpSharedSpaces, "dump-time only");
  // Layout (each slot is an intptr_t):
  //   [number of slots in the first vtable = n1]
  //   [ <n1> slots for the first vtable]
  //   [number of slots in the second vtable = n2]
  //   [ <n2> slots for the second vtable]
  //   ...
  // The order of the vtables is the same as the CPP_VTABLE_PATCH_TYPES_DO macro.
  CPP_VTABLE_PATCH_TYPES_DO(ALLOC_CPP_VTABLE_CLONE);
}

// Switch the vtable pointer to point to the cloned vtable. We assume the
// vtable pointer is in the first slot of the object.
void MetaspaceShared::patch_cpp_vtable_pointers() {
  int n = _global_klass_objects->length();
  for (int i = 0; i < n; i++) {
    Klass* obj = _global_klass_objects->at(i);
    if (obj->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(obj);
      if (ik->is_class_loader_instance_klass()) {
        CppVtableCloner<InstanceClassLoaderKlass>::patch(ik);
      } else if (ik->is_reference_instance_klass()) {
        CppVtableCloner<InstanceRefKlass>::patch(ik);
      } else if (ik->is_mirror_instance_klass()) {
        CppVtableCloner<InstanceMirrorKlass>::patch(ik);
      } else {
        CppVtableCloner<InstanceKlass>::patch(ik);
      }
      ConstantPool* cp = ik->constants();
      CppVtableCloner<ConstantPool>::patch(cp);
      for (int j = 0; j < ik->methods()->length(); j++) {
        Method* m = ik->methods()->at(j);
        CppVtableCloner<Method>::patch(m);
        assert(CppVtableCloner<Method>::is_valid_shared_object(m), "must be");
      }
    } else if (obj->is_objArray_klass()) {
      CppVtableCloner<ObjArrayKlass>::patch(obj);
    } else {
      assert(obj->is_typeArray_klass(), "sanity");
      CppVtableCloner<TypeArrayKlass>::patch(obj);
    }
  }
}

bool MetaspaceShared::is_valid_shared_method(const Method* m) {
  assert(is_in_shared_metaspace(m), "must be");
  return CppVtableCloner<Method>::is_valid_shared_object(m);
}

void WriteClosure::do_oop(oop* o) {
  if (*o == NULL) {
    _dump_region->append_intptr_t(0);
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archiving heap object is not allowed");
    _dump_region->append_intptr_t(
      (intptr_t)CompressedOops::encode_not_null(*o));
  }
}

void WriteClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    _dump_region->append_intptr_t(*(intptr_t*)start, true);
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}
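
// Illustration -- a standalone sketch (toy buffer, not the real
// WriteClosure/ReadClosure pair) of the tag discipline used by do_region()
// above and by MetaspaceShared::serialize(): the writer emits a tag word
// ahead of the payload, and the reader re-derives the same tag and compares,
// catching drift between dump-time and run-time layout.
#if 0
#include <cassert>
#include <cstdint>
#include <vector>

struct ToyStream {
  std::vector<intptr_t> words;
  size_t pos = 0;
  void write_tag(intptr_t t) { words.push_back(t); }
  void check_tag(intptr_t t) { assert(words.at(pos) == t); pos++; }
  void write(intptr_t w)     { words.push_back(w); }
  intptr_t read()            { return words.at(pos++); }
};

void toy_roundtrip() {
  ToyStream s;
  s.write_tag(sizeof(void*));   // a layout-sensitive value used as a tag
  s.write(12345);               // the payload
  // ... later, "at runtime":
  s.check_tag(sizeof(void*));   // fails if the layout changed in between
  assert(s.read() == 12345);
}
#endif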
// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public ResourceObj {
public:

  // Here's poor man's enum inheritance
#define SHAREDSPACE_OBJ_TYPES_DO(f) \
  METASPACE_OBJ_TYPES_DO(f) \
  f(SymbolHashentry) \
  f(SymbolBucket) \
  f(StringHashentry) \
  f(StringBucket) \
  f(Other)

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  static const char* type_name(Type type) {
    switch(type) {
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
    default:
      ShouldNotReachHere();
      return NULL;
    }
  }

public:
  enum { RO = 0, RW = 1 };

  int _counts[2][_number_of_types];
  int _bytes [2][_number_of_types];

  DumpAllocStats() {
    memset(_counts, 0, sizeof(_counts));
    memset(_bytes, 0, sizeof(_bytes));
  }

  void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
    int which = (read_only) ? RO : RW;
    _counts[which][type] ++;
    _bytes [which][type] += byte_size;
  }

  void record_other_type(int byte_size, bool read_only) {
    int which = (read_only) ? RO : RW;
    _bytes [which][OtherType] += byte_size;
  }
  void print_stats(int ro_all, int rw_all, int mc_all, int md_all);
};
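
// Illustration -- the "poor man's enum inheritance" pattern above, reduced to
// a standalone sketch: the derived X-macro first invokes the base X-macro and
// then appends its own entries, so the derived enum begins with exactly the
// base enum's constants. (Toy macros, hypothetical names.)
#if 0
#define TOY_BASE_TYPES_DO(f) \
  f(Class)                   \
  f(Symbol)

#define TOY_SHARED_TYPES_DO(f) \
  TOY_BASE_TYPES_DO(f)         \
  f(SymbolBucket)              \
  f(Other)

#define TOY_DECLARE(name) name##Type,

enum ToyType {
  TOY_SHARED_TYPES_DO(TOY_DECLARE)   // ClassType, SymbolType, SymbolBucketType, OtherType
  _toy_number_of_types
};
#endif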
void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all, int md_all) {
  // Calculate size of data that was not allocated by Metaspace::allocate()
  MetaspaceSharedStats *stats = MetaspaceShared::stats();

  // symbols
  _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
  _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;

  _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
  _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;

  // strings
  _counts[RO][StringHashentryType] = stats->string.hashentry_count;
  _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;

  _counts[RO][StringBucketType] = stats->string.bucket_count;
  _bytes [RO][StringBucketType] = stats->string.bucket_bytes;

  // TODO: count things like dictionary, vtable, etc
  _bytes[RW][OtherType] += mc_all + md_all;
  rw_all += mc_all + md_all; // mc/md are mapped Read/Write

  // prevent divide-by-zero
  if (ro_all < 1) {
    ro_all = 1;
  }
  if (rw_all < 1) {
    rw_all = 1;
  }

  int all_ro_count = 0;
  int all_ro_bytes = 0;
  int all_rw_count = 0;
  int all_rw_bytes = 0;

// To make fmt_stats be a syntactic constant (for format warnings), use #define.
#define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
  const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
  const char *hdr = "                         ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";

  LogMessage(cds) msg;

  msg.info("Detailed metadata info (excluding st regions; rw stats include md/mc regions):");
  msg.info("%s", hdr);
  msg.info("%s", sep);
  for (int type = 0; type < int(_number_of_types); type ++) {
    const char *name = type_name((Type)type);
    int ro_count = _counts[RO][type];
    int ro_bytes = _bytes [RO][type];
    int rw_count = _counts[RW][type];
    int rw_bytes = _bytes [RW][type];
    int count = ro_count + rw_count;
    int bytes = ro_bytes + rw_bytes;

    double ro_perc = percent_of(ro_bytes, ro_all);
    double rw_perc = percent_of(rw_bytes, rw_all);
    double perc    = percent_of(bytes, ro_all + rw_all);

    msg.info(fmt_stats, name,
             ro_count, ro_bytes, ro_perc,
             rw_count, rw_bytes, rw_perc,
             count, bytes, perc);

    all_ro_count += ro_count;
    all_ro_bytes += ro_bytes;
    all_rw_count += rw_count;
    all_rw_bytes += rw_bytes;
  }

  int all_count = all_ro_count + all_rw_count;
  int all_bytes = all_ro_bytes + all_rw_bytes;

  double all_ro_perc = percent_of(all_ro_bytes, ro_all);
  double all_rw_perc = percent_of(all_rw_bytes, rw_all);
  double all_perc    = percent_of(all_bytes, ro_all + rw_all);

  msg.info("%s", sep);
  msg.info(fmt_stats, "Total",
           all_ro_count, all_ro_bytes, all_ro_perc,
           all_rw_count, all_rw_bytes, all_rw_perc,
           all_count, all_bytes, all_perc);

  assert(all_ro_bytes == ro_all, "everything should have been counted");
  assert(all_rw_bytes == rw_all, "everything should have been counted");
#undef fmt_stats
}

// Populate the shared space.
class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  GrowableArray<MemRegion> *_closed_archive_heap_regions;
  GrowableArray<MemRegion> *_open_archive_heap_regions;

  GrowableArray<ArchiveHeapOopmapInfo> *_closed_archive_heap_oopmaps;
  GrowableArray<ArchiveHeapOopmapInfo> *_open_archive_heap_oopmaps;

  void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                 GrowableArray<ArchiveHeapOopmapInfo>* oopmaps);
  void dump_symbols();
  char* dump_read_only_tables();
  void print_class_stats();
  void print_region_stats();
  void print_bitmap_region_stats(size_t size, size_t total_size);
  void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                               const char *name, size_t total_size);
  void relocate_to_default_base_address(CHeapBitMap* ptrmap);

public:

  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit();   // outline because gdb sucks
  static void write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec) {
    mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
  }
  bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace

class SortedSymbolClosure: public SymbolClosure {
  GrowableArray<Symbol*> _symbols;
  virtual void do_symbol(Symbol** sym) {
    assert((*sym)->is_permanent(), "archived symbols must be permanent");
    _symbols.append(*sym);
  }
  static int compare_symbols_by_address(Symbol** a, Symbol** b) {
    if (a[0] < b[0]) {
      return -1;
    } else if (a[0] == b[0]) {
      return 0;
    } else {
      return 1;
    }
  }

public:
  SortedSymbolClosure() {
    SymbolTable::symbols_do(this);
    _symbols.sort(compare_symbols_by_address);
  }
  GrowableArray<Symbol*>* get_sorted_symbols() {
    return &_symbols;
  }
};

// ArchiveCompactor --
//
// This class is the central piece of shared archive compaction -- all metaspace data are
// initially allocated outside of the shared regions. ArchiveCompactor copies the
// metaspace data into their final location in the shared regions.

class ArchiveCompactor : AllStatic {
  static const int INITIAL_TABLE_SIZE = 8087;
  static const int MAX_TABLE_SIZE     = 1000000;

  static DumpAllocStats* _alloc_stats;
  static SortedSymbolClosure* _ssc;

  typedef KVHashtable<address, address, mtInternal> RelocationTable;
  static RelocationTable* _new_loc_table;

public:
  static void initialize() {
    _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats;
    _new_loc_table = new RelocationTable(INITIAL_TABLE_SIZE);
  }
  static DumpAllocStats* alloc_stats() {
    return _alloc_stats;
  }

  // Use this when you allocate space with MetaspaceShared::read_only_space_alloc()
  // outside of ArchiveCompactor::allocate(). These are usually for misc tables
  // that are allocated in the RO space.
  class OtherROAllocMark {
    char* _oldtop;
  public:
    OtherROAllocMark() {
      _oldtop = _ro_region.top();
    }
    ~OtherROAllocMark() {
      char* newtop = _ro_region.top();
      ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
    }
  };

  static void allocate(MetaspaceClosure::Ref* ref, bool read_only) {
    address obj = ref->obj();
    int bytes = ref->size() * BytesPerWord;
    char* p;
    size_t alignment = BytesPerWord;
    char* oldtop;
    char* newtop;

    if (read_only) {
      oldtop = _ro_region.top();
      p = _ro_region.allocate(bytes, alignment);
      newtop = _ro_region.top();
    } else {
      oldtop = _rw_region.top();
      if (ref->msotype() == MetaspaceObj::ClassType) {
        // Save a pointer immediately in front of an InstanceKlass, so
        // we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo*
        // without building another hashtable. See RunTimeSharedClassInfo::get_for()
        // in systemDictionaryShared.cpp.
        Klass* klass = (Klass*)obj;
        if (klass->is_instance_klass()) {
          SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
          _rw_region.allocate(sizeof(address), BytesPerWord);
        }
      }
      p = _rw_region.allocate(bytes, alignment);
      newtop = _rw_region.top();
    }
    memcpy(p, obj, bytes);

    assert(_new_loc_table->lookup(obj) == NULL, "each object can be relocated at most once");
    _new_loc_table->add(obj, (address)p);
    log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
    if (_new_loc_table->maybe_grow(MAX_TABLE_SIZE)) {
      log_info(cds, hashtables)("Expanded _new_loc_table to %d", _new_loc_table->table_size());
    }
    _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
  }

  static address get_new_loc(MetaspaceClosure::Ref* ref) {
    address* pp = _new_loc_table->lookup(ref->obj());
    assert(pp != NULL, "must be");
    return *pp;
  }

private:
  // Makes a shallow copy of visited MetaspaceObj's
  class ShallowCopier: public UniqueMetaspaceClosure {
    bool _read_only;
  public:
    ShallowCopier(bool read_only) : _read_only(read_only) {}

    virtual bool do_unique_ref(Ref* ref, bool read_only) {
      if (read_only == _read_only) {
        allocate(ref, read_only);
      }
      return true; // recurse into ref.obj()
    }
  };

  // Relocate embedded pointers within a MetaspaceObj's shallow copy
  class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
  public:
    virtual bool do_unique_ref(Ref* ref, bool read_only) {
      address new_loc = get_new_loc(ref);
      RefRelocator refer;
      ref->metaspace_pointers_do_at(&refer, new_loc);
      return true; // recurse into ref.obj()
    }
    virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) {
      assert(type == _method_entry_ref, "only special type allowed for now");
      address obj = ref->obj();
      address new_obj = get_new_loc(ref);
      size_t offset = pointer_delta(p, obj, sizeof(u1));
      intptr_t* new_p = (intptr_t*)(new_obj + offset);
      assert(*p == *new_p, "must be a copy");
      ArchivePtrMarker::mark_pointer((address*)new_p);
    }
  };

  // Relocate a reference to point to its shallow copy
  class RefRelocator: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        ref->update(get_new_loc(ref));
        ArchivePtrMarker::mark_pointer(ref->addr());
      }
      return false; // Do not recurse.
    }
  };
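
  // Illustration -- the copy-then-fixup scheme implemented by the closures
  // above, as a standalone sketch (toy object graph and map): pass 1 copies
  // each object into the target region and records old->new in a relocation
  // table; pass 2 rewrites every embedded pointer through that table. This is
  // the role _new_loc_table plays for ArchiveCompactor.
#if 0
#include <cassert>
#include <cstring>
#include <map>

struct ToyObj { ToyObj* next; int payload; };

void toy_compact() {
  ToyObj a{nullptr, 1}, b{&a, 2};            // b -> a, allocated "outside"
  alignas(ToyObj) char region[sizeof(ToyObj) * 2];
  std::map<ToyObj*, ToyObj*> new_loc;        // toy stand-in for _new_loc_table

  // Pass 1: shallow-copy into the region, recording relocations.
  ToyObj* objs[] = { &a, &b };
  for (int i = 0; i < 2; i++) {
    ToyObj* copy = (ToyObj*)(region + i * sizeof(ToyObj));
    memcpy(copy, objs[i], sizeof(ToyObj));
    new_loc[objs[i]] = copy;
  }
  // Pass 2: relocate the embedded pointers inside the copies.
  for (int i = 0; i < 2; i++) {
    ToyObj* copy = new_loc[objs[i]];
    if (copy->next != nullptr) {
      copy->next = new_loc[copy->next];
    }
  }
  assert(new_loc[&b]->next == new_loc[&a]);
}
#endif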
#ifdef ASSERT
  class IsRefInArchiveChecker: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        char* obj = (char*)ref->obj();
        assert(_ro_region.contains(obj) || _rw_region.contains(obj),
               "must be relocated to point to CDS archive");
      }
      return false; // Do not recurse.
    }
  };
#endif

public:
  static void copy_and_compact() {
    ResourceMark rm;
    SortedSymbolClosure the_ssc; // StackObj
    _ssc = &the_ssc;

    tty->print_cr("Scanning all metaspace objects ... ");
    {
      // allocate and shallow-copy RW objects, immediately following the MC region
      tty->print_cr("Allocating RW objects ... ");
      _mc_region.pack(&_rw_region);

      ResourceMark rm;
      ShallowCopier rw_copier(false);
      iterate_roots(&rw_copier);
    }
    {
      // allocate and shallow-copy RO objects, immediately following the RW region
      tty->print_cr("Allocating RO objects ... ");
      _rw_region.pack(&_ro_region);

      ResourceMark rm;
      ShallowCopier ro_copier(true);
      iterate_roots(&ro_copier);
    }
    {
      tty->print_cr("Relocating embedded pointers ... ");
      ResourceMark rm;
      ShallowCopyEmbeddedRefRelocator emb_reloc;
      iterate_roots(&emb_reloc);
    }
    {
      tty->print_cr("Relocating external roots ... ");
      ResourceMark rm;
      RefRelocator ext_reloc;
      iterate_roots(&ext_reloc);
    }

#ifdef ASSERT
    {
      tty->print_cr("Verifying external roots ... ");
      ResourceMark rm;
      IsRefInArchiveChecker checker;
      iterate_roots(&checker);
    }
#endif

    // cleanup
    _ssc = NULL;
  }

  // We must relocate SystemDictionary::_well_known_klasses[] only after we have
  // copied the java objects during dump_java_heap_objects(): during the object
  // copy, we operate on old objects which assert that their klass is the
  // original klass.
  static void relocate_well_known_klasses() {
    {
      tty->print_cr("Relocating SystemDictionary::_well_known_klasses[] ... ");
      ResourceMark rm;
      RefRelocator ext_reloc;
      SystemDictionary::well_known_klasses_do(&ext_reloc);
    }
    // NOTE: after this point, we shouldn't have any globals that can reach the old
    // objects.

    // We cannot use any of the objects in the heap anymore (except for the
    // shared strings) because their headers no longer point to valid Klasses.
  }
  static void iterate_roots(MetaspaceClosure* it) {
    GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols();
    for (int i = 0; i < symbols->length(); i++) {
      it->push(symbols->adr_at(i));
    }
    if (_global_klass_objects != NULL) {
      // Need to fix up the pointers
      for (int i = 0; i < _global_klass_objects->length(); i++) {
        // NOTE -- this requires that the vtable is NOT yet patched, or else we are hosed.
        it->push(_global_klass_objects->adr_at(i));
      }
    }
    FileMapInfo::metaspace_pointers_do(it);
    SystemDictionaryShared::dumptime_classes_do(it);
    Universe::metaspace_pointers_do(it);
    SymbolTable::metaspace_pointers_do(it);
    vmSymbols::metaspace_pointers_do(it);

    it->finish();
  }

  static Klass* get_relocated_klass(Klass* orig_klass) {
    assert(DumpSharedSpaces, "dump time only");
    address* pp = _new_loc_table->lookup((address)orig_klass);
    assert(pp != NULL, "must be");
    Klass* klass = (Klass*)(*pp);
    assert(klass->is_klass(), "must be");
    return klass;
  }
};

DumpAllocStats* ArchiveCompactor::_alloc_stats;
SortedSymbolClosure* ArchiveCompactor::_ssc;
ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table;

void VM_PopulateDumpSharedSpace::dump_symbols() {
  tty->print_cr("Dumping symbol table ...");

  NOT_PRODUCT(SymbolTable::verify());
  SymbolTable::write_to_archive();
}

char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
  ArchiveCompactor::OtherROAllocMark mark;

  tty->print("Removing java_mirror ... ");
  if (!HeapShared::is_heap_object_archiving_allowed()) {
    clear_basic_type_mirrors();
  }
  remove_java_mirror_in_classes();
  tty->print_cr("done. ");

  SystemDictionaryShared::write_to_archive();

  size_t vtptrs_bytes = _num_cloned_vtable_kinds * sizeof(intptr_t*);
  _cloned_cpp_vtptrs = (intptr_t**)_ro_region.allocate(vtptrs_bytes, sizeof(intptr_t*));

  // Write the other data to the output array.
  char* start = _ro_region.top();
  WriteClosure wc(&_ro_region);
  MetaspaceShared::serialize(&wc);

  // Write the bitmaps for patching the archive heap regions
  dump_archive_heap_oopmaps();

  return start;
}

void VM_PopulateDumpSharedSpace::print_class_stats() {
  tty->print_cr("Number of classes %d", _global_klass_objects->length());
  {
    int num_type_array = 0, num_obj_array = 0, num_inst = 0;
    for (int i = 0; i < _global_klass_objects->length(); i++) {
      Klass* k = _global_klass_objects->at(i);
      if (k->is_instance_klass()) {
        num_inst ++;
      } else if (k->is_objArray_klass()) {
        num_obj_array ++;
      } else {
        assert(k->is_typeArray_klass(), "sanity");
        num_type_array ++;
      }
    }
    tty->print_cr("    instance classes   = %5d", num_inst);
    tty->print_cr("    obj array classes  = %5d", num_obj_array);
    tty->print_cr("    type array classes = %5d", num_type_array);
  }
}
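
// Illustration -- a standalone sketch (toy bitmap, not the real
// SharedDataRelocator/CHeapBitMap types) of the patching scheme used by
// relocate_to_default_base_address() below: every archived pointer slot was
// marked in a bitmap at dump time, so relocating the archive is just "for
// each set bit, add addr_delta to that slot".
#if 0
#include <cassert>
#include <cstdint>
#include <vector>

void toy_patch_marked_slots(intptr_t* patch_base, const std::vector<bool>& ptrmap,
                            intptr_t addr_delta) {
  for (size_t i = 0; i < ptrmap.size(); i++) {
    if (ptrmap[i]) {                  // slot i holds an archived pointer
      patch_base[i] += addr_delta;    // shift it to the requested base
    }
  }
}

void toy_relocate() {
  intptr_t region[4] = { 0x1000, 7, 0x2000, 9 };   // slots 0 and 2 are pointers
  std::vector<bool> ptrmap = { true, false, true, false };
  toy_patch_marked_slots(region, ptrmap, 0x100);
  assert(region[0] == 0x1100 && region[1] == 7 && region[2] == 0x2100);
}
#endif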
void VM_PopulateDumpSharedSpace::relocate_to_default_base_address(CHeapBitMap* ptrmap) {
  intx addr_delta = MetaspaceShared::final_delta();
  if (addr_delta == 0) {
    return;
  }

  // Patch all pointers that are marked by ptrmap within this region,
  // where we have just dumped all the metaspace data.
  address patch_base = (address)SharedBaseAddress;
  address patch_end  = (address)_md_region.top();
  size_t size = patch_end - patch_base;

  // debug only -- the current value of the pointers to be patched must be within this
  // range (i.e., must point to valid metaspace objects)
  address valid_old_base = patch_base;
  address valid_old_end  = patch_end;

  // debug only -- after patching, the pointers must point inside this range
  // (the requested location of the archive, as mapped at runtime).
  address valid_new_base = (address)Arguments::default_SharedBaseAddress();
  address valid_new_end  = valid_new_base + size;

  log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT " ] to "
                 "[" INTPTR_FORMAT " - " INTPTR_FORMAT " ]", p2i(patch_base), p2i(patch_end),
                 p2i(valid_new_base), p2i(valid_new_end));

  SharedDataRelocator patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
                              valid_new_base, valid_new_end, addr_delta);
  ptrmap->iterate(&patcher);
}

void VM_PopulateDumpSharedSpace::doit() {
  CHeapBitMap ptrmap;
  MetaspaceShared::initialize_ptr_marker(&ptrmap);

  // We should no longer allocate anything from the metaspace, because:
  //
  // (1) Metaspace::allocate might trigger GC if we have run out of
  //     committed metaspace, but we can't GC because we're running
  //     in the VM thread.
  // (2) ArchiveCompactor needs to work with a stable set of MetaspaceObjs.
  Metaspace::freeze();
  DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);

  Thread* THREAD = VMThread::vm_thread();

  FileMapInfo::check_nonempty_dir_in_shared_path_table();

  NOT_PRODUCT(SystemDictionary::verify();)
  // The following guarantee is meant to ensure that no loader constraints
  // exist yet, since the constraints table is not shared. This becomes
  // more important now that we don't re-initialize vtables/itables for
  // shared classes at runtime, where constraints were previously created.
  guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
            "loader constraints are not saved");
  guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
            "placeholders are not saved");

  // At this point, many classes have been loaded.
  // Gather systemDictionary classes in a global array and do everything on
  // that array, so we don't have to walk the SystemDictionary again.
  SystemDictionaryShared::check_excluded_classes();
  _global_klass_objects = new GrowableArray<Klass*>(1000);
  CollectClassesClosure collect_classes;
  ClassLoaderDataGraph::loaded_classes_do(&collect_classes);

  print_class_stats();

  // Ensure the ConstMethods won't be modified at run-time
  tty->print("Updating ConstMethods ... ");
  rewrite_nofast_bytecodes_and_calculate_fingerprints();
  tty->print_cr("done. ");

  // Remove all references outside the metadata
  tty->print("Removing unshareable information ... ");
  remove_unshareable_in_classes();
  tty->print_cr("done. ");

  ArchiveCompactor::initialize();
  ArchiveCompactor::copy_and_compact();

  dump_symbols();

  // Dump supported java heap objects
  _closed_archive_heap_regions = NULL;
  _open_archive_heap_regions = NULL;
  dump_java_heap_objects();

  ArchiveCompactor::relocate_well_known_klasses();

  char* serialized_data_start = dump_read_only_tables();
  _ro_region.pack(&_md_region);

  char* vtbl_list = _md_region.top();
  MetaspaceShared::allocate_cpp_vtable_clones();
  _md_region.pack();

  // During patching, some virtual methods may be called, so at this point
  // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
  MetaspaceShared::patch_cpp_vtable_pointers();
void VM_PopulateDumpSharedSpace::doit() {
  CHeapBitMap ptrmap;
  MetaspaceShared::initialize_ptr_marker(&ptrmap);

  // We should no longer allocate anything from the metaspace, because:
  //
  // (1) Metaspace::allocate might trigger GC if we have run out of
  //     committed metaspace, but we can't GC because we're running
  //     in the VM thread.
  // (2) ArchiveCompactor needs to work with a stable set of MetaspaceObjs.
  Metaspace::freeze();
  DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);

  Thread* THREAD = VMThread::vm_thread();

  FileMapInfo::check_nonempty_dir_in_shared_path_table();

  NOT_PRODUCT(SystemDictionary::verify();)
  // The following guarantee is meant to ensure that no loader constraints
  // exist yet, since the constraints table is not shared. This becomes
  // more important now that we don't re-initialize vtables/itables for
  // shared classes at runtime, where constraints were previously created.
  guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
            "loader constraints are not saved");
  guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
            "placeholders are not saved");

  // At this point, many classes have been loaded.
  // Gather the SystemDictionary classes into a global array and work on that,
  // so we don't have to walk the SystemDictionary again.
  SystemDictionaryShared::check_excluded_classes();
  _global_klass_objects = new GrowableArray<Klass*>(1000);
  CollectClassesClosure collect_classes;
  ClassLoaderDataGraph::loaded_classes_do(&collect_classes);

  print_class_stats();

  // Ensure the ConstMethods won't be modified at run-time
  tty->print("Updating ConstMethods ... ");
  rewrite_nofast_bytecodes_and_calculate_fingerprints();
  tty->print_cr("done. ");

  // Remove all references outside the metadata
  tty->print("Removing unshareable information ... ");
  remove_unshareable_in_classes();
  tty->print_cr("done. ");

  ArchiveCompactor::initialize();
  ArchiveCompactor::copy_and_compact();

  dump_symbols();

  // Dump supported java heap objects
  _closed_archive_heap_regions = NULL;
  _open_archive_heap_regions = NULL;
  dump_java_heap_objects();

  ArchiveCompactor::relocate_well_known_klasses();

  char* serialized_data_start = dump_read_only_tables();
  _ro_region.pack(&_md_region);

  char* vtbl_list = _md_region.top();
  MetaspaceShared::allocate_cpp_vtable_clones();
  _md_region.pack();

  // During patching, some virtual methods may be called, so at this point
  // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
  MetaspaceShared::patch_cpp_vtable_pointers();

  // The vtable clones contain addresses of the current process.
  // We don't want to write these addresses into the archive.
  MetaspaceShared::zero_cpp_vtable_clones_for_writing();

  // Relocate the data so that it can be mapped to Arguments::default_SharedBaseAddress()
  // without runtime relocation.
  ArchivePtrMarker::compact((address)SharedBaseAddress, (address)_md_region.top());
  relocate_to_default_base_address(&ptrmap);

  // Create and write the archive file that maps the shared spaces.

  FileMapInfo* mapinfo = new FileMapInfo(true);
  mapinfo->populate_header(os::vm_allocation_granularity());
  mapinfo->set_serialized_data_start(serialized_data_start);
  mapinfo->set_misc_data_patching_start(vtbl_list);
  mapinfo->set_i2i_entry_code_buffers(MetaspaceShared::i2i_entry_code_buffers(),
                                      MetaspaceShared::i2i_entry_code_buffers_size());
  mapinfo->open_for_write();

  // NOTE: mc contains the trampoline code for method entries, which are patched at run time,
  // so it needs to be read/write.
  write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false, /*allow_exec=*/true);
  write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false, /*allow_exec=*/false);
  write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true,  /*allow_exec=*/false);
  write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false, /*allow_exec=*/false);

  mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap());

  _total_closed_archive_region_size = mapinfo->write_archive_heap_regions(
                                        _closed_archive_heap_regions,
                                        _closed_archive_heap_oopmaps,
                                        MetaspaceShared::first_closed_archive_heap_region,
                                        MetaspaceShared::max_closed_archive_heap_region);
  _total_open_archive_region_size = mapinfo->write_archive_heap_regions(
                                        _open_archive_heap_regions,
                                        _open_archive_heap_oopmaps,
                                        MetaspaceShared::first_open_archive_heap_region,
                                        MetaspaceShared::max_open_archive_heap_region);

  mapinfo->set_final_requested_base((char*)Arguments::default_SharedBaseAddress());
  mapinfo->set_header_crc(mapinfo->compute_header_crc());
  mapinfo->write_header();
  mapinfo->close();

  // Restore the vtable in case we invoke any virtual methods.
  MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);

  print_region_stats();

  if (log_is_enabled(Info, cds)) {
    ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
                                                 int(_mc_region.used()), int(_md_region.used()));
  }

  if (PrintSystemDictionaryAtExit) {
    SystemDictionary::print();
  }

  if (AllowArchivingWithJavaAgent) {
    warning("This archive was created with AllowArchivingWithJavaAgent. It should be used "
            "for testing purposes only and should not be used in a production environment");
  }

  // There may be other pending VM operations that operate on the InstanceKlasses,
  // which will fail because InstanceKlass::remove_unshareable_info()
  // has been called. Forget these operations and exit the VM directly.
  vm_direct_exit(0);
}

void VM_PopulateDumpSharedSpace::print_region_stats() {
  // Print statistics of all the regions
  const size_t bitmap_used = ArchivePtrMarker::ptrmap()->size_in_bytes();
  const size_t bitmap_reserved = align_up(bitmap_used, Metaspace::reserve_alignment());
  const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
                                _mc_region.reserved() + _md_region.reserved() +
                                bitmap_reserved +
                                _total_closed_archive_region_size +
                                _total_open_archive_region_size;
  const size_t total_bytes = _ro_region.used() + _rw_region.used() +
                             _mc_region.used() + _md_region.used() +
                             bitmap_used +
                             _total_closed_archive_region_size +
                             _total_open_archive_region_size;
  const double total_u_perc = percent_of(total_bytes, total_reserved);

  _mc_region.print(total_reserved);
  _rw_region.print(total_reserved);
  _ro_region.print(total_reserved);
  _md_region.print(total_reserved);
  print_bitmap_region_stats(bitmap_reserved, total_reserved);
  print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved);
  print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);

  tty->print_cr("total    : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
                total_bytes, total_reserved, total_u_perc);
}

void VM_PopulateDumpSharedSpace::print_bitmap_region_stats(size_t size, size_t total_size) {
  tty->print_cr("bm  space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
                size, size/double(total_size)*100.0, size, p2i(NULL));
}

void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                                                         const char *name, size_t total_size) {
  int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
  for (int i = 0; i < arr_len; i++) {
    char* start = (char*)heap_mem->at(i).start();
    size_t size = heap_mem->at(i).byte_size();
    char* top = start + size;
    tty->print_cr("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
                  name, i, size, size/double(total_size)*100.0, size, p2i(start));
  }
}

// Update a Java object so that its Klass* points to the new location after
// the shared archive has been compacted.
void MetaspaceShared::relocate_klass_ptr(oop o) {
  assert(DumpSharedSpaces, "sanity");
  Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
  o->set_klass(k);
}

Klass* MetaspaceShared::get_relocated_klass(Klass *k, bool is_final) {
  assert(DumpSharedSpaces, "sanity");
  k = ArchiveCompactor::get_relocated_klass(k);
  if (is_final) {
    k = (Klass*)(address(k) + final_delta());
  }
  return k;
}

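// Example with illustrative addresses: if ArchiveCompactor copied a Klass to
// 0x811234000 in the dump buffer and final_delta() is -0x10000000, then
// get_relocated_klass(k, /*is_final=*/true) returns (Klass*)0x801234000 --
// the address the Klass will have once the archive is mapped at the default
// SharedBaseAddress at runtime.
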
class LinkSharedClassesClosure : public KlassClosure {
  Thread* THREAD;
  bool    _made_progress;
 public:
  LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {}

  void reset()               { _made_progress = false; }
  bool made_progress() const { return _made_progress; }

  void do_klass(Klass* k) {
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      // Link the class to cause the bytecodes to be rewritten and the
      // cpcache to be created. Class verification is done according
      // to the -Xverify setting.
      _made_progress |= MetaspaceShared::try_link_class(ik, THREAD);
      guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");

      ik->constants()->resolve_class_constants(THREAD);
    }
  }
};

class CheckSharedClassesClosure : public KlassClosure {
  bool _made_progress;
 public:
  CheckSharedClassesClosure() : _made_progress(false) {}

  void reset()               { _made_progress = false; }
  bool made_progress() const { return _made_progress; }
  void do_klass(Klass* k) {
    if (k->is_instance_klass() && InstanceKlass::cast(k)->check_sharing_error_state()) {
      _made_progress = true;
    }
  }
};

void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) {
  // We need to iterate because verification may cause additional classes
  // to be loaded.
  LinkSharedClassesClosure link_closure(THREAD);
  do {
    link_closure.reset();
    ClassLoaderDataGraph::unlocked_loaded_classes_do(&link_closure);
    guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
  } while (link_closure.made_progress());

  if (_has_error_classes) {
    // Mark all classes whose super class or interfaces failed verification.
    CheckSharedClassesClosure check_closure;
    do {
      // Not completely sure if we need to do this iteratively. Anyway,
      // we should come here only if there are unverifiable classes, which
      // shouldn't happen in normal cases. So better safe than sorry.
      check_closure.reset();
      ClassLoaderDataGraph::unlocked_loaded_classes_do(&check_closure);
    } while (check_closure.made_progress());
  }
}

void MetaspaceShared::prepare_for_dumping() {
  Arguments::check_unsupported_dumping_properties();
  ClassLoader::initialize_shared_path();
}

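// For reference, the dumping code below is typically driven from the command
// line roughly like this (paths are placeholders):
//
//   java -Xshare:dump \
//        -XX:SharedClassListFile=<classlist> \
//        -XX:SharedArchiveFile=<archive.jsa>
//
// If -XX:SharedClassListFile is omitted, preload_and_dump() falls back to the
// default "classlist" file in the JDK's lib directory, as coded below.
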
// Preload classes from a list, populate the shared spaces and dump to a
// file.
void MetaspaceShared::preload_and_dump(TRAPS) {
  { TraceTime timer("Dump Shared Spaces", TRACETIME_LOG(Info, startuptime));
    ResourceMark rm;
    char class_list_path_str[JVM_MAXPATHLEN];
    // Preload classes to be shared.
    const char* class_list_path;
    if (SharedClassListFile == NULL) {
      // Construct the path to the default class list (under the JDK's lib
      // directory): strip the binary name and two directories from the VM's
      // path, then tack on "lib" (if not already there) and "classlist".
      os::jvm_path(class_list_path_str, sizeof(class_list_path_str));
      for (int i = 0; i < 3; i++) {
        char *end = strrchr(class_list_path_str, *os::file_separator());
        if (end != NULL) *end = '\0';
      }
      int class_list_path_len = (int)strlen(class_list_path_str);
      if (class_list_path_len >= 3) {
        if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
          if (class_list_path_len < JVM_MAXPATHLEN - 4) {
            jio_snprintf(class_list_path_str + class_list_path_len,
                         sizeof(class_list_path_str) - class_list_path_len,
                         "%slib", os::file_separator());
            class_list_path_len += 4;
          }
        }
      }
      if (class_list_path_len < JVM_MAXPATHLEN - 10) {
        jio_snprintf(class_list_path_str + class_list_path_len,
                     sizeof(class_list_path_str) - class_list_path_len,
                     "%sclasslist", os::file_separator());
      }
      class_list_path = class_list_path_str;
    } else {
      class_list_path = SharedClassListFile;
    }

    tty->print_cr("Loading classes to share ...");
    _has_error_classes = false;
    int class_count = preload_classes(class_list_path, THREAD);
    if (ExtraSharedClassListFile) {
      class_count += preload_classes(ExtraSharedClassListFile, THREAD);
    }
    tty->print_cr("Loading classes to share: done.");

    log_info(cds)("Shared spaces: preloaded %d classes", class_count);

    if (SharedArchiveConfigFile) {
      tty->print_cr("Reading extra data from %s ...", SharedArchiveConfigFile);
      read_extra_data(SharedArchiveConfigFile, THREAD);
      tty->print_cr("Reading extra data: done.");
    }

    HeapShared::init_subgraph_entry_fields(THREAD);

    // Rewrite and link classes
    tty->print_cr("Rewriting and linking classes ...");

    // Link any classes which got missed. This would happen if we have loaded classes that
    // were not explicitly specified in the classlist. E.g., if an interface implemented by class K
    // fails verification, all other interfaces that were not specified in the classlist but
    // are implemented by K are not verified.
    link_and_cleanup_shared_classes(CATCH);
    tty->print_cr("Rewriting and linking classes: done");

    if (HeapShared::is_heap_object_archiving_allowed()) {
      // Avoid fragmentation while archiving heap objects.
      Universe::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(true);
      Universe::heap()->collect(GCCause::_archive_time_gc);
      Universe::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(false);
    }

    VM_PopulateDumpSharedSpace op;
    VMThread::execute(&op);
  }
}

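// A minimal sketch of the class list format consumed by preload_classes()
// below: one class per line, named in the internal (slashed) form, e.g.
//
//   java/lang/Object
//   java/util/ArrayList
//   com/example/Foo        <- made-up application class, for illustration
//
// See ClassListParser for the full syntax accepted here.
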
int MetaspaceShared::preload_classes(const char* class_list_path, TRAPS) {
  ClassListParser parser(class_list_path);
  int class_count = 0;

  while (parser.parse_one_line()) {
    Klass* klass = parser.load_current_class(THREAD);
    if (HAS_PENDING_EXCEPTION) {
      if (klass == NULL &&
          (PENDING_EXCEPTION->klass()->name() == vmSymbols::java_lang_ClassNotFoundException())) {
        // print a warning only when the pending exception is class not found
        log_warning(cds)("Preload Warning: Cannot find %s", parser.current_class_name());
      }
      CLEAR_PENDING_EXCEPTION;
    }
    if (klass != NULL) {
      if (log_is_enabled(Trace, cds)) {
        ResourceMark rm;
        log_trace(cds)("Shared spaces preloaded: %s", klass->external_name());
      }

      if (klass->is_instance_klass()) {
        InstanceKlass* ik = InstanceKlass::cast(klass);

        // Link the class to cause the bytecodes to be rewritten and the
        // cpcache to be created. The linking is done as soon as classes
        // are loaded, so that the related data structures (klass and
        // cpCache) are located together.
        try_link_class(ik, THREAD);
        guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
      }

      class_count++;
    }
  }

  return class_count;
}

// Returns true if the class's status has changed
bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
  assert(DumpSharedSpaces, "should only be called during dumping");
  if (ik->init_state() < InstanceKlass::linked) {
    bool saved = BytecodeVerificationLocal;
    if (ik->loader_type() == 0 && ik->class_loader() == NULL) {
      // The verification decision is based on BytecodeVerificationRemote
      // for non-system classes. Since we are using the NULL classloader
      // to load non-system classes for customized class loaders during dumping,
      // we need to temporarily change BytecodeVerificationLocal to be the same as
      // BytecodeVerificationRemote. Note that this can cause the parent system
      // classes to be verified as well. The extra overhead is acceptable during
      // dumping.
      BytecodeVerificationLocal = BytecodeVerificationRemote;
    }
    ik->link_class(THREAD);
    if (HAS_PENDING_EXCEPTION) {
      ResourceMark rm;
      log_warning(cds)("Preload Warning: Verification failed for %s",
                       ik->external_name());
      CLEAR_PENDING_EXCEPTION;
      ik->set_in_error_state();
      _has_error_classes = true;
    }
    BytecodeVerificationLocal = saved;
    return true;
  } else {
    return false;
  }
}

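// A note on the heap archiving below (G1 only): the "closed" regions hold
// objects that are never modified at runtime and can be mapped read-only
// (e.g., archived interned strings), while the "open" regions hold objects
// that may still be written to after mapping (e.g., the archived mirrors and
// subgraph objects). See HeapShared for the authoritative details.
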
#if INCLUDE_CDS_JAVA_HEAP
void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
  // The closed and the open archive heap spaces have at most two regions each.
  // See FileMapInfo::write_archive_heap_regions() for details.
  _closed_archive_heap_regions = new GrowableArray<MemRegion>(2);
  _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
  HeapShared::archive_java_heap_objects(_closed_archive_heap_regions,
                                        _open_archive_heap_regions);
  ArchiveCompactor::OtherROAllocMark mark;
  HeapShared::write_subgraph_info_table();
}

void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps() {
  if (HeapShared::is_heap_object_archiving_allowed()) {
    _closed_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
    dump_archive_heap_oopmaps(_closed_archive_heap_regions, _closed_archive_heap_oopmaps);

    _open_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
    dump_archive_heap_oopmaps(_open_archive_heap_regions, _open_archive_heap_oopmaps);
  }
}

void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                                           GrowableArray<ArchiveHeapOopmapInfo>* oopmaps) {
  for (int i = 0; i < regions->length(); i++) {
    ResourceBitMap oopmap = HeapShared::calculate_oopmap(regions->at(i));
    size_t size_in_bits = oopmap.size();
    size_t size_in_bytes = oopmap.size_in_bytes();
    uintptr_t* buffer = (uintptr_t*)_ro_region.allocate(size_in_bytes, sizeof(intptr_t));
    oopmap.write_to(buffer, size_in_bytes);
    log_info(cds)("Oopmap = " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for heap region "
                  INTPTR_FORMAT " (" SIZE_FORMAT_W(8) " bytes)",
                  p2i(buffer), size_in_bytes,
                  p2i(regions->at(i).start()), regions->at(i).byte_size());

    ArchiveHeapOopmapInfo info;
    info._oopmap = (address)buffer;
    info._oopmap_size_in_bits = size_in_bits;
    oopmaps->append(info);
  }
}
#endif // INCLUDE_CDS_JAVA_HEAP

void ReadClosure::do_ptr(void** p) {
  assert(*p == NULL, "initializing previously initialized pointer.");
  intptr_t obj = nextPtr();
  assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
         "hit tag while initializing ptrs.");
  *p = (void*)obj;
}

void ReadClosure::do_u4(u4* p) {
  intptr_t obj = nextPtr();
  *p = (u4)(uintx(obj));
}

void ReadClosure::do_bool(bool* p) {
  intptr_t obj = nextPtr();
  *p = (bool)(uintx(obj));
}

void ReadClosure::do_tag(int tag) {
  int old_tag = (int)(intptr_t)nextPtr();
  assert(tag == old_tag, "old tag doesn't match");
  FileMapInfo::assert_mark(tag == old_tag);
}

void ReadClosure::do_oop(oop *p) {
  narrowOop o = (narrowOop)nextPtr();
  if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) {
    *p = NULL;
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archived heap object is not allowed");
    assert(HeapShared::open_archive_heap_region_mapped(),
           "Open archive heap region is not mapped");
    *p = HeapShared::decode_from_archive(o);
  }
}

void ReadClosure::do_mirror_oop(oop *p) {
  do_oop(p);
  oop mirror = *p;
  if (mirror != NULL) {
    java_lang_Class::update_archived_mirror_native_pointers(mirror);
  }
}

void ReadClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    *(intptr_t*)start = nextPtr();
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}

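// A minimal sketch of how ReadClosure pairs with the dump-time WriteClosure
// (used in dump_read_only_tables()): both sides must make exactly the same
// sequence of calls against the same data, e.g.
//
//   // at dump time                      // at runtime
//   soc->do_tag(--tag);                  soc->do_tag(--tag);    // must match
//   soc->do_ptr((void**)&_the_table);    soc->do_ptr((void**)&_the_table);
//
// (_the_table is a placeholder name.) Any drift between the two sequences
// trips the assert in do_tag() above, which is how an incompatible archive
// layout is caught.
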
void MetaspaceShared::set_shared_metaspace_range(void* base, void* static_top, void* top) {
  assert(base <= static_top && static_top <= top, "must be");
  _shared_metaspace_static_top = static_top;
  MetaspaceObj::set_shared_metaspace_range(base, top);
}

// Return true if the given address is in the shared region identified by idx
// (e.g., MetaspaceShared::ro).
bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {
  return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx);
}

bool MetaspaceShared::is_in_trampoline_frame(address addr) {
  if (UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc)) {
    return true;
  }
  return false;
}

bool MetaspaceShared::is_shared_dynamic(void* p) {
  if ((p < MetaspaceObj::shared_metaspace_top()) &&
      (p >= _shared_metaspace_static_top)) {
    return true;
  } else {
    return false;
  }
}

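// The predicates above rely on this simple partition of the mapped range
// (layout is illustrative):
//
//   base                          static_top                      top
//    |<------ static archive ------->|<------ dynamic archive ----->|
//
// so is_shared_dynamic(p) is simply static_top <= p < top, and anything in
// [base, static_top) belongs to the static archive.
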
void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
  assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");
  MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
  FileMapInfo* static_mapinfo = open_static_archive();
  FileMapInfo* dynamic_mapinfo = NULL;

  if (static_mapinfo != NULL) {
    dynamic_mapinfo = open_dynamic_archive();

    // First try to map at the requested address
    result = map_archives(static_mapinfo, dynamic_mapinfo, true);
    if (result == MAP_ARCHIVE_MMAP_FAILURE) {
      // Mapping has failed (probably due to ASLR). Let's map at an address chosen
      // by the OS.
      result = map_archives(static_mapinfo, dynamic_mapinfo, false);
    }
  }

  if (result == MAP_ARCHIVE_SUCCESS) {
    bool dynamic_mapped = (dynamic_mapinfo != NULL && dynamic_mapinfo->is_mapped());
    char* cds_base = static_mapinfo->mapped_base();
    char* cds_end  = dynamic_mapped ? dynamic_mapinfo->mapped_end() : static_mapinfo->mapped_end();
    set_shared_metaspace_range(cds_base, static_mapinfo->mapped_end(), cds_end);
    _mapping_delta = static_mapinfo->mapping_delta();
    if (dynamic_mapped) {
      FileMapInfo::set_shared_path_table(dynamic_mapinfo);
    } else {
      FileMapInfo::set_shared_path_table(static_mapinfo);
    }
  } else {
    set_shared_metaspace_range(NULL, NULL, NULL);
    UseSharedSpaces = false;
    FileMapInfo::fail_continue("Unable to map shared spaces");
    if (PrintSharedArchiveAndExit) {
      vm_exit_during_initialization("Unable to use shared archive.");
    }
  }

  if (static_mapinfo != NULL && !static_mapinfo->is_mapped()) {
    delete static_mapinfo;
  }
  if (dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped()) {
    delete dynamic_mapinfo;
  }
}

FileMapInfo* MetaspaceShared::open_static_archive() {
  FileMapInfo* mapinfo = new FileMapInfo(true);
  if (!mapinfo->initialize()) {
    delete(mapinfo);
    return NULL;
  }
  return mapinfo;
}

FileMapInfo* MetaspaceShared::open_dynamic_archive() {
  if (DynamicDumpSharedSpaces) {
    return NULL;
  }
  if (Arguments::GetSharedDynamicArchivePath() == NULL) {
    return NULL;
  }

  FileMapInfo* mapinfo = new FileMapInfo(false);
  if (!mapinfo->initialize()) {
    delete(mapinfo);
    return NULL;
  }
  return mapinfo;
}

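// For reference, the mapping code below runs during startup whenever CDS is
// enabled, e.g. (paths are placeholders):
//
//   java -Xshare:on -XX:SharedArchiveFile=base.jsa:top.jsa ...
//
// where the optional top (dynamic) archive is appended to the base (static)
// archive after the path separator (';' on Windows, ':' elsewhere).
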
MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, FileMapInfo* dynamic_mapinfo,
                                               bool use_requested_addr) {
  // Uncomment the next line to benchmark mapping at alternative locations.
  // if (SharedBaseAddress == 0 && use_requested_addr) { return MAP_ARCHIVE_MMAP_FAILURE; }

  if (dynamic_mapinfo != NULL) {
    // Ensure that the OS won't be able to allocate new memory spaces between the two
    // archives, or else it would mess up the simple comparison in MetaspaceObj::is_shared().
    assert(static_mapinfo->mapping_end_offset() == dynamic_mapinfo->mapping_base_offset(), "no gap");
  }

  ReservedSpace main_rs, archive_space_rs, class_space_rs;
  MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
  char* mapped_base_address = reserve_address_space_for_archives(static_mapinfo, dynamic_mapinfo,
                                                                 use_requested_addr, main_rs, archive_space_rs,
                                                                 class_space_rs);
  if (mapped_base_address == NULL) {
    result = MAP_ARCHIVE_MMAP_FAILURE;
  } else {
    log_debug(cds)("Reserved archive_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
                   p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size());
    log_debug(cds)("Reserved class_space_rs   [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
                   p2i(class_space_rs.base()), p2i(class_space_rs.end()), class_space_rs.size());
    MapArchiveResult static_result = map_archive(static_mapinfo, mapped_base_address, archive_space_rs);
    MapArchiveResult dynamic_result = (static_result == MAP_ARCHIVE_SUCCESS) ?
      map_archive(dynamic_mapinfo, mapped_base_address, archive_space_rs) : MAP_ARCHIVE_OTHER_FAILURE;

    if (static_result == MAP_ARCHIVE_SUCCESS) {
      if (dynamic_result == MAP_ARCHIVE_SUCCESS) {
        result = MAP_ARCHIVE_SUCCESS;
      } else if (dynamic_result == MAP_ARCHIVE_OTHER_FAILURE) {
        assert(dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped(), "must have failed");
        // No need to retry mapping the dynamic archive again, as it will never succeed
        // (bad file, etc) -- just keep the base archive.
        log_warning(cds, dynamic)("Unable to use shared archive. The top archive failed to load: %s",
                                  dynamic_mapinfo->full_path());
        result = MAP_ARCHIVE_SUCCESS;
        // FIXME: reduce archive space end ....
      } else {
        result = MAP_ARCHIVE_MMAP_FAILURE;
      }
    } else if (static_result == MAP_ARCHIVE_OTHER_FAILURE) {
      result = MAP_ARCHIVE_OTHER_FAILURE;
    } else {
      result = MAP_ARCHIVE_MMAP_FAILURE;
    }
  }

  if (result == MAP_ARCHIVE_SUCCESS) {
    if (!main_rs.is_reserved() && class_space_rs.is_reserved()) {
      MemTracker::record_virtual_memory_type((address)class_space_rs.base(), mtClass);
    }
    SharedBaseAddress = (size_t)mapped_base_address;
    LP64_ONLY({
        if (Metaspace::using_class_space()) {
          assert(class_space_rs.is_reserved(), "must be");
          char* cds_base = static_mapinfo->mapped_base();
          Metaspace::allocate_metaspace_compressed_klass_ptrs(class_space_rs, NULL, (address)cds_base);
          // map_heap_regions() compares the current narrow oop and klass encodings
          // with the archived ones, so it must be done after all encodings are determined.
          static_mapinfo->map_heap_regions();
        }
        CompressedKlassPointers::set_range(CompressedClassSpaceSize);
      });
  } else {
    unmap_archive(static_mapinfo);
    unmap_archive(dynamic_mapinfo);
    release_reserved_spaces(main_rs, archive_space_rs, class_space_rs);
  }

  return result;
}

char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_mapinfo,
                                                          FileMapInfo* dynamic_mapinfo,
                                                          bool use_requested_addr,
                                                          ReservedSpace& main_rs,
                                                          ReservedSpace& archive_space_rs,
                                                          ReservedSpace& class_space_rs) {
  const bool use_klass_space = NOT_LP64(false) LP64_ONLY(Metaspace::using_class_space());
  const size_t class_space_size = NOT_LP64(0) LP64_ONLY(Metaspace::compressed_class_space_size());

  if (use_klass_space) {
    assert(class_space_size > 0, "CompressedClassSpaceSize must have been validated");
  }
  if (use_requested_addr && !is_aligned(static_mapinfo->requested_base_address(), reserved_space_alignment())) {
    return NULL;
  }

  // Size and requested location of the archive_space_rs (for both static and dynamic archives)
  size_t base_offset = static_mapinfo->mapping_base_offset();
  size_t end_offset  = (dynamic_mapinfo == NULL) ? static_mapinfo->mapping_end_offset()
                                                 : dynamic_mapinfo->mapping_end_offset();
  assert(base_offset == 0, "must be");
  assert(is_aligned(end_offset,  os::vm_allocation_granularity()), "must be");
  assert(is_aligned(base_offset, os::vm_allocation_granularity()), "must be");

  // In case reserved_space_alignment() != os::vm_allocation_granularity()
  assert((size_t)os::vm_allocation_granularity() <= reserved_space_alignment(), "must be");
  end_offset = align_up(end_offset, reserved_space_alignment());

  size_t archive_space_size = end_offset - base_offset;

  // Special handling for Windows, which cannot mmap into a reserved space:
  //    use_requested_addr: We just map each region individually, and give up if any one of them fails.
  //   !use_requested_addr: We reserve the space first, and then os::read in all the regions (instead of mmap).
  //                        We're going to patch all the pointers anyway so there's no benefit to mmap.

  if (use_requested_addr) {
    char* archive_space_base = static_mapinfo->requested_base_address() + base_offset;
    char* archive_space_end  = archive_space_base + archive_space_size;
    if (!MetaspaceShared::use_windows_memory_mapping()) {
      archive_space_rs = reserve_shared_space(archive_space_size, archive_space_base);
      if (!archive_space_rs.is_reserved()) {
        return NULL;
      }
    }
    if (use_klass_space) {
      // Make sure we can map the klass space immediately following the archive space.
      char* class_space_base = archive_space_end;
      class_space_rs = reserve_shared_space(class_space_size, class_space_base);
      if (!class_space_rs.is_reserved()) {
        return NULL;
      }
    }
    return static_mapinfo->requested_base_address();
  } else {
    if (use_klass_space) {
      main_rs = reserve_shared_space(archive_space_size + class_space_size);
      if (main_rs.is_reserved()) {
        archive_space_rs = main_rs.first_part(archive_space_size, reserved_space_alignment(), /*split=*/true);
        class_space_rs = main_rs.last_part(archive_space_size);
      }
    } else {
      main_rs = reserve_shared_space(archive_space_size);
      archive_space_rs = main_rs;
    }
    if (archive_space_rs.is_reserved()) {
      return archive_space_rs.base();
    } else {
      return NULL;
    }
  }
}

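// When everything above succeeds on 64-bit with compressed class pointers,
// the reservation looks like this (region sizes are illustrative):
//
//   requested_base_address
//   |<-------- archive_space_rs -------->|<------ class_space_rs ------>|
//   |  mc  |  rw  |  ro  |  md           |   compressed class space     |
//
// Keeping the two contiguous allows a single compressed Klass* encoding to
// cover both the archived classes and those loaded dynamically at runtime.
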
void MetaspaceShared::release_reserved_spaces(ReservedSpace& main_rs,
                                              ReservedSpace& archive_space_rs,
                                              ReservedSpace& class_space_rs) {
  if (main_rs.is_reserved()) {
    assert(main_rs.contains(archive_space_rs.base()), "must be");
    assert(main_rs.contains(class_space_rs.base()), "must be");
    log_debug(cds)("Released shared space (archive+classes) " INTPTR_FORMAT, p2i(main_rs.base()));
    main_rs.release();
  } else {
    if (archive_space_rs.is_reserved()) {
      log_debug(cds)("Released shared space (archive) " INTPTR_FORMAT, p2i(archive_space_rs.base()));
      archive_space_rs.release();
    }
    if (class_space_rs.is_reserved()) {
      log_debug(cds)("Released shared space (classes) " INTPTR_FORMAT, p2i(class_space_rs.base()));
      class_space_rs.release();
    }
  }
}

static int static_regions[]  = {MetaspaceShared::mc,
                                MetaspaceShared::rw,
                                MetaspaceShared::ro,
                                MetaspaceShared::md};
static int dynamic_regions[] = {MetaspaceShared::rw,
                                MetaspaceShared::ro,
                                MetaspaceShared::mc};
static int static_regions_count  = 4;
static int dynamic_regions_count = 3;

MapArchiveResult MetaspaceShared::map_archive(FileMapInfo* mapinfo, char* mapped_base_address, ReservedSpace rs) {
  assert(UseSharedSpaces, "must be runtime");
  if (mapinfo == NULL) {
    return MAP_ARCHIVE_SUCCESS; // no error has happened -- trivially succeeded.
  }

  mapinfo->set_is_mapped(false);

  if (mapinfo->alignment() != (size_t)os::vm_allocation_granularity()) { // FIXME
    // FIXME log
    return MAP_ARCHIVE_OTHER_FAILURE;
  }

  MapArchiveResult result = mapinfo->is_static() ?
    mapinfo->map_regions(static_regions, static_regions_count, mapped_base_address, rs) :
    mapinfo->map_regions(dynamic_regions, dynamic_regions_count, mapped_base_address, rs);

  if (result != MAP_ARCHIVE_SUCCESS) {
    unmap_archive(mapinfo);
    return result;
  }

  if (mapinfo->is_static()) {
    if (!mapinfo->validate_shared_path_table()) {
      unmap_archive(mapinfo);
      return MAP_ARCHIVE_OTHER_FAILURE;
    }
  } else {
    if (!DynamicArchive::validate(mapinfo)) {
      unmap_archive(mapinfo);
      return MAP_ARCHIVE_OTHER_FAILURE;
    }
  }

  mapinfo->set_is_mapped(true);
  return MAP_ARCHIVE_SUCCESS;
}

void MetaspaceShared::unmap_archive(FileMapInfo* mapinfo) {
  assert(UseSharedSpaces, "must be runtime");
  if (mapinfo != NULL) {
    if (mapinfo->is_static()) {
      mapinfo->unmap_regions(static_regions, static_regions_count);
    } else {
      mapinfo->unmap_regions(dynamic_regions, dynamic_regions_count);
    }
    mapinfo->set_is_mapped(false);
  }
}

// Read the miscellaneous data from the shared file, and
// deserialize it to its various destinations.

void MetaspaceShared::initialize_shared_spaces() {
  FileMapInfo *static_mapinfo = FileMapInfo::current_info();
  _i2i_entry_code_buffers = static_mapinfo->i2i_entry_code_buffers();
  _i2i_entry_code_buffers_size = static_mapinfo->i2i_entry_code_buffers_size();
  char* buffer = static_mapinfo->misc_data_patching_start();
  clone_cpp_vtables((intptr_t*)buffer);

  // Verify various attributes of the archive, plus initialize the
  // shared string/symbol tables
  buffer = static_mapinfo->serialized_data_start();
  intptr_t* array = (intptr_t*)buffer;
  ReadClosure rc(&array);
  serialize(&rc);

  // Initialize the run-time symbol table.
  SymbolTable::create_table();

  static_mapinfo->patch_archived_heap_embedded_pointers();

  // Close the mapinfo file
  static_mapinfo->close();

  FileMapInfo *dynamic_mapinfo = FileMapInfo::dynamic_info();
  if (dynamic_mapinfo != NULL) {
    intptr_t* buffer = (intptr_t*)dynamic_mapinfo->serialized_data_start();
    ReadClosure rc(&buffer);
    SymbolTable::serialize_shared_table_header(&rc, false);
    SystemDictionaryShared::serialize_dictionary_headers(&rc, false);
    dynamic_mapinfo->close();
  }

  if (PrintSharedArchiveAndExit) {
    if (PrintSharedDictionary) {
      tty->print_cr("\nShared classes:\n");
      SystemDictionaryShared::print_on(tty);
    }
    if (FileMapInfo::current_info() == NULL || _archive_loading_failed) {
      tty->print_cr("archive is invalid");
      vm_exit(1);
    } else {
      tty->print_cr("archive is valid");
      vm_exit(0);
    }
  }
}

// JVM/TI RedefineClasses() support:
bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (UseSharedSpaces) {
    // remap the shared readonly space to shared readwrite, private
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (!mapinfo->remap_shared_readonly_as_readwrite()) {
      return false;
    }
    if (FileMapInfo::dynamic_info() != NULL) {
      mapinfo = FileMapInfo::dynamic_info();
      if (!mapinfo->remap_shared_readonly_as_readwrite()) {
        return false;
      }
    }
    _remapped_readwrite = true;
  }
  return true;
}

void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
  // This is highly unlikely to happen on 64-bit platforms, because we have
  // reserved a 4GB space. On 32-bit platforms we reserve only 256MB, so you
  // could run out of space with 100,000 classes or so.
  _mc_region.print_out_of_space_msg(name, needed_bytes);
  _rw_region.print_out_of_space_msg(name, needed_bytes);
  _ro_region.print_out_of_space_msg(name, needed_bytes);
  _md_region.print_out_of_space_msg(name, needed_bytes);

  vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
                                "Please reduce the number of shared classes.");
}

intx MetaspaceShared::final_delta() { // FIXME rename
  return intx(Arguments::default_SharedBaseAddress())  // We want the archive to be mapped to here at runtime
       - intx(SharedBaseAddress);                      // .. but the archive is mapped at here at dump time
}
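
// Example: an archive dumped with the default -XX:SharedBaseAddress yields
// final_delta() == 0, which makes relocate_to_default_base_address() a no-op,
// so the archive can be mapped at runtime without patching a single pointer --
// the common case for a plain "java -Xshare:dump".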