1 /* 2 * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "jvm.h" 27 #include "classfile/classLoaderDataGraph.hpp" 28 #include "classfile/classListParser.hpp" 29 #include "classfile/classLoaderExt.hpp" 30 #include "classfile/dictionary.hpp" 31 #include "classfile/loaderConstraints.hpp" 32 #include "classfile/javaClasses.inline.hpp" 33 #include "classfile/placeholders.hpp" 34 #include "classfile/symbolTable.hpp" 35 #include "classfile/stringTable.hpp" 36 #include "classfile/systemDictionary.hpp" 37 #include "classfile/systemDictionaryShared.hpp" 38 #include "code/codeCache.hpp" 39 #include "gc/shared/softRefPolicy.hpp" 40 #include "interpreter/bytecodeStream.hpp" 41 #include "interpreter/bytecodes.hpp" 42 #include "logging/log.hpp" 43 #include "logging/logMessage.hpp" 44 #include "memory/archiveUtils.inline.hpp" 45 #include "memory/dynamicArchive.hpp" 46 #include "memory/filemap.hpp" 47 #include "memory/heapShared.inline.hpp" 48 #include "memory/metaspace.hpp" 49 #include "memory/metaspaceClosure.hpp" 50 #include "memory/metaspaceShared.hpp" 51 #include "memory/resourceArea.hpp" 52 #include "memory/universe.hpp" 53 #include "oops/compressedOops.inline.hpp" 54 #include "oops/instanceClassLoaderKlass.hpp" 55 #include "oops/instanceMirrorKlass.hpp" 56 #include "oops/instanceRefKlass.hpp" 57 #include "oops/methodData.hpp" 58 #include "oops/objArrayKlass.hpp" 59 #include "oops/objArrayOop.hpp" 60 #include "oops/oop.inline.hpp" 61 #include "oops/typeArrayKlass.hpp" 62 #include "prims/jvmtiRedefineClasses.hpp" 63 #include "runtime/handles.inline.hpp" 64 #include "runtime/os.hpp" 65 #include "runtime/safepointVerifiers.hpp" 66 #include "runtime/signature.hpp" 67 #include "runtime/timerTrace.hpp" 68 #include "runtime/vmThread.hpp" 69 #include "runtime/vmOperations.hpp" 70 #include "utilities/align.hpp" 71 #include "utilities/bitMap.inline.hpp" 72 #include "utilities/defaultStream.hpp" 73 #include "utilities/hashtable.inline.hpp" 74 #if INCLUDE_G1GC 75 #include "gc/g1/g1CollectedHeap.hpp" 76 #endif 77 78 ReservedSpace MetaspaceShared::_shared_rs; 79 VirtualSpace MetaspaceShared::_shared_vs; 80 MetaspaceSharedStats MetaspaceShared::_stats; 81 bool MetaspaceShared::_has_error_classes; 82 bool MetaspaceShared::_archive_loading_failed = false; 83 bool MetaspaceShared::_remapped_readwrite = false; 84 address MetaspaceShared::_i2i_entry_code_buffers = NULL; 85 size_t MetaspaceShared::_i2i_entry_code_buffers_size = 0; 86 void* MetaspaceShared::_shared_metaspace_static_top = NULL; 87 intx MetaspaceShared::_relocation_delta; 88 89 // The CDS archive is divided into the following regions: 90 // mc - misc code (the method entry trampolines, c++ vtables) 91 // rw - read-write metadata 92 // ro - read-only metadata and read-only tables 93 // 94 // ca0 - closed archive heap space #0 95 // ca1 - closed archive heap space #1 (may be empty) 96 // oa0 - open archive heap space #0 97 // oa1 - open archive heap space #1 (may be empty) 98 // 99 // The mc, rw, and ro regions are linearly allocated, starting from 100 // SharedBaseAddress, in the order of mc->rw->ro. The size of these 3 regions 101 // are page-aligned, and there's no gap between any consecutive regions. 102 // 103 // These 3 regions are populated in the following steps: 104 // [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are 105 // temporarily allocated outside of the shared regions. Only the method entry 106 // trampolines are written into the mc region. 107 // [2] C++ vtables are copied into the mc region. 108 // [3] ArchiveCompactor copies RW metadata into the rw region. 109 // [4] ArchiveCompactor copies RO metadata into the ro region. 110 // [5] SymbolTable, StringTable, SystemDictionary, and a few other read-only data 111 // are copied into the ro region as read-only tables. 112 // 113 // The s0/s1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects. 114 // Their layout is independent of the other 4 regions. 115 116 char* DumpRegion::expand_top_to(char* newtop) { 117 assert(is_allocatable(), "must be initialized and not packed"); 118 assert(newtop >= _top, "must not grow backwards"); 119 if (newtop > _end) { 120 MetaspaceShared::report_out_of_space(_name, newtop - _top); 121 ShouldNotReachHere(); 122 } 123 uintx delta; 124 if (DynamicDumpSharedSpaces) { 125 delta = DynamicArchive::object_delta_uintx(newtop); 126 } else { 127 delta = MetaspaceShared::object_delta_uintx(newtop); 128 } 129 if (delta > MAX_SHARED_DELTA) { 130 // This is just a sanity check and should not appear in any real world usage. This 131 // happens only if you allocate more than 2GB of shared objects and would require 132 // millions of shared classes. 133 vm_exit_during_initialization("Out of memory in the CDS archive", 134 "Please reduce the number of shared classes."); 135 } 136 137 MetaspaceShared::commit_shared_space_to(newtop); 138 _top = newtop; 139 return _top; 140 } 141 142 char* DumpRegion::allocate(size_t num_bytes, size_t alignment) { 143 char* p = (char*)align_up(_top, alignment); 144 char* newtop = p + align_up(num_bytes, alignment); 145 expand_top_to(newtop); 146 memset(p, 0, newtop - p); 147 return p; 148 } 149 150 void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) { 151 assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment"); 152 intptr_t *p = (intptr_t*)_top; 153 char* newtop = _top + sizeof(intptr_t); 154 expand_top_to(newtop); 155 *p = n; 156 if (need_to_mark) { 157 ArchivePtrMarker::mark_pointer(p); 158 } 159 } 160 161 void DumpRegion::print(size_t total_bytes) const { 162 log_debug(cds)("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT, 163 _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()), 164 p2i(_base + MetaspaceShared::final_delta())); 165 } 166 167 void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) { 168 log_error(cds)("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d", 169 _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base)); 170 if (strcmp(_name, failing_region) == 0) { 171 log_error(cds)(" required = %d", int(needed_bytes)); 172 } 173 } 174 175 void DumpRegion::pack(DumpRegion* next) { 176 assert(!is_packed(), "sanity"); 177 _end = (char*)align_up(_top, Metaspace::reserve_alignment()); 178 _is_packed = true; 179 if (next != NULL) { 180 next->_base = next->_top = this->_end; 181 next->_end = MetaspaceShared::shared_rs()->end(); 182 } 183 } 184 185 static DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"); 186 static size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0; 187 188 void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space, address first_space_bottom) { 189 // Start with 0 committed bytes. The memory will be committed as needed by 190 // MetaspaceShared::commit_shared_space_to(). 191 if (!_shared_vs.initialize(_shared_rs, 0)) { 192 fatal("Unable to allocate memory for shared space"); 193 } 194 first_space->init(&_shared_rs, (char*)first_space_bottom); 195 } 196 197 DumpRegion* MetaspaceShared::misc_code_dump_space() { 198 return &_mc_region; 199 } 200 201 DumpRegion* MetaspaceShared::read_write_dump_space() { 202 return &_rw_region; 203 } 204 205 DumpRegion* MetaspaceShared::read_only_dump_space() { 206 return &_ro_region; 207 } 208 209 void MetaspaceShared::pack_dump_space(DumpRegion* current, DumpRegion* next, 210 ReservedSpace* rs) { 211 current->pack(next); 212 } 213 214 char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) { 215 return _mc_region.allocate(num_bytes); 216 } 217 218 char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) { 219 return _ro_region.allocate(num_bytes); 220 } 221 222 // When reserving an address range using ReservedSpace, we need an alignment that satisfies both: 223 // os::vm_allocation_granularity() -- so that we can sub-divide this range into multiple mmap regions, 224 // while keeping the first range at offset 0 of this range. 225 // Metaspace::reserve_alignment() -- so we can pass the region to 226 // Metaspace::allocate_metaspace_compressed_klass_ptrs. 227 size_t MetaspaceShared::reserved_space_alignment() { 228 size_t os_align = os::vm_allocation_granularity(); 229 size_t ms_align = Metaspace::reserve_alignment(); 230 if (os_align >= ms_align) { 231 assert(os_align % ms_align == 0, "must be a multiple"); 232 return os_align; 233 } else { 234 assert(ms_align % os_align == 0, "must be a multiple"); 235 return ms_align; 236 } 237 } 238 239 ReservedSpace MetaspaceShared::reserve_shared_space(size_t size, char* requested_address) { 240 return Metaspace::reserve_space(size, reserved_space_alignment(), 241 requested_address, requested_address != NULL); 242 } 243 244 void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() { 245 assert(DumpSharedSpaces, "should be called for dump time only"); 246 const size_t reserve_alignment = reserved_space_alignment(); 247 char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment); 248 249 #ifdef _LP64 250 // On 64-bit VM, the heap and class space layout will be the same as if 251 // you're running in -Xshare:on mode: 252 // 253 // +-- SharedBaseAddress (default = 0x800000000) 254 // v 255 // +-..---------+---------+ ... +----+----+----+--------------------+ 256 // | Heap | Archive | | MC | RW | RO | class space | 257 // +-..---------+---------+ ... +----+----+----+--------------------+ 258 // |<-- MaxHeapSize -->| |<-- UnscaledClassSpaceMax = 4GB -->| 259 // 260 const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1); 261 const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment); 262 #else 263 // We don't support archives larger than 256MB on 32-bit due to limited virtual address space. 264 size_t cds_total = align_down(256*M, reserve_alignment); 265 #endif 266 267 bool use_requested_base = true; 268 if (ArchiveRelocationMode == 1) { 269 log_info(cds)("ArchiveRelocationMode == 1: always allocate class space at an alternative address"); 270 use_requested_base = false; 271 } 272 273 // First try to reserve the space at the specified SharedBaseAddress. 274 assert(!_shared_rs.is_reserved(), "must be"); 275 if (use_requested_base) { 276 _shared_rs = reserve_shared_space(cds_total, shared_base); 277 } 278 if (_shared_rs.is_reserved()) { 279 assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match"); 280 } else { 281 // Get a mmap region anywhere if the SharedBaseAddress fails. 282 _shared_rs = reserve_shared_space(cds_total); 283 } 284 if (!_shared_rs.is_reserved()) { 285 vm_exit_during_initialization("Unable to reserve memory for shared space", 286 err_msg(SIZE_FORMAT " bytes.", cds_total)); 287 } 288 289 #ifdef _LP64 290 // During dump time, we allocate 4GB (UnscaledClassSpaceMax) of space and split it up: 291 // + The upper 1 GB is used as the "temporary compressed class space" -- preload_classes() 292 // will store Klasses into this space. 293 // + The lower 3 GB is used for the archive -- when preload_classes() is done, 294 // ArchiveCompactor will copy the class metadata into this space, first the RW parts, 295 // then the RO parts. 296 297 size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment); 298 ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size); 299 CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment); 300 _shared_rs = _shared_rs.first_part(max_archive_size); 301 302 if (UseCompressedClassPointers) { 303 // Set up compress class pointers. 304 CompressedKlassPointers::set_base((address)_shared_rs.base()); 305 // Set narrow_klass_shift to be LogKlassAlignmentInBytes. This is consistent 306 // with AOT. 307 CompressedKlassPointers::set_shift(LogKlassAlignmentInBytes); 308 // Set the range of klass addresses to 4GB. 309 CompressedKlassPointers::set_range(cds_total); 310 Metaspace::initialize_class_space(tmp_class_space); 311 } 312 log_info(cds)("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d", 313 p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift()); 314 315 log_info(cds)("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT, 316 CompressedClassSpaceSize, p2i(tmp_class_space.base())); 317 #endif 318 319 init_shared_dump_space(&_mc_region); 320 SharedBaseAddress = (size_t)_shared_rs.base(); 321 log_info(cds)("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT, 322 _shared_rs.size(), p2i(_shared_rs.base())); 323 } 324 325 // Called by universe_post_init() 326 void MetaspaceShared::post_initialize(TRAPS) { 327 if (UseSharedSpaces) { 328 int size = FileMapInfo::get_number_of_shared_paths(); 329 if (size > 0) { 330 SystemDictionaryShared::allocate_shared_data_arrays(size, THREAD); 331 if (!DynamicDumpSharedSpaces) { 332 FileMapInfo* info; 333 if (FileMapInfo::dynamic_info() == NULL) { 334 info = FileMapInfo::current_info(); 335 } else { 336 info = FileMapInfo::dynamic_info(); 337 } 338 ClassLoaderExt::init_paths_start_index(info->app_class_paths_start_index()); 339 ClassLoaderExt::init_app_module_paths_start_index(info->app_module_paths_start_index()); 340 } 341 } 342 } 343 } 344 345 static GrowableArray<Handle>* _extra_interned_strings = NULL; 346 347 void MetaspaceShared::read_extra_data(const char* filename, TRAPS) { 348 _extra_interned_strings = new (ResourceObj::C_HEAP, mtInternal)GrowableArray<Handle>(10000, true); 349 350 HashtableTextDump reader(filename); 351 reader.check_version("VERSION: 1.0"); 352 353 while (reader.remain() > 0) { 354 int utf8_length; 355 int prefix_type = reader.scan_prefix(&utf8_length); 356 ResourceMark rm(THREAD); 357 if (utf8_length == 0x7fffffff) { 358 // buf_len will overflown 32-bit value. 359 vm_exit_during_initialization(err_msg("string length too large: %d", utf8_length)); 360 } 361 int buf_len = utf8_length+1; 362 char* utf8_buffer = NEW_RESOURCE_ARRAY(char, buf_len); 363 reader.get_utf8(utf8_buffer, utf8_length); 364 utf8_buffer[utf8_length] = '\0'; 365 366 if (prefix_type == HashtableTextDump::SymbolPrefix) { 367 SymbolTable::new_permanent_symbol(utf8_buffer); 368 } else{ 369 assert(prefix_type == HashtableTextDump::StringPrefix, "Sanity"); 370 oop s = StringTable::intern(utf8_buffer, THREAD); 371 372 if (HAS_PENDING_EXCEPTION) { 373 log_warning(cds, heap)("[line %d] extra interned string allocation failed; size too large: %d", 374 reader.last_line_no(), utf8_length); 375 CLEAR_PENDING_EXCEPTION; 376 } else { 377 #if INCLUDE_G1GC 378 if (UseG1GC) { 379 typeArrayOop body = java_lang_String::value(s); 380 const HeapRegion* hr = G1CollectedHeap::heap()->heap_region_containing(body); 381 if (hr->is_humongous()) { 382 // Don't keep it alive, so it will be GC'ed before we dump the strings, in order 383 // to maximize free heap space and minimize fragmentation. 384 log_warning(cds, heap)("[line %d] extra interned string ignored; size too large: %d", 385 reader.last_line_no(), utf8_length); 386 continue; 387 } 388 } 389 #endif 390 // Interned strings are GC'ed if there are no references to it, so let's 391 // add a reference to keep this string alive. 392 assert(s != NULL, "must succeed"); 393 Handle h(THREAD, s); 394 _extra_interned_strings->append(h); 395 } 396 } 397 } 398 } 399 400 void MetaspaceShared::commit_shared_space_to(char* newtop) { 401 Arguments::assert_is_dumping_archive(); 402 char* base = _shared_rs.base(); 403 size_t need_committed_size = newtop - base; 404 size_t has_committed_size = _shared_vs.committed_size(); 405 if (need_committed_size < has_committed_size) { 406 return; 407 } 408 409 size_t min_bytes = need_committed_size - has_committed_size; 410 size_t preferred_bytes = 1 * M; 411 size_t uncommitted = _shared_vs.reserved_size() - has_committed_size; 412 413 size_t commit =MAX2(min_bytes, preferred_bytes); 414 commit = MIN2(commit, uncommitted); 415 assert(commit <= uncommitted, "sanity"); 416 417 bool result = _shared_vs.expand_by(commit, false); 418 ArchivePtrMarker::expand_ptr_end((address*)_shared_vs.high()); 419 420 if (!result) { 421 vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes", 422 need_committed_size)); 423 } 424 425 log_debug(cds)("Expanding shared spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]", 426 commit, _shared_vs.actual_committed_size(), _shared_vs.high()); 427 } 428 429 void MetaspaceShared::initialize_ptr_marker(CHeapBitMap* ptrmap) { 430 ArchivePtrMarker::initialize(ptrmap, (address*)_shared_vs.low(), (address*)_shared_vs.high()); 431 } 432 433 // Read/write a data stream for restoring/preserving metadata pointers and 434 // miscellaneous data from/to the shared archive file. 435 436 void MetaspaceShared::serialize(SerializeClosure* soc) { 437 int tag = 0; 438 soc->do_tag(--tag); 439 440 // Verify the sizes of various metadata in the system. 441 soc->do_tag(sizeof(Method)); 442 soc->do_tag(sizeof(ConstMethod)); 443 soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE)); 444 soc->do_tag(sizeof(ConstantPool)); 445 soc->do_tag(sizeof(ConstantPoolCache)); 446 soc->do_tag(objArrayOopDesc::base_offset_in_bytes()); 447 soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE)); 448 soc->do_tag(sizeof(Symbol)); 449 450 // Dump/restore miscellaneous metadata. 451 JavaClasses::serialize_offsets(soc); 452 Universe::serialize(soc); 453 soc->do_tag(--tag); 454 455 // Dump/restore references to commonly used names and signatures. 456 vmSymbols::serialize(soc); 457 soc->do_tag(--tag); 458 459 // Dump/restore the symbol/string/subgraph_info tables 460 SymbolTable::serialize_shared_table_header(soc); 461 StringTable::serialize_shared_table_header(soc); 462 HeapShared::serialize_subgraph_info_table_header(soc); 463 SystemDictionaryShared::serialize_dictionary_headers(soc); 464 465 InstanceMirrorKlass::serialize_offsets(soc); 466 soc->do_tag(--tag); 467 468 serialize_cloned_cpp_vtptrs(soc); 469 soc->do_tag(--tag); 470 471 soc->do_tag(666); 472 } 473 474 address MetaspaceShared::i2i_entry_code_buffers(size_t total_size) { 475 if (DumpSharedSpaces) { 476 if (_i2i_entry_code_buffers == NULL) { 477 _i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size); 478 _i2i_entry_code_buffers_size = total_size; 479 } 480 } else if (UseSharedSpaces) { 481 assert(_i2i_entry_code_buffers != NULL, "must already been initialized"); 482 } else { 483 return NULL; 484 } 485 486 assert(_i2i_entry_code_buffers_size == total_size, "must not change"); 487 return _i2i_entry_code_buffers; 488 } 489 490 uintx MetaspaceShared::object_delta_uintx(void* obj) { 491 Arguments::assert_is_dumping_archive(); 492 if (DumpSharedSpaces) { 493 assert(shared_rs()->contains(obj), "must be"); 494 } else { 495 assert(is_in_shared_metaspace(obj) || DynamicArchive::is_in_target_space(obj), "must be"); 496 } 497 address base_address = address(SharedBaseAddress); 498 uintx deltax = address(obj) - base_address; 499 return deltax; 500 } 501 502 // Global object for holding classes that have been loaded. Since this 503 // is run at a safepoint just before exit, this is the entire set of classes. 504 static GrowableArray<Klass*>* _global_klass_objects; 505 506 GrowableArray<Klass*>* MetaspaceShared::collected_klasses() { 507 return _global_klass_objects; 508 } 509 510 static void collect_array_classes(Klass* k) { 511 _global_klass_objects->append_if_missing(k); 512 if (k->is_array_klass()) { 513 // Add in the array classes too 514 ArrayKlass* ak = ArrayKlass::cast(k); 515 Klass* h = ak->higher_dimension(); 516 if (h != NULL) { 517 h->array_klasses_do(collect_array_classes); 518 } 519 } 520 } 521 522 class CollectClassesClosure : public KlassClosure { 523 void do_klass(Klass* k) { 524 if (k->is_instance_klass() && 525 SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(k))) { 526 // Don't add to the _global_klass_objects 527 } else { 528 _global_klass_objects->append_if_missing(k); 529 } 530 if (k->is_array_klass()) { 531 // Add in the array classes too 532 ArrayKlass* ak = ArrayKlass::cast(k); 533 Klass* h = ak->higher_dimension(); 534 if (h != NULL) { 535 h->array_klasses_do(collect_array_classes); 536 } 537 } 538 } 539 }; 540 541 static void remove_unshareable_in_classes() { 542 for (int i = 0; i < _global_klass_objects->length(); i++) { 543 Klass* k = _global_klass_objects->at(i); 544 if (!k->is_objArray_klass()) { 545 // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info 546 // on their array classes. 547 assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be"); 548 k->remove_unshareable_info(); 549 } 550 } 551 } 552 553 static void remove_java_mirror_in_classes() { 554 for (int i = 0; i < _global_klass_objects->length(); i++) { 555 Klass* k = _global_klass_objects->at(i); 556 if (!k->is_objArray_klass()) { 557 // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info 558 // on their array classes. 559 assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be"); 560 k->remove_java_mirror(); 561 } 562 } 563 } 564 565 static void clear_basic_type_mirrors() { 566 assert(!HeapShared::is_heap_object_archiving_allowed(), "Sanity"); 567 Universe::set_int_mirror(NULL); 568 Universe::set_float_mirror(NULL); 569 Universe::set_double_mirror(NULL); 570 Universe::set_byte_mirror(NULL); 571 Universe::set_bool_mirror(NULL); 572 Universe::set_char_mirror(NULL); 573 Universe::set_long_mirror(NULL); 574 Universe::set_short_mirror(NULL); 575 Universe::set_void_mirror(NULL); 576 } 577 578 static void rewrite_nofast_bytecode(const methodHandle& method) { 579 BytecodeStream bcs(method); 580 while (!bcs.is_last_bytecode()) { 581 Bytecodes::Code opcode = bcs.next(); 582 switch (opcode) { 583 case Bytecodes::_getfield: *bcs.bcp() = Bytecodes::_nofast_getfield; break; 584 case Bytecodes::_putfield: *bcs.bcp() = Bytecodes::_nofast_putfield; break; 585 case Bytecodes::_aload_0: *bcs.bcp() = Bytecodes::_nofast_aload_0; break; 586 case Bytecodes::_iload: { 587 if (!bcs.is_wide()) { 588 *bcs.bcp() = Bytecodes::_nofast_iload; 589 } 590 break; 591 } 592 default: break; 593 } 594 } 595 } 596 597 // Walk all methods in the class list to ensure that they won't be modified at 598 // run time. This includes: 599 // [1] Rewrite all bytecodes as needed, so that the ConstMethod* will not be modified 600 // at run time by RewriteBytecodes/RewriteFrequentPairs 601 // [2] Assign a fingerprint, so one doesn't need to be assigned at run-time. 602 static void rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread) { 603 for (int i = 0; i < _global_klass_objects->length(); i++) { 604 Klass* k = _global_klass_objects->at(i); 605 if (k->is_instance_klass()) { 606 InstanceKlass* ik = InstanceKlass::cast(k); 607 MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(thread, ik); 608 } 609 } 610 } 611 612 void MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread, InstanceKlass* ik) { 613 for (int i = 0; i < ik->methods()->length(); i++) { 614 methodHandle m(thread, ik->methods()->at(i)); 615 rewrite_nofast_bytecode(m); 616 Fingerprinter fp(m); 617 // The side effect of this call sets method's fingerprint field. 618 fp.fingerprint(); 619 } 620 } 621 622 // Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables. 623 // (In GCC this is the field <Type>::_vptr, i.e., first word in the object.) 624 // 625 // Addresses of the vtables and the methods may be different across JVM runs, 626 // if libjvm.so is dynamically loaded at a different base address. 627 // 628 // To ensure that the Metadata objects in the CDS archive always have the correct vtable: 629 // 630 // + at dump time: we redirect the _vptr to point to our own vtables inside 631 // the CDS image 632 // + at run time: we clone the actual contents of the vtables from libjvm.so 633 // into our own tables. 634 635 // Currently, the archive contain ONLY the following types of objects that have C++ vtables. 636 #define CPP_VTABLE_PATCH_TYPES_DO(f) \ 637 f(ConstantPool) \ 638 f(InstanceKlass) \ 639 f(InstanceClassLoaderKlass) \ 640 f(InstanceMirrorKlass) \ 641 f(InstanceRefKlass) \ 642 f(Method) \ 643 f(ObjArrayKlass) \ 644 f(TypeArrayKlass) 645 646 class CppVtableInfo { 647 intptr_t _vtable_size; 648 intptr_t _cloned_vtable[1]; 649 public: 650 static int num_slots(int vtable_size) { 651 return 1 + vtable_size; // Need to add the space occupied by _vtable_size; 652 } 653 int vtable_size() { return int(uintx(_vtable_size)); } 654 void set_vtable_size(int n) { _vtable_size = intptr_t(n); } 655 intptr_t* cloned_vtable() { return &_cloned_vtable[0]; } 656 void zero() { memset(_cloned_vtable, 0, sizeof(intptr_t) * vtable_size()); } 657 // Returns the address of the next CppVtableInfo that can be placed immediately after this CppVtableInfo 658 static size_t byte_size(int vtable_size) { 659 CppVtableInfo i; 660 return pointer_delta(&i._cloned_vtable[vtable_size], &i, sizeof(u1)); 661 } 662 }; 663 664 template <class T> class CppVtableCloner : public T { 665 static intptr_t* vtable_of(Metadata& m) { 666 return *((intptr_t**)&m); 667 } 668 static CppVtableInfo* _info; 669 670 static int get_vtable_length(const char* name); 671 672 public: 673 // Allocate and initialize the C++ vtable, starting from top, but do not go past end. 674 static intptr_t* allocate(const char* name); 675 676 // Clone the vtable to ... 677 static intptr_t* clone_vtable(const char* name, CppVtableInfo* info); 678 679 static void zero_vtable_clone() { 680 assert(DumpSharedSpaces, "dump-time only"); 681 _info->zero(); 682 } 683 684 static bool is_valid_shared_object(const T* obj) { 685 intptr_t* vptr = *(intptr_t**)obj; 686 return vptr == _info->cloned_vtable(); 687 } 688 }; 689 690 template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL; 691 692 template <class T> 693 intptr_t* CppVtableCloner<T>::allocate(const char* name) { 694 assert(is_aligned(_mc_region.top(), sizeof(intptr_t)), "bad alignment"); 695 int n = get_vtable_length(name); 696 _info = (CppVtableInfo*)_mc_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t)); 697 _info->set_vtable_size(n); 698 699 intptr_t* p = clone_vtable(name, _info); 700 assert((char*)p == _mc_region.top(), "must be"); 701 702 return _info->cloned_vtable(); 703 } 704 705 template <class T> 706 intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) { 707 if (!DumpSharedSpaces) { 708 assert(_info == 0, "_info is initialized only at dump time"); 709 _info = info; // Remember it -- it will be used by MetaspaceShared::is_valid_shared_method() 710 } 711 T tmp; // Allocate temporary dummy metadata object to get to the original vtable. 712 int n = info->vtable_size(); 713 intptr_t* srcvtable = vtable_of(tmp); 714 intptr_t* dstvtable = info->cloned_vtable(); 715 716 // We already checked (and, if necessary, adjusted n) when the vtables were allocated, so we are 717 // safe to do memcpy. 718 log_debug(cds, vtables)("Copying %3d vtable entries for %s", n, name); 719 memcpy(dstvtable, srcvtable, sizeof(intptr_t) * n); 720 return dstvtable + n; 721 } 722 723 // To determine the size of the vtable for each type, we use the following 724 // trick by declaring 2 subclasses: 725 // 726 // class CppVtableTesterA: public InstanceKlass {virtual int last_virtual_method() {return 1;} }; 727 // class CppVtableTesterB: public InstanceKlass {virtual void* last_virtual_method() {return NULL}; }; 728 // 729 // CppVtableTesterA and CppVtableTesterB's vtables have the following properties: 730 // - Their size (N+1) is exactly one more than the size of InstanceKlass's vtable (N) 731 // - The first N entries have are exactly the same as in InstanceKlass's vtable. 732 // - Their last entry is different. 733 // 734 // So to determine the value of N, we just walk CppVtableTesterA and CppVtableTesterB's tables 735 // and find the first entry that's different. 736 // 737 // This works on all C++ compilers supported by Oracle, but you may need to tweak it for more 738 // esoteric compilers. 739 740 template <class T> class CppVtableTesterB: public T { 741 public: 742 virtual int last_virtual_method() {return 1;} 743 }; 744 745 template <class T> class CppVtableTesterA : public T { 746 public: 747 virtual void* last_virtual_method() { 748 // Make this different than CppVtableTesterB::last_virtual_method so the C++ 749 // compiler/linker won't alias the two functions. 750 return NULL; 751 } 752 }; 753 754 template <class T> 755 int CppVtableCloner<T>::get_vtable_length(const char* name) { 756 CppVtableTesterA<T> a; 757 CppVtableTesterB<T> b; 758 759 intptr_t* avtable = vtable_of(a); 760 intptr_t* bvtable = vtable_of(b); 761 762 // Start at slot 1, because slot 0 may be RTTI (on Solaris/Sparc) 763 int vtable_len = 1; 764 for (; ; vtable_len++) { 765 if (avtable[vtable_len] != bvtable[vtable_len]) { 766 break; 767 } 768 } 769 log_debug(cds, vtables)("Found %3d vtable entries for %s", vtable_len, name); 770 771 return vtable_len; 772 } 773 774 #define ALLOC_CPP_VTABLE_CLONE(c) \ 775 _cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner<c>::allocate(#c); \ 776 ArchivePtrMarker::mark_pointer(&_cloned_cpp_vtptrs[c##_Kind]); 777 778 #define CLONE_CPP_VTABLE(c) \ 779 p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p); 780 781 #define ZERO_CPP_VTABLE(c) \ 782 CppVtableCloner<c>::zero_vtable_clone(); 783 784 //------------------------------ for DynamicDumpSharedSpaces - start 785 #define DECLARE_CLONED_VTABLE_KIND(c) c ## _Kind, 786 787 enum { 788 // E.g., ConstantPool_Kind == 0, InstanceKlass == 1, etc. 789 CPP_VTABLE_PATCH_TYPES_DO(DECLARE_CLONED_VTABLE_KIND) 790 _num_cloned_vtable_kinds 791 }; 792 793 // This is the index of all the cloned vtables. E.g., for 794 // ConstantPool* cp = ....; // an archived constant pool 795 // InstanceKlass* ik = ....;// an archived class 796 // the following holds true: 797 // _cloned_cpp_vtptrs[ConstantPool_Kind] == ((intptr_t**)cp)[0] 798 // _cloned_cpp_vtptrs[InstanceKlass_Kind] == ((intptr_t**)ik)[0] 799 static intptr_t** _cloned_cpp_vtptrs = NULL; 800 801 void MetaspaceShared::allocate_cloned_cpp_vtptrs() { 802 assert(DumpSharedSpaces, "must"); 803 size_t vtptrs_bytes = _num_cloned_vtable_kinds * sizeof(intptr_t*); 804 _cloned_cpp_vtptrs = (intptr_t**)_mc_region.allocate(vtptrs_bytes, sizeof(intptr_t*)); 805 } 806 807 void MetaspaceShared::serialize_cloned_cpp_vtptrs(SerializeClosure* soc) { 808 soc->do_ptr((void**)&_cloned_cpp_vtptrs); 809 } 810 811 intptr_t* MetaspaceShared::fix_cpp_vtable_for_dynamic_archive(MetaspaceObj::Type msotype, address obj) { 812 Arguments::assert_is_dumping_archive(); 813 int kind = -1; 814 switch (msotype) { 815 case MetaspaceObj::SymbolType: 816 case MetaspaceObj::TypeArrayU1Type: 817 case MetaspaceObj::TypeArrayU2Type: 818 case MetaspaceObj::TypeArrayU4Type: 819 case MetaspaceObj::TypeArrayU8Type: 820 case MetaspaceObj::TypeArrayOtherType: 821 case MetaspaceObj::ConstMethodType: 822 case MetaspaceObj::ConstantPoolCacheType: 823 case MetaspaceObj::AnnotationsType: 824 case MetaspaceObj::MethodCountersType: 825 case MetaspaceObj::RecordComponentType: 826 // These have no vtables. 827 break; 828 case MetaspaceObj::ClassType: 829 { 830 Klass* k = (Klass*)obj; 831 assert(k->is_klass(), "must be"); 832 if (k->is_instance_klass()) { 833 InstanceKlass* ik = InstanceKlass::cast(k); 834 if (ik->is_class_loader_instance_klass()) { 835 kind = InstanceClassLoaderKlass_Kind; 836 } else if (ik->is_reference_instance_klass()) { 837 kind = InstanceRefKlass_Kind; 838 } else if (ik->is_mirror_instance_klass()) { 839 kind = InstanceMirrorKlass_Kind; 840 } else { 841 kind = InstanceKlass_Kind; 842 } 843 } else if (k->is_typeArray_klass()) { 844 kind = TypeArrayKlass_Kind; 845 } else { 846 assert(k->is_objArray_klass(), "must be"); 847 kind = ObjArrayKlass_Kind; 848 } 849 } 850 break; 851 852 case MetaspaceObj::MethodType: 853 { 854 Method* m = (Method*)obj; 855 assert(m->is_method(), "must be"); 856 kind = Method_Kind; 857 } 858 break; 859 860 case MetaspaceObj::MethodDataType: 861 // We don't archive MethodData <-- should have been removed in removed_unsharable_info 862 ShouldNotReachHere(); 863 break; 864 865 case MetaspaceObj::ConstantPoolType: 866 { 867 ConstantPool *cp = (ConstantPool*)obj; 868 assert(cp->is_constantPool(), "must be"); 869 kind = ConstantPool_Kind; 870 } 871 break; 872 873 default: 874 ShouldNotReachHere(); 875 } 876 877 if (kind >= 0) { 878 assert(kind < _num_cloned_vtable_kinds, "must be"); 879 return _cloned_cpp_vtptrs[kind]; 880 } else { 881 return NULL; 882 } 883 } 884 885 //------------------------------ for DynamicDumpSharedSpaces - end 886 887 // This can be called at both dump time and run time: 888 // - clone the contents of the c++ vtables into the space 889 // allocated by allocate_cpp_vtable_clones() 890 void MetaspaceShared::clone_cpp_vtables(intptr_t* p) { 891 assert(DumpSharedSpaces || UseSharedSpaces, "sanity"); 892 CPP_VTABLE_PATCH_TYPES_DO(CLONE_CPP_VTABLE); 893 } 894 895 void MetaspaceShared::zero_cpp_vtable_clones_for_writing() { 896 assert(DumpSharedSpaces, "dump-time only"); 897 CPP_VTABLE_PATCH_TYPES_DO(ZERO_CPP_VTABLE); 898 } 899 900 // Allocate and initialize the C++ vtables, starting from top, but do not go past end. 901 char* MetaspaceShared::allocate_cpp_vtable_clones() { 902 char* cloned_vtables = _mc_region.top(); // This is the beginning of all the cloned vtables 903 904 assert(DumpSharedSpaces, "dump-time only"); 905 // Layout (each slot is a intptr_t): 906 // [number of slots in the first vtable = n1] 907 // [ <n1> slots for the first vtable] 908 // [number of slots in the first second = n2] 909 // [ <n2> slots for the second vtable] 910 // ... 911 // The order of the vtables is the same as the CPP_VTAB_PATCH_TYPES_DO macro. 912 CPP_VTABLE_PATCH_TYPES_DO(ALLOC_CPP_VTABLE_CLONE); 913 914 return cloned_vtables; 915 } 916 917 bool MetaspaceShared::is_valid_shared_method(const Method* m) { 918 assert(is_in_shared_metaspace(m), "must be"); 919 return CppVtableCloner<Method>::is_valid_shared_object(m); 920 } 921 922 void WriteClosure::do_oop(oop* o) { 923 if (*o == NULL) { 924 _dump_region->append_intptr_t(0); 925 } else { 926 assert(HeapShared::is_heap_object_archiving_allowed(), 927 "Archiving heap object is not allowed"); 928 _dump_region->append_intptr_t( 929 (intptr_t)CompressedOops::encode_not_null(*o)); 930 } 931 } 932 933 void WriteClosure::do_region(u_char* start, size_t size) { 934 assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment"); 935 assert(size % sizeof(intptr_t) == 0, "bad size"); 936 do_tag((int)size); 937 while (size > 0) { 938 _dump_region->append_intptr_t(*(intptr_t*)start, true); 939 start += sizeof(intptr_t); 940 size -= sizeof(intptr_t); 941 } 942 } 943 944 // This is for dumping detailed statistics for the allocations 945 // in the shared spaces. 946 class DumpAllocStats : public ResourceObj { 947 public: 948 949 // Here's poor man's enum inheritance 950 #define SHAREDSPACE_OBJ_TYPES_DO(f) \ 951 METASPACE_OBJ_TYPES_DO(f) \ 952 f(SymbolHashentry) \ 953 f(SymbolBucket) \ 954 f(StringHashentry) \ 955 f(StringBucket) \ 956 f(Other) 957 958 enum Type { 959 // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc 960 SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE) 961 _number_of_types 962 }; 963 964 static const char * type_name(Type type) { 965 switch(type) { 966 SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE) 967 default: 968 ShouldNotReachHere(); 969 return NULL; 970 } 971 } 972 973 public: 974 enum { RO = 0, RW = 1 }; 975 976 int _counts[2][_number_of_types]; 977 int _bytes [2][_number_of_types]; 978 979 DumpAllocStats() { 980 memset(_counts, 0, sizeof(_counts)); 981 memset(_bytes, 0, sizeof(_bytes)); 982 }; 983 984 void record(MetaspaceObj::Type type, int byte_size, bool read_only) { 985 assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity"); 986 int which = (read_only) ? RO : RW; 987 _counts[which][type] ++; 988 _bytes [which][type] += byte_size; 989 } 990 991 void record_other_type(int byte_size, bool read_only) { 992 int which = (read_only) ? RO : RW; 993 _bytes [which][OtherType] += byte_size; 994 } 995 void print_stats(int ro_all, int rw_all, int mc_all); 996 }; 997 998 void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all) { 999 // Calculate size of data that was not allocated by Metaspace::allocate() 1000 MetaspaceSharedStats *stats = MetaspaceShared::stats(); 1001 1002 // symbols 1003 _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count; 1004 _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes; 1005 1006 _counts[RO][SymbolBucketType] = stats->symbol.bucket_count; 1007 _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes; 1008 1009 // strings 1010 _counts[RO][StringHashentryType] = stats->string.hashentry_count; 1011 _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes; 1012 1013 _counts[RO][StringBucketType] = stats->string.bucket_count; 1014 _bytes [RO][StringBucketType] = stats->string.bucket_bytes; 1015 1016 // TODO: count things like dictionary, vtable, etc 1017 _bytes[RW][OtherType] += mc_all; 1018 rw_all += mc_all; // mc is mapped Read/Write 1019 1020 // prevent divide-by-zero 1021 if (ro_all < 1) { 1022 ro_all = 1; 1023 } 1024 if (rw_all < 1) { 1025 rw_all = 1; 1026 } 1027 1028 int all_ro_count = 0; 1029 int all_ro_bytes = 0; 1030 int all_rw_count = 0; 1031 int all_rw_bytes = 0; 1032 1033 // To make fmt_stats be a syntactic constant (for format warnings), use #define. 1034 #define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f" 1035 const char *sep = "--------------------+---------------------------+---------------------------+--------------------------"; 1036 const char *hdr = " ro_cnt ro_bytes % | rw_cnt rw_bytes % | all_cnt all_bytes %"; 1037 1038 LogMessage(cds) msg; 1039 1040 msg.debug("Detailed metadata info (excluding st regions; rw stats include mc regions):"); 1041 msg.debug("%s", hdr); 1042 msg.debug("%s", sep); 1043 for (int type = 0; type < int(_number_of_types); type ++) { 1044 const char *name = type_name((Type)type); 1045 int ro_count = _counts[RO][type]; 1046 int ro_bytes = _bytes [RO][type]; 1047 int rw_count = _counts[RW][type]; 1048 int rw_bytes = _bytes [RW][type]; 1049 int count = ro_count + rw_count; 1050 int bytes = ro_bytes + rw_bytes; 1051 1052 double ro_perc = percent_of(ro_bytes, ro_all); 1053 double rw_perc = percent_of(rw_bytes, rw_all); 1054 double perc = percent_of(bytes, ro_all + rw_all); 1055 1056 msg.debug(fmt_stats, name, 1057 ro_count, ro_bytes, ro_perc, 1058 rw_count, rw_bytes, rw_perc, 1059 count, bytes, perc); 1060 1061 all_ro_count += ro_count; 1062 all_ro_bytes += ro_bytes; 1063 all_rw_count += rw_count; 1064 all_rw_bytes += rw_bytes; 1065 } 1066 1067 int all_count = all_ro_count + all_rw_count; 1068 int all_bytes = all_ro_bytes + all_rw_bytes; 1069 1070 double all_ro_perc = percent_of(all_ro_bytes, ro_all); 1071 double all_rw_perc = percent_of(all_rw_bytes, rw_all); 1072 double all_perc = percent_of(all_bytes, ro_all + rw_all); 1073 1074 msg.debug("%s", sep); 1075 msg.debug(fmt_stats, "Total", 1076 all_ro_count, all_ro_bytes, all_ro_perc, 1077 all_rw_count, all_rw_bytes, all_rw_perc, 1078 all_count, all_bytes, all_perc); 1079 1080 assert(all_ro_bytes == ro_all, "everything should have been counted"); 1081 assert(all_rw_bytes == rw_all, "everything should have been counted"); 1082 1083 #undef fmt_stats 1084 } 1085 1086 // Populate the shared space. 1087 1088 class VM_PopulateDumpSharedSpace: public VM_Operation { 1089 private: 1090 GrowableArray<MemRegion> *_closed_archive_heap_regions; 1091 GrowableArray<MemRegion> *_open_archive_heap_regions; 1092 1093 GrowableArray<ArchiveHeapOopmapInfo> *_closed_archive_heap_oopmaps; 1094 GrowableArray<ArchiveHeapOopmapInfo> *_open_archive_heap_oopmaps; 1095 1096 void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN; 1097 void dump_archive_heap_oopmaps() NOT_CDS_JAVA_HEAP_RETURN; 1098 void dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions, 1099 GrowableArray<ArchiveHeapOopmapInfo>* oopmaps); 1100 void dump_symbols(); 1101 char* dump_read_only_tables(); 1102 void print_class_stats(); 1103 void print_region_stats(); 1104 void print_bitmap_region_stats(size_t size, size_t total_size); 1105 void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem, 1106 const char *name, size_t total_size); 1107 void relocate_to_default_base_address(CHeapBitMap* ptrmap); 1108 1109 public: 1110 1111 VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; } 1112 void doit(); // outline because gdb sucks 1113 bool allow_nested_vm_operations() const { return true; } 1114 }; // class VM_PopulateDumpSharedSpace 1115 1116 class SortedSymbolClosure: public SymbolClosure { 1117 GrowableArray<Symbol*> _symbols; 1118 virtual void do_symbol(Symbol** sym) { 1119 assert((*sym)->is_permanent(), "archived symbols must be permanent"); 1120 _symbols.append(*sym); 1121 } 1122 static int compare_symbols_by_address(Symbol** a, Symbol** b) { 1123 if (a[0] < b[0]) { 1124 return -1; 1125 } else if (a[0] == b[0]) { 1126 return 0; 1127 } else { 1128 return 1; 1129 } 1130 } 1131 1132 public: 1133 SortedSymbolClosure() { 1134 SymbolTable::symbols_do(this); 1135 _symbols.sort(compare_symbols_by_address); 1136 } 1137 GrowableArray<Symbol*>* get_sorted_symbols() { 1138 return &_symbols; 1139 } 1140 }; 1141 1142 // ArchiveCompactor -- 1143 // 1144 // This class is the central piece of shared archive compaction -- all metaspace data are 1145 // initially allocated outside of the shared regions. ArchiveCompactor copies the 1146 // metaspace data into their final location in the shared regions. 1147 1148 class ArchiveCompactor : AllStatic { 1149 static const int INITIAL_TABLE_SIZE = 8087; 1150 static const int MAX_TABLE_SIZE = 1000000; 1151 1152 static DumpAllocStats* _alloc_stats; 1153 static SortedSymbolClosure* _ssc; 1154 1155 typedef KVHashtable<address, address, mtInternal> RelocationTable; 1156 static RelocationTable* _new_loc_table; 1157 1158 public: 1159 static void initialize() { 1160 _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats; 1161 _new_loc_table = new RelocationTable(INITIAL_TABLE_SIZE); 1162 } 1163 static DumpAllocStats* alloc_stats() { 1164 return _alloc_stats; 1165 } 1166 1167 // Use this when you allocate space with MetaspaceShare::read_only_space_alloc() 1168 // outside of ArchiveCompactor::allocate(). These are usually for misc tables 1169 // that are allocated in the RO space. 1170 class OtherROAllocMark { 1171 char* _oldtop; 1172 public: 1173 OtherROAllocMark() { 1174 _oldtop = _ro_region.top(); 1175 } 1176 ~OtherROAllocMark() { 1177 char* newtop = _ro_region.top(); 1178 ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - _oldtop), true); 1179 } 1180 }; 1181 1182 static void allocate(MetaspaceClosure::Ref* ref, bool read_only) { 1183 address obj = ref->obj(); 1184 int bytes = ref->size() * BytesPerWord; 1185 char* p; 1186 size_t alignment = BytesPerWord; 1187 char* oldtop; 1188 char* newtop; 1189 1190 if (read_only) { 1191 oldtop = _ro_region.top(); 1192 p = _ro_region.allocate(bytes, alignment); 1193 newtop = _ro_region.top(); 1194 } else { 1195 oldtop = _rw_region.top(); 1196 if (ref->msotype() == MetaspaceObj::ClassType) { 1197 // Save a pointer immediate in front of an InstanceKlass, so 1198 // we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo* 1199 // without building another hashtable. See RunTimeSharedClassInfo::get_for() 1200 // in systemDictionaryShared.cpp. 1201 Klass* klass = (Klass*)obj; 1202 if (klass->is_instance_klass()) { 1203 SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass)); 1204 _rw_region.allocate(sizeof(address), BytesPerWord); 1205 } 1206 } 1207 p = _rw_region.allocate(bytes, alignment); 1208 newtop = _rw_region.top(); 1209 } 1210 memcpy(p, obj, bytes); 1211 1212 intptr_t* cloned_vtable = MetaspaceShared::fix_cpp_vtable_for_dynamic_archive(ref->msotype(), (address)p); 1213 if (cloned_vtable != NULL) { 1214 *(address*)p = (address)cloned_vtable; 1215 ArchivePtrMarker::mark_pointer((address*)p); 1216 } 1217 1218 assert(_new_loc_table->lookup(obj) == NULL, "each object can be relocated at most once"); 1219 _new_loc_table->add(obj, (address)p); 1220 log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes); 1221 if (_new_loc_table->maybe_grow(MAX_TABLE_SIZE)) { 1222 log_info(cds, hashtables)("Expanded _new_loc_table to %d", _new_loc_table->table_size()); 1223 } 1224 _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only); 1225 } 1226 1227 static address get_new_loc(MetaspaceClosure::Ref* ref) { 1228 address* pp = _new_loc_table->lookup(ref->obj()); 1229 assert(pp != NULL, "must be"); 1230 return *pp; 1231 } 1232 1233 private: 1234 // Makes a shallow copy of visited MetaspaceObj's 1235 class ShallowCopier: public UniqueMetaspaceClosure { 1236 bool _read_only; 1237 public: 1238 ShallowCopier(bool read_only) : _read_only(read_only) {} 1239 1240 virtual bool do_unique_ref(Ref* ref, bool read_only) { 1241 if (read_only == _read_only) { 1242 allocate(ref, read_only); 1243 } 1244 return true; // recurse into ref.obj() 1245 } 1246 }; 1247 1248 // Relocate embedded pointers within a MetaspaceObj's shallow copy 1249 class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure { 1250 public: 1251 virtual bool do_unique_ref(Ref* ref, bool read_only) { 1252 address new_loc = get_new_loc(ref); 1253 RefRelocator refer; 1254 ref->metaspace_pointers_do_at(&refer, new_loc); 1255 return true; // recurse into ref.obj() 1256 } 1257 virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) { 1258 assert(type == _method_entry_ref, "only special type allowed for now"); 1259 address obj = ref->obj(); 1260 address new_obj = get_new_loc(ref); 1261 size_t offset = pointer_delta(p, obj, sizeof(u1)); 1262 intptr_t* new_p = (intptr_t*)(new_obj + offset); 1263 assert(*p == *new_p, "must be a copy"); 1264 ArchivePtrMarker::mark_pointer((address*)new_p); 1265 } 1266 }; 1267 1268 // Relocate a reference to point to its shallow copy 1269 class RefRelocator: public MetaspaceClosure { 1270 public: 1271 virtual bool do_ref(Ref* ref, bool read_only) { 1272 if (ref->not_null()) { 1273 ref->update(get_new_loc(ref)); 1274 ArchivePtrMarker::mark_pointer(ref->addr()); 1275 } 1276 return false; // Do not recurse. 1277 } 1278 }; 1279 1280 #ifdef ASSERT 1281 class IsRefInArchiveChecker: public MetaspaceClosure { 1282 public: 1283 virtual bool do_ref(Ref* ref, bool read_only) { 1284 if (ref->not_null()) { 1285 char* obj = (char*)ref->obj(); 1286 assert(_ro_region.contains(obj) || _rw_region.contains(obj), 1287 "must be relocated to point to CDS archive"); 1288 } 1289 return false; // Do not recurse. 1290 } 1291 }; 1292 #endif 1293 1294 public: 1295 static void copy_and_compact() { 1296 ResourceMark rm; 1297 SortedSymbolClosure the_ssc; // StackObj 1298 _ssc = &the_ssc; 1299 1300 log_info(cds)("Scanning all metaspace objects ... "); 1301 { 1302 // allocate and shallow-copy RW objects, immediately following the MC region 1303 log_info(cds)("Allocating RW objects ... "); 1304 _mc_region.pack(&_rw_region); 1305 1306 ResourceMark rm; 1307 ShallowCopier rw_copier(false); 1308 iterate_roots(&rw_copier); 1309 } 1310 { 1311 // allocate and shallow-copy of RO object, immediately following the RW region 1312 log_info(cds)("Allocating RO objects ... "); 1313 _rw_region.pack(&_ro_region); 1314 1315 ResourceMark rm; 1316 ShallowCopier ro_copier(true); 1317 iterate_roots(&ro_copier); 1318 } 1319 { 1320 log_info(cds)("Relocating embedded pointers ... "); 1321 ResourceMark rm; 1322 ShallowCopyEmbeddedRefRelocator emb_reloc; 1323 iterate_roots(&emb_reloc); 1324 } 1325 { 1326 log_info(cds)("Relocating external roots ... "); 1327 ResourceMark rm; 1328 RefRelocator ext_reloc; 1329 iterate_roots(&ext_reloc); 1330 } 1331 1332 #ifdef ASSERT 1333 { 1334 log_info(cds)("Verifying external roots ... "); 1335 ResourceMark rm; 1336 IsRefInArchiveChecker checker; 1337 iterate_roots(&checker); 1338 } 1339 #endif 1340 1341 1342 // cleanup 1343 _ssc = NULL; 1344 } 1345 1346 // We must relocate the System::_well_known_klasses only after we have copied the 1347 // java objects in during dump_java_heap_objects(): during the object copy, we operate on 1348 // old objects which assert that their klass is the original klass. 1349 static void relocate_well_known_klasses() { 1350 { 1351 log_info(cds)("Relocating SystemDictionary::_well_known_klasses[] ... "); 1352 ResourceMark rm; 1353 RefRelocator ext_reloc; 1354 SystemDictionary::well_known_klasses_do(&ext_reloc); 1355 } 1356 // NOTE: after this point, we shouldn't have any globals that can reach the old 1357 // objects. 1358 1359 // We cannot use any of the objects in the heap anymore (except for the 1360 // shared strings) because their headers no longer point to valid Klasses. 1361 } 1362 1363 static void iterate_roots(MetaspaceClosure* it) { 1364 GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols(); 1365 for (int i=0; i<symbols->length(); i++) { 1366 it->push(symbols->adr_at(i)); 1367 } 1368 if (_global_klass_objects != NULL) { 1369 // Need to fix up the pointers 1370 for (int i = 0; i < _global_klass_objects->length(); i++) { 1371 // NOTE -- this requires that the vtable is NOT yet patched, or else we are hosed. 1372 it->push(_global_klass_objects->adr_at(i)); 1373 } 1374 } 1375 FileMapInfo::metaspace_pointers_do(it); 1376 SystemDictionaryShared::dumptime_classes_do(it); 1377 Universe::metaspace_pointers_do(it); 1378 SymbolTable::metaspace_pointers_do(it); 1379 vmSymbols::metaspace_pointers_do(it); 1380 1381 it->finish(); 1382 } 1383 1384 static Klass* get_relocated_klass(Klass* orig_klass) { 1385 assert(DumpSharedSpaces, "dump time only"); 1386 address* pp = _new_loc_table->lookup((address)orig_klass); 1387 assert(pp != NULL, "must be"); 1388 Klass* klass = (Klass*)(*pp); 1389 assert(klass->is_klass(), "must be"); 1390 return klass; 1391 } 1392 }; 1393 1394 DumpAllocStats* ArchiveCompactor::_alloc_stats; 1395 SortedSymbolClosure* ArchiveCompactor::_ssc; 1396 ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table; 1397 1398 void VM_PopulateDumpSharedSpace::dump_symbols() { 1399 log_info(cds)("Dumping symbol table ..."); 1400 1401 NOT_PRODUCT(SymbolTable::verify()); 1402 SymbolTable::write_to_archive(); 1403 } 1404 1405 char* VM_PopulateDumpSharedSpace::dump_read_only_tables() { 1406 ArchiveCompactor::OtherROAllocMark mark; 1407 1408 log_info(cds)("Removing java_mirror ... "); 1409 if (!HeapShared::is_heap_object_archiving_allowed()) { 1410 clear_basic_type_mirrors(); 1411 } 1412 remove_java_mirror_in_classes(); 1413 log_info(cds)("done. "); 1414 1415 SystemDictionaryShared::write_to_archive(); 1416 1417 // Write the other data to the output array. 1418 char* start = _ro_region.top(); 1419 WriteClosure wc(&_ro_region); 1420 MetaspaceShared::serialize(&wc); 1421 1422 // Write the bitmaps for patching the archive heap regions 1423 dump_archive_heap_oopmaps(); 1424 1425 return start; 1426 } 1427 1428 void VM_PopulateDumpSharedSpace::print_class_stats() { 1429 log_info(cds)("Number of classes %d", _global_klass_objects->length()); 1430 { 1431 int num_type_array = 0, num_obj_array = 0, num_inst = 0; 1432 for (int i = 0; i < _global_klass_objects->length(); i++) { 1433 Klass* k = _global_klass_objects->at(i); 1434 if (k->is_instance_klass()) { 1435 num_inst ++; 1436 } else if (k->is_objArray_klass()) { 1437 num_obj_array ++; 1438 } else { 1439 assert(k->is_typeArray_klass(), "sanity"); 1440 num_type_array ++; 1441 } 1442 } 1443 log_info(cds)(" instance classes = %5d", num_inst); 1444 log_info(cds)(" obj array classes = %5d", num_obj_array); 1445 log_info(cds)(" type array classes = %5d", num_type_array); 1446 } 1447 } 1448 1449 void VM_PopulateDumpSharedSpace::relocate_to_default_base_address(CHeapBitMap* ptrmap) { 1450 intx addr_delta = MetaspaceShared::final_delta(); 1451 if (addr_delta == 0) { 1452 ArchivePtrMarker::compact((address)SharedBaseAddress, (address)_ro_region.top()); 1453 } else { 1454 // We are not able to reserve space at Arguments::default_SharedBaseAddress() (due to ASLR). 1455 // This means that the current content of the archive is based on a random 1456 // address. Let's relocate all the pointers, so that it can be mapped to 1457 // Arguments::default_SharedBaseAddress() without runtime relocation. 1458 // 1459 // Note: both the base and dynamic archive are written with 1460 // FileMapHeader::_shared_base_address == Arguments::default_SharedBaseAddress() 1461 1462 // Patch all pointers that are marked by ptrmap within this region, 1463 // where we have just dumped all the metaspace data. 1464 address patch_base = (address)SharedBaseAddress; 1465 address patch_end = (address)_ro_region.top(); 1466 size_t size = patch_end - patch_base; 1467 1468 // the current value of the pointers to be patched must be within this 1469 // range (i.e., must point to valid metaspace objects) 1470 address valid_old_base = patch_base; 1471 address valid_old_end = patch_end; 1472 1473 // after patching, the pointers must point inside this range 1474 // (the requested location of the archive, as mapped at runtime). 1475 address valid_new_base = (address)Arguments::default_SharedBaseAddress(); 1476 address valid_new_end = valid_new_base + size; 1477 1478 log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT " ] to " 1479 "[" INTPTR_FORMAT " - " INTPTR_FORMAT " ]", p2i(patch_base), p2i(patch_end), 1480 p2i(valid_new_base), p2i(valid_new_end)); 1481 1482 SharedDataRelocator<true> patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end, 1483 valid_new_base, valid_new_end, addr_delta, ptrmap); 1484 ptrmap->iterate(&patcher); 1485 ArchivePtrMarker::compact(patcher.max_non_null_offset()); 1486 } 1487 } 1488 1489 void VM_PopulateDumpSharedSpace::doit() { 1490 CHeapBitMap ptrmap; 1491 MetaspaceShared::initialize_ptr_marker(&ptrmap); 1492 1493 // We should no longer allocate anything from the metaspace, so that: 1494 // 1495 // (1) Metaspace::allocate might trigger GC if we have run out of 1496 // committed metaspace, but we can't GC because we're running 1497 // in the VM thread. 1498 // (2) ArchiveCompactor needs to work with a stable set of MetaspaceObjs. 1499 Metaspace::freeze(); 1500 DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm); 1501 1502 Thread* THREAD = VMThread::vm_thread(); 1503 1504 FileMapInfo::check_nonempty_dir_in_shared_path_table(); 1505 1506 NOT_PRODUCT(SystemDictionary::verify();) 1507 // The following guarantee is meant to ensure that no loader constraints 1508 // exist yet, since the constraints table is not shared. This becomes 1509 // more important now that we don't re-initialize vtables/itables for 1510 // shared classes at runtime, where constraints were previously created. 1511 guarantee(SystemDictionary::constraints()->number_of_entries() == 0, 1512 "loader constraints are not saved"); 1513 guarantee(SystemDictionary::placeholders()->number_of_entries() == 0, 1514 "placeholders are not saved"); 1515 1516 // At this point, many classes have been loaded. 1517 // Gather systemDictionary classes in a global array and do everything to 1518 // that so we don't have to walk the SystemDictionary again. 1519 SystemDictionaryShared::check_excluded_classes(); 1520 _global_klass_objects = new GrowableArray<Klass*>(1000); 1521 CollectClassesClosure collect_classes; 1522 ClassLoaderDataGraph::loaded_classes_do(&collect_classes); 1523 1524 print_class_stats(); 1525 1526 // Ensure the ConstMethods won't be modified at run-time 1527 log_info(cds)("Updating ConstMethods ... "); 1528 rewrite_nofast_bytecodes_and_calculate_fingerprints(THREAD); 1529 log_info(cds)("done. "); 1530 1531 // Remove all references outside the metadata 1532 log_info(cds)("Removing unshareable information ... "); 1533 remove_unshareable_in_classes(); 1534 log_info(cds)("done. "); 1535 1536 MetaspaceShared::allocate_cloned_cpp_vtptrs(); 1537 char* cloned_vtables = _mc_region.top(); 1538 MetaspaceShared::allocate_cpp_vtable_clones(); 1539 1540 ArchiveCompactor::initialize(); 1541 ArchiveCompactor::copy_and_compact(); 1542 1543 dump_symbols(); 1544 1545 // Dump supported java heap objects 1546 _closed_archive_heap_regions = NULL; 1547 _open_archive_heap_regions = NULL; 1548 dump_java_heap_objects(); 1549 1550 ArchiveCompactor::relocate_well_known_klasses(); 1551 1552 char* serialized_data = dump_read_only_tables(); 1553 _ro_region.pack(); 1554 1555 // The vtable clones contain addresses of the current process. 1556 // We don't want to write these addresses into the archive. 1557 MetaspaceShared::zero_cpp_vtable_clones_for_writing(); 1558 1559 // relocate the data so that it can be mapped to Arguments::default_SharedBaseAddress() 1560 // without runtime relocation. 1561 relocate_to_default_base_address(&ptrmap); 1562 1563 // Create and write the archive file that maps the shared spaces. 1564 1565 FileMapInfo* mapinfo = new FileMapInfo(true); 1566 mapinfo->populate_header(os::vm_allocation_granularity()); 1567 mapinfo->set_serialized_data(serialized_data); 1568 mapinfo->set_cloned_vtables(cloned_vtables); 1569 mapinfo->set_i2i_entry_code_buffers(MetaspaceShared::i2i_entry_code_buffers(), 1570 MetaspaceShared::i2i_entry_code_buffers_size()); 1571 mapinfo->open_for_write(); 1572 MetaspaceShared::write_core_archive_regions(mapinfo); 1573 _total_closed_archive_region_size = mapinfo->write_archive_heap_regions( 1574 _closed_archive_heap_regions, 1575 _closed_archive_heap_oopmaps, 1576 MetaspaceShared::first_closed_archive_heap_region, 1577 MetaspaceShared::max_closed_archive_heap_region); 1578 _total_open_archive_region_size = mapinfo->write_archive_heap_regions( 1579 _open_archive_heap_regions, 1580 _open_archive_heap_oopmaps, 1581 MetaspaceShared::first_open_archive_heap_region, 1582 MetaspaceShared::max_open_archive_heap_region); 1583 1584 mapinfo->set_final_requested_base((char*)Arguments::default_SharedBaseAddress()); 1585 mapinfo->set_header_crc(mapinfo->compute_header_crc()); 1586 mapinfo->write_header(); 1587 mapinfo->close(); 1588 1589 print_region_stats(); 1590 1591 if (log_is_enabled(Info, cds)) { 1592 ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()), 1593 int(_mc_region.used())); 1594 } 1595 1596 if (PrintSystemDictionaryAtExit) { 1597 SystemDictionary::print(); 1598 } 1599 1600 if (AllowArchivingWithJavaAgent) { 1601 warning("This archive was created with AllowArchivingWithJavaAgent. It should be used " 1602 "for testing purposes only and should not be used in a production environment"); 1603 } 1604 1605 // There may be other pending VM operations that operate on the InstanceKlasses, 1606 // which will fail because InstanceKlasses::remove_unshareable_info() 1607 // has been called. Forget these operations and exit the VM directly. 1608 vm_direct_exit(0); 1609 } 1610 1611 void VM_PopulateDumpSharedSpace::print_region_stats() { 1612 // Print statistics of all the regions 1613 const size_t bitmap_used = ArchivePtrMarker::ptrmap()->size_in_bytes(); 1614 const size_t bitmap_reserved = align_up(bitmap_used, Metaspace::reserve_alignment()); 1615 const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() + 1616 _mc_region.reserved() + 1617 bitmap_reserved + 1618 _total_closed_archive_region_size + 1619 _total_open_archive_region_size; 1620 const size_t total_bytes = _ro_region.used() + _rw_region.used() + 1621 _mc_region.used() + 1622 bitmap_used + 1623 _total_closed_archive_region_size + 1624 _total_open_archive_region_size; 1625 const double total_u_perc = percent_of(total_bytes, total_reserved); 1626 1627 _mc_region.print(total_reserved); 1628 _rw_region.print(total_reserved); 1629 _ro_region.print(total_reserved); 1630 print_bitmap_region_stats(bitmap_reserved, total_reserved); 1631 print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved); 1632 print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved); 1633 1634 log_debug(cds)("total : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]", 1635 total_bytes, total_reserved, total_u_perc); 1636 } 1637 1638 void VM_PopulateDumpSharedSpace::print_bitmap_region_stats(size_t size, size_t total_size) { 1639 log_debug(cds)("bm space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT, 1640 size, size/double(total_size)*100.0, size, p2i(NULL)); 1641 } 1642 1643 void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem, 1644 const char *name, size_t total_size) { 1645 int arr_len = heap_mem == NULL ? 0 : heap_mem->length(); 1646 for (int i = 0; i < arr_len; i++) { 1647 char* start = (char*)heap_mem->at(i).start(); 1648 size_t size = heap_mem->at(i).byte_size(); 1649 char* top = start + size; 1650 log_debug(cds)("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT, 1651 name, i, size, size/double(total_size)*100.0, size, p2i(start)); 1652 1653 } 1654 } 1655 1656 void MetaspaceShared::write_core_archive_regions(FileMapInfo* mapinfo) { 1657 // Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with 1658 // MetaspaceShared::n_regions (internal to hotspot). 1659 assert(NUM_CDS_REGIONS == MetaspaceShared::n_regions, "sanity"); 1660 1661 // mc contains the trampoline code for method entries, which are patched at run time, 1662 // so it needs to be read/write. 1663 write_region(mapinfo, mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true); 1664 write_region(mapinfo, rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false); 1665 write_region(mapinfo, ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false); 1666 mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap()); 1667 } 1668 1669 void MetaspaceShared::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec) { 1670 mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec); 1671 } 1672 1673 // Update a Java object to point its Klass* to the new location after 1674 // shared archive has been compacted. 1675 void MetaspaceShared::relocate_klass_ptr(oop o) { 1676 assert(DumpSharedSpaces, "sanity"); 1677 Klass* k = ArchiveCompactor::get_relocated_klass(o->klass()); 1678 o->set_klass(k); 1679 } 1680 1681 Klass* MetaspaceShared::get_relocated_klass(Klass *k, bool is_final) { 1682 assert(DumpSharedSpaces, "sanity"); 1683 k = ArchiveCompactor::get_relocated_klass(k); 1684 if (is_final) { 1685 k = (Klass*)(address(k) + final_delta()); 1686 } 1687 return k; 1688 } 1689 1690 class LinkSharedClassesClosure : public KlassClosure { 1691 Thread* THREAD; 1692 bool _made_progress; 1693 public: 1694 LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {} 1695 1696 void reset() { _made_progress = false; } 1697 bool made_progress() const { return _made_progress; } 1698 1699 void do_klass(Klass* k) { 1700 if (k->is_instance_klass()) { 1701 InstanceKlass* ik = InstanceKlass::cast(k); 1702 // Link the class to cause the bytecodes to be rewritten and the 1703 // cpcache to be created. Class verification is done according 1704 // to -Xverify setting. 1705 _made_progress |= MetaspaceShared::try_link_class(ik, THREAD); 1706 guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class"); 1707 1708 ik->constants()->resolve_class_constants(THREAD); 1709 } 1710 } 1711 }; 1712 1713 class CheckSharedClassesClosure : public KlassClosure { 1714 bool _made_progress; 1715 public: 1716 CheckSharedClassesClosure() : _made_progress(false) {} 1717 1718 void reset() { _made_progress = false; } 1719 bool made_progress() const { return _made_progress; } 1720 void do_klass(Klass* k) { 1721 if (k->is_instance_klass() && InstanceKlass::cast(k)->check_sharing_error_state()) { 1722 _made_progress = true; 1723 } 1724 } 1725 }; 1726 1727 void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) { 1728 // We need to iterate because verification may cause additional classes 1729 // to be loaded. 1730 LinkSharedClassesClosure link_closure(THREAD); 1731 do { 1732 link_closure.reset(); 1733 ClassLoaderDataGraph::unlocked_loaded_classes_do(&link_closure); 1734 guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class"); 1735 } while (link_closure.made_progress()); 1736 1737 if (_has_error_classes) { 1738 // Mark all classes whose super class or interfaces failed verification. 1739 CheckSharedClassesClosure check_closure; 1740 do { 1741 // Not completely sure if we need to do this iteratively. Anyway, 1742 // we should come here only if there are unverifiable classes, which 1743 // shouldn't happen in normal cases. So better safe than sorry. 1744 check_closure.reset(); 1745 ClassLoaderDataGraph::unlocked_loaded_classes_do(&check_closure); 1746 } while (check_closure.made_progress()); 1747 } 1748 } 1749 1750 void MetaspaceShared::prepare_for_dumping() { 1751 Arguments::check_unsupported_dumping_properties(); 1752 ClassLoader::initialize_shared_path(); 1753 } 1754 1755 // Preload classes from a list, populate the shared spaces and dump to a 1756 // file. 1757 void MetaspaceShared::preload_and_dump(TRAPS) { 1758 { TraceTime timer("Dump Shared Spaces", TRACETIME_LOG(Info, startuptime)); 1759 ResourceMark rm(THREAD); 1760 char class_list_path_str[JVM_MAXPATHLEN]; 1761 // Preload classes to be shared. 1762 const char* class_list_path; 1763 if (SharedClassListFile == NULL) { 1764 // Construct the path to the class list (in jre/lib) 1765 // Walk up two directories from the location of the VM and 1766 // optionally tack on "lib" (depending on platform) 1767 os::jvm_path(class_list_path_str, sizeof(class_list_path_str)); 1768 for (int i = 0; i < 3; i++) { 1769 char *end = strrchr(class_list_path_str, *os::file_separator()); 1770 if (end != NULL) *end = '\0'; 1771 } 1772 int class_list_path_len = (int)strlen(class_list_path_str); 1773 if (class_list_path_len >= 3) { 1774 if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) { 1775 if (class_list_path_len < JVM_MAXPATHLEN - 4) { 1776 jio_snprintf(class_list_path_str + class_list_path_len, 1777 sizeof(class_list_path_str) - class_list_path_len, 1778 "%slib", os::file_separator()); 1779 class_list_path_len += 4; 1780 } 1781 } 1782 } 1783 if (class_list_path_len < JVM_MAXPATHLEN - 10) { 1784 jio_snprintf(class_list_path_str + class_list_path_len, 1785 sizeof(class_list_path_str) - class_list_path_len, 1786 "%sclasslist", os::file_separator()); 1787 } 1788 class_list_path = class_list_path_str; 1789 } else { 1790 class_list_path = SharedClassListFile; 1791 } 1792 1793 log_info(cds)("Loading classes to share ..."); 1794 _has_error_classes = false; 1795 int class_count = preload_classes(class_list_path, THREAD); 1796 if (ExtraSharedClassListFile) { 1797 class_count += preload_classes(ExtraSharedClassListFile, THREAD); 1798 } 1799 log_info(cds)("Loading classes to share: done."); 1800 1801 log_info(cds)("Shared spaces: preloaded %d classes", class_count); 1802 1803 if (SharedArchiveConfigFile) { 1804 log_info(cds)("Reading extra data from %s ...", SharedArchiveConfigFile); 1805 read_extra_data(SharedArchiveConfigFile, THREAD); 1806 } 1807 log_info(cds)("Reading extra data: done."); 1808 1809 HeapShared::init_subgraph_entry_fields(THREAD); 1810 1811 // Rewrite and link classes 1812 log_info(cds)("Rewriting and linking classes ..."); 1813 1814 // Link any classes which got missed. This would happen if we have loaded classes that 1815 // were not explicitly specified in the classlist. E.g., if an interface implemented by class K 1816 // fails verification, all other interfaces that were not specified in the classlist but 1817 // are implemented by K are not verified. 1818 link_and_cleanup_shared_classes(CATCH); 1819 log_info(cds)("Rewriting and linking classes: done"); 1820 1821 if (HeapShared::is_heap_object_archiving_allowed()) { 1822 // Avoid fragmentation while archiving heap objects. 1823 Universe::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(true); 1824 Universe::heap()->collect(GCCause::_archive_time_gc); 1825 Universe::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(false); 1826 } 1827 1828 VM_PopulateDumpSharedSpace op; 1829 VMThread::execute(&op); 1830 } 1831 } 1832 1833 1834 int MetaspaceShared::preload_classes(const char* class_list_path, TRAPS) { 1835 ClassListParser parser(class_list_path); 1836 int class_count = 0; 1837 1838 while (parser.parse_one_line()) { 1839 Klass* klass = parser.load_current_class(THREAD); 1840 if (HAS_PENDING_EXCEPTION) { 1841 if (klass == NULL && 1842 (PENDING_EXCEPTION->klass()->name() == vmSymbols::java_lang_ClassNotFoundException())) { 1843 // print a warning only when the pending exception is class not found 1844 log_warning(cds)("Preload Warning: Cannot find %s", parser.current_class_name()); 1845 } 1846 CLEAR_PENDING_EXCEPTION; 1847 } 1848 if (klass != NULL) { 1849 if (log_is_enabled(Trace, cds)) { 1850 ResourceMark rm(THREAD); 1851 log_trace(cds)("Shared spaces preloaded: %s", klass->external_name()); 1852 } 1853 1854 if (klass->is_instance_klass()) { 1855 InstanceKlass* ik = InstanceKlass::cast(klass); 1856 1857 // Link the class to cause the bytecodes to be rewritten and the 1858 // cpcache to be created. The linking is done as soon as classes 1859 // are loaded in order that the related data structures (klass and 1860 // cpCache) are located together. 1861 try_link_class(ik, THREAD); 1862 guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class"); 1863 } 1864 1865 class_count++; 1866 } 1867 } 1868 1869 return class_count; 1870 } 1871 1872 // Returns true if the class's status has changed 1873 bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) { 1874 assert(DumpSharedSpaces, "should only be called during dumping"); 1875 if (ik->init_state() < InstanceKlass::linked) { 1876 bool saved = BytecodeVerificationLocal; 1877 if (ik->loader_type() == 0 && ik->class_loader() == NULL) { 1878 // The verification decision is based on BytecodeVerificationRemote 1879 // for non-system classes. Since we are using the NULL classloader 1880 // to load non-system classes for customized class loaders during dumping, 1881 // we need to temporarily change BytecodeVerificationLocal to be the same as 1882 // BytecodeVerificationRemote. Note this can cause the parent system 1883 // classes also being verified. The extra overhead is acceptable during 1884 // dumping. 1885 BytecodeVerificationLocal = BytecodeVerificationRemote; 1886 } 1887 ik->link_class(THREAD); 1888 if (HAS_PENDING_EXCEPTION) { 1889 ResourceMark rm(THREAD); 1890 log_warning(cds)("Preload Warning: Verification failed for %s", 1891 ik->external_name()); 1892 CLEAR_PENDING_EXCEPTION; 1893 ik->set_in_error_state(); 1894 _has_error_classes = true; 1895 } 1896 BytecodeVerificationLocal = saved; 1897 return true; 1898 } else { 1899 return false; 1900 } 1901 } 1902 1903 #if INCLUDE_CDS_JAVA_HEAP 1904 void VM_PopulateDumpSharedSpace::dump_java_heap_objects() { 1905 // The closed and open archive heap space has maximum two regions. 1906 // See FileMapInfo::write_archive_heap_regions() for details. 1907 _closed_archive_heap_regions = new GrowableArray<MemRegion>(2); 1908 _open_archive_heap_regions = new GrowableArray<MemRegion>(2); 1909 HeapShared::archive_java_heap_objects(_closed_archive_heap_regions, 1910 _open_archive_heap_regions); 1911 ArchiveCompactor::OtherROAllocMark mark; 1912 HeapShared::write_subgraph_info_table(); 1913 } 1914 1915 void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps() { 1916 if (HeapShared::is_heap_object_archiving_allowed()) { 1917 _closed_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2); 1918 dump_archive_heap_oopmaps(_closed_archive_heap_regions, _closed_archive_heap_oopmaps); 1919 1920 _open_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2); 1921 dump_archive_heap_oopmaps(_open_archive_heap_regions, _open_archive_heap_oopmaps); 1922 } 1923 } 1924 1925 void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions, 1926 GrowableArray<ArchiveHeapOopmapInfo>* oopmaps) { 1927 for (int i=0; i<regions->length(); i++) { 1928 ResourceBitMap oopmap = HeapShared::calculate_oopmap(regions->at(i)); 1929 size_t size_in_bits = oopmap.size(); 1930 size_t size_in_bytes = oopmap.size_in_bytes(); 1931 uintptr_t* buffer = (uintptr_t*)_ro_region.allocate(size_in_bytes, sizeof(intptr_t)); 1932 oopmap.write_to(buffer, size_in_bytes); 1933 log_info(cds, heap)("Oopmap = " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for heap region " 1934 INTPTR_FORMAT " (" SIZE_FORMAT_W(8) " bytes)", 1935 p2i(buffer), size_in_bytes, 1936 p2i(regions->at(i).start()), regions->at(i).byte_size()); 1937 1938 ArchiveHeapOopmapInfo info; 1939 info._oopmap = (address)buffer; 1940 info._oopmap_size_in_bits = size_in_bits; 1941 oopmaps->append(info); 1942 } 1943 } 1944 #endif // INCLUDE_CDS_JAVA_HEAP 1945 1946 void ReadClosure::do_ptr(void** p) { 1947 assert(*p == NULL, "initializing previous initialized pointer."); 1948 intptr_t obj = nextPtr(); 1949 assert((intptr_t)obj >= 0 || (intptr_t)obj < -100, 1950 "hit tag while initializing ptrs."); 1951 *p = (void*)obj; 1952 } 1953 1954 void ReadClosure::do_u4(u4* p) { 1955 intptr_t obj = nextPtr(); 1956 *p = (u4)(uintx(obj)); 1957 } 1958 1959 void ReadClosure::do_bool(bool* p) { 1960 intptr_t obj = nextPtr(); 1961 *p = (bool)(uintx(obj)); 1962 } 1963 1964 void ReadClosure::do_tag(int tag) { 1965 int old_tag; 1966 old_tag = (int)(intptr_t)nextPtr(); 1967 // do_int(&old_tag); 1968 assert(tag == old_tag, "old tag doesn't match"); 1969 FileMapInfo::assert_mark(tag == old_tag); 1970 } 1971 1972 void ReadClosure::do_oop(oop *p) { 1973 narrowOop o = (narrowOop)nextPtr(); 1974 if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) { 1975 p = NULL; 1976 } else { 1977 assert(HeapShared::is_heap_object_archiving_allowed(), 1978 "Archived heap object is not allowed"); 1979 assert(HeapShared::open_archive_heap_region_mapped(), 1980 "Open archive heap region is not mapped"); 1981 *p = HeapShared::decode_from_archive(o); 1982 } 1983 } 1984 1985 void ReadClosure::do_region(u_char* start, size_t size) { 1986 assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment"); 1987 assert(size % sizeof(intptr_t) == 0, "bad size"); 1988 do_tag((int)size); 1989 while (size > 0) { 1990 *(intptr_t*)start = nextPtr(); 1991 start += sizeof(intptr_t); 1992 size -= sizeof(intptr_t); 1993 } 1994 } 1995 1996 void MetaspaceShared::set_shared_metaspace_range(void* base, void *static_top, void* top) { 1997 assert(base <= static_top && static_top <= top, "must be"); 1998 _shared_metaspace_static_top = static_top; 1999 MetaspaceObj::set_shared_metaspace_range(base, top); 2000 } 2001 2002 // Return true if given address is in the misc data region 2003 bool MetaspaceShared::is_in_shared_region(const void* p, int idx) { 2004 return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx); 2005 } 2006 2007 bool MetaspaceShared::is_in_trampoline_frame(address addr) { 2008 if (UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc)) { 2009 return true; 2010 } 2011 return false; 2012 } 2013 2014 bool MetaspaceShared::is_shared_dynamic(void* p) { 2015 if ((p < MetaspaceObj::shared_metaspace_top()) && 2016 (p >= _shared_metaspace_static_top)) { 2017 return true; 2018 } else { 2019 return false; 2020 } 2021 } 2022 2023 void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() { 2024 assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled"); 2025 MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE; 2026 FileMapInfo* static_mapinfo = open_static_archive(); 2027 FileMapInfo* dynamic_mapinfo = NULL; 2028 2029 if (static_mapinfo != NULL) { 2030 dynamic_mapinfo = open_dynamic_archive(); 2031 2032 // First try to map at the requested address 2033 result = map_archives(static_mapinfo, dynamic_mapinfo, true); 2034 if (result == MAP_ARCHIVE_MMAP_FAILURE) { 2035 // Mapping has failed (probably due to ASLR). Let's map at an address chosen 2036 // by the OS. 2037 log_info(cds)("Try to map archive(s) at an alternative address"); 2038 result = map_archives(static_mapinfo, dynamic_mapinfo, false); 2039 } 2040 } 2041 2042 if (result == MAP_ARCHIVE_SUCCESS) { 2043 bool dynamic_mapped = (dynamic_mapinfo != NULL && dynamic_mapinfo->is_mapped()); 2044 char* cds_base = static_mapinfo->mapped_base(); 2045 char* cds_end = dynamic_mapped ? dynamic_mapinfo->mapped_end() : static_mapinfo->mapped_end(); 2046 set_shared_metaspace_range(cds_base, static_mapinfo->mapped_end(), cds_end); 2047 _relocation_delta = static_mapinfo->relocation_delta(); 2048 if (dynamic_mapped) { 2049 FileMapInfo::set_shared_path_table(dynamic_mapinfo); 2050 } else { 2051 FileMapInfo::set_shared_path_table(static_mapinfo); 2052 } 2053 } else { 2054 set_shared_metaspace_range(NULL, NULL, NULL); 2055 UseSharedSpaces = false; 2056 FileMapInfo::fail_continue("Unable to map shared spaces"); 2057 if (PrintSharedArchiveAndExit) { 2058 vm_exit_during_initialization("Unable to use shared archive."); 2059 } 2060 } 2061 2062 if (static_mapinfo != NULL && !static_mapinfo->is_mapped()) { 2063 delete static_mapinfo; 2064 } 2065 if (dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped()) { 2066 delete dynamic_mapinfo; 2067 } 2068 } 2069 2070 FileMapInfo* MetaspaceShared::open_static_archive() { 2071 FileMapInfo* mapinfo = new FileMapInfo(true); 2072 if (!mapinfo->initialize()) { 2073 delete(mapinfo); 2074 return NULL; 2075 } 2076 return mapinfo; 2077 } 2078 2079 FileMapInfo* MetaspaceShared::open_dynamic_archive() { 2080 if (DynamicDumpSharedSpaces) { 2081 return NULL; 2082 } 2083 if (Arguments::GetSharedDynamicArchivePath() == NULL) { 2084 return NULL; 2085 } 2086 2087 FileMapInfo* mapinfo = new FileMapInfo(false); 2088 if (!mapinfo->initialize()) { 2089 delete(mapinfo); 2090 return NULL; 2091 } 2092 return mapinfo; 2093 } 2094 2095 // use_requested_addr: 2096 // true = map at FileMapHeader::_requested_base_address 2097 // false = map at an alternative address picked by OS. 2098 MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, FileMapInfo* dynamic_mapinfo, 2099 bool use_requested_addr) { 2100 PRODUCT_ONLY(if (ArchiveRelocationMode == 1 && use_requested_addr) { 2101 // For product build only -- this is for benchmarking the cost of doing relocation. 2102 // For debug builds, the check is done in FileMapInfo::map_regions for better test coverage. 2103 log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address"); 2104 return MAP_ARCHIVE_MMAP_FAILURE; 2105 }); 2106 2107 if (ArchiveRelocationMode == 2 && !use_requested_addr) { 2108 log_info(cds)("ArchiveRelocationMode == 2: never map archive(s) at an alternative address"); 2109 return MAP_ARCHIVE_MMAP_FAILURE; 2110 }; 2111 2112 if (dynamic_mapinfo != NULL) { 2113 // Ensure that the OS won't be able to allocate new memory spaces between the two 2114 // archives, or else it would mess up the simple comparision in MetaspaceObj::is_shared(). 2115 assert(static_mapinfo->mapping_end_offset() == dynamic_mapinfo->mapping_base_offset(), "no gap"); 2116 } 2117 2118 ReservedSpace main_rs, archive_space_rs, class_space_rs; 2119 MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE; 2120 char* mapped_base_address = reserve_address_space_for_archives(static_mapinfo, dynamic_mapinfo, 2121 use_requested_addr, main_rs, archive_space_rs, 2122 class_space_rs); 2123 if (mapped_base_address == NULL) { 2124 result = MAP_ARCHIVE_MMAP_FAILURE; 2125 } else { 2126 log_debug(cds)("Reserved archive_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes", 2127 p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size()); 2128 log_debug(cds)("Reserved class_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes", 2129 p2i(class_space_rs.base()), p2i(class_space_rs.end()), class_space_rs.size()); 2130 MapArchiveResult static_result = map_archive(static_mapinfo, mapped_base_address, archive_space_rs); 2131 MapArchiveResult dynamic_result = (static_result == MAP_ARCHIVE_SUCCESS) ? 2132 map_archive(dynamic_mapinfo, mapped_base_address, archive_space_rs) : MAP_ARCHIVE_OTHER_FAILURE; 2133 2134 DEBUG_ONLY(if (ArchiveRelocationMode == 1 && use_requested_addr) { 2135 // This is for simulating mmap failures at the requested address. In debug builds, we do it 2136 // here (after all archives have possibly been mapped), so we can thoroughly test the code for 2137 // failure handling (releasing all allocated resource, etc). 2138 log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address"); 2139 if (static_result == MAP_ARCHIVE_SUCCESS) { 2140 static_result = MAP_ARCHIVE_MMAP_FAILURE; 2141 } 2142 if (dynamic_result == MAP_ARCHIVE_SUCCESS) { 2143 dynamic_result = MAP_ARCHIVE_MMAP_FAILURE; 2144 } 2145 }); 2146 2147 if (static_result == MAP_ARCHIVE_SUCCESS) { 2148 if (dynamic_result == MAP_ARCHIVE_SUCCESS) { 2149 result = MAP_ARCHIVE_SUCCESS; 2150 } else if (dynamic_result == MAP_ARCHIVE_OTHER_FAILURE) { 2151 assert(dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped(), "must have failed"); 2152 // No need to retry mapping the dynamic archive again, as it will never succeed 2153 // (bad file, etc) -- just keep the base archive. 2154 log_warning(cds, dynamic)("Unable to use shared archive. The top archive failed to load: %s", 2155 dynamic_mapinfo->full_path()); 2156 result = MAP_ARCHIVE_SUCCESS; 2157 // TODO, we can give the unused space for the dynamic archive to class_space_rs, but there's no 2158 // easy API to do that right now. 2159 } else { 2160 result = MAP_ARCHIVE_MMAP_FAILURE; 2161 } 2162 } else if (static_result == MAP_ARCHIVE_OTHER_FAILURE) { 2163 result = MAP_ARCHIVE_OTHER_FAILURE; 2164 } else { 2165 result = MAP_ARCHIVE_MMAP_FAILURE; 2166 } 2167 } 2168 2169 if (result == MAP_ARCHIVE_SUCCESS) { 2170 if (!main_rs.is_reserved() && class_space_rs.is_reserved()) { 2171 MemTracker::record_virtual_memory_type((address)class_space_rs.base(), mtClass); 2172 } 2173 SharedBaseAddress = (size_t)mapped_base_address; 2174 LP64_ONLY({ 2175 if (Metaspace::using_class_space()) { 2176 assert(class_space_rs.is_reserved(), "must be"); 2177 char* cds_base = static_mapinfo->mapped_base(); 2178 Metaspace::allocate_metaspace_compressed_klass_ptrs(class_space_rs, NULL, (address)cds_base); 2179 // map_heap_regions() compares the current narrow oop and klass encodings 2180 // with the archived ones, so it must be done after all encodings are determined. 2181 static_mapinfo->map_heap_regions(); 2182 CompressedKlassPointers::set_range(CompressedClassSpaceSize); 2183 } 2184 }); 2185 } else { 2186 unmap_archive(static_mapinfo); 2187 unmap_archive(dynamic_mapinfo); 2188 release_reserved_spaces(main_rs, archive_space_rs, class_space_rs); 2189 } 2190 2191 return result; 2192 } 2193 2194 char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_mapinfo, 2195 FileMapInfo* dynamic_mapinfo, 2196 bool use_requested_addr, 2197 ReservedSpace& main_rs, 2198 ReservedSpace& archive_space_rs, 2199 ReservedSpace& class_space_rs) { 2200 const bool use_klass_space = NOT_LP64(false) LP64_ONLY(Metaspace::using_class_space()); 2201 const size_t class_space_size = NOT_LP64(0) LP64_ONLY(Metaspace::compressed_class_space_size()); 2202 2203 if (use_klass_space) { 2204 assert(class_space_size > 0, "CompressedClassSpaceSize must have been validated"); 2205 } 2206 if (use_requested_addr && !is_aligned(static_mapinfo->requested_base_address(), reserved_space_alignment())) { 2207 return NULL; 2208 } 2209 2210 // Size and requested location of the archive_space_rs (for both static and dynamic archives) 2211 size_t base_offset = static_mapinfo->mapping_base_offset(); 2212 size_t end_offset = (dynamic_mapinfo == NULL) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset(); 2213 assert(base_offset == 0, "must be"); 2214 assert(is_aligned(end_offset, os::vm_allocation_granularity()), "must be"); 2215 assert(is_aligned(base_offset, os::vm_allocation_granularity()), "must be"); 2216 2217 // In case reserved_space_alignment() != os::vm_allocation_granularity() 2218 assert((size_t)os::vm_allocation_granularity() <= reserved_space_alignment(), "must be"); 2219 end_offset = align_up(end_offset, reserved_space_alignment()); 2220 2221 size_t archive_space_size = end_offset - base_offset; 2222 2223 // Special handling for Windows because it cannot mmap into a reserved space: 2224 // use_requested_addr: We just map each region individually, and give up if any one of them fails. 2225 // !use_requested_addr: We reserve the space first, and then os::read in all the regions (instead of mmap). 2226 // We're going to patch all the pointers anyway so there's no benefit for mmap. 2227 2228 if (use_requested_addr) { 2229 char* archive_space_base = static_mapinfo->requested_base_address() + base_offset; 2230 char* archive_space_end = archive_space_base + archive_space_size; 2231 if (!MetaspaceShared::use_windows_memory_mapping()) { 2232 archive_space_rs = reserve_shared_space(archive_space_size, archive_space_base); 2233 if (!archive_space_rs.is_reserved()) { 2234 return NULL; 2235 } 2236 } 2237 if (use_klass_space) { 2238 // Make sure we can map the klass space immediately following the archive_space space 2239 // Don't call reserve_shared_space here as that may try to enforce platform-specific 2240 // alignment rules which only apply to the archive base address 2241 char* class_space_base = archive_space_end; 2242 class_space_rs = ReservedSpace(class_space_size, reserved_space_alignment(), 2243 false /* large_pages */, class_space_base); 2244 if (!class_space_rs.is_reserved()) { 2245 return NULL; 2246 } 2247 } 2248 return static_mapinfo->requested_base_address(); 2249 } else { 2250 if (use_klass_space) { 2251 main_rs = reserve_shared_space(archive_space_size + class_space_size); 2252 if (main_rs.is_reserved()) { 2253 archive_space_rs = main_rs.first_part(archive_space_size, reserved_space_alignment(), /*split=*/true); 2254 class_space_rs = main_rs.last_part(archive_space_size); 2255 } 2256 } else { 2257 main_rs = reserve_shared_space(archive_space_size); 2258 archive_space_rs = main_rs; 2259 } 2260 if (archive_space_rs.is_reserved()) { 2261 return archive_space_rs.base(); 2262 } else { 2263 return NULL; 2264 } 2265 } 2266 } 2267 2268 void MetaspaceShared::release_reserved_spaces(ReservedSpace& main_rs, 2269 ReservedSpace& archive_space_rs, 2270 ReservedSpace& class_space_rs) { 2271 if (main_rs.is_reserved()) { 2272 assert(main_rs.contains(archive_space_rs.base()), "must be"); 2273 assert(main_rs.contains(class_space_rs.base()), "must be"); 2274 log_debug(cds)("Released shared space (archive+classes) " INTPTR_FORMAT, p2i(main_rs.base())); 2275 main_rs.release(); 2276 } else { 2277 if (archive_space_rs.is_reserved()) { 2278 log_debug(cds)("Released shared space (archive) " INTPTR_FORMAT, p2i(archive_space_rs.base())); 2279 archive_space_rs.release(); 2280 } 2281 if (class_space_rs.is_reserved()) { 2282 log_debug(cds)("Released shared space (classes) " INTPTR_FORMAT, p2i(class_space_rs.base())); 2283 class_space_rs.release(); 2284 } 2285 } 2286 } 2287 2288 static int archive_regions[] = {MetaspaceShared::mc, 2289 MetaspaceShared::rw, 2290 MetaspaceShared::ro}; 2291 static int archive_regions_count = 3; 2292 2293 MapArchiveResult MetaspaceShared::map_archive(FileMapInfo* mapinfo, char* mapped_base_address, ReservedSpace rs) { 2294 assert(UseSharedSpaces, "must be runtime"); 2295 if (mapinfo == NULL) { 2296 return MAP_ARCHIVE_SUCCESS; // The dynamic archive has not been specified. No error has happened -- trivially succeeded. 2297 } 2298 2299 mapinfo->set_is_mapped(false); 2300 2301 if (mapinfo->alignment() != (size_t)os::vm_allocation_granularity()) { 2302 log_error(cds)("Unable to map CDS archive -- os::vm_allocation_granularity() expected: " SIZE_FORMAT 2303 " actual: %d", mapinfo->alignment(), os::vm_allocation_granularity()); 2304 return MAP_ARCHIVE_OTHER_FAILURE; 2305 } 2306 2307 MapArchiveResult result = 2308 mapinfo->map_regions(archive_regions, archive_regions_count, mapped_base_address, rs); 2309 2310 if (result != MAP_ARCHIVE_SUCCESS) { 2311 unmap_archive(mapinfo); 2312 return result; 2313 } 2314 2315 if (mapinfo->is_static()) { 2316 if (!mapinfo->validate_shared_path_table()) { 2317 unmap_archive(mapinfo); 2318 return MAP_ARCHIVE_OTHER_FAILURE; 2319 } 2320 } else { 2321 if (!DynamicArchive::validate(mapinfo)) { 2322 unmap_archive(mapinfo); 2323 return MAP_ARCHIVE_OTHER_FAILURE; 2324 } 2325 } 2326 2327 mapinfo->set_is_mapped(true); 2328 return MAP_ARCHIVE_SUCCESS; 2329 } 2330 2331 void MetaspaceShared::unmap_archive(FileMapInfo* mapinfo) { 2332 assert(UseSharedSpaces, "must be runtime"); 2333 if (mapinfo != NULL) { 2334 mapinfo->unmap_regions(archive_regions, archive_regions_count); 2335 mapinfo->set_is_mapped(false); 2336 } 2337 } 2338 2339 // Read the miscellaneous data from the shared file, and 2340 // serialize it out to its various destinations. 2341 2342 void MetaspaceShared::initialize_shared_spaces() { 2343 FileMapInfo *static_mapinfo = FileMapInfo::current_info(); 2344 _i2i_entry_code_buffers = static_mapinfo->i2i_entry_code_buffers(); 2345 _i2i_entry_code_buffers_size = static_mapinfo->i2i_entry_code_buffers_size(); 2346 char* buffer = static_mapinfo->cloned_vtables(); 2347 clone_cpp_vtables((intptr_t*)buffer); 2348 2349 // Verify various attributes of the archive, plus initialize the 2350 // shared string/symbol tables 2351 buffer = static_mapinfo->serialized_data(); 2352 intptr_t* array = (intptr_t*)buffer; 2353 ReadClosure rc(&array); 2354 serialize(&rc); 2355 2356 // Initialize the run-time symbol table. 2357 SymbolTable::create_table(); 2358 2359 static_mapinfo->patch_archived_heap_embedded_pointers(); 2360 2361 // Close the mapinfo file 2362 static_mapinfo->close(); 2363 2364 FileMapInfo *dynamic_mapinfo = FileMapInfo::dynamic_info(); 2365 if (dynamic_mapinfo != NULL) { 2366 intptr_t* buffer = (intptr_t*)dynamic_mapinfo->serialized_data(); 2367 ReadClosure rc(&buffer); 2368 SymbolTable::serialize_shared_table_header(&rc, false); 2369 SystemDictionaryShared::serialize_dictionary_headers(&rc, false); 2370 dynamic_mapinfo->close(); 2371 } 2372 2373 if (PrintSharedArchiveAndExit) { 2374 if (PrintSharedDictionary) { 2375 tty->print_cr("\nShared classes:\n"); 2376 SystemDictionaryShared::print_on(tty); 2377 } 2378 if (FileMapInfo::current_info() == NULL || _archive_loading_failed) { 2379 tty->print_cr("archive is invalid"); 2380 vm_exit(1); 2381 } else { 2382 tty->print_cr("archive is valid"); 2383 vm_exit(0); 2384 } 2385 } 2386 } 2387 2388 // JVM/TI RedefineClasses() support: 2389 bool MetaspaceShared::remap_shared_readonly_as_readwrite() { 2390 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 2391 2392 if (UseSharedSpaces) { 2393 // remap the shared readonly space to shared readwrite, private 2394 FileMapInfo* mapinfo = FileMapInfo::current_info(); 2395 if (!mapinfo->remap_shared_readonly_as_readwrite()) { 2396 return false; 2397 } 2398 if (FileMapInfo::dynamic_info() != NULL) { 2399 mapinfo = FileMapInfo::dynamic_info(); 2400 if (!mapinfo->remap_shared_readonly_as_readwrite()) { 2401 return false; 2402 } 2403 } 2404 _remapped_readwrite = true; 2405 } 2406 return true; 2407 } 2408 2409 void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) { 2410 // This is highly unlikely to happen on 64-bits because we have reserved a 4GB space. 2411 // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes 2412 // or so. 2413 _mc_region.print_out_of_space_msg(name, needed_bytes); 2414 _rw_region.print_out_of_space_msg(name, needed_bytes); 2415 _ro_region.print_out_of_space_msg(name, needed_bytes); 2416 2417 vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name), 2418 "Please reduce the number of shared classes."); 2419 } 2420 2421 // This is used to relocate the pointers so that the archive can be mapped at 2422 // Arguments::default_SharedBaseAddress() without runtime relocation. 2423 intx MetaspaceShared::final_delta() { 2424 return intx(Arguments::default_SharedBaseAddress()) // We want the archive to be mapped to here at runtime 2425 - intx(SharedBaseAddress); // .. but the archive is mapped at here at dump time 2426 }