/*
 * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/classListParser.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/loaderConstraints.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/placeholders.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodes.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "memory/dynamicArchive.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/signature.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/hashtable.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

ReservedSpace MetaspaceShared::_shared_rs;
VirtualSpace MetaspaceShared::_shared_vs;
MetaspaceSharedStats MetaspaceShared::_stats;
bool MetaspaceShared::_has_error_classes;
bool MetaspaceShared::_archive_loading_failed = false;
bool MetaspaceShared::_remapped_readwrite = false;
address MetaspaceShared::_i2i_entry_code_buffers = NULL;
size_t MetaspaceShared::_i2i_entry_code_buffers_size = 0;
size_t MetaspaceShared::_core_spaces_size = 0;
void* MetaspaceShared::_shared_metaspace_static_top = NULL;
// The CDS archive is divided into the following regions:
//     mc  - misc code (the method entry trampolines)
//     rw  - read-write metadata
//     ro  - read-only metadata and read-only tables
//     md  - misc data (the c++ vtables)
//
//     ca0 - closed archive heap space #0
//     ca1 - closed archive heap space #1 (may be empty)
//     oa0 - open archive heap space #0
//     oa1 - open archive heap space #1 (may be empty)
//
// The mc, rw, ro, and md regions are linearly allocated, starting from
// SharedBaseAddress, in the order of mc->rw->ro->md. The sizes of these 4 regions
// are page-aligned, and there's no gap between any consecutive regions.
//
// These 4 regions are populated in the following steps:
// [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
//     temporarily allocated outside of the shared regions. Only the method entry
//     trampolines are written into the mc region.
// [2] ArchiveCompactor copies RW metadata into the rw region.
// [3] ArchiveCompactor copies RO metadata into the ro region.
// [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
//     are copied into the ro region as read-only tables.
// [5] C++ vtables are copied into the md region.
//
// The ca0/ca1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
// Their layout is independent of the other 4 regions.

char* DumpRegion::expand_top_to(char* newtop) {
  assert(is_allocatable(), "must be initialized and not packed");
  assert(newtop >= _top, "must not grow backwards");
  if (newtop > _end) {
    MetaspaceShared::report_out_of_space(_name, newtop - _top);
    ShouldNotReachHere();
  }
  uintx delta;
  if (DynamicDumpSharedSpaces) {
    delta = DynamicArchive::object_delta_uintx(newtop);
  } else {
    delta = MetaspaceShared::object_delta_uintx(newtop);
  }
  if (delta > MAX_SHARED_DELTA) {
    // This is just a sanity check and should not appear in any real world usage. This
    // happens only if you allocate more than 2GB of shared objects and would require
    // millions of shared classes.
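    // (MAX_SHARED_DELTA bounds how far above SharedBaseAddress an archived object
    // may sit while its offset can still be encoded; see object_delta_uintx() below.)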
    vm_exit_during_initialization("Out of memory in the CDS archive",
                                  "Please reduce the number of shared classes.");
  }

  MetaspaceShared::commit_shared_space_to(newtop);
  _top = newtop;
  return _top;
}

char* DumpRegion::allocate(size_t num_bytes, size_t alignment) {
  char* p = (char*)align_up(_top, alignment);
  char* newtop = p + align_up(num_bytes, alignment);
  expand_top_to(newtop);
  memset(p, 0, newtop - p);
  return p;
}

void DumpRegion::print(size_t total_bytes) const {
  tty->print_cr("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
                _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()), p2i(_base));
}

void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
  tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
             _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
  if (strcmp(_name, failing_region) == 0) {
    tty->print_cr(" required = %d", int(needed_bytes));
  } else {
    tty->cr();
  }
}

void DumpRegion::pack(DumpRegion* next) {
  assert(!is_packed(), "sanity");
  _end = (char*)align_up(_top, Metaspace::reserve_alignment());
  _is_packed = true;
  if (next != NULL) {
    next->_base = next->_top = this->_end;
    next->_end = MetaspaceShared::shared_rs()->end();
  }
}

DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md");
size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;

void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space, address first_space_bottom) {
  // Start with 0 committed bytes. The memory will be committed as needed by
  // MetaspaceShared::commit_shared_space_to().
  if (!_shared_vs.initialize(_shared_rs, 0)) {
    vm_exit_during_initialization("Unable to allocate memory for shared space");
  }
  first_space->init(&_shared_rs, (char*)first_space_bottom);
}

DumpRegion* MetaspaceShared::misc_code_dump_space() {
  return &_mc_region;
}

DumpRegion* MetaspaceShared::read_write_dump_space() {
  return &_rw_region;
}

DumpRegion* MetaspaceShared::read_only_dump_space() {
  return &_ro_region;
}

void MetaspaceShared::pack_dump_space(DumpRegion* current, DumpRegion* next,
                                      ReservedSpace* rs) {
  current->pack(next);
}

char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
  return _mc_region.allocate(num_bytes);
}

char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
  return _ro_region.allocate(num_bytes);
}

void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
  assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");

  // If using shared space, open the file that contains the shared space
  // and map in the memory before initializing the rest of metaspace (so
  // the addresses don't conflict)
  FileMapInfo* mapinfo = new FileMapInfo(true);

  // Open the shared archive file, read and validate the header. If
  // initialization fails, shared spaces [UseSharedSpaces] are
  // disabled and the file is closed.
  // Map in spaces now also
  if (mapinfo->initialize(true) && map_shared_spaces(mapinfo)) {
    size_t cds_total = core_spaces_size();
    address cds_address = (address)mapinfo->region_addr(0);
    char* cds_end = (char *)align_up(cds_address + cds_total,
                                     Metaspace::reserve_alignment());

    // Mapping the dynamic archive before allocating the class space
    cds_end = initialize_dynamic_runtime_shared_spaces((char*)cds_address, cds_end);

#ifdef _LP64
    if (Metaspace::using_class_space()) {
      // If UseCompressedClassPointers is set then allocate the metaspace area
      // above the heap and above the CDS area (if it exists).
      Metaspace::allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
      // map_heap_regions() compares the current narrow oop and klass encodings
      // with the archived ones, so it must be done after all encodings are determined.
      mapinfo->map_heap_regions();
    }
    CompressedKlassPointers::set_range(CompressedClassSpaceSize);
#endif // _LP64
  } else {
    assert(!mapinfo->is_open() && !UseSharedSpaces,
           "archive file not closed or shared spaces not disabled.");
  }
}

char* MetaspaceShared::initialize_dynamic_runtime_shared_spaces(
        char* static_start, char* static_end) {
  assert(UseSharedSpaces, "must be runtime");
  char* cds_end = static_end;
  if (!DynamicDumpSharedSpaces) {
    address dynamic_top = DynamicArchive::map();
    if (dynamic_top != NULL) {
      assert(dynamic_top > (address)static_start, "Unexpected layout");
      MetaspaceObj::expand_shared_metaspace_range(dynamic_top);
      cds_end = (char *)align_up(dynamic_top, Metaspace::reserve_alignment());
    }
  }
  return cds_end;
}

ReservedSpace* MetaspaceShared::reserve_shared_rs(size_t size, size_t alignment,
                                                  bool large, char* requested_address) {
  if (requested_address != NULL) {
    _shared_rs = ReservedSpace(size, alignment, large, requested_address);
  } else {
    _shared_rs = ReservedSpace(size, alignment, large);
  }
  return &_shared_rs;
}

void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
  assert(DumpSharedSpaces, "should be called for dump time only");
  const size_t reserve_alignment = Metaspace::reserve_alignment();
  bool large_pages = false; // No large pages when dumping the CDS archive.
  char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);

#ifdef _LP64
  // On 64-bit VM, the heap and class space layout will be the same as if
  // you're running in -Xshare:on mode:
  //
  //                         +-- SharedBaseAddress (default = 0x800000000)
  //                         v
  // +-..---------+---------+ ... +----+----+----+----+---------------+
  // |    Heap    | Archive |     | MC | RW | RO | MD |  class space  |
  // +-..---------+---------+ ... +----+----+----+----+---------------+
  // |<--   MaxHeapSize  -->|     |<-- UnscaledClassSpaceMax = 4GB -->|
  //
  const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
  const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
#else
  // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
  size_t cds_total = align_down(256*M, reserve_alignment);
#endif

  // First try to reserve the space at the specified SharedBaseAddress.
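  // If the requested base is unavailable, we fall back to an arbitrary address
  // (see the else-branch below); SharedBaseAddress is then updated to whatever
  // base we actually got.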
  //_shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
  reserve_shared_rs(cds_total, reserve_alignment, large_pages, shared_base);
  if (_shared_rs.is_reserved()) {
    assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
  } else {
    // Get a mmap region anywhere if the SharedBaseAddress fails.
    //_shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages);
    reserve_shared_rs(cds_total, reserve_alignment, large_pages, NULL);
  }
  if (!_shared_rs.is_reserved()) {
    vm_exit_during_initialization("Unable to reserve memory for shared space",
                                  err_msg(SIZE_FORMAT " bytes.", cds_total));
  }

#ifdef _LP64
  // During dump time, we allocate 4GB (UnscaledClassSpaceMax) of space and split it up:
  // + The upper 1 GB is used as the "temporary compressed class space" -- preload_classes()
  //   will store Klasses into this space.
  // + The lower 3 GB is used for the archive -- when preload_classes() is done,
  //   ArchiveCompactor will copy the class metadata into this space, first the RW parts,
  //   then the RO parts.

  assert(UseCompressedOops && UseCompressedClassPointers,
         "UseCompressedOops and UseCompressedClassPointers must be set");

  size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment);
  ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
  CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment);
  _shared_rs = _shared_rs.first_part(max_archive_size);

  // Set up compressed class pointers.
  CompressedKlassPointers::set_base((address)_shared_rs.base());
  // Set narrow_klass_shift to be LogKlassAlignmentInBytes. This is consistent
  // with AOT.
  CompressedKlassPointers::set_shift(LogKlassAlignmentInBytes);
  // Set the range of klass addresses to 4GB.
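  // (cds_total is still UnscaledClassSpaceMax == 4GB at this point, so the range
  // covers both the archive in the lower 3GB and the temporary class space above it.)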
  CompressedKlassPointers::set_range(cds_total);

  Metaspace::initialize_class_space(tmp_class_space);
  log_info(cds)("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
                p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());

  log_info(cds)("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                CompressedClassSpaceSize, p2i(tmp_class_space.base()));
#endif

  init_shared_dump_space(&_mc_region);
  SharedBaseAddress = (size_t)_shared_rs.base();
  tty->print_cr("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                _shared_rs.size(), p2i(_shared_rs.base()));
}

// Called by universe_post_init()
void MetaspaceShared::post_initialize(TRAPS) {
  if (UseSharedSpaces) {
    int size = FileMapInfo::get_number_of_shared_paths();
    if (size > 0) {
      SystemDictionaryShared::allocate_shared_data_arrays(size, THREAD);
      if (!DynamicDumpSharedSpaces) {
        FileMapInfo* info;
        if (FileMapInfo::dynamic_info() == NULL) {
          info = FileMapInfo::current_info();
        } else {
          info = FileMapInfo::dynamic_info();
        }
        ClassLoaderExt::init_paths_start_index(info->app_class_paths_start_index());
        ClassLoaderExt::init_app_module_paths_start_index(info->app_module_paths_start_index());
      }
    }
  }
}

static GrowableArray<Handle>* _extra_interned_strings = NULL;

void MetaspaceShared::read_extra_data(const char* filename, TRAPS) {
  _extra_interned_strings = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10000, true);

  HashtableTextDump reader(filename);
  reader.check_version("VERSION: 1.0");

  while (reader.remain() > 0) {
    int utf8_length;
    int prefix_type = reader.scan_prefix(&utf8_length);
    ResourceMark rm(THREAD);
    if (utf8_length == 0x7fffffff) {
      // buf_len would overflow a 32-bit value.
      vm_exit_during_initialization(err_msg("string length too large: %d", utf8_length));
    }
    int buf_len = utf8_length+1;
    char* utf8_buffer = NEW_RESOURCE_ARRAY(char, buf_len);
    reader.get_utf8(utf8_buffer, utf8_length);
    utf8_buffer[utf8_length] = '\0';

    if (prefix_type == HashtableTextDump::SymbolPrefix) {
      SymbolTable::new_permanent_symbol(utf8_buffer);
    } else {
      assert(prefix_type == HashtableTextDump::StringPrefix, "Sanity");
      oop s = StringTable::intern(utf8_buffer, THREAD);

      if (HAS_PENDING_EXCEPTION) {
        log_warning(cds, heap)("[line %d] extra interned string allocation failed; size too large: %d",
                               reader.last_line_no(), utf8_length);
        CLEAR_PENDING_EXCEPTION;
      } else {
#if INCLUDE_G1GC
        if (UseG1GC) {
          typeArrayOop body = java_lang_String::value(s);
          const HeapRegion* hr = G1CollectedHeap::heap()->heap_region_containing(body);
          if (hr->is_humongous()) {
            // Don't keep it alive, so it will be GC'ed before we dump the strings, in order
            // to maximize free heap space and minimize fragmentation.
            log_warning(cds, heap)("[line %d] extra interned string ignored; size too large: %d",
                                   reader.last_line_no(), utf8_length);
            continue;
          }
        }
#endif
        // Interned strings are GC'ed if there are no references to them, so let's
        // add a reference to keep this string alive.
        assert(s != NULL, "must succeed");
        Handle h(THREAD, s);
        _extra_interned_strings->append(h);
      }
    }
  }
}

void MetaspaceShared::commit_shared_space_to(char* newtop) {
  assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "dump-time only");
  char* base = _shared_rs.base();
  size_t need_committed_size = newtop - base;
  size_t has_committed_size = _shared_vs.committed_size();
  if (need_committed_size < has_committed_size) {
    return;
  }

  size_t min_bytes = need_committed_size - has_committed_size;
  size_t preferred_bytes = 1 * M;
  size_t uncommitted = _shared_vs.reserved_size() - has_committed_size;

  size_t commit = MAX2(min_bytes, preferred_bytes);
  commit = MIN2(commit, uncommitted);
  assert(commit <= uncommitted, "sanity");

  bool result = _shared_vs.expand_by(commit, false);
  if (!result) {
    vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
                                          need_committed_size));
  }

  log_info(cds)("Expanding shared spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
                commit, _shared_vs.actual_committed_size(), _shared_vs.high());
}

// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.

void MetaspaceShared::serialize(SerializeClosure* soc) {
  int tag = 0;
  soc->do_tag(--tag);

  // Verify the sizes of various metadata in the system.
  soc->do_tag(sizeof(Method));
  soc->do_tag(sizeof(ConstMethod));
  soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(ConstantPool));
  soc->do_tag(sizeof(ConstantPoolCache));
  soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
  soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(Symbol));

  // Dump/restore miscellaneous metadata.
  Universe::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore references to commonly used names and signatures.
  vmSymbols::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore the symbol/string/subgraph_info tables
  SymbolTable::serialize_shared_table_header(soc);
  StringTable::serialize_shared_table_header(soc);
  HeapShared::serialize_subgraph_info_table_header(soc);
  SystemDictionaryShared::serialize_dictionary_headers(soc);

  JavaClasses::serialize_offsets(soc);
  InstanceMirrorKlass::serialize_offsets(soc);
  soc->do_tag(--tag);

  serialize_cloned_cpp_vtptrs(soc);
  soc->do_tag(--tag);

  soc->do_tag(666);
}

address MetaspaceShared::i2i_entry_code_buffers(size_t total_size) {
  if (DumpSharedSpaces) {
    if (_i2i_entry_code_buffers == NULL) {
      _i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
      _i2i_entry_code_buffers_size = total_size;
    }
  } else if (UseSharedSpaces) {
    assert(_i2i_entry_code_buffers != NULL, "must already be initialized");
  } else {
    return NULL;
  }

  assert(_i2i_entry_code_buffers_size == total_size, "must not change");
  return _i2i_entry_code_buffers;
}

uintx MetaspaceShared::object_delta_uintx(void* obj) {
  assert(DumpSharedSpaces || DynamicDumpSharedSpaces,
         "supported only for dumping");
  if (DumpSharedSpaces) {
    assert(shared_rs()->contains(obj), "must be");
  } else {
    assert(is_in_shared_metaspace(obj) || DynamicArchive::is_in_target_space(obj), "must be");
  }
  address base_address = address(SharedBaseAddress);
  uintx deltax = address(obj) - base_address;
  return deltax;
}

// Global object for holding classes that have been loaded. Since this
// is run at a safepoint just before exit, this is the entire set of classes.
static GrowableArray<Klass*>* _global_klass_objects;

GrowableArray<Klass*>* MetaspaceShared::collected_klasses() {
  return _global_klass_objects;
}

static void collect_array_classes(Klass* k) {
  _global_klass_objects->append_if_missing(k);
  if (k->is_array_klass()) {
    // Add in the array classes too
    ArrayKlass* ak = ArrayKlass::cast(k);
    Klass* h = ak->higher_dimension();
    if (h != NULL) {
      h->array_klasses_do(collect_array_classes);
    }
  }
}

class CollectClassesClosure : public KlassClosure {
  void do_klass(Klass* k) {
    if (k->is_instance_klass() &&
        SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(k))) {
      // Don't add to the _global_klass_objects
    } else {
      _global_klass_objects->append_if_missing(k);
    }
    if (k->is_array_klass()) {
      // Add in the array classes too
      ArrayKlass* ak = ArrayKlass::cast(k);
      Klass* h = ak->higher_dimension();
      if (h != NULL) {
        h->array_klasses_do(collect_array_classes);
      }
    }
  }
};

static void remove_unshareable_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
      // on their array classes.
      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
      k->remove_unshareable_info();
    }
  }
}

static void remove_java_mirror_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_java_mirror
      // on their array classes.
      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
      k->remove_java_mirror();
    }
  }
}

static void clear_basic_type_mirrors() {
  assert(!HeapShared::is_heap_object_archiving_allowed(), "Sanity");
  Universe::set_int_mirror(NULL);
  Universe::set_float_mirror(NULL);
  Universe::set_double_mirror(NULL);
  Universe::set_byte_mirror(NULL);
  Universe::set_bool_mirror(NULL);
  Universe::set_char_mirror(NULL);
  Universe::set_long_mirror(NULL);
  Universe::set_short_mirror(NULL);
  Universe::set_void_mirror(NULL);
}

static void rewrite_nofast_bytecode(Method* method) {
  BytecodeStream bcs(method);
  while (!bcs.is_last_bytecode()) {
    Bytecodes::Code opcode = bcs.next();
    switch (opcode) {
    case Bytecodes::_getfield: *bcs.bcp() = Bytecodes::_nofast_getfield; break;
    case Bytecodes::_putfield: *bcs.bcp() = Bytecodes::_nofast_putfield; break;
    case Bytecodes::_aload_0:  *bcs.bcp() = Bytecodes::_nofast_aload_0;  break;
    case Bytecodes::_iload: {
      if (!bcs.is_wide()) {
        *bcs.bcp() = Bytecodes::_nofast_iload;
      }
      break;
    }
    default: break;
    }
  }
}

// Walk all methods in the class list to ensure that they won't be modified at
// run time. This includes:
// [1] Rewrite all bytecodes as needed, so that the ConstMethod* will not be modified
//     at run time by RewriteBytecodes/RewriteFrequentPairs
// [2] Assign a fingerprint, so one doesn't need to be assigned at run-time.
static void rewrite_nofast_bytecodes_and_calculate_fingerprints() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(ik);
    }
  }
}

void MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(InstanceKlass* ik) {
  for (int i = 0; i < ik->methods()->length(); i++) {
    Method* m = ik->methods()->at(i);
    rewrite_nofast_bytecode(m);
    Fingerprinter fp(m);
    // The side effect of this call sets the method's fingerprint field.
    fp.fingerprint();
  }
}

// Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
// (In GCC this is the field <Type>::_vptr, i.e., first word in the object.)
//
// Addresses of the vtables and the methods may be different across JVM runs,
// if libjvm.so is dynamically loaded at a different base address.
//
// To ensure that the Metadata objects in the CDS archive always have the correct vtable:
//
// + at dump time:  we redirect the _vptr to point to our own vtables inside
//                  the CDS image
// + at run time:   we clone the actual contents of the vtables from libjvm.so
//                  into our own tables.

// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
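// The macro below is an X-macro: instantiating CPP_VTABLE_PATCH_TYPES_DO with a
// one-argument macro applies that macro to each type in the list. For example,
// CPP_VTABLE_PATCH_TYPES_DO(DECLARE_CLONED_VTABLE_KIND) -- see further below --
// expands to the enum constants ConstantPool_Kind, InstanceKlass_Kind, ...,
// TypeArrayKlass_Kind.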
#define CPP_VTABLE_PATCH_TYPES_DO(f) \
  f(ConstantPool) \
  f(InstanceKlass) \
  f(InstanceClassLoaderKlass) \
  f(InstanceMirrorKlass) \
  f(InstanceRefKlass) \
  f(Method) \
  f(ObjArrayKlass) \
  f(TypeArrayKlass)

class CppVtableInfo {
  intptr_t _vtable_size;
  intptr_t _cloned_vtable[1];
public:
  static int num_slots(int vtable_size) {
    return 1 + vtable_size; // Need to add the space occupied by _vtable_size.
  }
  int vtable_size()           { return int(uintx(_vtable_size)); }
  void set_vtable_size(int n) { _vtable_size = intptr_t(n); }
  intptr_t* cloned_vtable()   { return &_cloned_vtable[0]; }
  void zero()                 { memset(_cloned_vtable, 0, sizeof(intptr_t) * vtable_size()); }
  // Returns the address of the next CppVtableInfo that can be placed immediately after this CppVtableInfo
  static size_t byte_size(int vtable_size) {
    CppVtableInfo i;
    return pointer_delta(&i._cloned_vtable[vtable_size], &i, sizeof(u1));
  }
};

template <class T> class CppVtableCloner : public T {
  static intptr_t* vtable_of(Metadata& m) {
    return *((intptr_t**)&m);
  }
  static CppVtableInfo* _info;

  static int get_vtable_length(const char* name);

public:
  // Allocate and initialize the C++ vtable, starting from top, but do not go past end.
  static intptr_t* allocate(const char* name);

  // Clone the vtable to ...
  static intptr_t* clone_vtable(const char* name, CppVtableInfo* info);

  static void zero_vtable_clone() {
    assert(DumpSharedSpaces, "dump-time only");
    _info->zero();
  }

  // Switch the vtable pointer to point to the cloned vtable.
  static void patch(Metadata* obj) {
    assert(DumpSharedSpaces, "dump-time only");
    *(void**)obj = (void*)(_info->cloned_vtable());
  }

  static bool is_valid_shared_object(const T* obj) {
    intptr_t* vptr = *(intptr_t**)obj;
    return vptr == _info->cloned_vtable();
  }
};

template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;

template <class T>
intptr_t* CppVtableCloner<T>::allocate(const char* name) {
  assert(is_aligned(_md_region.top(), sizeof(intptr_t)), "bad alignment");
  int n = get_vtable_length(name);
  _info = (CppVtableInfo*)_md_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
  _info->set_vtable_size(n);

  intptr_t* p = clone_vtable(name, _info);
  assert((char*)p == _md_region.top(), "must be");

  return _info->cloned_vtable();
}

template <class T>
intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) {
  if (!DumpSharedSpaces) {
    assert(_info == 0, "_info is initialized only at dump time");
    _info = info; // Remember it -- it will be used by MetaspaceShared::is_valid_shared_method()
  }
  T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
  int n = info->vtable_size();
  intptr_t* srcvtable = vtable_of(tmp);
  intptr_t* dstvtable = info->cloned_vtable();

  // We already checked (and, if necessary, adjusted n) when the vtables were allocated, so we are
  // safe to do memcpy.
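  // Note that the copied slots are function addresses that are only meaningful in
  // the current process; at dump time the clones are zeroed again before the archive
  // is written (see zero_cpp_vtable_clones_for_writing()) and re-cloned at run time.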
  log_debug(cds, vtables)("Copying %3d vtable entries for %s", n, name);
  memcpy(dstvtable, srcvtable, sizeof(intptr_t) * n);
  return dstvtable + n;
}

// To determine the size of the vtable for each type, we use the following
// trick by declaring 2 subclasses:
//
//   class CppVtableTesterB: public InstanceKlass {virtual int   last_virtual_method() {return 1;}    };
//   class CppVtableTesterA: public InstanceKlass {virtual void* last_virtual_method() {return NULL;} };
//
// CppVtableTesterA and CppVtableTesterB's vtables have the following properties:
// - Their size (N+1) is exactly one more than the size of InstanceKlass's vtable (N)
// - The first N entries are exactly the same as in InstanceKlass's vtable.
// - Their last entry is different.
//
// So to determine the value of N, we just walk CppVtableTesterA and CppVtableTesterB's tables
// and find the first entry that's different.
//
// This works on all C++ compilers supported by Oracle, but you may need to tweak it for more
// esoteric compilers.

template <class T> class CppVtableTesterB: public T {
public:
  virtual int last_virtual_method() {return 1;}
};

template <class T> class CppVtableTesterA : public T {
public:
  virtual void* last_virtual_method() {
    // Make this different than CppVtableTesterB::last_virtual_method so the C++
    // compiler/linker won't alias the two functions.
    return NULL;
  }
};

template <class T>
int CppVtableCloner<T>::get_vtable_length(const char* name) {
  CppVtableTesterA<T> a;
  CppVtableTesterB<T> b;

  intptr_t* avtable = vtable_of(a);
  intptr_t* bvtable = vtable_of(b);

  // Start at slot 1, because slot 0 may be RTTI (on Solaris/Sparc)
  int vtable_len = 1;
  for (; ; vtable_len++) {
    if (avtable[vtable_len] != bvtable[vtable_len]) {
      break;
    }
  }
  log_debug(cds, vtables)("Found   %3d vtable entries for %s", vtable_len, name);

  return vtable_len;
}

#define ALLOC_CPP_VTABLE_CLONE(c) \
  _cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner<c>::allocate(#c);

#define CLONE_CPP_VTABLE(c) \
  p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);

#define ZERO_CPP_VTABLE(c) \
  CppVtableCloner<c>::zero_vtable_clone();

//------------------------------ for DynamicDumpSharedSpaces - start
#define DECLARE_CLONED_VTABLE_KIND(c) c ## _Kind,

enum {
  CPP_VTABLE_PATCH_TYPES_DO(DECLARE_CLONED_VTABLE_KIND)
  _num_cloned_vtable_kinds
};

static intptr_t** _cloned_cpp_vtptrs = NULL;

void MetaspaceShared::serialize_cloned_cpp_vtptrs(SerializeClosure* soc) {
  soc->do_ptr((void**)&_cloned_cpp_vtptrs);
}

intptr_t* MetaspaceShared::fix_cpp_vtable_for_dynamic_archive(MetaspaceObj::Type msotype, address obj) {
  assert(DynamicDumpSharedSpaces, "must");
  int kind = -1;
  switch (msotype) {
  case MetaspaceObj::SymbolType:
  case MetaspaceObj::TypeArrayU1Type:
  case MetaspaceObj::TypeArrayU2Type:
  case MetaspaceObj::TypeArrayU4Type:
  case MetaspaceObj::TypeArrayU8Type:
  case MetaspaceObj::TypeArrayOtherType:
  case MetaspaceObj::ConstMethodType:
  case MetaspaceObj::ConstantPoolCacheType:
  case MetaspaceObj::AnnotationsType:
  case MetaspaceObj::MethodCountersType:
    // These have no vtables.
    break;
  case MetaspaceObj::ClassType:
    {
      Klass* k = (Klass*)obj;
      assert(k->is_klass(), "must be");
      if (k->is_instance_klass()) {
        kind = InstanceKlass_Kind;
      } else {
        assert(k->is_objArray_klass(),
               "We shouldn't archive any other klasses in DynamicDumpSharedSpaces");
        kind = ObjArrayKlass_Kind;
      }
    }
    break;

  case MetaspaceObj::MethodType:
    {
      Method* m = (Method*)obj;
      assert(m->is_method(), "must be");
      kind = Method_Kind;
    }
    break;

  case MetaspaceObj::MethodDataType:
    // We don't archive MethodData <-- should have been removed in remove_unshareable_info
    ShouldNotReachHere();
    break;

  case MetaspaceObj::ConstantPoolType:
    {
      ConstantPool *cp = (ConstantPool*)obj;
      assert(cp->is_constantPool(), "must be");
      kind = ConstantPool_Kind;
    }
    break;

  default:
    ShouldNotReachHere();
  }

  if (kind >= 0) {
    assert(kind < _num_cloned_vtable_kinds, "must be");
    return _cloned_cpp_vtptrs[kind];
  } else {
    return NULL;
  }
}

//------------------------------ for DynamicDumpSharedSpaces - end

// This can be called at both dump time and run time.
intptr_t* MetaspaceShared::clone_cpp_vtables(intptr_t* p) {
  assert(DumpSharedSpaces || UseSharedSpaces, "sanity");
  CPP_VTABLE_PATCH_TYPES_DO(CLONE_CPP_VTABLE);
  return p;
}

void MetaspaceShared::zero_cpp_vtable_clones_for_writing() {
  assert(DumpSharedSpaces, "dump-time only");
  CPP_VTABLE_PATCH_TYPES_DO(ZERO_CPP_VTABLE);
}

// Allocate and initialize the C++ vtables, starting from top, but do not go past end.
void MetaspaceShared::allocate_cpp_vtable_clones() {
  assert(DumpSharedSpaces, "dump-time only");
  // Layout (each slot is an intptr_t):
  //   [number of slots in the first vtable = n1]
  //   [ <n1> slots for the first vtable]
  //   [number of slots in the second vtable = n2]
  //   [ <n2> slots for the second vtable]
  //   ...
  // The order of the vtables is the same as the CPP_VTABLE_PATCH_TYPES_DO macro.
  CPP_VTABLE_PATCH_TYPES_DO(ALLOC_CPP_VTABLE_CLONE);
}

// Switch the vtable pointer to point to the cloned vtable. We assume the
// vtable pointer is in the first slot in the object.
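// (That layout assumption -- the vptr occupying the first word -- is the same one
// vtable_of() relies on above, and is expected to hold for the C++ compilers that
// HotSpot supports; cf. the CppVtableTester comment earlier in this file.)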
void MetaspaceShared::patch_cpp_vtable_pointers() {
  int n = _global_klass_objects->length();
  for (int i = 0; i < n; i++) {
    Klass* obj = _global_klass_objects->at(i);
    if (obj->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(obj);
      if (ik->is_class_loader_instance_klass()) {
        CppVtableCloner<InstanceClassLoaderKlass>::patch(ik);
      } else if (ik->is_reference_instance_klass()) {
        CppVtableCloner<InstanceRefKlass>::patch(ik);
      } else if (ik->is_mirror_instance_klass()) {
        CppVtableCloner<InstanceMirrorKlass>::patch(ik);
      } else {
        CppVtableCloner<InstanceKlass>::patch(ik);
      }
      ConstantPool* cp = ik->constants();
      CppVtableCloner<ConstantPool>::patch(cp);
      for (int j = 0; j < ik->methods()->length(); j++) {
        Method* m = ik->methods()->at(j);
        CppVtableCloner<Method>::patch(m);
        assert(CppVtableCloner<Method>::is_valid_shared_object(m), "must be");
      }
    } else if (obj->is_objArray_klass()) {
      CppVtableCloner<ObjArrayKlass>::patch(obj);
    } else {
      assert(obj->is_typeArray_klass(), "sanity");
      CppVtableCloner<TypeArrayKlass>::patch(obj);
    }
  }
}

bool MetaspaceShared::is_valid_shared_method(const Method* m) {
  assert(is_in_shared_metaspace(m), "must be");
  return CppVtableCloner<Method>::is_valid_shared_object(m);
}

void WriteClosure::do_oop(oop* o) {
  if (*o == NULL) {
    _dump_region->append_intptr_t(0);
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archiving heap object is not allowed");
    _dump_region->append_intptr_t(
      (intptr_t)CompressedOops::encode_not_null(*o));
  }
}

void WriteClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    _dump_region->append_intptr_t(*(intptr_t*)start);
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}

// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public ResourceObj {
public:

  // Here's poor man's enum inheritance
#define SHAREDSPACE_OBJ_TYPES_DO(f) \
  METASPACE_OBJ_TYPES_DO(f) \
  f(SymbolHashentry) \
  f(SymbolBucket) \
  f(StringHashentry) \
  f(StringBucket) \
  f(Other)

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  static const char* type_name(Type type) {
    switch(type) {
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
    default:
      ShouldNotReachHere();
      return NULL;
    }
  }

public:
  enum { RO = 0, RW = 1 };

  int _counts[2][_number_of_types];
  int _bytes [2][_number_of_types];

  DumpAllocStats() {
    memset(_counts, 0, sizeof(_counts));
    memset(_bytes, 0, sizeof(_bytes));
  }

  void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
    int which = (read_only) ? RO : RW;
    _counts[which][type] ++;
    _bytes [which][type] += byte_size;
  }

  void record_other_type(int byte_size, bool read_only) {
    int which = (read_only) ? RO : RW;
    _bytes [which][OtherType] += byte_size;
  }
  void print_stats(int ro_all, int rw_all, int mc_all, int md_all);
};

void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all, int md_all) {
  // Calculate size of data that was not allocated by Metaspace::allocate()
  MetaspaceSharedStats *stats = MetaspaceShared::stats();

  // symbols
  _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
  _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;

  _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
  _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;

  // strings
  _counts[RO][StringHashentryType] = stats->string.hashentry_count;
  _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;

  _counts[RO][StringBucketType] = stats->string.bucket_count;
  _bytes [RO][StringBucketType] = stats->string.bucket_bytes;

  // TODO: count things like dictionary, vtable, etc
  _bytes[RW][OtherType] += mc_all + md_all;
  rw_all += mc_all + md_all; // mc/md are mapped Read/Write

  // prevent divide-by-zero
  if (ro_all < 1) {
    ro_all = 1;
  }
  if (rw_all < 1) {
    rw_all = 1;
  }

  int all_ro_count = 0;
  int all_ro_bytes = 0;
  int all_rw_count = 0;
  int all_rw_bytes = 0;

// To make fmt_stats be a syntactic constant (for format warnings), use #define.
#define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
  const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
  const char *hdr = "                        ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";

  LogMessage(cds) msg;

  msg.info("Detailed metadata info (excluding st regions; rw stats include md/mc regions):");
  msg.info("%s", hdr);
  msg.info("%s", sep);
  for (int type = 0; type < int(_number_of_types); type++) {
    const char *name = type_name((Type)type);
    int ro_count = _counts[RO][type];
    int ro_bytes = _bytes [RO][type];
    int rw_count = _counts[RW][type];
    int rw_bytes = _bytes [RW][type];
    int count = ro_count + rw_count;
    int bytes = ro_bytes + rw_bytes;

    double ro_perc = percent_of(ro_bytes, ro_all);
    double rw_perc = percent_of(rw_bytes, rw_all);
    double perc    = percent_of(bytes, ro_all + rw_all);

    msg.info(fmt_stats, name,
             ro_count, ro_bytes, ro_perc,
             rw_count, rw_bytes, rw_perc,
             count, bytes, perc);

    all_ro_count += ro_count;
    all_ro_bytes += ro_bytes;
    all_rw_count += rw_count;
    all_rw_bytes += rw_bytes;
  }

  int all_count = all_ro_count + all_rw_count;
  int all_bytes = all_ro_bytes + all_rw_bytes;

  double all_ro_perc = percent_of(all_ro_bytes, ro_all);
  double all_rw_perc = percent_of(all_rw_bytes, rw_all);
  double all_perc    = percent_of(all_bytes, ro_all + rw_all);

  msg.info("%s", sep);
  msg.info(fmt_stats, "Total",
           all_ro_count, all_ro_bytes, all_ro_perc,
           all_rw_count, all_rw_bytes, all_rw_perc,
           all_count, all_bytes, all_perc);

  assert(all_ro_bytes == ro_all, "everything should have been counted");
  assert(all_rw_bytes == rw_all, "everything should have been counted");

#undef fmt_stats
}

// Populate the shared space.
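//
// In outline, VM_PopulateDumpSharedSpace::doit() below runs at a safepoint and:
// collects all loaded classes, rewrites bytecodes and computes method fingerprints,
// strips unshareable state, copies the metadata into the shared regions via
// ArchiveCompactor, archives Java heap objects (where supported), and then writes
// the archive file and exits the VM.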
class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  GrowableArray<MemRegion> *_closed_archive_heap_regions;
  GrowableArray<MemRegion> *_open_archive_heap_regions;

  GrowableArray<ArchiveHeapOopmapInfo> *_closed_archive_heap_oopmaps;
  GrowableArray<ArchiveHeapOopmapInfo> *_open_archive_heap_oopmaps;

  void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                 GrowableArray<ArchiveHeapOopmapInfo>* oopmaps);
  void dump_symbols();
  char* dump_read_only_tables();
  void print_region_stats();
  void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                               const char *name, const size_t total_size);
public:

  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit();   // outline because gdb sucks
  static void write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec) {
    mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
  }
  bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace

class SortedSymbolClosure: public SymbolClosure {
  GrowableArray<Symbol*> _symbols;
  virtual void do_symbol(Symbol** sym) {
    assert((*sym)->is_permanent(), "archived symbols must be permanent");
    _symbols.append(*sym);
  }
  static int compare_symbols_by_address(Symbol** a, Symbol** b) {
    if (a[0] < b[0]) {
      return -1;
    } else if (a[0] == b[0]) {
      return 0;
    } else {
      return 1;
    }
  }

public:
  SortedSymbolClosure() {
    SymbolTable::symbols_do(this);
    _symbols.sort(compare_symbols_by_address);
  }
  GrowableArray<Symbol*>* get_sorted_symbols() {
    return &_symbols;
  }
};

// ArchiveCompactor --
//
// This class is the central piece of shared archive compaction -- all metaspace data are
// initially allocated outside of the shared regions. ArchiveCompactor copies the
// metaspace data into their final location in the shared regions.

class ArchiveCompactor : AllStatic {
  static const int INITIAL_TABLE_SIZE = 8087;
  static const int MAX_TABLE_SIZE     = 1000000;

  static DumpAllocStats* _alloc_stats;
  static SortedSymbolClosure* _ssc;

  typedef KVHashtable<address, address, mtInternal> RelocationTable;
  static RelocationTable* _new_loc_table;

public:
  static void initialize() {
    _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats;
    _new_loc_table = new RelocationTable(INITIAL_TABLE_SIZE);
  }
  static DumpAllocStats* alloc_stats() {
    return _alloc_stats;
  }

  // Use this when you allocate space with MetaspaceShared::read_only_space_alloc()
  // outside of ArchiveCompactor::allocate(). These are usually for misc tables
  // that are allocated in the RO space.
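  // Usage sketch (cf. dump_read_only_tables()):
  //   {
  //     ArchiveCompactor::OtherROAllocMark mark;   // remembers _ro_region.top()
  //     ... allocate misc tables in the RO space ...
  //   }  // destructor records the bytes allocated since the mark as OtherType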
  class OtherROAllocMark {
    char* _oldtop;
  public:
    OtherROAllocMark() {
      _oldtop = _ro_region.top();
    }
    ~OtherROAllocMark() {
      char* newtop = _ro_region.top();
      ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
    }
  };

  static void allocate(MetaspaceClosure::Ref* ref, bool read_only) {
    address obj = ref->obj();
    int bytes = ref->size() * BytesPerWord;
    char* p;
    size_t alignment = BytesPerWord;
    char* oldtop;
    char* newtop;

    if (read_only) {
      oldtop = _ro_region.top();
      p = _ro_region.allocate(bytes, alignment);
      newtop = _ro_region.top();
    } else {
      oldtop = _rw_region.top();
      if (ref->msotype() == MetaspaceObj::ClassType) {
        // Save a pointer immediately in front of an InstanceKlass, so
        // we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo*
        // without building another hashtable. See RunTimeSharedClassInfo::get_for()
        // in systemDictionaryShared.cpp.
        Klass* klass = (Klass*)obj;
        if (klass->is_instance_klass()) {
          SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
          _rw_region.allocate(sizeof(address), BytesPerWord);
        }
      }
      p = _rw_region.allocate(bytes, alignment);
      newtop = _rw_region.top();
    }
    memcpy(p, obj, bytes);
    assert(_new_loc_table->lookup(obj) == NULL, "each object can be relocated at most once");
    _new_loc_table->add(obj, (address)p);
    log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
    if (_new_loc_table->maybe_grow(MAX_TABLE_SIZE)) {
      log_info(cds, hashtables)("Expanded _new_loc_table to %d", _new_loc_table->table_size());
    }
    _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
  }

  static address get_new_loc(MetaspaceClosure::Ref* ref) {
    address* pp = _new_loc_table->lookup(ref->obj());
    assert(pp != NULL, "must be");
    return *pp;
  }

private:
  // Makes a shallow copy of visited MetaspaceObj's
  class ShallowCopier: public UniqueMetaspaceClosure {
    bool _read_only;
  public:
    ShallowCopier(bool read_only) : _read_only(read_only) {}

    virtual bool do_unique_ref(Ref* ref, bool read_only) {
      if (read_only == _read_only) {
        allocate(ref, read_only);
      }
      return true; // recurse into ref.obj()
    }
  };

  // Relocate embedded pointers within a MetaspaceObj's shallow copy
  class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
  public:
    virtual bool do_unique_ref(Ref* ref, bool read_only) {
      address new_loc = get_new_loc(ref);
      RefRelocator refer;
      ref->metaspace_pointers_do_at(&refer, new_loc);
      return true; // recurse into ref.obj()
    }
  };

  // Relocate a reference to point to its shallow copy
  class RefRelocator: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        ref->update(get_new_loc(ref));
      }
      return false; // Do not recurse.
    }
  };

#ifdef ASSERT
  class IsRefInArchiveChecker: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        char* obj = (char*)ref->obj();
        assert(_ro_region.contains(obj) || _rw_region.contains(obj),
               "must be relocated to point to CDS archive");
      }
      return false; // Do not recurse.
    }
  };
#endif

public:
  static void copy_and_compact() {
    ResourceMark rm;
    SortedSymbolClosure the_ssc; // StackObj
    _ssc = &the_ssc;

    tty->print_cr("Scanning all metaspace objects ... ");
    {
      // allocate and shallow-copy RW objects, immediately following the MC region
      tty->print_cr("Allocating RW objects ... ");
      _mc_region.pack(&_rw_region);

      ResourceMark rm;
      ShallowCopier rw_copier(false);
      iterate_roots(&rw_copier);
    }
    {
      // allocate and shallow-copy RO objects, immediately following the RW region
      tty->print_cr("Allocating RO objects ... ");
      _rw_region.pack(&_ro_region);

      ResourceMark rm;
      ShallowCopier ro_copier(true);
      iterate_roots(&ro_copier);
    }
    {
      tty->print_cr("Relocating embedded pointers ... ");
      ResourceMark rm;
      ShallowCopyEmbeddedRefRelocator emb_reloc;
      iterate_roots(&emb_reloc);
    }
    {
      tty->print_cr("Relocating external roots ... ");
      ResourceMark rm;
      RefRelocator ext_reloc;
      iterate_roots(&ext_reloc);
    }

#ifdef ASSERT
    {
      tty->print_cr("Verifying external roots ... ");
      ResourceMark rm;
      IsRefInArchiveChecker checker;
      iterate_roots(&checker);
    }
#endif

    // cleanup
    _ssc = NULL;
  }

  // We must relocate the SystemDictionary::_well_known_klasses only after we have copied the
  // java objects in during dump_java_heap_objects(): during the object copy, we operate on
  // old objects which assert that their klass is the original klass.
  static void relocate_well_known_klasses() {
    {
      tty->print_cr("Relocating SystemDictionary::_well_known_klasses[] ... ");
      ResourceMark rm;
      RefRelocator ext_reloc;
      SystemDictionary::well_known_klasses_do(&ext_reloc);
    }
    // NOTE: after this point, we shouldn't have any globals that can reach the old
    // objects.

    // We cannot use any of the objects in the heap anymore (except for the
    // shared strings) because their headers no longer point to valid Klasses.
  }

  static void iterate_roots(MetaspaceClosure* it) {
    GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols();
    for (int i=0; i<symbols->length(); i++) {
      it->push(symbols->adr_at(i));
    }
    if (_global_klass_objects != NULL) {
      // Need to fix up the pointers
      for (int i = 0; i < _global_klass_objects->length(); i++) {
        // NOTE -- this requires that the vtable is NOT yet patched, or else we are hosed.
        it->push(_global_klass_objects->adr_at(i));
      }
    }
    FileMapInfo::metaspace_pointers_do(it);
    SystemDictionaryShared::dumptime_classes_do(it);
    Universe::metaspace_pointers_do(it);
    SymbolTable::metaspace_pointers_do(it);
    vmSymbols::metaspace_pointers_do(it);

    it->finish();
  }

  static Klass* get_relocated_klass(Klass* orig_klass) {
    assert(DumpSharedSpaces, "dump time only");
    address* pp = _new_loc_table->lookup((address)orig_klass);
    assert(pp != NULL, "must be");
    Klass* klass = (Klass*)(*pp);
    assert(klass->is_klass(), "must be");
    return klass;
  }
};

DumpAllocStats* ArchiveCompactor::_alloc_stats;
SortedSymbolClosure* ArchiveCompactor::_ssc;
ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table;

void VM_PopulateDumpSharedSpace::dump_symbols() {
  tty->print_cr("Dumping symbol table ...");

  NOT_PRODUCT(SymbolTable::verify());
  SymbolTable::write_to_archive();
}

char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
  ArchiveCompactor::OtherROAllocMark mark;

  tty->print("Removing java_mirror ... ");
  if (!HeapShared::is_heap_object_archiving_allowed()) {
    clear_basic_type_mirrors();
  }
  remove_java_mirror_in_classes();
  tty->print_cr("done. ");

  SystemDictionaryShared::write_to_archive();

  size_t vtptrs_bytes = _num_cloned_vtable_kinds * sizeof(intptr_t*);
  _cloned_cpp_vtptrs = (intptr_t**)_ro_region.allocate(vtptrs_bytes, sizeof(intptr_t*));

  // Write the other data to the output array.
  char* start = _ro_region.top();
  WriteClosure wc(&_ro_region);
  MetaspaceShared::serialize(&wc);

  // Write the bitmaps for patching the archive heap regions
  dump_archive_heap_oopmaps();

  return start;
}

void VM_PopulateDumpSharedSpace::doit() {
  // We should no longer allocate anything from the metaspace, so that:
  //
  // (1) Metaspace::allocate might trigger GC if we have run out of
  //     committed metaspace, but we can't GC because we're running
  //     in the VM thread.
  // (2) ArchiveCompactor needs to work with a stable set of MetaspaceObjs.
  Metaspace::freeze();
  DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);

  Thread* THREAD = VMThread::vm_thread();

  FileMapInfo::check_nonempty_dir_in_shared_path_table();

  NOT_PRODUCT(SystemDictionary::verify();)
  // The following guarantee is meant to ensure that no loader constraints
  // exist yet, since the constraints table is not shared. This becomes
  // more important now that we don't re-initialize vtables/itables for
  // shared classes at runtime, where constraints were previously created.
  guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
            "loader constraints are not saved");
  guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
            "placeholders are not saved");

  // At this point, many classes have been loaded.
  // Gather systemDictionary classes in a global array and do everything from
  // that array, so we don't have to walk the SystemDictionary again.
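  // check_excluded_classes() below marks the classes that cannot be archived;
  // CollectClassesClosure (defined earlier in this file) then skips the excluded ones.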
  SystemDictionaryShared::check_excluded_classes();
  _global_klass_objects = new GrowableArray<Klass*>(1000);
  CollectClassesClosure collect_classes;
  ClassLoaderDataGraph::loaded_classes_do(&collect_classes);

  tty->print_cr("Number of classes %d", _global_klass_objects->length());
  {
    int num_type_array = 0, num_obj_array = 0, num_inst = 0;
    for (int i = 0; i < _global_klass_objects->length(); i++) {
      Klass* k = _global_klass_objects->at(i);
      if (k->is_instance_klass()) {
        num_inst ++;
      } else if (k->is_objArray_klass()) {
        num_obj_array ++;
      } else {
        assert(k->is_typeArray_klass(), "sanity");
        num_type_array ++;
      }
    }
    tty->print_cr("    instance classes   = %5d", num_inst);
    tty->print_cr("    obj array classes  = %5d", num_obj_array);
    tty->print_cr("    type array classes = %5d", num_type_array);
  }

  // Ensure the ConstMethods won't be modified at run-time
  tty->print("Updating ConstMethods ... ");
  rewrite_nofast_bytecodes_and_calculate_fingerprints();
  tty->print_cr("done. ");

  // Remove all references outside the metadata
  tty->print("Removing unshareable information ... ");
  remove_unshareable_in_classes();
  tty->print_cr("done. ");

  ArchiveCompactor::initialize();
  ArchiveCompactor::copy_and_compact();

  dump_symbols();

  // Dump supported java heap objects
  _closed_archive_heap_regions = NULL;
  _open_archive_heap_regions = NULL;
  dump_java_heap_objects();

  ArchiveCompactor::relocate_well_known_klasses();

  char* serialized_data_start = dump_read_only_tables();
  _ro_region.pack(&_md_region);

  char* vtbl_list = _md_region.top();
  MetaspaceShared::allocate_cpp_vtable_clones();
  _md_region.pack();

  // The 4 core spaces are allocated consecutively mc->rw->ro->md, so their total size
  // is just the space between the two ends.
  size_t core_spaces_size = _md_region.end() - _mc_region.base();
  assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
         "should already be aligned");

  // During patching, some virtual methods may be called, so at this point
  // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
  MetaspaceShared::patch_cpp_vtable_pointers();

  // The vtable clones contain addresses of the current process.
  // We don't want to write these addresses into the archive.
  MetaspaceShared::zero_cpp_vtable_clones_for_writing();

  // Create and write the archive file that maps the shared spaces.

  FileMapInfo* mapinfo = new FileMapInfo(true);
  mapinfo->populate_header(os::vm_allocation_granularity());
  mapinfo->set_serialized_data_start(serialized_data_start);
  mapinfo->set_misc_data_patching_start(vtbl_list);
  mapinfo->set_i2i_entry_code_buffers(MetaspaceShared::i2i_entry_code_buffers(),
                                      MetaspaceShared::i2i_entry_code_buffers_size());
  mapinfo->set_core_spaces_size(core_spaces_size);
  mapinfo->open_for_write();

  // NOTE: mc contains the trampoline code for method entries, which is patched at
  // run time, so it needs to be read/write (and executable).
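  // Likewise, md is written read/write because the vtable clones it holds are
  // re-filled from libjvm at run time; only the ro region is mapped read-only.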
  write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
  write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
  write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
  write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);

  _total_closed_archive_region_size = mapinfo->write_archive_heap_regions(
                                        _closed_archive_heap_regions,
                                        _closed_archive_heap_oopmaps,
                                        MetaspaceShared::first_closed_archive_heap_region,
                                        MetaspaceShared::max_closed_archive_heap_region);
  _total_open_archive_region_size = mapinfo->write_archive_heap_regions(
                                        _open_archive_heap_regions,
                                        _open_archive_heap_oopmaps,
                                        MetaspaceShared::first_open_archive_heap_region,
                                        MetaspaceShared::max_open_archive_heap_region);

  mapinfo->set_header_crc(mapinfo->compute_header_crc());
  mapinfo->write_header();
  mapinfo->close();

  // Restore the vtable in case we invoke any virtual methods.
  MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);

  print_region_stats();

  if (log_is_enabled(Info, cds)) {
    ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
                                                 int(_mc_region.used()), int(_md_region.used()));
  }

  if (PrintSystemDictionaryAtExit) {
    SystemDictionary::print();
  }

  if (AllowArchivingWithJavaAgent) {
    warning("This archive was created with AllowArchivingWithJavaAgent. It should be used "
            "for testing purposes only and should not be used in a production environment");
  }

  // There may be other pending VM operations that operate on the InstanceKlasses,
  // which will fail because InstanceKlass::remove_unshareable_info()
  // has been called. Forget these operations and exit the VM directly.
  vm_direct_exit(0);
}

void VM_PopulateDumpSharedSpace::print_region_stats() {
  // Print statistics of all the regions
  const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
                                _mc_region.reserved() + _md_region.reserved() +
                                _total_closed_archive_region_size +
                                _total_open_archive_region_size;
  const size_t total_bytes = _ro_region.used() + _rw_region.used() +
                             _mc_region.used() + _md_region.used() +
                             _total_closed_archive_region_size +
                             _total_open_archive_region_size;
  const double total_u_perc = percent_of(total_bytes, total_reserved);

  _mc_region.print(total_reserved);
  _rw_region.print(total_reserved);
  _ro_region.print(total_reserved);
  _md_region.print(total_reserved);
  print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved);
  print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);

  tty->print_cr("total    : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
                total_bytes, total_reserved, total_u_perc);
}
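// Archived heap regions are written out in full, so each one is reported
// as 100% used below; only the core (mc/rw/ro/md) regions can have unused
// reserved space.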
void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                                                         const char *name, const size_t total_size) {
  int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
  for (int i = 0; i < arr_len; i++) {
    char* start = (char*)heap_mem->at(i).start();
    size_t size = heap_mem->at(i).byte_size();
    char* top = start + size;
    tty->print_cr("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
                  name, i, size, size/double(total_size)*100.0, size, p2i(start));
  }
}

// Update a Java object to point its Klass* to the new location after
// the shared archive has been compacted.
void MetaspaceShared::relocate_klass_ptr(oop o) {
  assert(DumpSharedSpaces, "sanity");
  Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
  o->set_klass(k);
}

Klass* MetaspaceShared::get_relocated_klass(Klass *k) {
  assert(DumpSharedSpaces, "sanity");
  return ArchiveCompactor::get_relocated_klass(k);
}

class LinkSharedClassesClosure : public KlassClosure {
  Thread* THREAD;
  bool    _made_progress;
 public:
  LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {}

  void reset()               { _made_progress = false; }
  bool made_progress() const { return _made_progress; }

  void do_klass(Klass* k) {
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      // Link the class to cause the bytecodes to be rewritten and the
      // cpcache to be created. Class verification is done according
      // to -Xverify setting.
      _made_progress |= MetaspaceShared::try_link_class(ik, THREAD);
      guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");

      ik->constants()->resolve_class_constants(THREAD);
    }
  }
};

class CheckSharedClassesClosure : public KlassClosure {
  bool _made_progress;
 public:
  CheckSharedClassesClosure() : _made_progress(false) {}

  void reset()               { _made_progress = false; }
  bool made_progress() const { return _made_progress; }
  void do_klass(Klass* k) {
    if (k->is_instance_klass() && InstanceKlass::cast(k)->check_sharing_error_state()) {
      _made_progress = true;
    }
  }
};

void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) {
  // We need to iterate because verification may cause additional classes
  // to be loaded.
  LinkSharedClassesClosure link_closure(THREAD);
  do {
    link_closure.reset();
    ClassLoaderDataGraph::unlocked_loaded_classes_do(&link_closure);
    guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
  } while (link_closure.made_progress());

  if (_has_error_classes) {
    // Mark all classes whose super class or interfaces failed verification.
    CheckSharedClassesClosure check_closure;
    do {
      // Not completely sure if we need to do this iteratively. Anyway,
      // we should come here only if there are unverifiable classes, which
      // shouldn't happen in normal cases. So better safe than sorry.
      check_closure.reset();
      ClassLoaderDataGraph::unlocked_loaded_classes_do(&check_closure);
    } while (check_closure.made_progress());
  }
}

void MetaspaceShared::prepare_for_dumping() {
  Arguments::check_unsupported_dumping_properties();
  ClassLoader::initialize_shared_path();
}

// Preload classes from a list, populate the shared spaces and dump to a
// file.
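//
// When -XX:SharedClassListFile is not given, the list is derived from the
// JVM's own path; e.g., assuming a typical Linux layout (a sketch only, the
// exact layout varies by platform):
//   <jdk>/lib/server/libjvm.so  ->  <jdk>/lib/classlist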
void MetaspaceShared::preload_and_dump(TRAPS) {
  { TraceTime timer("Dump Shared Spaces", TRACETIME_LOG(Info, startuptime));
    ResourceMark rm;
    char class_list_path_str[JVM_MAXPATHLEN];
    // Preload classes to be shared.
    const char* class_list_path;
    if (SharedClassListFile == NULL) {
      // Construct the path to the class list (in lib).
      // Walk up two directories from the location of the VM and
      // optionally tack on "lib" (depending on platform)
      os::jvm_path(class_list_path_str, sizeof(class_list_path_str));
      for (int i = 0; i < 3; i++) {
        char *end = strrchr(class_list_path_str, *os::file_separator());
        if (end != NULL) *end = '\0';
      }
      int class_list_path_len = (int)strlen(class_list_path_str);
      if (class_list_path_len >= 3) {
        if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
          if (class_list_path_len < JVM_MAXPATHLEN - 4) {
            jio_snprintf(class_list_path_str + class_list_path_len,
                         sizeof(class_list_path_str) - class_list_path_len,
                         "%slib", os::file_separator());
            class_list_path_len += 4;
          }
        }
      }
      if (class_list_path_len < JVM_MAXPATHLEN - 10) {
        jio_snprintf(class_list_path_str + class_list_path_len,
                     sizeof(class_list_path_str) - class_list_path_len,
                     "%sclasslist", os::file_separator());
      }
      class_list_path = class_list_path_str;
    } else {
      class_list_path = SharedClassListFile;
    }

    tty->print_cr("Loading classes to share ...");
    _has_error_classes = false;
    int class_count = preload_classes(class_list_path, THREAD);
    if (ExtraSharedClassListFile) {
      class_count += preload_classes(ExtraSharedClassListFile, THREAD);
    }
    tty->print_cr("Loading classes to share: done.");

    log_info(cds)("Shared spaces: preloaded %d classes", class_count);

    if (SharedArchiveConfigFile) {
      tty->print_cr("Reading extra data from %s ...", SharedArchiveConfigFile);
      read_extra_data(SharedArchiveConfigFile, THREAD);
      tty->print_cr("Reading extra data: done.");
    }

    HeapShared::init_subgraph_entry_fields(THREAD);

    // Rewrite and link classes
    tty->print_cr("Rewriting and linking classes ...");

    // Link any classes which got missed. This would happen if we have loaded classes that
    // were not explicitly specified in the classlist. E.g., if an interface implemented by class K
    // fails verification, all other interfaces that were not specified in the classlist but
    // are implemented by K are not verified.
    link_and_cleanup_shared_classes(CATCH);
    tty->print_cr("Rewriting and linking classes: done");

    if (HeapShared::is_heap_object_archiving_allowed()) {
      // Avoid fragmentation while archiving heap objects.
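      // The full collection below, with soft references cleared, frees up as
      // much contiguous heap as possible before HeapShared copies the objects
      // to be archived into their own regions (see
      // HeapShared::archive_java_heap_objects).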
      Universe::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(true);
      Universe::heap()->collect(GCCause::_archive_time_gc);
      Universe::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(false);
    }

    VM_PopulateDumpSharedSpace op;
    VMThread::execute(&op);
  }
}


int MetaspaceShared::preload_classes(const char* class_list_path, TRAPS) {
  ClassListParser parser(class_list_path);
  int class_count = 0;

  while (parser.parse_one_line()) {
    Klass* klass = parser.load_current_class(THREAD);
    if (HAS_PENDING_EXCEPTION) {
      if (klass == NULL &&
          (PENDING_EXCEPTION->klass()->name() == vmSymbols::java_lang_ClassNotFoundException())) {
        // Print a warning only when the pending exception is a ClassNotFoundException.
        log_warning(cds)("Preload Warning: Cannot find %s", parser.current_class_name());
      }
      CLEAR_PENDING_EXCEPTION;
    }
    if (klass != NULL) {
      if (log_is_enabled(Trace, cds)) {
        ResourceMark rm;
        log_trace(cds)("Shared spaces preloaded: %s", klass->external_name());
      }

      if (klass->is_instance_klass()) {
        InstanceKlass* ik = InstanceKlass::cast(klass);

        // Link the class to cause the bytecodes to be rewritten and the
        // cpcache to be created. The linking is done as soon as classes
        // are loaded, so that the related data structures (klass and
        // cpCache) are located close together.
        try_link_class(ik, THREAD);
        guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
      }

      class_count++;
    }
  }

  return class_count;
}

// Returns true if the class's status has changed.
bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
  assert(DumpSharedSpaces, "should only be called during dumping");
  if (ik->init_state() < InstanceKlass::linked) {
    bool saved = BytecodeVerificationLocal;
    if (ik->loader_type() == 0 && ik->class_loader() == NULL) {
      // The verification decision is based on BytecodeVerificationRemote
      // for non-system classes. Since we are using the NULL classloader
      // to load non-system classes for customized class loaders during dumping,
      // we need to temporarily change BytecodeVerificationLocal to be the same as
      // BytecodeVerificationRemote. Note this can cause the parent system
      // classes to be verified as well. The extra overhead is acceptable during
      // dumping.
      BytecodeVerificationLocal = BytecodeVerificationRemote;
    }
    ik->link_class(THREAD);
    if (HAS_PENDING_EXCEPTION) {
      ResourceMark rm;
      log_warning(cds)("Preload Warning: Verification failed for %s",
                       ik->external_name());
      CLEAR_PENDING_EXCEPTION;
      ik->set_in_error_state();
      _has_error_classes = true;
    }
    BytecodeVerificationLocal = saved;
    return true;
  } else {
    return false;
  }
}

#if INCLUDE_CDS_JAVA_HEAP
void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
  // The closed and open archive heap spaces have at most two regions each.
  // See FileMapInfo::write_archive_heap_regions() for details.
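  // Objects in the closed regions are never modified after the archive is
  // mapped, so those regions can remain shared read-only; objects in the
  // open regions may be written to at run time.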
  _closed_archive_heap_regions = new GrowableArray<MemRegion>(2);
  _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
  HeapShared::archive_java_heap_objects(_closed_archive_heap_regions,
                                        _open_archive_heap_regions);
  ArchiveCompactor::OtherROAllocMark mark;
  HeapShared::write_subgraph_info_table();
}

void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps() {
  if (HeapShared::is_heap_object_archiving_allowed()) {
    _closed_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
    dump_archive_heap_oopmaps(_closed_archive_heap_regions, _closed_archive_heap_oopmaps);

    _open_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
    dump_archive_heap_oopmaps(_open_archive_heap_regions, _open_archive_heap_oopmaps);
  }
}

void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                                           GrowableArray<ArchiveHeapOopmapInfo>* oopmaps) {
  for (int i = 0; i < regions->length(); i++) {
    ResourceBitMap oopmap = HeapShared::calculate_oopmap(regions->at(i));
    size_t size_in_bits = oopmap.size();
    size_t size_in_bytes = oopmap.size_in_bytes();
    uintptr_t* buffer = (uintptr_t*)_ro_region.allocate(size_in_bytes, sizeof(intptr_t));
    oopmap.write_to(buffer, size_in_bytes);
    log_info(cds)("Oopmap = " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for heap region "
                  INTPTR_FORMAT " (" SIZE_FORMAT_W(8) " bytes)",
                  p2i(buffer), size_in_bytes,
                  p2i(regions->at(i).start()), regions->at(i).byte_size());

    ArchiveHeapOopmapInfo info;
    info._oopmap = (address)buffer;
    info._oopmap_size_in_bits = size_in_bits;
    oopmaps->append(info);
  }
}
#endif // INCLUDE_CDS_JAVA_HEAP

void ReadClosure::do_ptr(void** p) {
  assert(*p == NULL, "initializing previously initialized pointer.");
  intptr_t obj = nextPtr();
  assert(obj >= 0 || obj < -100,
         "hit tag while initializing ptrs.");
  *p = (void*)obj;
}

void ReadClosure::do_u4(u4* p) {
  intptr_t obj = nextPtr();
  *p = (u4)(uintx(obj));
}

void ReadClosure::do_bool(bool* p) {
  intptr_t obj = nextPtr();
  *p = (bool)(uintx(obj));
}

void ReadClosure::do_tag(int tag) {
  int old_tag;
  old_tag = (int)(intptr_t)nextPtr();
  assert(tag == old_tag, "old tag doesn't match");
  FileMapInfo::assert_mark(tag == old_tag);
}

void ReadClosure::do_oop(oop *p) {
  narrowOop o = (narrowOop)nextPtr();
  if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) {
    *p = NULL;
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archived heap object is not allowed");
    assert(HeapShared::open_archive_heap_region_mapped(),
           "Open archive heap region is not mapped");
    *p = HeapShared::decode_from_archive(o);
  }
}

void ReadClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    *(intptr_t*)start = nextPtr();
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}

void MetaspaceShared::set_shared_metaspace_range(void* base, void* top) {
  _shared_metaspace_static_top = top;
  MetaspaceObj::set_shared_metaspace_range(base, top);
}
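// When a dynamic archive is mapped on top of the static one, the shared
// metaspace forms a single contiguous range:
//   [base, _shared_metaspace_static_top)  : the static archive
//   [_shared_metaspace_static_top, top)   : the dynamic archive
// This is the layout assumed by is_shared_dynamic() below.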
// Return true if the given address is within shared region 'idx' (see the
// MetaspaceShared region enum).
bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {
  return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx);
}

bool MetaspaceShared::is_in_trampoline_frame(address addr) {
  return UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc);
}

bool MetaspaceShared::is_shared_dynamic(void* p) {
  return (p < MetaspaceObj::shared_metaspace_top()) &&
         (p >= _shared_metaspace_static_top);
}

// Map the shared spaces at the requested addresses and return whether this
// succeeded.
bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
  size_t image_alignment = mapinfo->alignment();

#ifndef _WINDOWS
  // Map in the shared memory and then map the regions on top of it.
  // On Windows, don't map the memory here because it will cause the
  // mappings of the regions to fail.
  ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
  if (!shared_rs.is_reserved()) return false;
#endif

  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  // Map each shared region
  int regions[] = {mc, rw, ro, md};
  size_t len = sizeof(regions)/sizeof(int);
  char* saved_base[] = {NULL, NULL, NULL, NULL};
  char* top = mapinfo->map_regions(regions, saved_base, len);

  if (top != NULL &&
      (image_alignment == (size_t)os::vm_allocation_granularity()) &&
      mapinfo->validate_shared_path_table()) {
    // Success -- set up MetaspaceObj::_shared_metaspace_{base,top} for
    // fast checking in MetaspaceShared::is_in_shared_metaspace() and
    // MetaspaceObj::is_shared().
    _core_spaces_size = mapinfo->core_spaces_size();
    set_shared_metaspace_range((void*)saved_base[0], (void*)top);
    return true;
  } else {
    mapinfo->unmap_regions(regions, saved_base, len);
#ifndef _WINDOWS
    // Release the entire mapped region
    shared_rs.release();
#endif
    // If -Xshare:on is specified, print out the error message and exit the VM;
    // otherwise, set UseSharedSpaces to false and continue.
    if (RequireSharedSpaces || PrintSharedArchiveAndExit) {
      vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
    } else {
      FLAG_SET_DEFAULT(UseSharedSpaces, false);
    }
    return false;
  }
}

// Read the miscellaneous data from the shared file, and
// serialize it out to its various destinations.

void MetaspaceShared::initialize_shared_spaces() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();
  _i2i_entry_code_buffers = mapinfo->i2i_entry_code_buffers();
  _i2i_entry_code_buffers_size = mapinfo->i2i_entry_code_buffers_size();
  // _core_spaces_size is loaded from the shared archive immediately after mapping
  assert(_core_spaces_size == mapinfo->core_spaces_size(), "sanity");
  char* buffer = mapinfo->misc_data_patching_start();
  clone_cpp_vtables((intptr_t*)buffer);

  // Verify various attributes of the archive, plus initialize the
  // shared string/symbol tables.
  buffer = mapinfo->serialized_data_start();
  intptr_t* array = (intptr_t*)buffer;
  ReadClosure rc(&array);
  serialize(&rc);
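  // serialize() replays, through ReadClosure, the same sequence of
  // ptr/u4/bool/tag/region records that WriteClosure produced at dump time;
  // ReadClosure::do_tag() asserts that the two streams stay in sync.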
  // Initialize the run-time symbol table.
  SymbolTable::create_table();

  mapinfo->patch_archived_heap_embedded_pointers();

  // Close the mapinfo file
  mapinfo->close();

  if (PrintSharedArchiveAndExit) {
    if (PrintSharedDictionary) {
      tty->print_cr("\nShared classes:\n");
      SystemDictionaryShared::print_on(tty);
    }
    if (_archive_loading_failed) {
      tty->print_cr("archive is invalid");
      vm_exit(1);
    } else {
      tty->print_cr("archive is valid");
      vm_exit(0);
    }
  }
}

// JVM/TI RedefineClasses() support:
bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (UseSharedSpaces) {
    // remap the shared readonly space to shared readwrite, private
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (!mapinfo->remap_shared_readonly_as_readwrite()) {
      return false;
    }
    if (FileMapInfo::dynamic_info() != NULL) {
      mapinfo = FileMapInfo::dynamic_info();
      if (!mapinfo->remap_shared_readonly_as_readwrite()) {
        return false;
      }
    }
    _remapped_readwrite = true;
  }
  return true;
}

void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
  // This is highly unlikely to happen on 64-bit because we have reserved a 4GB space.
  // On 32-bit we reserve only 256MB, so you could run out of space with 100,000
  // classes or so.
  _mc_region.print_out_of_space_msg(name, needed_bytes);
  _rw_region.print_out_of_space_msg(name, needed_bytes);
  _ro_region.print_out_of_space_msg(name, needed_bytes);
  _md_region.print_out_of_space_msg(name, needed_bytes);

  vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
                                "Please reduce the number of shared classes.");
}