/*
 * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/classListParser.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/loaderConstraints.hpp"
#include "classfile/placeholders.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "code/codeCache.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodes.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/signature.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/defaultStream.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

ReservedSpace MetaspaceShared::_shared_rs;
VirtualSpace MetaspaceShared::_shared_vs;
MetaspaceSharedStats MetaspaceShared::_stats;
bool MetaspaceShared::_has_error_classes;
bool MetaspaceShared::_archive_loading_failed = false;
bool MetaspaceShared::_remapped_readwrite = false;
address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
size_t MetaspaceShared::_core_spaces_size = 0;

// The CDS archive is divided into the following regions:
//     mc  - misc code (the method entry trampolines)
//     rw  - read-write metadata
//     ro  - read-only metadata and read-only tables
//     md  - misc data (the c++ vtables)
//     od  - optional data (original class files)
//
//     ca0 - closed archive heap space #0
//     ca1 - closed archive heap space #1 (may be empty)
//     oa0 - open archive heap space #0
//     oa1 - open archive heap space #1 (may be empty)
//
// The mc, rw, ro, md and od regions are linearly allocated, starting from
// SharedBaseAddress, in the order of mc->rw->ro->md->od. The sizes of these 5 regions
// are page-aligned, and there's no gap between any consecutive regions.
//
// These 5 regions are populated in the following steps:
// [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
//     temporarily allocated outside of the shared regions. Only the method entry
//     trampolines are written into the mc region.
// [2] ArchiveCompactor copies RW metadata into the rw region.
// [3] ArchiveCompactor copies RO metadata into the ro region.
// [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
//     are copied into the ro region as read-only tables.
// [5] C++ vtables are copied into the md region.
// [6] Original class files are copied into the od region.
//
// The ca0/ca1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
// Their layout is independent of the other 5 regions.

class DumpRegion {
private:
  const char* _name;
  char* _base;
  char* _top;
  char* _end;
  bool _is_packed;

  char* expand_top_to(char* newtop) {
    assert(is_allocatable(), "must be initialized and not packed");
    assert(newtop >= _top, "must not grow backwards");
    if (newtop > _end) {
      MetaspaceShared::report_out_of_space(_name, newtop - _top);
      ShouldNotReachHere();
    }
    uintx delta = MetaspaceShared::object_delta_uintx(newtop);
    if (delta > MAX_SHARED_DELTA) {
      // This is just a sanity check and should not appear in any real world usage. This
      // happens only if you allocate more than 2GB of shared objects and would require
      // millions of shared classes.
      vm_exit_during_initialization("Out of memory in the CDS archive",
                                    "Please reduce the number of shared classes.");
    }

    MetaspaceShared::commit_shared_space_to(newtop);
    _top = newtop;
    return _top;
  }

public:
  DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}

  char* allocate(size_t num_bytes, size_t alignment=BytesPerWord) {
    char* p = (char*)align_up(_top, alignment);
    char* newtop = p + align_up(num_bytes, alignment);
    expand_top_to(newtop);
    memset(p, 0, newtop - p);
    return p;
  }

  void append_intptr_t(intptr_t n) {
    assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
    intptr_t *p = (intptr_t*)_top;
    char* newtop = _top + sizeof(intptr_t);
    expand_top_to(newtop);
    *p = n;
  }

  char* base()      const { return _base;        }
  char* top()       const { return _top;         }
  char* end()       const { return _end;         }
  size_t reserved() const { return _end - _base; }
  size_t used()     const { return _top - _base; }
  bool is_packed()  const { return _is_packed;   }
  bool is_allocatable() const {
    return !is_packed() && _base != NULL;
  }

  void print(size_t total_bytes) const {
    tty->print_cr("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
                  _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()), p2i(_base));
  }
  void print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
    tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
               _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
    if (strcmp(_name, failing_region) == 0) {
      tty->print_cr(" required = %d", int(needed_bytes));
    } else {
      tty->cr();
    }
  }

  void init(const ReservedSpace* rs) {
    _base = _top = rs->base();
    _end = rs->end();
  }
  void init(char* b, char* t, char* e) {
    _base = b;
    _top = t;
    _end = e;
  }

  void pack(DumpRegion* next = NULL) {
    assert(!is_packed(), "sanity");
    _end = (char*)align_up(_top, Metaspace::reserve_alignment());
    _is_packed = true;
    if (next != NULL) {
      next->_base = next->_top = this->_end;
      next->_end = MetaspaceShared::shared_rs()->end();
    }
  }
  bool contains(char* p) {
    return base() <= p && p < top();
  }
};


DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;

char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
  return _mc_region.allocate(num_bytes);
}

char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
  return _ro_region.allocate(num_bytes);
}

char* MetaspaceShared::read_only_space_top() {
  return _ro_region.top();
}

void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
  assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");

  // If using shared space, open the file that contains the shared space
  // and map in the memory before initializing the rest of metaspace (so
  // the addresses don't conflict)
  address cds_address = NULL;
  FileMapInfo* mapinfo = new FileMapInfo();

  // Open the shared archive file, read and validate the header. If
  // initialization fails, shared spaces [UseSharedSpaces] are
  // disabled and the file is closed.
  // Map in spaces now also
  if (mapinfo->initialize() && map_shared_spaces(mapinfo)) {
    size_t cds_total = core_spaces_size();
    cds_address = (address)mapinfo->region_addr(0);
#ifdef _LP64
    if (Metaspace::using_class_space()) {
      char* cds_end = (char*)(cds_address + cds_total);
      cds_end = (char *)align_up(cds_end, Metaspace::reserve_alignment());
      // If UseCompressedClassPointers is set then allocate the metaspace area
      // above the heap and above the CDS area (if it exists).
      Metaspace::allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
      // map_heap_regions() compares the current narrow oop and klass encodings
      // with the archived ones, so it must be done after all encodings are determined.
      mapinfo->map_heap_regions();
    }
    Universe::set_narrow_klass_range(CompressedClassSpaceSize);
#endif // _LP64
  } else {
    assert(!mapinfo->is_open() && !UseSharedSpaces,
           "archive file not closed or shared spaces not disabled.");
  }
}

void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
  assert(DumpSharedSpaces, "should be called for dump time only");
  const size_t reserve_alignment = Metaspace::reserve_alignment();
  bool large_pages = false; // No large pages when dumping the CDS archive.
  char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);

#ifdef _LP64
  // On 64-bit VM, the heap and class space layout will be the same as if
  // you're running in -Xshare:on mode:
  //
  //                         +-- SharedBaseAddress (default = 0x800000000)
  //                         v
  // +-..---------+---------+ ... +----+----+----+----+----+---------------+
  // |    Heap    | Archive |     | MC | RW | RO | MD | OD | class space   |
  // +-..---------+---------+ ... +----+----+----+----+----+---------------+
  // |<--   MaxHeapSize  -->|     |<-- UnscaledClassSpaceMax = 4GB ------->|
  //
  const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
  const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
#else
  // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
  size_t cds_total = align_down(256*M, reserve_alignment);
#endif

  // First try to reserve the space at the specified SharedBaseAddress.
  _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
  if (_shared_rs.is_reserved()) {
    assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
  } else {
    // Get a mmap region anywhere if the SharedBaseAddress fails.
    _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages);
  }
  if (!_shared_rs.is_reserved()) {
    vm_exit_during_initialization("Unable to reserve memory for shared space",
                                  err_msg(SIZE_FORMAT " bytes.", cds_total));
  }

#ifdef _LP64
  // During dump time, we allocate 4GB (UnscaledClassSpaceMax) of space and split it up:
  // + The upper 1 GB is used as the "temporary compressed class space" -- preload_classes()
  //   will store Klasses into this space.
  // + The lower 3 GB is used for the archive -- when preload_classes() is done,
  //   ArchiveCompactor will copy the class metadata into this space, first the RW parts,
  //   then the RO parts.
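  //
  // A worked example (a sketch assuming the default 4GB reservation and that
  // reserve_alignment divides it evenly): max_archive_size below becomes
  // align_down(4GB * 3 / 4, reserve_alignment) = 3GB, so first_part(3GB) keeps the
  // lower 3GB for the archive regions, and last_part(3GB) hands the upper ~1GB to
  // Metaspace::initialize_class_space() as the temporary class space.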

  assert(UseCompressedOops && UseCompressedClassPointers,
         "UseCompressedOops and UseCompressedClassPointers must be set");

  size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment);
  ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
  CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment);
  _shared_rs = _shared_rs.first_part(max_archive_size);

  // Set up compress class pointers.
  Universe::set_narrow_klass_base((address)_shared_rs.base());
  // Set narrow_klass_shift to be LogKlassAlignmentInBytes. This is consistent
  // with AOT.
  Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
  // Set the range of klass addresses to 4GB.
  Universe::set_narrow_klass_range(cds_total);

  Metaspace::initialize_class_space(tmp_class_space);
  log_info(cds)("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());

  log_info(cds)("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                CompressedClassSpaceSize, p2i(tmp_class_space.base()));
#endif

  // Start with 0 committed bytes. The memory will be committed as needed by
  // MetaspaceShared::commit_shared_space_to().
  if (!_shared_vs.initialize(_shared_rs, 0)) {
    vm_exit_during_initialization("Unable to allocate memory for shared space");
  }

  _mc_region.init(&_shared_rs);
  SharedBaseAddress = (size_t)_shared_rs.base();
  tty->print_cr("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                _shared_rs.size(), p2i(_shared_rs.base()));
}

// Called by universe_post_init()
void MetaspaceShared::post_initialize(TRAPS) {
  if (UseSharedSpaces) {
    int size = FileMapInfo::get_number_of_shared_paths();
    if (size > 0) {
      SystemDictionaryShared::allocate_shared_data_arrays(size, THREAD);
      FileMapHeader* header = FileMapInfo::current_info()->header();
      ClassLoaderExt::init_paths_start_index(header->_app_class_paths_start_index);
      ClassLoaderExt::init_app_module_paths_start_index(header->_app_module_paths_start_index);
    }
  }
}

void MetaspaceShared::read_extra_data(const char* filename, TRAPS) {
  HashtableTextDump reader(filename);
  reader.check_version("VERSION: 1.0");

  while (reader.remain() > 0) {
    int utf8_length;
    int prefix_type = reader.scan_prefix(&utf8_length);
    ResourceMark rm(THREAD);
    // +1 for the terminating '\0' stored below in the string case.
    char* utf8_buffer = NEW_RESOURCE_ARRAY(char, utf8_length + 1);
    reader.get_utf8(utf8_buffer, utf8_length);

    if (prefix_type == HashtableTextDump::SymbolPrefix) {
      SymbolTable::new_symbol(utf8_buffer, utf8_length, THREAD);
    } else {
      assert(prefix_type == HashtableTextDump::StringPrefix, "Sanity");
      utf8_buffer[utf8_length] = '\0';
      oop s = StringTable::intern(utf8_buffer, THREAD);
    }
  }
}

void MetaspaceShared::commit_shared_space_to(char* newtop) {
  assert(DumpSharedSpaces, "dump-time only");
  char* base = _shared_rs.base();
  size_t need_committed_size = newtop - base;
  size_t has_committed_size = _shared_vs.committed_size();
  if (need_committed_size < has_committed_size) {
    return;
  }

  size_t min_bytes = need_committed_size - has_committed_size;
  size_t preferred_bytes = 1 * M;
  size_t uncommitted = _shared_vs.reserved_size() - has_committed_size;

  size_t commit = MAX2(min_bytes, preferred_bytes);
  assert(commit <= uncommitted, "sanity");

  bool result = _shared_vs.expand_by(commit, false);
  if (!result) {
    vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
                                          need_committed_size));
  }

  log_info(cds)("Expanding shared spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
                commit, _shared_vs.actual_committed_size(), _shared_vs.high());
}

// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.

void MetaspaceShared::serialize(SerializeClosure* soc) {
  int tag = 0;
  soc->do_tag(--tag);

  // Verify the sizes of various metadata in the system.
  soc->do_tag(sizeof(Method));
  soc->do_tag(sizeof(ConstMethod));
  soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(ConstantPool));
  soc->do_tag(sizeof(ConstantPoolCache));
  soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
  soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(Symbol));

  // Dump/restore miscellaneous metadata.
  Universe::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore references to commonly used names and signatures.
  vmSymbols::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore the symbol/string/subgraph_info tables
  SymbolTable::serialize_shared_table_header(soc);
  StringTable::serialize_shared_table_header(soc);
  HeapShared::serialize_subgraph_info_table_header(soc);
  SystemDictionaryShared::serialize_dictionary_headers(soc);

  JavaClasses::serialize_offsets(soc);
  InstanceMirrorKlass::serialize_offsets(soc);
  soc->do_tag(--tag);

  soc->do_tag(666);
}

address MetaspaceShared::cds_i2i_entry_code_buffers(size_t total_size) {
  if (DumpSharedSpaces) {
    if (_cds_i2i_entry_code_buffers == NULL) {
      _cds_i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
      _cds_i2i_entry_code_buffers_size = total_size;
    }
  } else if (UseSharedSpaces) {
    assert(_cds_i2i_entry_code_buffers != NULL, "must already be initialized");
  } else {
    return NULL;
  }

  assert(_cds_i2i_entry_code_buffers_size == total_size, "must not change");
  return _cds_i2i_entry_code_buffers;
}

// CDS code for dumping shared archive.

// Global object for holding classes that have been loaded. Since this
// is run at a safepoint just before exit, this is the entire set of classes.
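// The array is filled by CollectClassesClosure (below), driven by
// ClassLoaderDataGraph::loaded_classes_do() in VM_PopulateDumpSharedSpace::doit().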
static GrowableArray<Klass*>* _global_klass_objects;

GrowableArray<Klass*>* MetaspaceShared::collected_klasses() {
  return _global_klass_objects;
}

static void collect_array_classes(Klass* k) {
  _global_klass_objects->append_if_missing(k);
  if (k->is_array_klass()) {
    // Add in the array classes too
    ArrayKlass* ak = ArrayKlass::cast(k);
    Klass* h = ak->higher_dimension();
    if (h != NULL) {
      h->array_klasses_do(collect_array_classes);
    }
  }
}

class CollectClassesClosure : public KlassClosure {
  void do_klass(Klass* k) {
    if (k->is_instance_klass() &&
        SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(k))) {
      // Don't add to the _global_klass_objects
    } else {
      _global_klass_objects->append_if_missing(k);
    }
    if (k->is_array_klass()) {
      // Add in the array classes too
      ArrayKlass* ak = ArrayKlass::cast(k);
      Klass* h = ak->higher_dimension();
      if (h != NULL) {
        h->array_klasses_do(collect_array_classes);
      }
    }
  }
};

static void remove_unshareable_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
      // on their array classes.
      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
      k->remove_unshareable_info();
    }
  }
}

static void remove_java_mirror_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_java_mirror
      // on their array classes.
      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
      k->remove_java_mirror();
    }
  }
}

static void clear_basic_type_mirrors() {
  assert(!HeapShared::is_heap_object_archiving_allowed(), "Sanity");
  Universe::set_int_mirror(NULL);
  Universe::set_float_mirror(NULL);
  Universe::set_double_mirror(NULL);
  Universe::set_byte_mirror(NULL);
  Universe::set_bool_mirror(NULL);
  Universe::set_char_mirror(NULL);
  Universe::set_long_mirror(NULL);
  Universe::set_short_mirror(NULL);
  Universe::set_void_mirror(NULL);
}

static void rewrite_nofast_bytecode(Method* method) {
  BytecodeStream bcs(method);
  while (!bcs.is_last_bytecode()) {
    Bytecodes::Code opcode = bcs.next();
    switch (opcode) {
    case Bytecodes::_getfield:      *bcs.bcp() = Bytecodes::_nofast_getfield;      break;
    case Bytecodes::_putfield:      *bcs.bcp() = Bytecodes::_nofast_putfield;      break;
    case Bytecodes::_aload_0:       *bcs.bcp() = Bytecodes::_nofast_aload_0;       break;
    case Bytecodes::_iload: {
      if (!bcs.is_wide()) {
        *bcs.bcp() = Bytecodes::_nofast_iload;
      }
      break;
    }
    default: break;
    }
  }
}

// Walk all methods in the class list to ensure that they won't be modified at
// run time. This includes:
// [1] Rewrite all bytecodes as needed, so that the ConstMethod* will not be modified
//     at run time by RewriteBytecodes/RewriteFrequentPairs
// [2] Assign a fingerprint, so one doesn't need to be assigned at run-time.
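//
// For example (a sketch of the intent, not code taken from this file): a getter body
//     aload_0; getfield #2; areturn
// is rewritten at dump time by rewrite_nofast_bytecode() above into
//     nofast_aload_0; nofast_getfield #2; areturn
// so the interpreter never rewrites the shared ConstMethod in place at run time.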
static void rewrite_nofast_bytecodes_and_calculate_fingerprints() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      for (int i = 0; i < ik->methods()->length(); i++) {
        Method* m = ik->methods()->at(i);
        rewrite_nofast_bytecode(m);
        Fingerprinter fp(m);
        // The side effect of this call sets method's fingerprint field.
        fp.fingerprint();
      }
    }
  }
}

static void relocate_cached_class_file() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      JvmtiCachedClassFileData* p = ik->get_archived_class_data();
      if (p != NULL) {
        int size = offset_of(JvmtiCachedClassFileData, data) + p->length;
        JvmtiCachedClassFileData* q = (JvmtiCachedClassFileData*)_od_region.allocate(size);
        q->length = p->length;
        memcpy(q->data, p->data, p->length);
        ik->set_archived_class_data(q);
      }
    }
  }
}

// Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
// (In GCC this is the field <Type>::_vptr, i.e., first word in the object.)
//
// Addresses of the vtables and the methods may be different across JVM runs,
// if libjvm.so is dynamically loaded at a different base address.
//
// To ensure that the Metadata objects in the CDS archive always have the correct vtable:
//
// + at dump time:  we redirect the _vptr to point to our own vtables inside
//                  the CDS image
// + at run time:   we clone the actual contents of the vtables from libjvm.so
//                  into our own tables.

// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
#define CPP_VTABLE_PATCH_TYPES_DO(f) \
  f(ConstantPool) \
  f(InstanceKlass) \
  f(InstanceClassLoaderKlass) \
  f(InstanceMirrorKlass) \
  f(InstanceRefKlass) \
  f(Method) \
  f(ObjArrayKlass) \
  f(TypeArrayKlass)

class CppVtableInfo {
  intptr_t _vtable_size;
  intptr_t _cloned_vtable[1];
public:
  static int num_slots(int vtable_size) {
    return 1 + vtable_size; // Need to add the space occupied by _vtable_size.
  }
  int vtable_size()           { return int(uintx(_vtable_size)); }
  void set_vtable_size(int n) { _vtable_size = intptr_t(n); }
  intptr_t* cloned_vtable()   { return &_cloned_vtable[0]; }
  void zero()                 { memset(_cloned_vtable, 0, sizeof(intptr_t) * vtable_size()); }
  // Returns the address of the next CppVtableInfo that can be placed immediately after this CppVtableInfo
  static size_t byte_size(int vtable_size) {
    CppVtableInfo i;
    return pointer_delta(&i._cloned_vtable[vtable_size], &i, sizeof(u1));
  }
};

template <class T> class CppVtableCloner : public T {
  static intptr_t* vtable_of(Metadata& m) {
    return *((intptr_t**)&m);
  }
  static CppVtableInfo* _info;

  static int get_vtable_length(const char* name);

public:
  // Allocate and initialize the C++ vtable, starting from top, but do not go past end.
  static intptr_t* allocate(const char* name);

  // Clone the vtable into the given CppVtableInfo.
  static intptr_t* clone_vtable(const char* name, CppVtableInfo* info);

  static void zero_vtable_clone() {
    assert(DumpSharedSpaces, "dump-time only");
    _info->zero();
  }

  // Switch the vtable pointer to point to the cloned vtable.
  static void patch(Metadata* obj) {
    assert(DumpSharedSpaces, "dump-time only");
    *(void**)obj = (void*)(_info->cloned_vtable());
  }

  static bool is_valid_shared_object(const T* obj) {
    intptr_t* vptr = *(intptr_t**)obj;
    return vptr == _info->cloned_vtable();
  }
};

template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;

template <class T>
intptr_t* CppVtableCloner<T>::allocate(const char* name) {
  assert(is_aligned(_md_region.top(), sizeof(intptr_t)), "bad alignment");
  int n = get_vtable_length(name);
  _info = (CppVtableInfo*)_md_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
  _info->set_vtable_size(n);

  intptr_t* p = clone_vtable(name, _info);
  assert((char*)p == _md_region.top(), "must be");

  return p;
}

template <class T>
intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) {
  if (!DumpSharedSpaces) {
    assert(_info == 0, "_info is initialized only at dump time");
    _info = info; // Remember it -- it will be used by MetaspaceShared::is_valid_shared_method()
  }
  T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
  int n = info->vtable_size();
  intptr_t* srcvtable = vtable_of(tmp);
  intptr_t* dstvtable = info->cloned_vtable();

  // We already checked (and, if necessary, adjusted n) when the vtables were allocated, so we are
  // safe to do memcpy.
  log_debug(cds, vtables)("Copying %3d vtable entries for %s", n, name);
  memcpy(dstvtable, srcvtable, sizeof(intptr_t) * n);
  return dstvtable + n;
}

// To determine the size of the vtable for each type, we use the following
// trick by declaring 2 subclasses:
//
//   class CppVtableTesterA: public InstanceKlass {virtual int   last_virtual_method() {return 1;}    };
//   class CppVtableTesterB: public InstanceKlass {virtual void* last_virtual_method() {return NULL;} };
//
// CppVtableTesterA and CppVtableTesterB's vtables have the following properties:
// - Their size (N+1) is exactly one more than the size of InstanceKlass's vtable (N)
// - The first N entries are exactly the same as in InstanceKlass's vtable.
// - Their last entry is different.
//
// So to determine the value of N, we just walk CppVtableTesterA and CppVtableTesterB's tables
// and find the first entry that's different.
//
// This works on all C++ compilers supported by Oracle, but you may need to tweak it for more
// esoteric compilers.

template <class T> class CppVtableTesterB: public T {
public:
  virtual int last_virtual_method() {return 1;}
};

template <class T> class CppVtableTesterA : public T {
public:
  virtual void* last_virtual_method() {
    // Make this different than CppVtableTesterB::last_virtual_method so the C++
    // compiler/linker won't alias the two functions.
    return NULL;
  }
};

template <class T>
int CppVtableCloner<T>::get_vtable_length(const char* name) {
  CppVtableTesterA<T> a;
  CppVtableTesterB<T> b;

  intptr_t* avtable = vtable_of(a);
  intptr_t* bvtable = vtable_of(b);

  // Start at slot 1, because slot 0 may be RTTI (on Solaris/Sparc)
  int vtable_len = 1;
  for (; ; vtable_len++) {
    if (avtable[vtable_len] != bvtable[vtable_len]) {
      break;
    }
  }
  log_debug(cds, vtables)("Found %3d vtable entries for %s", vtable_len, name);

  return vtable_len;
}

#define ALLOC_CPP_VTABLE_CLONE(c) \
  CppVtableCloner<c>::allocate(#c);

#define CLONE_CPP_VTABLE(c) \
  p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);

#define ZERO_CPP_VTABLE(c) \
  CppVtableCloner<c>::zero_vtable_clone();

// This can be called at both dump time and run time.
intptr_t* MetaspaceShared::clone_cpp_vtables(intptr_t* p) {
  assert(DumpSharedSpaces || UseSharedSpaces, "sanity");
  CPP_VTABLE_PATCH_TYPES_DO(CLONE_CPP_VTABLE);
  return p;
}

void MetaspaceShared::zero_cpp_vtable_clones_for_writing() {
  assert(DumpSharedSpaces, "dump-time only");
  CPP_VTABLE_PATCH_TYPES_DO(ZERO_CPP_VTABLE);
}

// Allocate and initialize the C++ vtables, starting from top, but do not go past end.
void MetaspaceShared::allocate_cpp_vtable_clones() {
  assert(DumpSharedSpaces, "dump-time only");
  // Layout (each slot is an intptr_t):
  //   [number of slots in the first vtable = n1]
  //   [ <n1> slots for the first vtable]
  //   [number of slots in the second vtable = n2]
  //   [ <n2> slots for the second vtable]
  //   ...
  // The order of the vtables is the same as the CPP_VTABLE_PATCH_TYPES_DO macro.
  CPP_VTABLE_PATCH_TYPES_DO(ALLOC_CPP_VTABLE_CLONE);
}

// Switch the vtable pointer to point to the cloned vtable. We assume the
// vtable pointer is in the first slot of the object.
void MetaspaceShared::patch_cpp_vtable_pointers() {
  int n = _global_klass_objects->length();
  for (int i = 0; i < n; i++) {
    Klass* obj = _global_klass_objects->at(i);
    if (obj->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(obj);
      if (ik->is_class_loader_instance_klass()) {
        CppVtableCloner<InstanceClassLoaderKlass>::patch(ik);
      } else if (ik->is_reference_instance_klass()) {
        CppVtableCloner<InstanceRefKlass>::patch(ik);
      } else if (ik->is_mirror_instance_klass()) {
        CppVtableCloner<InstanceMirrorKlass>::patch(ik);
      } else {
        CppVtableCloner<InstanceKlass>::patch(ik);
      }
      ConstantPool* cp = ik->constants();
      CppVtableCloner<ConstantPool>::patch(cp);
      for (int j = 0; j < ik->methods()->length(); j++) {
        Method* m = ik->methods()->at(j);
        CppVtableCloner<Method>::patch(m);
        assert(CppVtableCloner<Method>::is_valid_shared_object(m), "must be");
      }
    } else if (obj->is_objArray_klass()) {
      CppVtableCloner<ObjArrayKlass>::patch(obj);
    } else {
      assert(obj->is_typeArray_klass(), "sanity");
      CppVtableCloner<TypeArrayKlass>::patch(obj);
    }
  }
}

bool MetaspaceShared::is_valid_shared_method(const Method* m) {
  assert(is_in_shared_metaspace(m), "must be");
  return CppVtableCloner<Method>::is_valid_shared_object(m);
}

// Closure for serializing initialization data out to a data area to be
// written to the shared file.
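// The stream written here is strictly order-sensitive: every do_tag() emitted
// through MetaspaceShared::serialize() at dump time must be matched, in the same
// order, when serialize() runs again over the mapped archive at run time with a
// reading closure.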

class WriteClosure : public SerializeClosure {
private:
  DumpRegion* _dump_region;

public:
  WriteClosure(DumpRegion* r) {
    _dump_region = r;
  }

  void do_ptr(void** p) {
    _dump_region->append_intptr_t((intptr_t)*p);
  }

  void do_u4(u4* p) {
    // Widen the 32-bit value to a full pointer-sized slot so the stream
    // stays intptr_t-aligned.
    void* ptr = (void*)(uintx(*p));
    do_ptr(&ptr);
  }

  void do_tag(int tag) {
    _dump_region->append_intptr_t((intptr_t)tag);
  }

  void do_oop(oop* o) {
    if (*o == NULL) {
      _dump_region->append_intptr_t(0);
    } else {
      assert(HeapShared::is_heap_object_archiving_allowed(),
             "Archiving heap object is not allowed");
      _dump_region->append_intptr_t(
        (intptr_t)CompressedOops::encode_not_null(*o));
    }
  }

  void do_region(u_char* start, size_t size) {
    assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
    assert(size % sizeof(intptr_t) == 0, "bad size");
    do_tag((int)size);
    while (size > 0) {
      _dump_region->append_intptr_t(*(intptr_t*)start);
      start += sizeof(intptr_t);
      size -= sizeof(intptr_t);
    }
  }

  bool reading() const { return false; }
};

// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public ResourceObj {
public:

  // Here's poor man's enum inheritance
#define SHAREDSPACE_OBJ_TYPES_DO(f) \
  METASPACE_OBJ_TYPES_DO(f) \
  f(SymbolHashentry) \
  f(SymbolBucket) \
  f(StringHashentry) \
  f(StringBucket) \
  f(Other)

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  static const char* type_name(Type type) {
    switch(type) {
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
    default:
      ShouldNotReachHere();
      return NULL;
    }
  }

public:
  enum { RO = 0, RW = 1 };

  int _counts[2][_number_of_types];
  int _bytes [2][_number_of_types];

  DumpAllocStats() {
    memset(_counts, 0, sizeof(_counts));
    memset(_bytes, 0, sizeof(_bytes));
  };

  void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
    int which = (read_only) ? RO : RW;
    _counts[which][type] ++;
    _bytes [which][type] += byte_size;
  }

  void record_other_type(int byte_size, bool read_only) {
    int which = (read_only) ? RO : RW;
    _bytes [which][OtherType] += byte_size;
  }
  void print_stats(int ro_all, int rw_all, int mc_all, int md_all);
};

void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all, int md_all) {
  // Calculate size of data that was not allocated by Metaspace::allocate()
  MetaspaceSharedStats *stats = MetaspaceShared::stats();

  // symbols
  _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
  _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;

  _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
  _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;

  // strings
  _counts[RO][StringHashentryType] = stats->string.hashentry_count;
  _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;

  _counts[RO][StringBucketType] = stats->string.bucket_count;
  _bytes [RO][StringBucketType] = stats->string.bucket_bytes;

  // TODO: count things like dictionary, vtable, etc
  _bytes[RW][OtherType] += mc_all + md_all;
  rw_all += mc_all + md_all; // mc/md are mapped Read/Write

  // prevent divide-by-zero
  if (ro_all < 1) {
    ro_all = 1;
  }
  if (rw_all < 1) {
    rw_all = 1;
  }

  int all_ro_count = 0;
  int all_ro_bytes = 0;
  int all_rw_count = 0;
  int all_rw_bytes = 0;

// To make fmt_stats be a syntactic constant (for format warnings), use #define.
#define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
  const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
  const char *hdr = "                        ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";

  LogMessage(cds) msg;

  msg.info("Detailed metadata info (excluding od/st regions; rw stats include md/mc regions):");
  msg.info("%s", hdr);
  msg.info("%s", sep);
  for (int type = 0; type < int(_number_of_types); type ++) {
    const char *name = type_name((Type)type);
    int ro_count = _counts[RO][type];
    int ro_bytes = _bytes [RO][type];
    int rw_count = _counts[RW][type];
    int rw_bytes = _bytes [RW][type];
    int count = ro_count + rw_count;
    int bytes = ro_bytes + rw_bytes;

    double ro_perc = percent_of(ro_bytes, ro_all);
    double rw_perc = percent_of(rw_bytes, rw_all);
    double perc    = percent_of(bytes, ro_all + rw_all);

    msg.info(fmt_stats, name,
             ro_count, ro_bytes, ro_perc,
             rw_count, rw_bytes, rw_perc,
             count, bytes, perc);

    all_ro_count += ro_count;
    all_ro_bytes += ro_bytes;
    all_rw_count += rw_count;
    all_rw_bytes += rw_bytes;
  }

  int all_count = all_ro_count + all_rw_count;
  int all_bytes = all_ro_bytes + all_rw_bytes;

  double all_ro_perc = percent_of(all_ro_bytes, ro_all);
  double all_rw_perc = percent_of(all_rw_bytes, rw_all);
  double all_perc    = percent_of(all_bytes, ro_all + rw_all);

  msg.info("%s", sep);
  msg.info(fmt_stats, "Total",
           all_ro_count, all_ro_bytes, all_ro_perc,
           all_rw_count, all_rw_bytes, all_rw_perc,
           all_count, all_bytes, all_perc);

  assert(all_ro_bytes == ro_all, "everything should have been counted");
  assert(all_rw_bytes == rw_all, "everything should have been counted");

#undef fmt_stats
}

// Populate the shared space.
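//
// Dumping runs as a VM operation (VM_PopulateDumpSharedSpace below) and therefore
// at a safepoint, so the set of loaded classes and their metaspace objects stays
// stable while everything is copied into the archive regions.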

class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  GrowableArray<MemRegion> *_closed_archive_heap_regions;
  GrowableArray<MemRegion> *_open_archive_heap_regions;

  GrowableArray<ArchiveHeapOopmapInfo> *_closed_archive_heap_oopmaps;
  GrowableArray<ArchiveHeapOopmapInfo> *_open_archive_heap_oopmaps;

  void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                 GrowableArray<ArchiveHeapOopmapInfo>* oopmaps);
  void dump_symbols();
  char* dump_read_only_tables();
  void print_region_stats();
  void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                               const char *name, const size_t total_size);
public:

  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit();   // outline because gdb sucks
  static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only, bool allow_exec);
  bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace

class SortedSymbolClosure: public SymbolClosure {
  GrowableArray<Symbol*> _symbols;
  virtual void do_symbol(Symbol** sym) {
    assert((*sym)->is_permanent(), "archived symbols must be permanent");
    _symbols.append(*sym);
  }
  static int compare_symbols_by_address(Symbol** a, Symbol** b) {
    if (a[0] < b[0]) {
      return -1;
    } else if (a[0] == b[0]) {
      return 0;
    } else {
      return 1;
    }
  }

public:
  SortedSymbolClosure() {
    SymbolTable::symbols_do(this);
    _symbols.sort(compare_symbols_by_address);
  }
  GrowableArray<Symbol*>* get_sorted_symbols() {
    return &_symbols;
  }
};

// ArchiveCompactor --
//
// This class is the central piece of shared archive compaction -- all metaspace data are
// initially allocated outside of the shared regions. ArchiveCompactor copies the
// metaspace data into their final location in the shared regions.

class ArchiveCompactor : AllStatic {
  static DumpAllocStats* _alloc_stats;
  static SortedSymbolClosure* _ssc;

  static unsigned my_hash(const address& a) {
    return primitive_hash<address>(a);
  }
  static bool my_equals(const address& a0, const address& a1) {
    return primitive_equals<address>(a0, a1);
  }
  typedef ResourceHashtable<
      address, address,
      ArchiveCompactor::my_hash,   // solaris compiler doesn't like: primitive_hash<address>
      ArchiveCompactor::my_equals, // solaris compiler doesn't like: primitive_equals<address>
      16384, ResourceObj::C_HEAP> RelocationTable;
  static RelocationTable* _new_loc_table;

public:
  static void initialize() {
    _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats;
    _new_loc_table = new(ResourceObj::C_HEAP, mtInternal)RelocationTable;
  }
  static DumpAllocStats* alloc_stats() {
    return _alloc_stats;
  }

  // Use this when you allocate space with MetaspaceShared::read_only_space_alloc()
  // outside of ArchiveCompactor::allocate(). These are usually for misc tables
  // that are allocated in the RO space.
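  //
  // A minimal usage sketch (hypothetical code, mirroring dump_read_only_tables()):
  //
  //   {
  //     ArchiveCompactor::OtherROAllocMark mark;
  //     char* p = MetaspaceShared::read_only_space_alloc(nbytes);
  //     ... fill in the table at p ...
  //   } // ~OtherROAllocMark records the RO bytes allocated in this scope as "Other"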
  class OtherROAllocMark {
    char* _oldtop;
  public:
    OtherROAllocMark() {
      _oldtop = _ro_region.top();
    }
    ~OtherROAllocMark() {
      char* newtop = _ro_region.top();
      ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
    }
  };

  static void allocate(MetaspaceClosure::Ref* ref, bool read_only) {
    address obj = ref->obj();
    int bytes = ref->size() * BytesPerWord;
    char* p;
    size_t alignment = BytesPerWord;
    char* oldtop;
    char* newtop;

    if (read_only) {
      oldtop = _ro_region.top();
      p = _ro_region.allocate(bytes, alignment);
      newtop = _ro_region.top();
    } else {
      oldtop = _rw_region.top();
      if (ref->msotype() == MetaspaceObj::ClassType) {
        // Save a pointer immediately in front of an InstanceKlass, so
        // we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo*
        // without building another hashtable. See RunTimeSharedClassInfo::get_for()
        // in systemDictionaryShared.cpp.
        Klass* klass = (Klass*)obj;
        if (klass->is_instance_klass()) {
          SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
          _rw_region.allocate(sizeof(address), BytesPerWord);
        }
      }
      p = _rw_region.allocate(bytes, alignment);
      newtop = _rw_region.top();
    }
    memcpy(p, obj, bytes);
    bool isnew = _new_loc_table->put(obj, (address)p);
    log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
    assert(isnew, "must be");

    _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
  }

  static address get_new_loc(MetaspaceClosure::Ref* ref) {
    address* pp = _new_loc_table->get(ref->obj());
    assert(pp != NULL, "must be");
    return *pp;
  }

private:
  // Makes a shallow copy of visited MetaspaceObj's
  class ShallowCopier: public UniqueMetaspaceClosure {
    bool _read_only;
  public:
    ShallowCopier(bool read_only) : _read_only(read_only) {}

    virtual void do_unique_ref(Ref* ref, bool read_only) {
      if (read_only == _read_only) {
        allocate(ref, read_only);
      }
    }
  };

  // Relocate embedded pointers within a MetaspaceObj's shallow copy
  class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
  public:
    virtual void do_unique_ref(Ref* ref, bool read_only) {
      address new_loc = get_new_loc(ref);
      RefRelocator refer;
      ref->metaspace_pointers_do_at(&refer, new_loc);
    }
  };

  // Relocate a reference to point to its shallow copy
  class RefRelocator: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        ref->update(get_new_loc(ref));
      }
      return false; // Do not recurse.
    }
  };

#ifdef ASSERT
  class IsRefInArchiveChecker: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        char* obj = (char*)ref->obj();
        assert(_ro_region.contains(obj) || _rw_region.contains(obj),
               "must be relocated to point to CDS archive");
      }
      return false; // Do not recurse.
    }
  };
#endif

public:
  static void copy_and_compact() {
    ResourceMark rm;
    SortedSymbolClosure the_ssc; // StackObj
    _ssc = &the_ssc;

    tty->print_cr("Scanning all metaspace objects ... ");
"); 1208 { 1209 // allocate and shallow-copy RW objects, immediately following the MC region 1210 tty->print_cr("Allocating RW objects ... "); 1211 _mc_region.pack(&_rw_region); 1212 1213 ResourceMark rm; 1214 ShallowCopier rw_copier(false); 1215 iterate_roots(&rw_copier); 1216 } 1217 { 1218 // allocate and shallow-copy of RO object, immediately following the RW region 1219 tty->print_cr("Allocating RO objects ... "); 1220 _rw_region.pack(&_ro_region); 1221 1222 ResourceMark rm; 1223 ShallowCopier ro_copier(true); 1224 iterate_roots(&ro_copier); 1225 } 1226 { 1227 tty->print_cr("Relocating embedded pointers ... "); 1228 ResourceMark rm; 1229 ShallowCopyEmbeddedRefRelocator emb_reloc; 1230 iterate_roots(&emb_reloc); 1231 } 1232 { 1233 tty->print_cr("Relocating external roots ... "); 1234 ResourceMark rm; 1235 RefRelocator ext_reloc; 1236 iterate_roots(&ext_reloc); 1237 } 1238 1239 #ifdef ASSERT 1240 { 1241 tty->print_cr("Verifying external roots ... "); 1242 ResourceMark rm; 1243 IsRefInArchiveChecker checker; 1244 iterate_roots(&checker); 1245 } 1246 #endif 1247 1248 1249 // cleanup 1250 _ssc = NULL; 1251 } 1252 1253 // We must relocate the System::_well_known_klasses only after we have copied the 1254 // java objects in during dump_java_heap_objects(): during the object copy, we operate on 1255 // old objects which assert that their klass is the original klass. 1256 static void relocate_well_known_klasses() { 1257 { 1258 tty->print_cr("Relocating SystemDictionary::_well_known_klasses[] ... "); 1259 ResourceMark rm; 1260 RefRelocator ext_reloc; 1261 SystemDictionary::well_known_klasses_do(&ext_reloc); 1262 } 1263 // NOTE: after this point, we shouldn't have any globals that can reach the old 1264 // objects. 1265 1266 // We cannot use any of the objects in the heap anymore (except for the 1267 // shared strings) because their headers no longer point to valid Klasses. 1268 } 1269 1270 static void iterate_roots(MetaspaceClosure* it) { 1271 GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols(); 1272 for (int i=0; i<symbols->length(); i++) { 1273 it->push(symbols->adr_at(i)); 1274 } 1275 if (_global_klass_objects != NULL) { 1276 // Need to fix up the pointers 1277 for (int i = 0; i < _global_klass_objects->length(); i++) { 1278 // NOTE -- this requires that the vtable is NOT yet patched, or else we are hosed. 
1279 it->push(_global_klass_objects->adr_at(i)); 1280 } 1281 } 1282 FileMapInfo::metaspace_pointers_do(it); 1283 SystemDictionaryShared::dumptime_classes_do(it); 1284 Universe::metaspace_pointers_do(it); 1285 SymbolTable::metaspace_pointers_do(it); 1286 vmSymbols::metaspace_pointers_do(it); 1287 } 1288 1289 static Klass* get_relocated_klass(Klass* orig_klass) { 1290 assert(DumpSharedSpaces, "dump time only"); 1291 address* pp = _new_loc_table->get((address)orig_klass); 1292 assert(pp != NULL, "must be"); 1293 Klass* klass = (Klass*)(*pp); 1294 assert(klass->is_klass(), "must be"); 1295 return klass; 1296 } 1297 }; 1298 1299 DumpAllocStats* ArchiveCompactor::_alloc_stats; 1300 SortedSymbolClosure* ArchiveCompactor::_ssc; 1301 ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table; 1302 1303 void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx, 1304 DumpRegion* dump_region, bool read_only, bool allow_exec) { 1305 mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec); 1306 } 1307 1308 void VM_PopulateDumpSharedSpace::dump_symbols() { 1309 tty->print_cr("Dumping symbol table ..."); 1310 1311 NOT_PRODUCT(SymbolTable::verify()); 1312 SymbolTable::write_to_archive(); 1313 } 1314 1315 char* VM_PopulateDumpSharedSpace::dump_read_only_tables() { 1316 ArchiveCompactor::OtherROAllocMark mark; 1317 1318 tty->print("Removing java_mirror ... "); 1319 if (!HeapShared::is_heap_object_archiving_allowed()) { 1320 clear_basic_type_mirrors(); 1321 } 1322 remove_java_mirror_in_classes(); 1323 tty->print_cr("done. "); 1324 1325 SystemDictionaryShared::write_to_archive(); 1326 1327 char* start = _ro_region.top(); 1328 1329 // Write the other data to the output array. 1330 WriteClosure wc(&_ro_region); 1331 MetaspaceShared::serialize(&wc); 1332 1333 // Write the bitmaps for patching the archive heap regions 1334 dump_archive_heap_oopmaps(); 1335 1336 return start; 1337 } 1338 1339 void VM_PopulateDumpSharedSpace::doit() { 1340 // We should no longer allocate anything from the metaspace, so that: 1341 // 1342 // (1) Metaspace::allocate might trigger GC if we have run out of 1343 // committed metaspace, but we can't GC because we're running 1344 // in the VM thread. 1345 // (2) ArchiveCompactor needs to work with a stable set of MetaspaceObjs. 1346 Metaspace::freeze(); 1347 1348 Thread* THREAD = VMThread::vm_thread(); 1349 1350 FileMapInfo::check_nonempty_dir_in_shared_path_table(); 1351 1352 NOT_PRODUCT(SystemDictionary::verify();) 1353 // The following guarantee is meant to ensure that no loader constraints 1354 // exist yet, since the constraints table is not shared. This becomes 1355 // more important now that we don't re-initialize vtables/itables for 1356 // shared classes at runtime, where constraints were previously created. 1357 guarantee(SystemDictionary::constraints()->number_of_entries() == 0, 1358 "loader constraints are not saved"); 1359 guarantee(SystemDictionary::placeholders()->number_of_entries() == 0, 1360 "placeholders are not saved"); 1361 1362 // At this point, many classes have been loaded. 1363 // Gather systemDictionary classes in a global array and do everything to 1364 // that so we don't have to walk the SystemDictionary again. 
  SystemDictionaryShared::check_excluded_classes();
  _global_klass_objects = new GrowableArray<Klass*>(1000);
  CollectClassesClosure collect_classes;
  ClassLoaderDataGraph::loaded_classes_do(&collect_classes);

  tty->print_cr("Number of classes %d", _global_klass_objects->length());
  {
    int num_type_array = 0, num_obj_array = 0, num_inst = 0;
    for (int i = 0; i < _global_klass_objects->length(); i++) {
      Klass* k = _global_klass_objects->at(i);
      if (k->is_instance_klass()) {
        num_inst ++;
      } else if (k->is_objArray_klass()) {
        num_obj_array ++;
      } else {
        assert(k->is_typeArray_klass(), "sanity");
        num_type_array ++;
      }
    }
    tty->print_cr("    instance classes   = %5d", num_inst);
    tty->print_cr("    obj array classes  = %5d", num_obj_array);
    tty->print_cr("    type array classes = %5d", num_type_array);
  }

  // Ensure the ConstMethods won't be modified at run-time
  tty->print("Updating ConstMethods ... ");
  rewrite_nofast_bytecodes_and_calculate_fingerprints();
  tty->print_cr("done. ");

  // Remove all references outside the metadata
  tty->print("Removing unshareable information ... ");
  remove_unshareable_in_classes();
  tty->print_cr("done. ");

  ArchiveCompactor::initialize();
  ArchiveCompactor::copy_and_compact();

  dump_symbols();

  // Dump supported java heap objects
  _closed_archive_heap_regions = NULL;
  _open_archive_heap_regions = NULL;
  dump_java_heap_objects();

  ArchiveCompactor::relocate_well_known_klasses();

  char* read_only_tables_start = dump_read_only_tables();
  _ro_region.pack(&_md_region);

  char* vtbl_list = _md_region.top();
  MetaspaceShared::allocate_cpp_vtable_clones();
  _md_region.pack(&_od_region);

  // Relocate the archived class file data into the od region
  relocate_cached_class_file();
  _od_region.pack();

  // The 5 core spaces are allocated consecutively mc->rw->ro->md->od, so their total size
  // is just the space between the two ends.
  size_t core_spaces_size = _od_region.end() - _mc_region.base();
  assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
         "should already be aligned");

  // During patching, some virtual methods may be called, so at this point
  // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
  MetaspaceShared::patch_cpp_vtable_pointers();

  // The vtable clones contain addresses of the current process.
  // We don't want to write these addresses into the archive.
  MetaspaceShared::zero_cpp_vtable_clones_for_writing();

  // Create and write the archive file that maps the shared spaces.

  FileMapInfo* mapinfo = new FileMapInfo();
  mapinfo->populate_header(os::vm_allocation_granularity());
  mapinfo->set_read_only_tables_start(read_only_tables_start);
  mapinfo->set_misc_data_patching_start(vtbl_list);
  mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers());
  mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size());
  mapinfo->set_core_spaces_size(core_spaces_size);

  for (int pass=1; pass<=2; pass++) {
    bool print_archive_log = (pass==1);
    if (pass == 1) {
      // The first pass doesn't actually write the data to disk. All it
      // does is to update the fields in the mapinfo->_header.
    } else {
      // After the first pass, the contents of mapinfo->_header are finalized,
      // so we can compute the header's CRC, and write the contents of the header
      // and the regions to disk.
      mapinfo->open_for_write();
      mapinfo->set_header_crc(mapinfo->compute_header_crc());
    }
    mapinfo->write_header();

    // NOTE: mc contains the trampoline code for method entries, which are patched at run time,
    // so it needs to be read/write.
    write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
    write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
    write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
    write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
    write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);

    _total_closed_archive_region_size = mapinfo->write_archive_heap_regions(
                                          _closed_archive_heap_regions,
                                          _closed_archive_heap_oopmaps,
                                          MetaspaceShared::first_closed_archive_heap_region,
                                          MetaspaceShared::max_closed_archive_heap_region,
                                          print_archive_log);
    _total_open_archive_region_size = mapinfo->write_archive_heap_regions(
                                        _open_archive_heap_regions,
                                        _open_archive_heap_oopmaps,
                                        MetaspaceShared::first_open_archive_heap_region,
                                        MetaspaceShared::max_open_archive_heap_region,
                                        print_archive_log);
  }

  mapinfo->close();

  // Restore the vtable in case we invoke any virtual methods.
  MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);

  print_region_stats();

  if (log_is_enabled(Info, cds)) {
    ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
                                                 int(_mc_region.used()), int(_md_region.used()));
  }

  if (PrintSystemDictionaryAtExit) {
    SystemDictionary::print();
  }
  // There may be other pending VM operations that operate on the InstanceKlasses,
  // which will fail because InstanceKlass::remove_unshareable_info()
  // has been called. Forget these operations and exit the VM directly.
  vm_direct_exit(0);
}

void VM_PopulateDumpSharedSpace::print_region_stats() {
  // Print statistics of all the regions
  const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
                                _mc_region.reserved() + _md_region.reserved() +
                                _od_region.reserved() +
                                _total_closed_archive_region_size +
                                _total_open_archive_region_size;
  const size_t total_bytes = _ro_region.used() + _rw_region.used() +
                             _mc_region.used() + _md_region.used() +
                             _od_region.used() +
                             _total_closed_archive_region_size +
                             _total_open_archive_region_size;
  const double total_u_perc = percent_of(total_bytes, total_reserved);

  _mc_region.print(total_reserved);
  _rw_region.print(total_reserved);
  _ro_region.print(total_reserved);
  _md_region.print(total_reserved);
  _od_region.print(total_reserved);
  print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved);
  print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);

  tty->print_cr("total    : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
                total_bytes, total_reserved, total_u_perc);
}

void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                                                         const char *name, const size_t total_size) {
  int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
  for (int i = 0; i < arr_len; i++) {
      char* start = (char*)heap_mem->at(i).start();
      size_t size = heap_mem->at(i).byte_size();
      char* top = start + size;
      tty->print_cr("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
                    name, i, size, size/double(total_size)*100.0, size, p2i(start));
  }
}

// Update a Java object to point its Klass* to the new location after
// shared archive has been compacted.
void MetaspaceShared::relocate_klass_ptr(oop o) {
  assert(DumpSharedSpaces, "sanity");
  Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
  o->set_klass(k);
}

Klass* MetaspaceShared::get_relocated_klass(Klass *k) {
  assert(DumpSharedSpaces, "sanity");
  return ArchiveCompactor::get_relocated_klass(k);
}

class LinkSharedClassesClosure : public KlassClosure {
  Thread* THREAD;
  bool _made_progress;
public:
  LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {}

  void reset()               { _made_progress = false; }
  bool made_progress() const { return _made_progress; }

  void do_klass(Klass* k) {
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      // Link the class to cause the bytecodes to be rewritten and the
      // cpcache to be created. Class verification is done according
      // to -Xverify setting.
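      // (A sketch of the control flow, as far as this file shows: the boolean
      // returned by try_link_class feeds made_progress(), which keeps the do/while
      // loop in link_and_cleanup_shared_classes() below iterating until no more
      // classes get linked.)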
      _made_progress |= MetaspaceShared::try_link_class(ik, THREAD);
      guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");

      ik->constants()->resolve_class_constants(THREAD);
    }
  }
};

class CheckSharedClassesClosure : public KlassClosure {
  bool _made_progress;
 public:
  CheckSharedClassesClosure() : _made_progress(false) {}

  void reset()               { _made_progress = false; }
  bool made_progress() const { return _made_progress; }
  void do_klass(Klass* k) {
    if (k->is_instance_klass() && InstanceKlass::cast(k)->check_sharing_error_state()) {
      _made_progress = true;
    }
  }
};

void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) {
  // We need to iterate because verification may cause additional classes
  // to be loaded.
  LinkSharedClassesClosure link_closure(THREAD);
  do {
    link_closure.reset();
    ClassLoaderDataGraph::unlocked_loaded_classes_do(&link_closure);
    guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
  } while (link_closure.made_progress());

  if (_has_error_classes) {
    // Mark all classes whose super class or interfaces failed verification.
    CheckSharedClassesClosure check_closure;
    do {
      // Not completely sure if we need to do this iteratively. Anyway,
      // we should come here only if there are unverifiable classes, which
      // shouldn't happen in normal cases. So better safe than sorry.
      check_closure.reset();
      ClassLoaderDataGraph::unlocked_loaded_classes_do(&check_closure);
    } while (check_closure.made_progress());
  }
}

void MetaspaceShared::prepare_for_dumping() {
  Arguments::check_unsupported_dumping_properties();
  ClassLoader::initialize_shared_path();
}

// Preload classes from a list, populate the shared spaces and dump to a
// file.
void MetaspaceShared::preload_and_dump(TRAPS) {
  { TraceTime timer("Dump Shared Spaces", TRACETIME_LOG(Info, startuptime));
    ResourceMark rm;
    char class_list_path_str[JVM_MAXPATHLEN];
    // Preload classes to be shared.
    // Should use some os:: method rather than fopen() here. aB.
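    // For reference, a typical dump invocation looks something like this
    // (the classlist path is illustrative only):
    //
    //   java -Xshare:dump -XX:SharedClassListFile=/path/to/classlist
    //
    // When -XX:SharedClassListFile is not set, the default classlist shipped
    // with the JDK is located by the directory walk below.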
    const char* class_list_path;
    if (SharedClassListFile == NULL) {
      // Construct the path to the class list (in jre/lib)
      // Walk up two directories from the location of the VM and
      // optionally tack on "lib" (depending on platform)
      os::jvm_path(class_list_path_str, sizeof(class_list_path_str));
      for (int i = 0; i < 3; i++) {
        char *end = strrchr(class_list_path_str, *os::file_separator());
        if (end != NULL) *end = '\0';
      }
      int class_list_path_len = (int)strlen(class_list_path_str);
      if (class_list_path_len >= 3) {
        if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
          if (class_list_path_len < JVM_MAXPATHLEN - 4) {
            jio_snprintf(class_list_path_str + class_list_path_len,
                         sizeof(class_list_path_str) - class_list_path_len,
                         "%slib", os::file_separator());
            class_list_path_len += 4;
          }
        }
      }
      if (class_list_path_len < JVM_MAXPATHLEN - 10) {
        jio_snprintf(class_list_path_str + class_list_path_len,
                     sizeof(class_list_path_str) - class_list_path_len,
                     "%sclasslist", os::file_separator());
      }
      class_list_path = class_list_path_str;
    } else {
      class_list_path = SharedClassListFile;
    }

    tty->print_cr("Loading classes to share ...");
    _has_error_classes = false;
    int class_count = preload_classes(class_list_path, THREAD);
    if (ExtraSharedClassListFile) {
      class_count += preload_classes(ExtraSharedClassListFile, THREAD);
    }
    tty->print_cr("Loading classes to share: done.");

    log_info(cds)("Shared spaces: preloaded %d classes", class_count);

    if (SharedArchiveConfigFile) {
      tty->print_cr("Reading extra data from %s ...", SharedArchiveConfigFile);
      read_extra_data(SharedArchiveConfigFile, THREAD);
      tty->print_cr("Reading extra data: done.");
    }

    HeapShared::init_subgraph_entry_fields(THREAD);

    // Rewrite and link classes
    tty->print_cr("Rewriting and linking classes ...");

    // Link any classes which got missed. This would happen if we have loaded classes that
    // were not explicitly specified in the classlist. E.g., if an interface implemented by class K
    // fails verification, all other interfaces that were not specified in the classlist but
    // are implemented by K are not verified.
    link_and_cleanup_shared_classes(CATCH);
    tty->print_cr("Rewriting and linking classes: done");

    VM_PopulateDumpSharedSpace op;
    VMThread::execute(&op);
  }
}


int MetaspaceShared::preload_classes(const char* class_list_path, TRAPS) {
  ClassListParser parser(class_list_path);
  int class_count = 0;

  while (parser.parse_one_line()) {
    Klass* klass = parser.load_current_class(THREAD);
    if (HAS_PENDING_EXCEPTION) {
      if (klass == NULL &&
          (PENDING_EXCEPTION->klass()->name() == vmSymbols::java_lang_ClassNotFoundException())) {
        // Print a warning only when the pending exception is ClassNotFoundException.
        tty->print_cr("Preload Warning: Cannot find %s", parser.current_class_name());
      }
      CLEAR_PENDING_EXCEPTION;
    }
    if (klass != NULL) {
      if (log_is_enabled(Trace, cds)) {
        ResourceMark rm;
        log_trace(cds)("Shared spaces preloaded: %s", klass->external_name());
      }

      if (klass->is_instance_klass()) {
        InstanceKlass* ik = InstanceKlass::cast(klass);

        // Link the class to cause the bytecodes to be rewritten and the
        // cpcache to be created.
        // The linking is done as soon as classes
        // are loaded, so that the related data structures (klass and
        // cpCache) are located together.
        try_link_class(ik, THREAD);
        guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
      }

      class_count++;
    }
  }

  return class_count;
}

// Returns true if the class's status has changed
bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
  assert(DumpSharedSpaces, "should only be called during dumping");
  if (ik->init_state() < InstanceKlass::linked) {
    bool saved = BytecodeVerificationLocal;
    if (ik->loader_type() == 0 && ik->class_loader() == NULL) {
      // The verification decision is based on BytecodeVerificationRemote
      // for non-system classes. Since we are using the NULL classloader
      // to load non-system classes for customized class loaders during dumping,
      // we need to temporarily change BytecodeVerificationLocal to be the same as
      // BytecodeVerificationRemote. Note this can cause the parent system
      // classes to also be verified. The extra overhead is acceptable during
      // dumping.
      BytecodeVerificationLocal = BytecodeVerificationRemote;
    }
    ik->link_class(THREAD);
    if (HAS_PENDING_EXCEPTION) {
      ResourceMark rm;
      tty->print_cr("Preload Warning: Verification failed for %s",
                    ik->external_name());
      CLEAR_PENDING_EXCEPTION;
      ik->set_in_error_state();
      _has_error_classes = true;
    }
    BytecodeVerificationLocal = saved;
    return true;
  } else {
    return false;
  }
}

#if INCLUDE_CDS_JAVA_HEAP
void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
  // The closed and open archive heap spaces have at most two regions each.
  // See FileMapInfo::write_archive_heap_regions() for details.
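  // (As a rough sketch of the distinction: "closed" archive regions hold
  // objects, such as shared strings, that are never modified at run time;
  // "open" archive regions hold archived objects that may still be updated
  // in place after they are mapped.)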
  _closed_archive_heap_regions = new GrowableArray<MemRegion>(2);
  _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
  HeapShared::archive_java_heap_objects(_closed_archive_heap_regions,
                                        _open_archive_heap_regions);
  ArchiveCompactor::OtherROAllocMark mark;
  HeapShared::write_subgraph_info_table();
}

void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps() {
  if (HeapShared::is_heap_object_archiving_allowed()) {
    _closed_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
    dump_archive_heap_oopmaps(_closed_archive_heap_regions, _closed_archive_heap_oopmaps);

    _open_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
    dump_archive_heap_oopmaps(_open_archive_heap_regions, _open_archive_heap_oopmaps);
  }
}

void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                                           GrowableArray<ArchiveHeapOopmapInfo>* oopmaps) {
  for (int i = 0; i < regions->length(); i++) {
    ResourceBitMap oopmap = HeapShared::calculate_oopmap(regions->at(i));
    size_t size_in_bits = oopmap.size();
    size_t size_in_bytes = oopmap.size_in_bytes();
    uintptr_t* buffer = (uintptr_t*)_ro_region.allocate(size_in_bytes, sizeof(intptr_t));
    oopmap.write_to(buffer, size_in_bytes);
    log_info(cds)("Oopmap = " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for heap region "
                  INTPTR_FORMAT " (" SIZE_FORMAT_W(8) " bytes)",
                  p2i(buffer), size_in_bytes,
                  p2i(regions->at(i).start()), regions->at(i).byte_size());

    ArchiveHeapOopmapInfo info;
    info._oopmap = (address)buffer;
    info._oopmap_size_in_bits = size_in_bits;
    oopmaps->append(info);
  }
}
#endif // INCLUDE_CDS_JAVA_HEAP

// Closure for reading initialization data in from a data area
// (ptr_array) read from the shared file.
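//
// A minimal usage sketch (mirroring initialize_shared_spaces() below; the
// 'buffer' variable is hypothetical):
//
//   intptr_t* array = (intptr_t*)buffer;
//   ReadClosure rc(&array);
//   serialize(&rc);   // replays the dump-time field walk, reading instead of writing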
class ReadClosure : public SerializeClosure {
 private:
  intptr_t** _ptr_array;

  inline intptr_t nextPtr() {
    return *(*_ptr_array)++;
  }

 public:
  ReadClosure(intptr_t** ptr_array) { _ptr_array = ptr_array; }

  void do_ptr(void** p) {
    assert(*p == NULL, "initializing previously initialized pointer.");
    intptr_t obj = nextPtr();
    assert(obj >= 0 || obj < -100,
           "hit tag while initializing ptrs.");
    *p = (void*)obj;
  }

  void do_u4(u4* p) {
    intptr_t obj = nextPtr();
    *p = (u4)(uintx(obj));
  }

  void do_tag(int tag) {
    int old_tag = (int)(intptr_t)nextPtr();
    // do_int(&old_tag);
    assert(tag == old_tag, "old tag doesn't match");
    FileMapInfo::assert_mark(tag == old_tag);
  }

  void do_oop(oop *p) {
    narrowOop o = (narrowOop)nextPtr();
    if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) {
      // Clear the oop slot when the archived object is not available.
      *p = NULL;
    } else {
      assert(HeapShared::is_heap_object_archiving_allowed(),
             "Archived heap object is not allowed");
      assert(HeapShared::open_archive_heap_region_mapped(),
             "Open archive heap region is not mapped");
      *p = HeapShared::decode_from_archive(o);
    }
  }

  void do_region(u_char* start, size_t size) {
    assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
    assert(size % sizeof(intptr_t) == 0, "bad size");
    do_tag((int)size);
    while (size > 0) {
      *(intptr_t*)start = nextPtr();
      start += sizeof(intptr_t);
      size -= sizeof(intptr_t);
    }
  }

  bool reading() const { return true; }
};

// Return true if the given address is within the shared region with the given index.
bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {
  return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx);
}

bool MetaspaceShared::is_in_trampoline_frame(address addr) {
  return UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc);
}

// Map shared spaces at requested addresses; return true on success.
bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
  size_t image_alignment = mapinfo->alignment();

#ifndef _WINDOWS
  // Map in the shared memory and then map the regions on top of it.
  // On Windows, don't map the memory here because it will cause the
  // mappings of the regions to fail.
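  // If every region maps successfully, they form one contiguous range
  // (low to high, with no gaps):
  //
  //   mc_base..mc_top == rw_base..rw_top == ro_base..ro_top == md_base..md_top == od_base..od_top
  //
  // The asserts on the success path below verify this adjacency.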
  ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
  if (!shared_rs.is_reserved()) return false;
#endif

  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  char* ro_base = NULL; char* ro_top;
  char* rw_base = NULL; char* rw_top;
  char* mc_base = NULL; char* mc_top;
  char* md_base = NULL; char* md_top;
  char* od_base = NULL; char* od_top;

  // Map each shared region
  if ((mc_base = mapinfo->map_region(mc, &mc_top)) != NULL &&
      (rw_base = mapinfo->map_region(rw, &rw_top)) != NULL &&
      (ro_base = mapinfo->map_region(ro, &ro_top)) != NULL &&
      (md_base = mapinfo->map_region(md, &md_top)) != NULL &&
      (od_base = mapinfo->map_region(od, &od_top)) != NULL &&
      (image_alignment == (size_t)os::vm_allocation_granularity()) &&
      mapinfo->validate_shared_path_table()) {
    // Success -- set up MetaspaceObj::_shared_metaspace_{base,top} for
    // fast checking in MetaspaceShared::is_in_shared_metaspace() and
    // MetaspaceObj::is_shared().
    //
    // We require mc->rw->ro->md->od to be laid out consecutively, with no
    // gaps between them. That way, we can ensure that the OS won't be able to
    // allocate any new memory spaces inside _shared_metaspace_{base,top}, which
    // would mess up the simple comparison in MetaspaceShared::is_in_shared_metaspace().
    assert(mc_base < ro_base && mc_base < rw_base && mc_base < md_base && mc_base < od_base, "must be");
    assert(od_top > ro_top && od_top > rw_top && od_top > md_top && od_top > mc_top, "must be");
    assert(mc_top == rw_base, "must be");
    assert(rw_top == ro_base, "must be");
    assert(ro_top == md_base, "must be");
    assert(md_top == od_base, "must be");

    _core_spaces_size = mapinfo->core_spaces_size();
    MetaspaceObj::set_shared_metaspace_range((void*)mc_base, (void*)od_top);
    return true;
  } else {
    // If there was a failure in mapping any of the spaces, unmap the ones
    // that succeeded
    if (ro_base != NULL) mapinfo->unmap_region(ro);
    if (rw_base != NULL) mapinfo->unmap_region(rw);
    if (mc_base != NULL) mapinfo->unmap_region(mc);
    if (md_base != NULL) mapinfo->unmap_region(md);
    if (od_base != NULL) mapinfo->unmap_region(od);
#ifndef _WINDOWS
    // Release the entire mapped region
    shared_rs.release();
#endif
    // If -Xshare:on is specified, print out the error message and exit the VM;
    // otherwise, set UseSharedSpaces to false and continue.
    if (RequireSharedSpaces || PrintSharedArchiveAndExit) {
      vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
    } else {
      FLAG_SET_DEFAULT(UseSharedSpaces, false);
    }
    return false;
  }
}

// Read the miscellaneous data from the shared file, and
// serialize it out to its various destinations.
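// ("Serialize" is used here for both directions: the same serialize() walk
// runs at dump time with a writing closure and at run time with the
// ReadClosure above, whose reading() returns true.)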
void MetaspaceShared::initialize_shared_spaces() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();
  _cds_i2i_entry_code_buffers = mapinfo->cds_i2i_entry_code_buffers();
  _cds_i2i_entry_code_buffers_size = mapinfo->cds_i2i_entry_code_buffers_size();
  // _core_spaces_size is loaded from the shared archive immediately after mapping
  assert(_core_spaces_size == mapinfo->core_spaces_size(), "sanity");
  char* buffer = mapinfo->misc_data_patching_start();
  clone_cpp_vtables((intptr_t*)buffer);

  // The rest of the data is now stored in the RW region
  buffer = mapinfo->read_only_tables_start();

  // Verify various attributes of the archive, plus initialize the
  // shared string/symbol tables
  intptr_t* array = (intptr_t*)buffer;
  ReadClosure rc(&array);
  serialize(&rc);

  // Initialize the run-time symbol table.
  SymbolTable::create_table();

  mapinfo->patch_archived_heap_embedded_pointers();

  // Close the mapinfo file
  mapinfo->close();

  if (PrintSharedArchiveAndExit) {
    if (PrintSharedDictionary) {
      tty->print_cr("\nShared classes:\n");
      SystemDictionaryShared::print_on(tty);
    }
    if (_archive_loading_failed) {
      tty->print_cr("archive is invalid");
      vm_exit(1);
    } else {
      tty->print_cr("archive is valid");
      vm_exit(0);
    }
  }
}

// JVM/TI RedefineClasses() support:
bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (UseSharedSpaces) {
    // Remap the shared readonly space to shared readwrite, private
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (!mapinfo->remap_shared_readonly_as_readwrite()) {
      return false;
    }
    _remapped_readwrite = true;
  }
  return true;
}

void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
  // This is highly unlikely to happen on 64-bit platforms, because we have reserved
  // a 4GB space. On 32-bit platforms we reserve only 256MB, so you could run out of
  // space with 100,000 classes or so.
  _mc_region.print_out_of_space_msg(name, needed_bytes);
  _rw_region.print_out_of_space_msg(name, needed_bytes);
  _ro_region.print_out_of_space_msg(name, needed_bytes);
  _md_region.print_out_of_space_msg(name, needed_bytes);
  _od_region.print_out_of_space_msg(name, needed_bytes);

  vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
                                "Please reduce the number of shared classes.");
}