/*
 * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/classListParser.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/loaderConstraints.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/placeholders.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "code/codeCache.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodes.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/signature.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/hashtable.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

ReservedSpace MetaspaceShared::_shared_rs;
VirtualSpace MetaspaceShared::_shared_vs;
MetaspaceSharedStats MetaspaceShared::_stats;
bool MetaspaceShared::_has_error_classes;
bool MetaspaceShared::_archive_loading_failed = false;
bool MetaspaceShared::_remapped_readwrite = false;
address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
size_t MetaspaceShared::_core_spaces_size = 0;

// The CDS archive is divided into the following regions:
//     mc  - misc code (the method entry trampolines)
//     rw  - read-write metadata
//     ro  - read-only metadata and read-only tables
//     md  - misc data (the c++ vtables)
//     od  - optional data (original class files)
//
//     ca0 - closed archive heap space #0
//     ca1 - closed archive heap space #1 (may be empty)
//     oa0 - open archive heap space #0
//     oa1 - open archive heap space #1 (may be empty)
//
// The mc, rw, ro, md and od regions are linearly allocated, starting from
// SharedBaseAddress, in the order of mc->rw->ro->md->od. The sizes of these 5 regions
// are page-aligned, and there's no gap between any consecutive regions.
//
// These 5 regions are populated in the following steps:
// [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
//     temporarily allocated outside of the shared regions. Only the method entry
//     trampolines are written into the mc region.
// [2] ArchiveCompactor copies RW metadata into the rw region.
// [3] ArchiveCompactor copies RO metadata into the ro region.
// [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
//     are copied into the ro region as read-only tables.
// [5] C++ vtables are copied into the md region.
// [6] Original class files are copied into the od region.
//
// The ca0/ca1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
// Their layout is independent of the other 5 regions.
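
// For context (illustrative only, using the standard HotSpot CDS flags): an
// archive laid out as described above is typically produced with
//
//   java -Xshare:dump
//
// and consumed on subsequent runs with -Xshare:on (or -Xshare:auto), with
// -XX:SharedArchiveFile=<path> naming a non-default archive file.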

class DumpRegion {
private:
  const char* _name;
  char* _base;
  char* _top;
  char* _end;
  bool _is_packed;

  char* expand_top_to(char* newtop) {
    assert(is_allocatable(), "must be initialized and not packed");
    assert(newtop >= _top, "must not grow backwards");
    if (newtop > _end) {
      MetaspaceShared::report_out_of_space(_name, newtop - _top);
      ShouldNotReachHere();
    }
    uintx delta = MetaspaceShared::object_delta_uintx(newtop);
    if (delta > MAX_SHARED_DELTA) {
      // This is just a sanity check and should not appear in any real world usage. This
      // happens only if you allocate more than 2GB of shared objects, which would require
      // millions of shared classes.
      vm_exit_during_initialization("Out of memory in the CDS archive",
                                    "Please reduce the number of shared classes.");
    }

    MetaspaceShared::commit_shared_space_to(newtop);
    _top = newtop;
    return _top;
  }

public:
  DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}

  char* allocate(size_t num_bytes, size_t alignment=BytesPerWord) {
    char* p = (char*)align_up(_top, alignment);
    char* newtop = p + align_up(num_bytes, alignment);
    expand_top_to(newtop);
    memset(p, 0, newtop - p);
    return p;
  }

  void append_intptr_t(intptr_t n) {
    assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
    intptr_t *p = (intptr_t*)_top;
    char* newtop = _top + sizeof(intptr_t);
    expand_top_to(newtop);
    *p = n;
  }

  char* base()      const { return _base;        }
  char* top()       const { return _top;         }
  char* end()       const { return _end;         }
  size_t reserved() const { return _end - _base; }
  size_t used()     const { return _top - _base; }
  bool is_packed()  const { return _is_packed;   }
  bool is_allocatable() const {
    return !is_packed() && _base != NULL;
  }

  void print(size_t total_bytes) const {
    tty->print_cr("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
                  _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()), p2i(_base));
  }
  void print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
    tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
               _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
    if (strcmp(_name, failing_region) == 0) {
      tty->print_cr(" required = %d", int(needed_bytes));
    } else {
      tty->cr();
    }
  }

  void init(const ReservedSpace* rs) {
    _base = _top = rs->base();
    _end = rs->end();
  }
  void init(char* b, char* t, char* e) {
    _base = b;
    _top = t;
    _end = e;
  }

  void pack(DumpRegion* next = NULL) {
    assert(!is_packed(), "sanity");
    _end = (char*)align_up(_top, Metaspace::reserve_alignment());
    _is_packed = true;
    if (next != NULL) {
      next->_base = next->_top = this->_end;
      next->_end = MetaspaceShared::shared_rs()->end();
    }
  }
  bool contains(char* p) {
    return base() <= p && p < top();
  }
};


DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;

char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
  return _mc_region.allocate(num_bytes);
}

char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
  return _ro_region.allocate(num_bytes);
}

char* MetaspaceShared::read_only_space_top() {
  return _ro_region.top();
}
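
// A minimal sketch of how the DumpRegion primitives above compose during
// dumping (illustrative only, not compiled; trampoline_bytes is a made-up
// size):
//
//   char* p = _mc_region.allocate(trampoline_bytes); // bump-allocate in mc
//   _mc_region.pack(&_rw_region);  // seal mc; rw starts at mc's aligned end
//   ... fill rw ...
//   _rw_region.pack(&_ro_region);  // seal rw; ro starts at rw's aligned end
//
// pack() aligns _top up to Metaspace::reserve_alignment(), which is what
// keeps the consecutive mc->rw->ro->md->od regions gap-free.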

void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
  assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");

  // If using shared space, open the file that contains the shared space
  // and map in the memory before initializing the rest of metaspace (so
  // the addresses don't conflict).
  address cds_address = NULL;
  FileMapInfo* mapinfo = new FileMapInfo();

  // Open the shared archive file, read and validate the header. If
  // initialization fails, shared spaces [UseSharedSpaces] are disabled
  // and the file is closed. The spaces are also mapped in now.
  if (mapinfo->initialize() && map_shared_spaces(mapinfo)) {
    size_t cds_total = core_spaces_size();
    cds_address = (address)mapinfo->region_addr(0);
#ifdef _LP64
    if (Metaspace::using_class_space()) {
      char* cds_end = (char*)(cds_address + cds_total);
      cds_end = (char *)align_up(cds_end, Metaspace::reserve_alignment());
      // If UseCompressedClassPointers is set then allocate the metaspace area
      // above the heap and above the CDS area (if it exists).
      Metaspace::allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
      // map_heap_regions() compares the current narrow oop and klass encodings
      // with the archived ones, so it must be done after all encodings are determined.
      mapinfo->map_heap_regions();
    }
    Universe::set_narrow_klass_range(CompressedClassSpaceSize);
#endif // _LP64
  } else {
    assert(!mapinfo->is_open() && !UseSharedSpaces,
           "archive file not closed or shared spaces not disabled.");
  }
}

void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
  assert(DumpSharedSpaces, "should be called for dump time only");
  const size_t reserve_alignment = Metaspace::reserve_alignment();
  bool large_pages = false; // No large pages when dumping the CDS archive.
  char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);

#ifdef _LP64
  // On 64-bit VM, the heap and class space layout will be the same as if
  // you're running in -Xshare:on mode:
  //
  //                              +-- SharedBaseAddress (default = 0x800000000)
  //                              v
  // +-..---------+---------+ ... +----+----+----+----+----+---------------+
  // |    Heap    | Archive |     | MC | RW | RO | MD | OD | class space   |
  // +-..---------+---------+ ... +----+----+----+----+----+---------------+
  // |<--   MaxHeapSize  -->|     |<-- UnscaledClassSpaceMax = 4GB ------->|
  //
  const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
  const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
#else
  // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
  size_t cds_total = align_down(256*M, reserve_alignment);
#endif

  // First try to reserve the space at the specified SharedBaseAddress.
  _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
  if (_shared_rs.is_reserved()) {
    assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
  } else {
    // Get a mmap region anywhere if the SharedBaseAddress fails.
    _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages);
  }
  if (!_shared_rs.is_reserved()) {
    vm_exit_during_initialization("Unable to reserve memory for shared space",
                                  err_msg(SIZE_FORMAT " bytes.", cds_total));
  }

#ifdef _LP64
  // During dump time, we allocate 4GB (UnscaledClassSpaceMax) of space and split it up:
  // + The upper 1 GB is used as the "temporary compressed class space" -- preload_classes()
  //   will store Klasses into this space.
  // + The lower 3 GB is used for the archive -- when preload_classes() is done,
  //   ArchiveCompactor will copy the class metadata into this space, first the RW parts,
  //   then the RO parts.

  assert(UseCompressedOops && UseCompressedClassPointers,
         "UseCompressedOops and UseCompressedClassPointers must be set");

  size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment);
  ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
  CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment);
  _shared_rs = _shared_rs.first_part(max_archive_size);

  // Set up compressed class pointers.
  Universe::set_narrow_klass_base((address)_shared_rs.base());
  // Set narrow_klass_shift to be LogKlassAlignmentInBytes. This is consistent
  // with AOT.
  Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
  // Set the range of klass addresses to 4GB.
  Universe::set_narrow_klass_range(cds_total);

  Metaspace::initialize_class_space(tmp_class_space);
  log_info(cds)("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());

  log_info(cds)("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                CompressedClassSpaceSize, p2i(tmp_class_space.base()));
#endif

  // Start with 0 committed bytes. The memory will be committed as needed by
  // MetaspaceShared::commit_shared_space_to().
  if (!_shared_vs.initialize(_shared_rs, 0)) {
    vm_exit_during_initialization("Unable to allocate memory for shared space");
  }

  _mc_region.init(&_shared_rs);
  SharedBaseAddress = (size_t)_shared_rs.base();
  tty->print_cr("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                _shared_rs.size(), p2i(_shared_rs.base()));
}
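
// Worked example of the split described above (illustrative): with cds_total =
// UnscaledClassSpaceMax = 4G and a reserve alignment that divides 4G evenly,
// max_archive_size = align_down(4G * 3 / 4, alignment) = 3G, so the archive
// keeps the lower 3G (_shared_rs.first_part) and the temporary class space
// gets the upper 1G (_shared_rs.last_part).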

// Called by universe_post_init()
void MetaspaceShared::post_initialize(TRAPS) {
  if (UseSharedSpaces) {
    int size = FileMapInfo::get_number_of_shared_paths();
    if (size > 0) {
      SystemDictionaryShared::allocate_shared_data_arrays(size, THREAD);
      FileMapHeader* header = FileMapInfo::current_info()->header();
      ClassLoaderExt::init_paths_start_index(header->_app_class_paths_start_index);
      ClassLoaderExt::init_app_module_paths_start_index(header->_app_module_paths_start_index);
    }
  }
}

static GrowableArray<Handle>* _extra_interned_strings = NULL;

void MetaspaceShared::read_extra_data(const char* filename, TRAPS) {
  _extra_interned_strings = new (ResourceObj::C_HEAP, mtInternal)GrowableArray<Handle>(10000, true);

  HashtableTextDump reader(filename);
  reader.check_version("VERSION: 1.0");

  while (reader.remain() > 0) {
    int utf8_length;
    int prefix_type = reader.scan_prefix(&utf8_length);
    ResourceMark rm(THREAD);
    if (utf8_length == 0x7fffffff) {
      // buf_len would overflow a 32-bit value.
      vm_exit_during_initialization(err_msg("string length too large: %d", utf8_length));
    }
    int buf_len = utf8_length+1;
    char* utf8_buffer = NEW_RESOURCE_ARRAY(char, buf_len);
    reader.get_utf8(utf8_buffer, utf8_length);
    utf8_buffer[utf8_length] = '\0';

    if (prefix_type == HashtableTextDump::SymbolPrefix) {
      SymbolTable::new_permanent_symbol(utf8_buffer, THREAD);
    } else {
      assert(prefix_type == HashtableTextDump::StringPrefix, "Sanity");
      oop s = StringTable::intern(utf8_buffer, THREAD);

      if (HAS_PENDING_EXCEPTION) {
        log_warning(cds, heap)("[line %d] extra interned string allocation failed; size too large: %d",
                               reader.last_line_no(), utf8_length);
        CLEAR_PENDING_EXCEPTION;
      } else {
#if INCLUDE_G1GC
        if (UseG1GC) {
          typeArrayOop body = java_lang_String::value(s);
          const HeapRegion* hr = G1CollectedHeap::heap()->heap_region_containing(body);
          if (hr->is_humongous()) {
            // Don't keep it alive, so it will be GC'ed before we dump the strings, in order
            // to maximize free heap space and minimize fragmentation.
            log_warning(cds, heap)("[line %d] extra interned string ignored; size too large: %d",
                                   reader.last_line_no(), utf8_length);
            continue;
          }
        }
#endif
        // Interned strings are GC'ed if there are no references to them, so
        // add a reference to keep this string alive.
        assert(s != NULL, "must succeed");
        Handle h(THREAD, s);
        _extra_interned_strings->append(h);
      }
    }
  }
}

void MetaspaceShared::commit_shared_space_to(char* newtop) {
  assert(DumpSharedSpaces, "dump-time only");
  char* base = _shared_rs.base();
  size_t need_committed_size = newtop - base;
  size_t has_committed_size = _shared_vs.committed_size();
  if (need_committed_size < has_committed_size) {
    return;
  }

  size_t min_bytes = need_committed_size - has_committed_size;
  size_t preferred_bytes = 1 * M;
  size_t uncommitted = _shared_vs.reserved_size() - has_committed_size;

  size_t commit = MAX2(min_bytes, preferred_bytes);
  assert(commit <= uncommitted, "sanity");

  bool result = _shared_vs.expand_by(commit, false);
  if (!result) {
    vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
                                          need_committed_size));
  }

  log_info(cds)("Expanding shared spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
                commit, _shared_vs.actual_committed_size(), _shared_vs.high());
}
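
// Worked example (illustrative): if newtop needs 3.5M committed and 3M is
// already committed, min_bytes = 0.5M and we commit MAX2(0.5M, 1M) = 1M.
// Committing the preferred 1M chunk rather than the exact shortfall means a
// run of small allocations doesn't pay for an expand_by() call each time.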

// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.

void MetaspaceShared::serialize(SerializeClosure* soc) {
  int tag = 0;
  soc->do_tag(--tag);

  // Verify the sizes of various metadata in the system.
  soc->do_tag(sizeof(Method));
  soc->do_tag(sizeof(ConstMethod));
  soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(ConstantPool));
  soc->do_tag(sizeof(ConstantPoolCache));
  soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
  soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(Symbol));

  // Dump/restore miscellaneous metadata.
  Universe::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore references to commonly used names and signatures.
  vmSymbols::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore the symbol/string/subgraph_info tables
  SymbolTable::serialize_shared_table_header(soc);
  StringTable::serialize_shared_table_header(soc);
  HeapShared::serialize_subgraph_info_table_header(soc);
  SystemDictionaryShared::serialize_dictionary_headers(soc);

  JavaClasses::serialize_offsets(soc);
  InstanceMirrorKlass::serialize_offsets(soc);
  soc->do_tag(--tag);

  soc->do_tag(666);
}
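
// Note on the do_tag() calls above: at dump time each tag value is written
// into the stream; at run time the matching read-side SerializeClosure checks
// that the value read back equals the expected one. Any change to the sizes
// and offsets verified above, or to the order of the serialized data, thus
// makes an old archive fail fast instead of being silently misinterpreted.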

address MetaspaceShared::cds_i2i_entry_code_buffers(size_t total_size) {
  if (DumpSharedSpaces) {
    if (_cds_i2i_entry_code_buffers == NULL) {
      _cds_i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
      _cds_i2i_entry_code_buffers_size = total_size;
    }
  } else if (UseSharedSpaces) {
    assert(_cds_i2i_entry_code_buffers != NULL, "must already be initialized");
  } else {
    return NULL;
  }

  assert(_cds_i2i_entry_code_buffers_size == total_size, "must not change");
  return _cds_i2i_entry_code_buffers;
}

// Global object for holding classes that have been loaded. Since this
// is run at a safepoint just before exit, this is the entire set of classes.
static GrowableArray<Klass*>* _global_klass_objects;

GrowableArray<Klass*>* MetaspaceShared::collected_klasses() {
  return _global_klass_objects;
}

static void collect_array_classes(Klass* k) {
  _global_klass_objects->append_if_missing(k);
  if (k->is_array_klass()) {
    // Add in the array classes too
    ArrayKlass* ak = ArrayKlass::cast(k);
    Klass* h = ak->higher_dimension();
    if (h != NULL) {
      h->array_klasses_do(collect_array_classes);
    }
  }
}

class CollectClassesClosure : public KlassClosure {
  void do_klass(Klass* k) {
    if (k->is_instance_klass() &&
        SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(k))) {
      // Don't add to the _global_klass_objects
    } else {
      _global_klass_objects->append_if_missing(k);
    }
    if (k->is_array_klass()) {
      // Add in the array classes too
      ArrayKlass* ak = ArrayKlass::cast(k);
      Klass* h = ak->higher_dimension();
      if (h != NULL) {
        h->array_klasses_do(collect_array_classes);
      }
    }
  }
};

static void remove_unshareable_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
      // on their array classes.
      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
      k->remove_unshareable_info();
    }
  }
}

static void remove_java_mirror_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_java_mirror
      // on their array classes.
      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
      k->remove_java_mirror();
    }
  }
}

static void clear_basic_type_mirrors() {
  assert(!HeapShared::is_heap_object_archiving_allowed(), "Sanity");
  Universe::set_int_mirror(NULL);
  Universe::set_float_mirror(NULL);
  Universe::set_double_mirror(NULL);
  Universe::set_byte_mirror(NULL);
  Universe::set_bool_mirror(NULL);
  Universe::set_char_mirror(NULL);
  Universe::set_long_mirror(NULL);
  Universe::set_short_mirror(NULL);
  Universe::set_void_mirror(NULL);
}

static void rewrite_nofast_bytecode(Method* method) {
  BytecodeStream bcs(method);
  while (!bcs.is_last_bytecode()) {
    Bytecodes::Code opcode = bcs.next();
    switch (opcode) {
    case Bytecodes::_getfield: *bcs.bcp() = Bytecodes::_nofast_getfield; break;
    case Bytecodes::_putfield: *bcs.bcp() = Bytecodes::_nofast_putfield; break;
    case Bytecodes::_aload_0:  *bcs.bcp() = Bytecodes::_nofast_aload_0;  break;
    case Bytecodes::_iload: {
      if (!bcs.is_wide()) {
        *bcs.bcp() = Bytecodes::_nofast_iload;
      }
      break;
    }
    default: break;
    }
  }
}

// Walk all methods in the class list to ensure that they won't be modified at
// run time. This includes:
// [1] Rewrite all bytecodes as needed, so that the ConstMethod* will not be modified
//     at run time by RewriteBytecodes/RewriteFrequentPairs
// [2] Assign a fingerprint, so one doesn't need to be assigned at run-time.
static void rewrite_nofast_bytecodes_and_calculate_fingerprints() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      for (int j = 0; j < ik->methods()->length(); j++) {
        Method* m = ik->methods()->at(j);
        rewrite_nofast_bytecode(m);
        Fingerprinter fp(m);
        // The side effect of this call sets the method's fingerprint field.
        fp.fingerprint();
      }
    }
  }
}

static void relocate_cached_class_file() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      JvmtiCachedClassFileData* p = ik->get_archived_class_data();
      if (p != NULL) {
        int size = offset_of(JvmtiCachedClassFileData, data) + p->length;
        JvmtiCachedClassFileData* q = (JvmtiCachedClassFileData*)_od_region.allocate(size);
        q->length = p->length;
        memcpy(q->data, p->data, p->length);
        ik->set_archived_class_data(q);
      }
    }
  }
}

// Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
// (In GCC this is the field <Type>::_vptr, i.e., first word in the object.)
//
// Addresses of the vtables and the methods may be different across JVM runs,
// if libjvm.so is dynamically loaded at a different base address.
//
// To ensure that the Metadata objects in the CDS archive always have the correct vtable:
//
// + at dump time:  we redirect the _vptr to point to our own vtables inside
//                  the CDS image
// + at run time:   we clone the actual contents of the vtables from libjvm.so
//                  into our own tables.

// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
#define CPP_VTABLE_PATCH_TYPES_DO(f) \
  f(ConstantPool) \
  f(InstanceKlass) \
  f(InstanceClassLoaderKlass) \
  f(InstanceMirrorKlass) \
  f(InstanceRefKlass) \
  f(Method) \
  f(ObjArrayKlass) \
  f(TypeArrayKlass)

class CppVtableInfo {
  intptr_t _vtable_size;
  intptr_t _cloned_vtable[1];
public:
  static int num_slots(int vtable_size) {
    return 1 + vtable_size; // Need to add the space occupied by _vtable_size.
  }
  int vtable_size()           { return int(uintx(_vtable_size)); }
  void set_vtable_size(int n) { _vtable_size = intptr_t(n); }
  intptr_t* cloned_vtable()   { return &_cloned_vtable[0]; }
  void zero()                 { memset(_cloned_vtable, 0, sizeof(intptr_t) * vtable_size()); }
  // Returns the address of the next CppVtableInfo that can be placed immediately after this CppVtableInfo
  static size_t byte_size(int vtable_size) {
    CppVtableInfo i;
    return pointer_delta(&i._cloned_vtable[vtable_size], &i, sizeof(u1));
  }
};

template <class T> class CppVtableCloner : public T {
  static intptr_t* vtable_of(Metadata& m) {
    return *((intptr_t**)&m);
  }
  static CppVtableInfo* _info;

  static int get_vtable_length(const char* name);

public:
  // Allocate and initialize the C++ vtable, starting from top, but do not go past end.
  static intptr_t* allocate(const char* name);

  // Clone the vtable to ...
  static intptr_t* clone_vtable(const char* name, CppVtableInfo* info);

  static void zero_vtable_clone() {
    assert(DumpSharedSpaces, "dump-time only");
    _info->zero();
  }

  // Switch the vtable pointer to point to the cloned vtable.
  static void patch(Metadata* obj) {
    assert(DumpSharedSpaces, "dump-time only");
    *(void**)obj = (void*)(_info->cloned_vtable());
  }

  static bool is_valid_shared_object(const T* obj) {
    intptr_t* vptr = *(intptr_t**)obj;
    return vptr == _info->cloned_vtable();
  }
};

template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;

template <class T>
intptr_t* CppVtableCloner<T>::allocate(const char* name) {
  assert(is_aligned(_md_region.top(), sizeof(intptr_t)), "bad alignment");
  int n = get_vtable_length(name);
  _info = (CppVtableInfo*)_md_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
  _info->set_vtable_size(n);

  intptr_t* p = clone_vtable(name, _info);
  assert((char*)p == _md_region.top(), "must be");

  return p;
}

template <class T>
intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) {
  if (!DumpSharedSpaces) {
    assert(_info == 0, "_info is initialized only at dump time");
    _info = info; // Remember it -- it will be used by MetaspaceShared::is_valid_shared_method()
  }
  T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
  int n = info->vtable_size();
  intptr_t* srcvtable = vtable_of(tmp);
  intptr_t* dstvtable = info->cloned_vtable();

  // We already checked (and, if necessary, adjusted n) when the vtables were allocated, so we are
  // safe to do memcpy.
  log_debug(cds, vtables)("Copying %3d vtable entries for %s", n, name);
  memcpy(dstvtable, srcvtable, sizeof(intptr_t) * n);
  return dstvtable + n;
}

// To determine the size of the vtable for each type, we use the following
// trick by declaring 2 subclasses:
//
//   class CppVtableTesterA: public InstanceKlass {virtual int   last_virtual_method() {return 1;}    };
//   class CppVtableTesterB: public InstanceKlass {virtual void* last_virtual_method() {return NULL;} };
//
// CppVtableTesterA and CppVtableTesterB's vtables have the following properties:
// - Their size (N+1) is exactly one more than the size of InstanceKlass's vtable (N)
// - The first N entries are exactly the same as in InstanceKlass's vtable.
// - Their last entry is different.
//
// So to determine the value of N, we just walk CppVtableTesterA and CppVtableTesterB's tables
// and find the first entry that's different.
//
// This works on all C++ compilers supported by Oracle, but you may need to tweak it for more
// esoteric compilers.
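
// A self-contained sketch of the trick described above (illustrative only,
// not part of the build). For a base class B with N virtual methods:
//
//   class TesterA : public B { public: virtual void* last() { return NULL; } };
//   class TesterB : public B { public: virtual int   last() { return 1;    } };
//
//   TesterA a;
//   TesterB b;
//   intptr_t* va = *(intptr_t**)&a;
//   intptr_t* vb = *(intptr_t**)&b;
//   int n = 1;                    // slot 0 may be RTTI, so start at slot 1
//   while (va[n] == vb[n]) n++;   // the first difference is the appended slot
//   // B's vtable has n usable entries; see get_vtable_length() below.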

template <class T> class CppVtableTesterB: public T {
public:
  virtual int last_virtual_method() {return 1;}
};

template <class T> class CppVtableTesterA : public T {
public:
  virtual void* last_virtual_method() {
    // Make this different than CppVtableTesterB::last_virtual_method so the C++
    // compiler/linker won't alias the two functions.
    return NULL;
  }
};

template <class T>
int CppVtableCloner<T>::get_vtable_length(const char* name) {
  CppVtableTesterA<T> a;
  CppVtableTesterB<T> b;

  intptr_t* avtable = vtable_of(a);
  intptr_t* bvtable = vtable_of(b);

  // Start at slot 1, because slot 0 may be RTTI (on Solaris/Sparc)
  int vtable_len = 1;
  for (; ; vtable_len++) {
    if (avtable[vtable_len] != bvtable[vtable_len]) {
      break;
    }
  }
  log_debug(cds, vtables)("Found %3d vtable entries for %s", vtable_len, name);

  return vtable_len;
}

#define ALLOC_CPP_VTABLE_CLONE(c) \
  CppVtableCloner<c>::allocate(#c);

#define CLONE_CPP_VTABLE(c) \
  p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);

#define ZERO_CPP_VTABLE(c) \
  CppVtableCloner<c>::zero_vtable_clone();

// This can be called at both dump time and run time.
intptr_t* MetaspaceShared::clone_cpp_vtables(intptr_t* p) {
  assert(DumpSharedSpaces || UseSharedSpaces, "sanity");
  CPP_VTABLE_PATCH_TYPES_DO(CLONE_CPP_VTABLE);
  return p;
}

void MetaspaceShared::zero_cpp_vtable_clones_for_writing() {
  assert(DumpSharedSpaces, "dump-time only");
  CPP_VTABLE_PATCH_TYPES_DO(ZERO_CPP_VTABLE);
}

// Allocate and initialize the C++ vtables, starting from top, but do not go past end.
void MetaspaceShared::allocate_cpp_vtable_clones() {
  assert(DumpSharedSpaces, "dump-time only");
  // Layout (each slot is an intptr_t):
  //   [number of slots in the first vtable = n1]
  //   [ <n1> slots for the first vtable]
  //   [number of slots in the second vtable = n2]
  //   [ <n2> slots for the second vtable]
  //   ...
  // The order of the vtables is the same as in the CPP_VTABLE_PATCH_TYPES_DO macro.
  CPP_VTABLE_PATCH_TYPES_DO(ALLOC_CPP_VTABLE_CLONE);
}

// Switch the vtable pointer to point to the cloned vtable. We assume the
// vtable pointer is in the first slot of the object.
void MetaspaceShared::patch_cpp_vtable_pointers() {
  int n = _global_klass_objects->length();
  for (int i = 0; i < n; i++) {
    Klass* obj = _global_klass_objects->at(i);
    if (obj->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(obj);
      if (ik->is_class_loader_instance_klass()) {
        CppVtableCloner<InstanceClassLoaderKlass>::patch(ik);
      } else if (ik->is_reference_instance_klass()) {
        CppVtableCloner<InstanceRefKlass>::patch(ik);
      } else if (ik->is_mirror_instance_klass()) {
        CppVtableCloner<InstanceMirrorKlass>::patch(ik);
      } else {
        CppVtableCloner<InstanceKlass>::patch(ik);
      }
      ConstantPool* cp = ik->constants();
      CppVtableCloner<ConstantPool>::patch(cp);
      for (int j = 0; j < ik->methods()->length(); j++) {
        Method* m = ik->methods()->at(j);
        CppVtableCloner<Method>::patch(m);
        assert(CppVtableCloner<Method>::is_valid_shared_object(m), "must be");
      }
    } else if (obj->is_objArray_klass()) {
      CppVtableCloner<ObjArrayKlass>::patch(obj);
    } else {
      assert(obj->is_typeArray_klass(), "sanity");
      CppVtableCloner<TypeArrayKlass>::patch(obj);
    }
  }
}

bool MetaspaceShared::is_valid_shared_method(const Method* m) {
  assert(is_in_shared_metaspace(m), "must be");
  return CppVtableCloner<Method>::is_valid_shared_object(m);
}

// Closure for serializing initialization data out to a data area to be
// written to the shared file.

class WriteClosure : public SerializeClosure {
private:
  DumpRegion* _dump_region;

public:
  WriteClosure(DumpRegion* r) {
    _dump_region = r;
  }

  void do_ptr(void** p) {
    _dump_region->append_intptr_t((intptr_t)*p);
  }

  void do_u4(u4* p) {
    void* ptr = (void*)(uintx(*p));
    do_ptr(&ptr);
  }

  void do_tag(int tag) {
    _dump_region->append_intptr_t((intptr_t)tag);
  }

  void do_oop(oop* o) {
    if (*o == NULL) {
      _dump_region->append_intptr_t(0);
    } else {
      assert(HeapShared::is_heap_object_archiving_allowed(),
             "Archiving heap object is not allowed");
      _dump_region->append_intptr_t(
        (intptr_t)CompressedOops::encode_not_null(*o));
    }
  }

  void do_region(u_char* start, size_t size) {
    assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
    assert(size % sizeof(intptr_t) == 0, "bad size");
    do_tag((int)size);
    while (size > 0) {
      _dump_region->append_intptr_t(*(intptr_t*)start);
      start += sizeof(intptr_t);
      size -= sizeof(intptr_t);
    }
  }

  bool reading() const { return false; }
};
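
// WriteClosure has a mirror-image read-side SerializeClosure that is used at
// run time and consumes intptr_t values in exactly the order they are
// appended here; every do_ptr/do_tag/do_oop/do_region call above must stay in
// lock-step with its counterpart on the restore path.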

// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public ResourceObj {
public:

  // Here's poor man's enum inheritance
#define SHAREDSPACE_OBJ_TYPES_DO(f) \
  METASPACE_OBJ_TYPES_DO(f) \
  f(SymbolHashentry) \
  f(SymbolBucket) \
  f(StringHashentry) \
  f(StringBucket) \
  f(Other)

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  static const char* type_name(Type type) {
    switch(type) {
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
    default:
      ShouldNotReachHere();
      return NULL;
    }
  }

public:
  enum { RO = 0, RW = 1 };

  int _counts[2][_number_of_types];
  int _bytes [2][_number_of_types];

  DumpAllocStats() {
    memset(_counts, 0, sizeof(_counts));
    memset(_bytes, 0, sizeof(_bytes));
  }

  void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
    int which = (read_only) ? RO : RW;
    _counts[which][type] ++;
    _bytes [which][type] += byte_size;
  }

  void record_other_type(int byte_size, bool read_only) {
    int which = (read_only) ? RO : RW;
    _bytes [which][OtherType] += byte_size;
  }
  void print_stats(int ro_all, int rw_all, int mc_all, int md_all);
};

void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all, int md_all) {
  // Calculate size of data that was not allocated by Metaspace::allocate()
  MetaspaceSharedStats *stats = MetaspaceShared::stats();

  // symbols
  _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
  _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;

  _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
  _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;

  // strings
  _counts[RO][StringHashentryType] = stats->string.hashentry_count;
  _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;

  _counts[RO][StringBucketType] = stats->string.bucket_count;
  _bytes [RO][StringBucketType] = stats->string.bucket_bytes;

  // TODO: count things like dictionary, vtable, etc
  _bytes[RW][OtherType] += mc_all + md_all;
  rw_all += mc_all + md_all; // mc/md are mapped Read/Write

  // prevent divide-by-zero
  if (ro_all < 1) {
    ro_all = 1;
  }
  if (rw_all < 1) {
    rw_all = 1;
  }

  int all_ro_count = 0;
  int all_ro_bytes = 0;
  int all_rw_count = 0;
  int all_rw_bytes = 0;

// To make fmt_stats be a syntactic constant (for format warnings), use #define.
#define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
  const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
  const char *hdr = "                        ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";

  LogMessage(cds) msg;

  msg.info("Detailed metadata info (excluding od/st regions; rw stats include md/mc regions):");
  msg.info("%s", hdr);
  msg.info("%s", sep);
  for (int type = 0; type < int(_number_of_types); type ++) {
    const char *name = type_name((Type)type);
    int ro_count = _counts[RO][type];
    int ro_bytes = _bytes [RO][type];
    int rw_count = _counts[RW][type];
    int rw_bytes = _bytes [RW][type];
    int count = ro_count + rw_count;
    int bytes = ro_bytes + rw_bytes;

    double ro_perc = percent_of(ro_bytes, ro_all);
    double rw_perc = percent_of(rw_bytes, rw_all);
    double perc    = percent_of(bytes, ro_all + rw_all);

    msg.info(fmt_stats, name,
             ro_count, ro_bytes, ro_perc,
             rw_count, rw_bytes, rw_perc,
             count, bytes, perc);

    all_ro_count += ro_count;
    all_ro_bytes += ro_bytes;
    all_rw_count += rw_count;
    all_rw_bytes += rw_bytes;
  }

  int all_count = all_ro_count + all_rw_count;
  int all_bytes = all_ro_bytes + all_rw_bytes;

  double all_ro_perc = percent_of(all_ro_bytes, ro_all);
  double all_rw_perc = percent_of(all_rw_bytes, rw_all);
  double all_perc    = percent_of(all_bytes, ro_all + rw_all);

  msg.info("%s", sep);
  msg.info(fmt_stats, "Total",
           all_ro_count, all_ro_bytes, all_ro_perc,
           all_rw_count, all_rw_bytes, all_rw_perc,
           all_count, all_bytes, all_perc);

  assert(all_ro_bytes == ro_all, "everything should have been counted");
  assert(all_rw_bytes == rw_all, "everything should have been counted");

#undef fmt_stats
}

// Populate the shared space.

class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  GrowableArray<MemRegion> *_closed_archive_heap_regions;
  GrowableArray<MemRegion> *_open_archive_heap_regions;

  GrowableArray<ArchiveHeapOopmapInfo> *_closed_archive_heap_oopmaps;
  GrowableArray<ArchiveHeapOopmapInfo> *_open_archive_heap_oopmaps;

  void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                 GrowableArray<ArchiveHeapOopmapInfo>* oopmaps);
  void dump_symbols();
  char* dump_read_only_tables();
  void print_region_stats();
  void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                               const char *name, const size_t total_size);
public:

  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit();   // outline because gdb sucks
  static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only, bool allow_exec);
  bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace

class SortedSymbolClosure: public SymbolClosure {
  GrowableArray<Symbol*> _symbols;
  virtual void do_symbol(Symbol** sym) {
    assert((*sym)->is_permanent(), "archived symbols must be permanent");
    _symbols.append(*sym);
  }
  static int compare_symbols_by_address(Symbol** a, Symbol** b) {
    if (a[0] < b[0]) {
      return -1;
    } else if (a[0] == b[0]) {
      return 0;
    } else {
      return 1;
    }
  }

public:
  SortedSymbolClosure() {
    SymbolTable::symbols_do(this);
    _symbols.sort(compare_symbols_by_address);
  }
  GrowableArray<Symbol*>* get_sorted_symbols() {
    return &_symbols;
  }
};

// ArchiveCompactor --
//
// This class is the central piece of shared archive compaction -- all metaspace data are
// initially allocated outside of the shared regions. ArchiveCompactor copies the
// metaspace data into their final location in the shared regions.
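
// The compaction performed by ArchiveCompactor::copy_and_compact() below runs
// in phases (illustrative summary of the code that follows):
//   1. ShallowCopier(read_only=false)      -- copy each RW MetaspaceObj into _rw_region
//   2. ShallowCopier(read_only=true)       -- copy each RO MetaspaceObj into _ro_region
//   3. ShallowCopyEmbeddedRefRelocator     -- fix pointers embedded in the copies
//   4. RefRelocator                        -- fix external roots to point at the copies
//   5. IsRefInArchiveChecker (ASSERT only) -- verify everything now points into rw/ro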

class ArchiveCompactor : AllStatic {
  static const int INITIAL_TABLE_SIZE = 8087;
  static const int MAX_TABLE_SIZE     = 1000000;

  static DumpAllocStats* _alloc_stats;
  static SortedSymbolClosure* _ssc;

  typedef KVHashtable<address, address, mtInternal> RelocationTable;
  static RelocationTable* _new_loc_table;

public:
  static void initialize() {
    _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats;
    _new_loc_table = new RelocationTable(INITIAL_TABLE_SIZE);
  }
  static DumpAllocStats* alloc_stats() {
    return _alloc_stats;
  }

  // Use this when you allocate space with MetaspaceShared::read_only_space_alloc()
  // outside of ArchiveCompactor::allocate(). These are usually for misc tables
  // that are allocated in the RO space.
  class OtherROAllocMark {
    char* _oldtop;
  public:
    OtherROAllocMark() {
      _oldtop = _ro_region.top();
    }
    ~OtherROAllocMark() {
      char* newtop = _ro_region.top();
      ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
    }
  };

  static void allocate(MetaspaceClosure::Ref* ref, bool read_only) {
    address obj = ref->obj();
    int bytes = ref->size() * BytesPerWord;
    char* p;
    size_t alignment = BytesPerWord;
    char* oldtop;
    char* newtop;

    if (read_only) {
      oldtop = _ro_region.top();
      p = _ro_region.allocate(bytes, alignment);
      newtop = _ro_region.top();
    } else {
      oldtop = _rw_region.top();
      if (ref->msotype() == MetaspaceObj::ClassType) {
        // Save a pointer immediately in front of an InstanceKlass, so
        // we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo*
        // without building another hashtable. See RunTimeSharedClassInfo::get_for()
        // in systemDictionaryShared.cpp.
        Klass* klass = (Klass*)obj;
        if (klass->is_instance_klass()) {
          SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
          _rw_region.allocate(sizeof(address), BytesPerWord);
        }
      }
      p = _rw_region.allocate(bytes, alignment);
      newtop = _rw_region.top();
    }
    memcpy(p, obj, bytes);
    assert(_new_loc_table->lookup(obj) == NULL, "each object can be relocated at most once");
    _new_loc_table->add(obj, (address)p);
    log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
    if (_new_loc_table->maybe_grow(MAX_TABLE_SIZE)) {
      log_info(cds, hashtables)("Expanded _new_loc_table to %d", _new_loc_table->table_size());
    }
    _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
  }

  static address get_new_loc(MetaspaceClosure::Ref* ref) {
    address* pp = _new_loc_table->lookup(ref->obj());
    assert(pp != NULL, "must be");
    return *pp;
  }

private:
  // Makes a shallow copy of visited MetaspaceObj's
  class ShallowCopier: public UniqueMetaspaceClosure {
    bool _read_only;
  public:
    ShallowCopier(bool read_only) : _read_only(read_only) {}

    virtual void do_unique_ref(Ref* ref, bool read_only) {
      if (read_only == _read_only) {
        allocate(ref, read_only);
      }
    }
  };

  // Relocate embedded pointers within a MetaspaceObj's shallow copy
  class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
  public:
    virtual void do_unique_ref(Ref* ref, bool read_only) {
      address new_loc = get_new_loc(ref);
      RefRelocator refer;
      ref->metaspace_pointers_do_at(&refer, new_loc);
    }
  };

  // Relocate a reference to point to its shallow copy
  class RefRelocator: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        ref->update(get_new_loc(ref));
      }
      return false; // Do not recurse.
    }
  };

#ifdef ASSERT
  class IsRefInArchiveChecker: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        char* obj = (char*)ref->obj();
        assert(_ro_region.contains(obj) || _rw_region.contains(obj),
               "must be relocated to point to CDS archive");
      }
      return false; // Do not recurse.
    }
  };
#endif

public:
  static void copy_and_compact() {
    ResourceMark rm;
    SortedSymbolClosure the_ssc; // StackObj
    _ssc = &the_ssc;

    tty->print_cr("Scanning all metaspace objects ... ");
    {
      // allocate and shallow-copy RW objects, immediately following the MC region
      tty->print_cr("Allocating RW objects ... ");
      _mc_region.pack(&_rw_region);

      ResourceMark rm;
      ShallowCopier rw_copier(false);
      iterate_roots(&rw_copier);
    }
    {
      // allocate and shallow-copy RO objects, immediately following the RW region
      tty->print_cr("Allocating RO objects ... ");
      _rw_region.pack(&_ro_region);

      ResourceMark rm;
      ShallowCopier ro_copier(true);
      iterate_roots(&ro_copier);
    }
    {
      tty->print_cr("Relocating embedded pointers ... ");
      ResourceMark rm;
      ShallowCopyEmbeddedRefRelocator emb_reloc;
      iterate_roots(&emb_reloc);
    }
    {
      tty->print_cr("Relocating external roots ... ");
      ResourceMark rm;
      RefRelocator ext_reloc;
      iterate_roots(&ext_reloc);
    }

#ifdef ASSERT
    {
      tty->print_cr("Verifying external roots ... ");
      ResourceMark rm;
      IsRefInArchiveChecker checker;
      iterate_roots(&checker);
    }
#endif

    // cleanup
    _ssc = NULL;
  }

  // We must relocate SystemDictionary::_well_known_klasses only after we have copied the
  // java objects in during dump_java_heap_objects(): during the object copy, we operate on
  // old objects which assert that their klass is the original klass.
  static void relocate_well_known_klasses() {
    {
      tty->print_cr("Relocating SystemDictionary::_well_known_klasses[] ... ");
      ResourceMark rm;
      RefRelocator ext_reloc;
      SystemDictionary::well_known_klasses_do(&ext_reloc);
    }
    // NOTE: after this point, we shouldn't have any globals that can reach the old
    // objects.

    // We cannot use any of the objects in the heap anymore (except for the
    // shared strings) because their headers no longer point to valid Klasses.
  }

  static void iterate_roots(MetaspaceClosure* it) {
    GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols();
    for (int i=0; i<symbols->length(); i++) {
      it->push(symbols->adr_at(i));
    }
    if (_global_klass_objects != NULL) {
      // Need to fix up the pointers
      for (int i = 0; i < _global_klass_objects->length(); i++) {
        // NOTE -- this requires that the vtable is NOT yet patched, or else we are hosed.
        it->push(_global_klass_objects->adr_at(i));
      }
    }
    FileMapInfo::metaspace_pointers_do(it);
    SystemDictionaryShared::dumptime_classes_do(it);
    Universe::metaspace_pointers_do(it);
    SymbolTable::metaspace_pointers_do(it);
    vmSymbols::metaspace_pointers_do(it);
  }

  static Klass* get_relocated_klass(Klass* orig_klass) {
    assert(DumpSharedSpaces, "dump time only");
    address* pp = _new_loc_table->lookup((address)orig_klass);
    assert(pp != NULL, "must be");
    Klass* klass = (Klass*)(*pp);
    assert(klass->is_klass(), "must be");
    return klass;
  }
};

DumpAllocStats* ArchiveCompactor::_alloc_stats;
SortedSymbolClosure* ArchiveCompactor::_ssc;
ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table;

void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx,
                                              DumpRegion* dump_region, bool read_only, bool allow_exec) {
  mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
}

void VM_PopulateDumpSharedSpace::dump_symbols() {
  tty->print_cr("Dumping symbol table ...");

  NOT_PRODUCT(SymbolTable::verify());
  SymbolTable::write_to_archive();
}

char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
  ArchiveCompactor::OtherROAllocMark mark;

  tty->print("Removing java_mirror ... ");
  if (!HeapShared::is_heap_object_archiving_allowed()) {
    clear_basic_type_mirrors();
  }
  remove_java_mirror_in_classes();
  tty->print_cr("done. ");

  SystemDictionaryShared::write_to_archive();

  char* start = _ro_region.top();

  // Write the other data to the output array.
  WriteClosure wc(&_ro_region);
  MetaspaceShared::serialize(&wc);

  // Write the bitmaps for patching the archive heap regions
  dump_archive_heap_oopmaps();

  return start;
}

void VM_PopulateDumpSharedSpace::doit() {
  // We should no longer allocate anything from the metaspace, so that:
  //
  // (1) Metaspace::allocate might trigger GC if we have run out of
  //     committed metaspace, but we can't GC because we're running
  //     in the VM thread.
  // (2) ArchiveCompactor needs to work with a stable set of MetaspaceObjs.
  Metaspace::freeze();

  Thread* THREAD = VMThread::vm_thread();

  FileMapInfo::check_nonempty_dir_in_shared_path_table();

  NOT_PRODUCT(SystemDictionary::verify();)
  // The following guarantee is meant to ensure that no loader constraints
  // exist yet, since the constraints table is not shared. This becomes
  // more important now that we don't re-initialize vtables/itables for
  // shared classes at runtime, where constraints were previously created.
  guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
            "loader constraints are not saved");
  guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
            "placeholders are not saved");

  // At this point, many classes have been loaded.
  // Gather the loaded classes into a global array and work on that array from
  // now on, so we don't have to walk the SystemDictionary again.
  SystemDictionaryShared::check_excluded_classes();
  _global_klass_objects = new GrowableArray<Klass*>(1000);
  CollectClassesClosure collect_classes;
  ClassLoaderDataGraph::loaded_classes_do(&collect_classes);

  tty->print_cr("Number of classes %d", _global_klass_objects->length());
  {
    int num_type_array = 0, num_obj_array = 0, num_inst = 0;
    for (int i = 0; i < _global_klass_objects->length(); i++) {
      Klass* k = _global_klass_objects->at(i);
      if (k->is_instance_klass()) {
        num_inst ++;
      } else if (k->is_objArray_klass()) {
        num_obj_array ++;
      } else {
        assert(k->is_typeArray_klass(), "sanity");
        num_type_array ++;
      }
    }
    tty->print_cr("    instance classes   = %5d", num_inst);
    tty->print_cr("    obj array classes  = %5d", num_obj_array);
    tty->print_cr("    type array classes = %5d", num_type_array);
  }

  // Ensure the ConstMethods won't be modified at run-time
  tty->print("Updating ConstMethods ... ");
  rewrite_nofast_bytecodes_and_calculate_fingerprints();
  tty->print_cr("done. ");

  // Remove all references outside the metadata
  tty->print("Removing unshareable information ... ");
  remove_unshareable_in_classes();
  tty->print_cr("done. ");

  ArchiveCompactor::initialize();
  ArchiveCompactor::copy_and_compact();

  dump_symbols();

  // Dump supported java heap objects
  _closed_archive_heap_regions = NULL;
  _open_archive_heap_regions = NULL;
  dump_java_heap_objects();

  ArchiveCompactor::relocate_well_known_klasses();

  char* read_only_tables_start = dump_read_only_tables();
  _ro_region.pack(&_md_region);

  char* vtbl_list = _md_region.top();
  MetaspaceShared::allocate_cpp_vtable_clones();
  _md_region.pack(&_od_region);

  // Relocate the archived class file data into the od region
  relocate_cached_class_file();
  _od_region.pack();

  // The 5 core spaces are allocated consecutively mc->rw->ro->md->od, so their total size
  // is just the span between the two ends.
  size_t core_spaces_size = _od_region.end() - _mc_region.base();
  assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
         "should already be aligned");

  // During patching, some virtual methods may be called, so at this point
  // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
  MetaspaceShared::patch_cpp_vtable_pointers();

  // The vtable clones contain addresses of the current process.
  // We don't want to write these addresses into the archive.
  MetaspaceShared::zero_cpp_vtable_clones_for_writing();

  // Create and write the archive file that maps the shared spaces.

  FileMapInfo* mapinfo = new FileMapInfo();
  mapinfo->populate_header(os::vm_allocation_granularity());
  mapinfo->set_read_only_tables_start(read_only_tables_start);
  mapinfo->set_misc_data_patching_start(vtbl_list);
  mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers());
  mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size());
  mapinfo->set_core_spaces_size(core_spaces_size);

  for (int pass=1; pass<=2; pass++) {
    bool print_archive_log = (pass==1);
    if (pass == 1) {
      // The first pass doesn't actually write the data to disk. All it
      // does is to update the fields in the mapinfo->_header.
    } else {
      // After the first pass, the contents of mapinfo->_header are finalized,
      // so we can compute the header's CRC, and write the contents of the header
      // and the regions into disk.
      mapinfo->open_for_write();
      mapinfo->set_header_crc(mapinfo->compute_header_crc());
    }
    mapinfo->write_header();

    // NOTE: mc contains the trampoline code for method entries, and md contains the
    // cloned C++ vtables; both are patched at run time, so they need to be read/write.
    write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
    write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
    write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
    write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
    write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);

    _total_closed_archive_region_size = mapinfo->write_archive_heap_regions(
                                          _closed_archive_heap_regions,
                                          _closed_archive_heap_oopmaps,
                                          MetaspaceShared::first_closed_archive_heap_region,
                                          MetaspaceShared::max_closed_archive_heap_region,
                                          print_archive_log);
    _total_open_archive_region_size = mapinfo->write_archive_heap_regions(
                                          _open_archive_heap_regions,
                                          _open_archive_heap_oopmaps,
                                          MetaspaceShared::first_open_archive_heap_region,
                                          MetaspaceShared::max_open_archive_heap_region,
                                          print_archive_log);
  }

  mapinfo->close();

  // Restore the vtable in case we invoke any virtual methods.
  MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);

  print_region_stats();

  if (log_is_enabled(Info, cds)) {
    ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
                                                 int(_mc_region.used()), int(_md_region.used()));
  }

  if (PrintSystemDictionaryAtExit) {
    SystemDictionary::print();
  }

  if (AllowArchivingWithJavaAgent) {
    warning("This archive was created with AllowArchivingWithJavaAgent. It should be used "
            "for testing purposes only and should not be used in a production environment");
  }

  // There may be other pending VM operations that operate on the InstanceKlasses,
  // which will fail because InstanceKlasses::remove_unshareable_info()
  // has been called. Forget these operations and exit the VM directly.
  vm_direct_exit(0);
}
  vm_direct_exit(0);
}

void VM_PopulateDumpSharedSpace::print_region_stats() {
  // Print statistics of all the regions
  const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
                                _mc_region.reserved() + _md_region.reserved() +
                                _od_region.reserved() +
                                _total_closed_archive_region_size +
                                _total_open_archive_region_size;
  const size_t total_bytes = _ro_region.used() + _rw_region.used() +
                             _mc_region.used() + _md_region.used() +
                             _od_region.used() +
                             _total_closed_archive_region_size +
                             _total_open_archive_region_size;
  const double total_u_perc = percent_of(total_bytes, total_reserved);

  _mc_region.print(total_reserved);
  _rw_region.print(total_reserved);
  _ro_region.print(total_reserved);
  _md_region.print(total_reserved);
  _od_region.print(total_reserved);
  print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved);
  print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);

  tty->print_cr("total    : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
                total_bytes, total_reserved, total_u_perc);
}

void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                                                         const char *name, const size_t total_size) {
  int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
  for (int i = 0; i < arr_len; i++) {
    char* start = (char*)heap_mem->at(i).start();
    size_t size = heap_mem->at(i).byte_size();
    char* top = start + size;
    tty->print_cr("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
                  name, i, size, size/double(total_size)*100.0, size, p2i(start));
  }
}

// Update a Java object to point its Klass* to the new location after
// the shared archive has been compacted.
void MetaspaceShared::relocate_klass_ptr(oop o) {
  assert(DumpSharedSpaces, "sanity");
  Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
  o->set_klass(k);
}

Klass* MetaspaceShared::get_relocated_klass(Klass *k) {
  assert(DumpSharedSpaces, "sanity");
  return ArchiveCompactor::get_relocated_klass(k);
}

class LinkSharedClassesClosure : public KlassClosure {
  Thread* THREAD;
  bool    _made_progress;
 public:
  LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {}

  void reset()               { _made_progress = false; }
  bool made_progress() const { return _made_progress; }

  void do_klass(Klass* k) {
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      // Link the class to cause the bytecodes to be rewritten and the
      // cpcache to be created. Class verification is done according
      // to the -Xverify setting.
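      // try_link_class() returns true iff the class's status changed, so the
      // |= below records whether this pass linked anything new; the do/while
      // loop in link_and_cleanup_shared_classes() below repeats until a full
      // pass makes no progress.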
      _made_progress |= MetaspaceShared::try_link_class(ik, THREAD);
      guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");

      ik->constants()->resolve_class_constants(THREAD);
    }
  }
};

class CheckSharedClassesClosure : public KlassClosure {
  bool _made_progress;
 public:
  CheckSharedClassesClosure() : _made_progress(false) {}

  void reset()               { _made_progress = false; }
  bool made_progress() const { return _made_progress; }
  void do_klass(Klass* k) {
    if (k->is_instance_klass() && InstanceKlass::cast(k)->check_sharing_error_state()) {
      _made_progress = true;
    }
  }
};

void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) {
  // We need to iterate because verification may cause additional classes
  // to be loaded.
  LinkSharedClassesClosure link_closure(THREAD);
  do {
    link_closure.reset();
    ClassLoaderDataGraph::unlocked_loaded_classes_do(&link_closure);
    guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
  } while (link_closure.made_progress());

  if (_has_error_classes) {
    // Mark all classes whose super class or interfaces failed verification.
    CheckSharedClassesClosure check_closure;
    do {
      // Not completely sure if we need to do this iteratively. Anyway,
      // we should come here only if there are unverifiable classes, which
      // shouldn't happen in normal cases. So better safe than sorry.
      check_closure.reset();
      ClassLoaderDataGraph::unlocked_loaded_classes_do(&check_closure);
    } while (check_closure.made_progress());
  }
}

void MetaspaceShared::prepare_for_dumping() {
  Arguments::check_unsupported_dumping_properties();
  ClassLoader::initialize_shared_path();
}

// Preload classes from a list, populate the shared spaces and dump to a
// file.
void MetaspaceShared::preload_and_dump(TRAPS) {
  { TraceTime timer("Dump Shared Spaces", TRACETIME_LOG(Info, startuptime));
    ResourceMark rm;
    char class_list_path_str[JVM_MAXPATHLEN];
    // Preload classes to be shared.
    // Should use some os:: method rather than fopen() here.
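    // Worked example (hypothetical Linux layout): os::jvm_path() might return
    //   /opt/jdk/lib/server/libjvm.so
    // Stripping the last three path components leaves /opt/jdk; since that
    // path does not already end in "lib", the code below appends "lib" and
    // then "classlist", giving a default list of /opt/jdk/lib/classlist.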
    const char* class_list_path;
    if (SharedClassListFile == NULL) {
      // Construct the path to the class list (in jre/lib):
      // walk up two directories from the location of the VM and
      // optionally tack on "lib" (depending on platform).
      os::jvm_path(class_list_path_str, sizeof(class_list_path_str));
      for (int i = 0; i < 3; i++) {
        char *end = strrchr(class_list_path_str, *os::file_separator());
        if (end != NULL) *end = '\0';
      }
      int class_list_path_len = (int)strlen(class_list_path_str);
      if (class_list_path_len >= 3) {
        if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
          if (class_list_path_len < JVM_MAXPATHLEN - 4) {
            jio_snprintf(class_list_path_str + class_list_path_len,
                         sizeof(class_list_path_str) - class_list_path_len,
                         "%slib", os::file_separator());
            class_list_path_len += 4;
          }
        }
      }
      if (class_list_path_len < JVM_MAXPATHLEN - 10) {
        jio_snprintf(class_list_path_str + class_list_path_len,
                     sizeof(class_list_path_str) - class_list_path_len,
                     "%sclasslist", os::file_separator());
      }
      class_list_path = class_list_path_str;
    } else {
      class_list_path = SharedClassListFile;
    }

    tty->print_cr("Loading classes to share ...");
    _has_error_classes = false;
    int class_count = preload_classes(class_list_path, THREAD);
    if (ExtraSharedClassListFile) {
      class_count += preload_classes(ExtraSharedClassListFile, THREAD);
    }
    tty->print_cr("Loading classes to share: done.");

    log_info(cds)("Shared spaces: preloaded %d classes", class_count);

    if (SharedArchiveConfigFile) {
      tty->print_cr("Reading extra data from %s ...", SharedArchiveConfigFile);
      read_extra_data(SharedArchiveConfigFile, THREAD);
      tty->print_cr("Reading extra data: done.");
    }

    HeapShared::init_subgraph_entry_fields(THREAD);

    // Rewrite and link classes
    tty->print_cr("Rewriting and linking classes ...");

    // Link any classes which got missed. This would happen if we have loaded classes that
    // were not explicitly specified in the classlist. E.g., if an interface implemented by class K
    // fails verification, all other interfaces that were not specified in the classlist but
    // are implemented by K are not verified.
    link_and_cleanup_shared_classes(CATCH);
    tty->print_cr("Rewriting and linking classes: done");

    if (HeapShared::is_heap_object_archiving_allowed()) {
      // Avoid fragmentation while archiving heap objects.
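      // Requesting a full GC with GCCause::_archive_time_gc compacts the heap
      // before the objects are copied into the archive regions; clearing all
      // soft references first releases softly-reachable memory so the
      // compaction can be as complete as possible.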
      Universe::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(true);
      Universe::heap()->collect(GCCause::_archive_time_gc);
      Universe::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(false);
    }

    VM_PopulateDumpSharedSpace op;
    VMThread::execute(&op);
  }
}

int MetaspaceShared::preload_classes(const char* class_list_path, TRAPS) {
  ClassListParser parser(class_list_path);
  int class_count = 0;

  while (parser.parse_one_line()) {
    Klass* klass = parser.load_current_class(THREAD);
    if (HAS_PENDING_EXCEPTION) {
      if (klass == NULL &&
          (PENDING_EXCEPTION->klass()->name() == vmSymbols::java_lang_ClassNotFoundException())) {
        // print a warning only when the pending exception is class not found
        tty->print_cr("Preload Warning: Cannot find %s", parser.current_class_name());
      }
      CLEAR_PENDING_EXCEPTION;
    }
    if (klass != NULL) {
      if (log_is_enabled(Trace, cds)) {
        ResourceMark rm;
        log_trace(cds)("Shared spaces preloaded: %s", klass->external_name());
      }

      if (klass->is_instance_klass()) {
        InstanceKlass* ik = InstanceKlass::cast(klass);

        // Link the class to cause the bytecodes to be rewritten and the
        // cpcache to be created. The linking is done as soon as classes
        // are loaded so that the related data structures (klass and
        // cpCache) are located together.
        try_link_class(ik, THREAD);
        guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
      }

      class_count++;
    }
  }

  return class_count;
}

// Returns true if the class's status has changed.
bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
  assert(DumpSharedSpaces, "should only be called during dumping");
  if (ik->init_state() < InstanceKlass::linked) {
    bool saved = BytecodeVerificationLocal;
    if (ik->loader_type() == 0 && ik->class_loader() == NULL) {
      // The verification decision is based on BytecodeVerificationRemote
      // for non-system classes. Since we are using the NULL classloader
      // to load non-system classes for customized class loaders during dumping,
      // we need to temporarily change BytecodeVerificationLocal to be the same as
      // BytecodeVerificationRemote. Note that this can cause the parent system
      // classes to be verified as well. The extra overhead is acceptable during
      // dumping.
      BytecodeVerificationLocal = BytecodeVerificationRemote;
    }
    ik->link_class(THREAD);
    if (HAS_PENDING_EXCEPTION) {
      ResourceMark rm;
      tty->print_cr("Preload Warning: Verification failed for %s",
                    ik->external_name());
      CLEAR_PENDING_EXCEPTION;
      ik->set_in_error_state();
      _has_error_classes = true;
    }
    BytecodeVerificationLocal = saved;
    return true;
  } else {
    return false;
  }
}

#if INCLUDE_CDS_JAVA_HEAP
void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
  // The closed and open archive heap spaces have at most two regions each.
  // See FileMapInfo::write_archive_heap_regions() for details.
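  // Each archive heap space is written as at most two MemRegions (the
  // ca0/ca1 and oa0/oa1 regions), hence the initial capacity of 2 for each
  // array below.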
  _closed_archive_heap_regions = new GrowableArray<MemRegion>(2);
  _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
  HeapShared::archive_java_heap_objects(_closed_archive_heap_regions,
                                        _open_archive_heap_regions);
  ArchiveCompactor::OtherROAllocMark mark;
  HeapShared::write_subgraph_info_table();
}

void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps() {
  if (HeapShared::is_heap_object_archiving_allowed()) {
    _closed_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
    dump_archive_heap_oopmaps(_closed_archive_heap_regions, _closed_archive_heap_oopmaps);

    _open_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
    dump_archive_heap_oopmaps(_open_archive_heap_regions, _open_archive_heap_oopmaps);
  }
}

void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                                           GrowableArray<ArchiveHeapOopmapInfo>* oopmaps) {
  for (int i = 0; i < regions->length(); i++) {
    ResourceBitMap oopmap = HeapShared::calculate_oopmap(regions->at(i));
    size_t size_in_bits = oopmap.size();
    size_t size_in_bytes = oopmap.size_in_bytes();
    uintptr_t* buffer = (uintptr_t*)_ro_region.allocate(size_in_bytes, sizeof(intptr_t));
    oopmap.write_to(buffer, size_in_bytes);
    log_info(cds)("Oopmap = " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for heap region "
                  INTPTR_FORMAT " (" SIZE_FORMAT_W(8) " bytes)",
                  p2i(buffer), size_in_bytes,
                  p2i(regions->at(i).start()), regions->at(i).byte_size());

    ArchiveHeapOopmapInfo info;
    info._oopmap = (address)buffer;
    info._oopmap_size_in_bits = size_in_bits;
    oopmaps->append(info);
  }
}
#endif // INCLUDE_CDS_JAVA_HEAP

// Closure for reading initialization data in from the data area
// (ptr_array) loaded from the shared file.
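//
// At dump time, a matching SerializeClosure whose reading() returns false
// walks the same data in the same order and appends each pointer-sized word
// to the archive. A simplified, illustrative sketch of that write side (the
// real counterpart is MetaspaceShared's WriteClosure; the names and details
// here are illustrative, and several callbacks are omitted):
//
//   class SketchWriteClosure : public SerializeClosure {
//     DumpRegion* _dump_region;
//    public:
//     SketchWriteClosure(DumpRegion* r) : _dump_region(r) {}
//     void do_ptr(void** p) { _dump_region->append_intptr_t((intptr_t)*p); }
//     void do_tag(int tag)  { _dump_region->append_intptr_t((intptr_t)tag); }
//     bool reading() const  { return false; }
//     // do_u4(), do_oop() and do_region() omitted for brevity.
//   };
//
// Because the ReadClosure below consumes the very same word stream, every
// do_*() call made at dump time must be repeated in the same order at run
// time; do_tag() provides the consistency checks.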
class ReadClosure : public SerializeClosure {
 private:
  intptr_t** _ptr_array;

  inline intptr_t nextPtr() {
    return *(*_ptr_array)++;
  }

 public:
  ReadClosure(intptr_t** ptr_array) { _ptr_array = ptr_array; }

  void do_ptr(void** p) {
    assert(*p == NULL, "initializing previously initialized pointer.");
    intptr_t obj = nextPtr();
    assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
           "hit tag while initializing ptrs.");
    *p = (void*)obj;
  }

  void do_u4(u4* p) {
    intptr_t obj = nextPtr();
    *p = (u4)(uintx(obj));
  }

  void do_tag(int tag) {
    int old_tag;
    old_tag = (int)(intptr_t)nextPtr();
    assert(tag == old_tag, "old tag doesn't match");
    FileMapInfo::assert_mark(tag == old_tag);
  }

  void do_oop(oop *p) {
    narrowOop o = (narrowOop)nextPtr();
    if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) {
      *p = NULL;
    } else {
      assert(HeapShared::is_heap_object_archiving_allowed(),
             "Archived heap object is not allowed");
      assert(HeapShared::open_archive_heap_region_mapped(),
             "Open archive heap region is not mapped");
      *p = HeapShared::decode_from_archive(o);
    }
  }

  void do_region(u_char* start, size_t size) {
    assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
    assert(size % sizeof(intptr_t) == 0, "bad size");
    do_tag((int)size);
    while (size > 0) {
      *(intptr_t*)start = nextPtr();
      start += sizeof(intptr_t);
      size -= sizeof(intptr_t);
    }
  }

  bool reading() const { return true; }
};

// Return true if the given address is within the shared region identified by idx.
bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {
  return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx);
}

bool MetaspaceShared::is_in_trampoline_frame(address addr) {
  if (UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc)) {
    return true;
  }
  return false;
}

// Map shared spaces at requested addresses and return if succeeded.
bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
  size_t image_alignment = mapinfo->alignment();

#ifndef _WINDOWS
  // Map in the shared memory and then map the regions on top of it.
  // On Windows, don't map the memory here because it will cause the
  // mappings of the regions to fail.
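  // Reserving the entire range up front and then mapping each region on top
  // of it ensures that no unrelated mapping can end up between the regions,
  // which the contiguity asserts below rely on.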
  ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
  if (!shared_rs.is_reserved()) return false;
#endif

  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  char* ro_base = NULL; char* ro_top;
  char* rw_base = NULL; char* rw_top;
  char* mc_base = NULL; char* mc_top;
  char* md_base = NULL; char* md_top;
  char* od_base = NULL; char* od_top;

  // Map each shared region
  if ((mc_base = mapinfo->map_region(mc, &mc_top)) != NULL &&
      (rw_base = mapinfo->map_region(rw, &rw_top)) != NULL &&
      (ro_base = mapinfo->map_region(ro, &ro_top)) != NULL &&
      (md_base = mapinfo->map_region(md, &md_top)) != NULL &&
      (od_base = mapinfo->map_region(od, &od_top)) != NULL &&
      (image_alignment == (size_t)os::vm_allocation_granularity()) &&
      mapinfo->validate_shared_path_table()) {
    // Success -- set up MetaspaceObj::_shared_metaspace_{base,top} for
    // fast checking in MetaspaceShared::is_in_shared_metaspace() and
    // MetaspaceObj::is_shared().
    //
    // We require mc->rw->ro->md->od to be laid out consecutively, with no
    // gaps between them. That way, we can ensure that the OS won't be able to
    // allocate any new memory spaces inside _shared_metaspace_{base,top}, which
    // would mess up the simple comparison in MetaspaceShared::is_in_shared_metaspace().
    assert(mc_base < ro_base && mc_base < rw_base && mc_base < md_base && mc_base < od_base, "must be");
    assert(od_top  > ro_top  && od_top  > rw_top  && od_top  > md_top  && od_top  > mc_top , "must be");
    assert(mc_top == rw_base, "must be");
    assert(rw_top == ro_base, "must be");
    assert(ro_top == md_base, "must be");
    assert(md_top == od_base, "must be");

    _core_spaces_size = mapinfo->core_spaces_size();
    MetaspaceObj::set_shared_metaspace_range((void*)mc_base, (void*)od_top);
    return true;
  } else {
    // If there was a failure in mapping any of the spaces, unmap the ones
    // that succeeded.
    if (ro_base != NULL) mapinfo->unmap_region(ro);
    if (rw_base != NULL) mapinfo->unmap_region(rw);
    if (mc_base != NULL) mapinfo->unmap_region(mc);
    if (md_base != NULL) mapinfo->unmap_region(md);
    if (od_base != NULL) mapinfo->unmap_region(od);
#ifndef _WINDOWS
    // Release the entire mapped region.
    shared_rs.release();
#endif
    // If -Xshare:on is specified, print out the error message and exit the VM;
    // otherwise, set UseSharedSpaces to false and continue.
    if (RequireSharedSpaces || PrintSharedArchiveAndExit) {
      vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
    } else {
      FLAG_SET_DEFAULT(UseSharedSpaces, false);
    }
    return false;
  }
}

// Read the miscellaneous data from the shared file, and
// serialize it out to its various destinations.
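//
// The order of the reads below must mirror the order of the writes at dump
// time: first the C++ vtable patching data at misc_data_patching_start(),
// then the read-only tables at read_only_tables_start(), which are
// deserialized by running the same serialize() routine with a ReadClosure
// in place of the dump-time write closure.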
void MetaspaceShared::initialize_shared_spaces() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();
  _cds_i2i_entry_code_buffers = mapinfo->cds_i2i_entry_code_buffers();
  _cds_i2i_entry_code_buffers_size = mapinfo->cds_i2i_entry_code_buffers_size();
  // _core_spaces_size is loaded from the shared archive immediately after mapping
  assert(_core_spaces_size == mapinfo->core_spaces_size(), "sanity");
  char* buffer = mapinfo->misc_data_patching_start();
  clone_cpp_vtables((intptr_t*)buffer);

  // The rest of the data is stored as read-only tables in the ro region
  buffer = mapinfo->read_only_tables_start();

  // Verify various attributes of the archive, plus initialize the
  // shared string/symbol tables
  intptr_t* array = (intptr_t*)buffer;
  ReadClosure rc(&array);
  serialize(&rc);

  // Initialize the run-time symbol table.
  SymbolTable::create_table();

  mapinfo->patch_archived_heap_embedded_pointers();

  // Close the mapinfo file
  mapinfo->close();

  if (PrintSharedArchiveAndExit) {
    if (PrintSharedDictionary) {
      tty->print_cr("\nShared classes:\n");
      SystemDictionaryShared::print_on(tty);
    }
    if (_archive_loading_failed) {
      tty->print_cr("archive is invalid");
      vm_exit(1);
    } else {
      tty->print_cr("archive is valid");
      vm_exit(0);
    }
  }
}

// JVM/TI RedefineClasses() support:
bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (UseSharedSpaces) {
    // remap the shared readonly space to shared readwrite, private
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (!mapinfo->remap_shared_readonly_as_readwrite()) {
      return false;
    }
    _remapped_readwrite = true;
  }
  return true;
}

void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
  // This is highly unlikely to happen on 64-bit systems because we have reserved a 4GB space.
  // On 32-bit systems we reserve only 256MB, so you could run out of space with 100,000
  // classes or so.
  _mc_region.print_out_of_space_msg(name, needed_bytes);
  _rw_region.print_out_of_space_msg(name, needed_bytes);
  _ro_region.print_out_of_space_msg(name, needed_bytes);
  _md_region.print_out_of_space_msg(name, needed_bytes);
  _od_region.print_out_of_space_msg(name, needed_bytes);

  vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
                                "Please reduce the number of shared classes.");
}
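
// Usage note (illustrative): the dump-time path above is typically driven by
//   java -Xshare:dump -XX:SharedClassListFile=<file> -Xlog:cds=info
// while the run-time mapping and initialization code is reached when the VM
// starts with -Xshare:on or -Xshare:auto.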