1 /* 2 * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "jvm.h" 27 #include "classfile/classListParser.hpp" 28 #include "classfile/classLoaderExt.hpp" 29 #include "classfile/dictionary.hpp" 30 #include "classfile/loaderConstraints.hpp" 31 #include "classfile/placeholders.hpp" 32 #include "classfile/symbolTable.hpp" 33 #include "classfile/stringTable.hpp" 34 #include "classfile/systemDictionary.hpp" 35 #include "classfile/systemDictionaryShared.hpp" 36 #include "code/codeCache.hpp" 37 #include "interpreter/bytecodeStream.hpp" 38 #include "interpreter/bytecodes.hpp" 39 #include "logging/log.hpp" 40 #include "logging/logMessage.hpp" 41 #include "memory/filemap.hpp" 42 #include "memory/heapShared.hpp" 43 #include "memory/metaspace.hpp" 44 #include "memory/metaspaceClosure.hpp" 45 #include "memory/metaspaceShared.hpp" 46 #include "memory/resourceArea.hpp" 47 #include "oops/compressedOops.inline.hpp" 48 #include "oops/instanceClassLoaderKlass.hpp" 49 #include "oops/instanceMirrorKlass.hpp" 50 #include "oops/instanceRefKlass.hpp" 51 #include "oops/objArrayKlass.hpp" 52 #include "oops/objArrayOop.hpp" 53 #include "oops/oop.inline.hpp" 54 #include "oops/typeArrayKlass.hpp" 55 #include "prims/jvmtiRedefineClasses.hpp" 56 #include "runtime/handles.inline.hpp" 57 #include "runtime/os.hpp" 58 #include "runtime/safepointVerifiers.hpp" 59 #include "runtime/signature.hpp" 60 #include "runtime/timerTrace.hpp" 61 #include "runtime/vmThread.hpp" 62 #include "runtime/vm_operations.hpp" 63 #include "utilities/align.hpp" 64 #include "utilities/defaultStream.hpp" 65 #include "utilities/hashtable.inline.hpp" 66 #if INCLUDE_G1GC 67 #include "gc/g1/g1Allocator.inline.hpp" 68 #include "gc/g1/g1CollectedHeap.hpp" 69 #endif 70 71 ReservedSpace MetaspaceShared::_shared_rs; 72 VirtualSpace MetaspaceShared::_shared_vs; 73 MetaspaceSharedStats MetaspaceShared::_stats; 74 bool MetaspaceShared::_has_error_classes; 75 bool MetaspaceShared::_archive_loading_failed = false; 76 bool MetaspaceShared::_remapped_readwrite = false; 77 bool MetaspaceShared::_open_archive_heap_region_mapped = false; 78 address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL; 79 size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0; 80 size_t MetaspaceShared::_core_spaces_size = 0; 81 82 // The CDS archive is divided into the following regions: 83 // mc - misc code (the method entry trampolines) 84 // rw - read-write metadata 85 // ro - read-only metadata and read-only tables 86 // md - misc data (the c++ vtables) 87 // od - optional data (original class files) 88 // 89 // s0 - shared strings(closed archive heap space) #0 90 // s1 - shared strings(closed archive heap space) #1 (may be empty) 91 // oa0 - open archive heap space #0 92 // oa1 - open archive heap space #1 (may be empty) 93 // 94 // The mc, rw, ro, md and od regions are linearly allocated, starting from 95 // SharedBaseAddress, in the order of mc->rw->ro->md->od. The size of these 5 regions 96 // are page-aligned, and there's no gap between any consecutive regions. 97 // 98 // These 5 regions are populated in the following steps: 99 // [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are 100 // temporarily allocated outside of the shared regions. Only the method entry 101 // trampolines are written into the mc region. 102 // [2] ArchiveCompactor copies RW metadata into the rw region. 103 // [3] ArchiveCompactor copies RO metadata into the ro region. 104 // [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data 105 // are copied into the ro region as read-only tables. 106 // [5] C++ vtables are copied into the md region. 107 // [6] Original class files are copied into the od region. 108 // 109 // The s0/s1 and oa0/oa1 regions are populated inside MetaspaceShared::dump_java_heap_objects. 110 // Their layout is independent of the other 5 regions. 111 112 class DumpRegion { 113 private: 114 const char* _name; 115 char* _base; 116 char* _top; 117 char* _end; 118 bool _is_packed; 119 120 char* expand_top_to(char* newtop) { 121 assert(is_allocatable(), "must be initialized and not packed"); 122 assert(newtop >= _top, "must not grow backwards"); 123 if (newtop > _end) { 124 MetaspaceShared::report_out_of_space(_name, newtop - _top); 125 ShouldNotReachHere(); 126 } 127 MetaspaceShared::commit_shared_space_to(newtop); 128 _top = newtop; 129 return _top; 130 } 131 132 public: 133 DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {} 134 135 char* allocate(size_t num_bytes, size_t alignment=BytesPerWord) { 136 char* p = (char*)align_up(_top, alignment); 137 char* newtop = p + align_up(num_bytes, alignment); 138 expand_top_to(newtop); 139 memset(p, 0, newtop - p); 140 return p; 141 } 142 143 void append_intptr_t(intptr_t n) { 144 assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment"); 145 intptr_t *p = (intptr_t*)_top; 146 char* newtop = _top + sizeof(intptr_t); 147 expand_top_to(newtop); 148 *p = n; 149 } 150 151 char* base() const { return _base; } 152 char* top() const { return _top; } 153 char* end() const { return _end; } 154 size_t reserved() const { return _end - _base; } 155 size_t used() const { return _top - _base; } 156 bool is_packed() const { return _is_packed; } 157 bool is_allocatable() const { 158 return !is_packed() && _base != NULL; 159 } 160 161 void print(size_t total_bytes) const { 162 tty->print_cr("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT, 163 _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()), p2i(_base)); 164 } 165 void print_out_of_space_msg(const char* failing_region, size_t needed_bytes) { 166 tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d", 167 _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base)); 168 if (strcmp(_name, failing_region) == 0) { 169 tty->print_cr(" required = %d", int(needed_bytes)); 170 } else { 171 tty->cr(); 172 } 173 } 174 175 void init(const ReservedSpace* rs) { 176 _base = _top = rs->base(); 177 _end = rs->end(); 178 } 179 void init(char* b, char* t, char* e) { 180 _base = b; 181 _top = t; 182 _end = e; 183 } 184 185 void pack(DumpRegion* next = NULL) { 186 assert(!is_packed(), "sanity"); 187 _end = (char*)align_up(_top, Metaspace::reserve_alignment()); 188 _is_packed = true; 189 if (next != NULL) { 190 next->_base = next->_top = this->_end; 191 next->_end = MetaspaceShared::shared_rs()->end(); 192 } 193 } 194 bool contains(char* p) { 195 return base() <= p && p < top(); 196 } 197 }; 198 199 200 DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od"); 201 size_t _total_string_region_size = 0, _total_open_archive_region_size = 0; 202 203 char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) { 204 return _mc_region.allocate(num_bytes); 205 } 206 207 char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) { 208 return _ro_region.allocate(num_bytes); 209 } 210 211 char* MetaspaceShared::read_only_space_top() { 212 return _ro_region.top(); 213 } 214 215 void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() { 216 assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled"); 217 218 // If using shared space, open the file that contains the shared space 219 // and map in the memory before initializing the rest of metaspace (so 220 // the addresses don't conflict) 221 address cds_address = NULL; 222 FileMapInfo* mapinfo = new FileMapInfo(); 223 224 // Open the shared archive file, read and validate the header. If 225 // initialization fails, shared spaces [UseSharedSpaces] are 226 // disabled and the file is closed. 227 // Map in spaces now also 228 if (mapinfo->initialize() && map_shared_spaces(mapinfo)) { 229 size_t cds_total = core_spaces_size(); 230 cds_address = (address)mapinfo->header()->region_addr(0); 231 #ifdef _LP64 232 if (Metaspace::using_class_space()) { 233 char* cds_end = (char*)(cds_address + cds_total); 234 cds_end = (char *)align_up(cds_end, Metaspace::reserve_alignment()); 235 // If UseCompressedClassPointers is set then allocate the metaspace area 236 // above the heap and above the CDS area (if it exists). 237 Metaspace::allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address); 238 // map_heap_regions() compares the current narrow oop and klass encodings 239 // with the archived ones, so it must be done after all encodings are determined. 240 mapinfo->map_heap_regions(); 241 } 242 Universe::set_narrow_klass_range(CompressedClassSpaceSize); 243 #endif // _LP64 244 } else { 245 assert(!mapinfo->is_open() && !UseSharedSpaces, 246 "archive file not closed or shared spaces not disabled."); 247 } 248 } 249 250 void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() { 251 assert(DumpSharedSpaces, "should be called for dump time only"); 252 const size_t reserve_alignment = Metaspace::reserve_alignment(); 253 bool large_pages = false; // No large pages when dumping the CDS archive. 254 char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment); 255 256 #ifdef _LP64 257 // On 64-bit VM, the heap and class space layout will be the same as if 258 // you're running in -Xshare:on mode: 259 // 260 // +-- SharedBaseAddress (default = 0x800000000) 261 // v 262 // +-..---------+---------+ ... +----+----+----+----+----+---------------+ 263 // | Heap | Archive | | MC | RW | RO | MD | OD | class space | 264 // +-..---------+---------+ ... +----+----+----+----+----+---------------+ 265 // |<-- MaxHeapSize -->| |<-- UnscaledClassSpaceMax = 4GB ------->| 266 // 267 const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1); 268 const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment); 269 #else 270 // We don't support archives larger than 256MB on 32-bit due to limited virtual address space. 271 size_t cds_total = align_down(256*M, reserve_alignment); 272 #endif 273 274 // First try to reserve the space at the specified SharedBaseAddress. 275 _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base); 276 if (_shared_rs.is_reserved()) { 277 assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match"); 278 } else { 279 // Get a mmap region anywhere if the SharedBaseAddress fails. 280 _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages); 281 } 282 if (!_shared_rs.is_reserved()) { 283 vm_exit_during_initialization("Unable to reserve memory for shared space", 284 err_msg(SIZE_FORMAT " bytes.", cds_total)); 285 } 286 287 #ifdef _LP64 288 // During dump time, we allocate 4GB (UnscaledClassSpaceMax) of space and split it up: 289 // + The upper 1 GB is used as the "temporary compressed class space" -- preload_classes() 290 // will store Klasses into this space. 291 // + The lower 3 GB is used for the archive -- when preload_classes() is done, 292 // ArchiveCompactor will copy the class metadata into this space, first the RW parts, 293 // then the RO parts. 294 295 assert(UseCompressedOops && UseCompressedClassPointers, 296 "UseCompressedOops and UseCompressedClassPointers must be set"); 297 298 size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment); 299 ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size); 300 CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment); 301 _shared_rs = _shared_rs.first_part(max_archive_size); 302 303 // Set up compress class pointers. 304 Universe::set_narrow_klass_base((address)_shared_rs.base()); 305 // Set narrow_klass_shift to be LogKlassAlignmentInBytes. This is consistent 306 // with AOT. 307 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes); 308 // Set the range of klass addresses to 4GB. 309 Universe::set_narrow_klass_range(cds_total); 310 311 Metaspace::initialize_class_space(tmp_class_space); 312 tty->print_cr("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d", 313 p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift()); 314 315 tty->print_cr("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT, 316 CompressedClassSpaceSize, p2i(tmp_class_space.base())); 317 #endif 318 319 // Start with 0 committed bytes. The memory will be committed as needed by 320 // MetaspaceShared::commit_shared_space_to(). 321 if (!_shared_vs.initialize(_shared_rs, 0)) { 322 vm_exit_during_initialization("Unable to allocate memory for shared space"); 323 } 324 325 _mc_region.init(&_shared_rs); 326 tty->print_cr("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT, 327 _shared_rs.size(), p2i(_shared_rs.base())); 328 } 329 330 // Called by universe_post_init() 331 void MetaspaceShared::post_initialize(TRAPS) { 332 if (UseSharedSpaces) { 333 int size = FileMapInfo::get_number_of_shared_paths(); 334 if (size > 0) { 335 SystemDictionaryShared::allocate_shared_data_arrays(size, THREAD); 336 FileMapInfo::FileMapHeader* header = FileMapInfo::current_info()->header(); 337 ClassLoaderExt::init_paths_start_index(header->_app_class_paths_start_index); 338 ClassLoaderExt::init_app_module_paths_start_index(header->_app_module_paths_start_index); 339 } 340 } 341 342 if (DumpSharedSpaces) { 343 if (SharedArchiveConfigFile) { 344 read_extra_data(SharedArchiveConfigFile, THREAD); 345 } 346 } 347 } 348 349 void MetaspaceShared::read_extra_data(const char* filename, TRAPS) { 350 HashtableTextDump reader(filename); 351 reader.check_version("VERSION: 1.0"); 352 353 while (reader.remain() > 0) { 354 int utf8_length; 355 int prefix_type = reader.scan_prefix(&utf8_length); 356 ResourceMark rm(THREAD); 357 char* utf8_buffer = NEW_RESOURCE_ARRAY(char, utf8_length); 358 reader.get_utf8(utf8_buffer, utf8_length); 359 360 if (prefix_type == HashtableTextDump::SymbolPrefix) { 361 SymbolTable::new_symbol(utf8_buffer, utf8_length, THREAD); 362 } else{ 363 assert(prefix_type == HashtableTextDump::StringPrefix, "Sanity"); 364 utf8_buffer[utf8_length] = '\0'; 365 oop s = StringTable::intern(utf8_buffer, THREAD); 366 } 367 } 368 } 369 370 void MetaspaceShared::commit_shared_space_to(char* newtop) { 371 assert(DumpSharedSpaces, "dump-time only"); 372 char* base = _shared_rs.base(); 373 size_t need_committed_size = newtop - base; 374 size_t has_committed_size = _shared_vs.committed_size(); 375 if (need_committed_size < has_committed_size) { 376 return; 377 } 378 379 size_t min_bytes = need_committed_size - has_committed_size; 380 size_t preferred_bytes = 1 * M; 381 size_t uncommitted = _shared_vs.reserved_size() - has_committed_size; 382 383 size_t commit = MAX2(min_bytes, preferred_bytes); 384 assert(commit <= uncommitted, "sanity"); 385 386 bool result = _shared_vs.expand_by(commit, false); 387 if (!result) { 388 vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes", 389 need_committed_size)); 390 } 391 392 log_info(cds)("Expanding shared spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]", 393 commit, _shared_vs.actual_committed_size(), _shared_vs.high()); 394 } 395 396 // Read/write a data stream for restoring/preserving metadata pointers and 397 // miscellaneous data from/to the shared archive file. 398 399 void MetaspaceShared::serialize(SerializeClosure* soc) { 400 int tag = 0; 401 soc->do_tag(--tag); 402 403 // Verify the sizes of various metadata in the system. 404 soc->do_tag(sizeof(Method)); 405 soc->do_tag(sizeof(ConstMethod)); 406 soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE)); 407 soc->do_tag(sizeof(ConstantPool)); 408 soc->do_tag(sizeof(ConstantPoolCache)); 409 soc->do_tag(objArrayOopDesc::base_offset_in_bytes()); 410 soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE)); 411 soc->do_tag(sizeof(Symbol)); 412 413 // Dump/restore miscellaneous metadata. 414 Universe::serialize(soc, true); 415 soc->do_tag(--tag); 416 417 // Dump/restore references to commonly used names and signatures. 418 vmSymbols::serialize(soc); 419 soc->do_tag(--tag); 420 421 // Dump/restore the symbol and string tables 422 SymbolTable::serialize(soc); 423 StringTable::serialize(soc); 424 soc->do_tag(--tag); 425 426 serialize_well_known_classes(soc); 427 soc->do_tag(--tag); 428 429 soc->do_tag(666); 430 } 431 432 void MetaspaceShared::serialize_well_known_classes(SerializeClosure* soc) { 433 java_lang_Class::serialize(soc); 434 java_lang_String::serialize(soc); 435 java_lang_System::serialize(soc); 436 java_lang_ClassLoader::serialize(soc); 437 java_lang_Throwable::serialize(soc); 438 java_lang_Thread::serialize(soc); 439 java_lang_ThreadGroup::serialize(soc); 440 java_lang_AssertionStatusDirectives::serialize(soc); 441 java_lang_module_Configuration::serialize(soc); 442 java_lang_ref_SoftReference::serialize(soc); 443 java_lang_invoke_MethodHandle::serialize(soc); 444 java_lang_invoke_DirectMethodHandle::serialize(soc); 445 java_lang_invoke_MemberName::serialize(soc); 446 java_lang_invoke_ResolvedMethodName::serialize(soc); 447 java_lang_invoke_LambdaForm::serialize(soc); 448 java_lang_invoke_MethodType::serialize(soc); 449 java_lang_invoke_CallSite::serialize(soc); 450 java_lang_invoke_MethodHandleNatives_CallSiteContext::serialize(soc); 451 java_security_AccessControlContext::serialize(soc); 452 java_lang_reflect_AccessibleObject::serialize(soc); 453 java_lang_reflect_Method::serialize(soc); 454 java_lang_reflect_Constructor::serialize(soc); 455 java_lang_reflect_Field::serialize(soc); 456 java_nio_Buffer::serialize(soc); 457 reflect_ConstantPool::serialize(soc); 458 reflect_UnsafeStaticFieldAccessorImpl::serialize(soc); 459 java_lang_reflect_Parameter::serialize(soc); 460 java_lang_Module::serialize(soc); 461 java_lang_StackTraceElement::serialize(soc); 462 java_lang_StackFrameInfo::serialize(soc); 463 java_lang_LiveStackFrameInfo::serialize(soc); 464 java_util_concurrent_locks_AbstractOwnableSynchronizer::serialize(soc); 465 java_util_ImmutableCollections_ListN::serialize(soc); 466 java_util_ImmutableCollections_MapN::serialize(soc); 467 java_util_ImmutableCollections_SetN::serialize(soc); 468 jdk_internal_module_ArchivedModuleGraph::serialize(soc); 469 } 470 471 address MetaspaceShared::cds_i2i_entry_code_buffers(size_t total_size) { 472 if (DumpSharedSpaces) { 473 if (_cds_i2i_entry_code_buffers == NULL) { 474 _cds_i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size); 475 _cds_i2i_entry_code_buffers_size = total_size; 476 } 477 } else if (UseSharedSpaces) { 478 assert(_cds_i2i_entry_code_buffers != NULL, "must already been initialized"); 479 } else { 480 return NULL; 481 } 482 483 assert(_cds_i2i_entry_code_buffers_size == total_size, "must not change"); 484 return _cds_i2i_entry_code_buffers; 485 } 486 487 // CDS code for dumping shared archive. 488 489 // Global object for holding classes that have been loaded. Since this 490 // is run at a safepoint just before exit, this is the entire set of classes. 491 static GrowableArray<Klass*>* _global_klass_objects; 492 493 static void collect_array_classes(Klass* k) { 494 _global_klass_objects->append_if_missing(k); 495 if (k->is_array_klass()) { 496 // Add in the array classes too 497 ArrayKlass* ak = ArrayKlass::cast(k); 498 Klass* h = ak->higher_dimension(); 499 if (h != NULL) { 500 h->array_klasses_do(collect_array_classes); 501 } 502 } 503 } 504 505 class CollectClassesClosure : public KlassClosure { 506 void do_klass(Klass* k) { 507 if (!(k->is_instance_klass() && InstanceKlass::cast(k)->is_in_error_state())) { 508 if (k->is_instance_klass() && InstanceKlass::cast(k)->signers() != NULL) { 509 // Mark any class with signers and don't add to the _global_klass_objects 510 k->set_has_signer_and_not_archived(); 511 } else { 512 _global_klass_objects->append_if_missing(k); 513 } 514 } 515 if (k->is_array_klass()) { 516 // Add in the array classes too 517 ArrayKlass* ak = ArrayKlass::cast(k); 518 Klass* h = ak->higher_dimension(); 519 if (h != NULL) { 520 h->array_klasses_do(collect_array_classes); 521 } 522 } 523 } 524 }; 525 526 static void remove_unshareable_in_classes() { 527 for (int i = 0; i < _global_klass_objects->length(); i++) { 528 Klass* k = _global_klass_objects->at(i); 529 if (!k->is_objArray_klass()) { 530 // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info 531 // on their array classes. 532 assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be"); 533 k->remove_unshareable_info(); 534 } 535 } 536 } 537 538 static void remove_java_mirror_in_classes() { 539 for (int i = 0; i < _global_klass_objects->length(); i++) { 540 Klass* k = _global_klass_objects->at(i); 541 if (!k->is_objArray_klass()) { 542 // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info 543 // on their array classes. 544 assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be"); 545 k->remove_java_mirror(); 546 } 547 } 548 } 549 550 static void clear_basic_type_mirrors() { 551 assert(!MetaspaceShared::is_heap_object_archiving_allowed(), "Sanity"); 552 Universe::set_int_mirror(NULL); 553 Universe::set_float_mirror(NULL); 554 Universe::set_double_mirror(NULL); 555 Universe::set_byte_mirror(NULL); 556 Universe::set_bool_mirror(NULL); 557 Universe::set_char_mirror(NULL); 558 Universe::set_long_mirror(NULL); 559 Universe::set_short_mirror(NULL); 560 Universe::set_void_mirror(NULL); 561 } 562 563 static void rewrite_nofast_bytecode(Method* method) { 564 BytecodeStream bcs(method); 565 while (!bcs.is_last_bytecode()) { 566 Bytecodes::Code opcode = bcs.next(); 567 switch (opcode) { 568 case Bytecodes::_getfield: *bcs.bcp() = Bytecodes::_nofast_getfield; break; 569 case Bytecodes::_putfield: *bcs.bcp() = Bytecodes::_nofast_putfield; break; 570 case Bytecodes::_aload_0: *bcs.bcp() = Bytecodes::_nofast_aload_0; break; 571 case Bytecodes::_iload: { 572 if (!bcs.is_wide()) { 573 *bcs.bcp() = Bytecodes::_nofast_iload; 574 } 575 break; 576 } 577 default: break; 578 } 579 } 580 } 581 582 // Walk all methods in the class list to ensure that they won't be modified at 583 // run time. This includes: 584 // [1] Rewrite all bytecodes as needed, so that the ConstMethod* will not be modified 585 // at run time by RewriteBytecodes/RewriteFrequentPairs 586 // [2] Assign a fingerprint, so one doesn't need to be assigned at run-time. 587 static void rewrite_nofast_bytecodes_and_calculate_fingerprints() { 588 for (int i = 0; i < _global_klass_objects->length(); i++) { 589 Klass* k = _global_klass_objects->at(i); 590 if (k->is_instance_klass()) { 591 InstanceKlass* ik = InstanceKlass::cast(k); 592 for (int i = 0; i < ik->methods()->length(); i++) { 593 Method* m = ik->methods()->at(i); 594 rewrite_nofast_bytecode(m); 595 Fingerprinter fp(m); 596 // The side effect of this call sets method's fingerprint field. 597 fp.fingerprint(); 598 } 599 } 600 } 601 } 602 603 static void relocate_cached_class_file() { 604 for (int i = 0; i < _global_klass_objects->length(); i++) { 605 Klass* k = _global_klass_objects->at(i); 606 if (k->is_instance_klass()) { 607 InstanceKlass* ik = InstanceKlass::cast(k); 608 JvmtiCachedClassFileData* p = ik->get_archived_class_data(); 609 if (p != NULL) { 610 int size = offset_of(JvmtiCachedClassFileData, data) + p->length; 611 JvmtiCachedClassFileData* q = (JvmtiCachedClassFileData*)_od_region.allocate(size); 612 q->length = p->length; 613 memcpy(q->data, p->data, p->length); 614 ik->set_archived_class_data(q); 615 } 616 } 617 } 618 } 619 620 NOT_PRODUCT( 621 static void assert_not_anonymous_class(InstanceKlass* k) { 622 assert(!(k->is_anonymous()), "cannot archive anonymous classes"); 623 } 624 625 // Anonymous classes are not stored inside any dictionaries. They are created by 626 // SystemDictionary::parse_stream() with a non-null host_klass. 627 static void assert_no_anonymoys_classes_in_dictionaries() { 628 ClassLoaderDataGraph::dictionary_classes_do(assert_not_anonymous_class); 629 }) 630 631 // Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables. 632 // (In GCC this is the field <Type>::_vptr, i.e., first word in the object.) 633 // 634 // Addresses of the vtables and the methods may be different across JVM runs, 635 // if libjvm.so is dynamically loaded at a different base address. 636 // 637 // To ensure that the Metadata objects in the CDS archive always have the correct vtable: 638 // 639 // + at dump time: we redirect the _vptr to point to our own vtables inside 640 // the CDS image 641 // + at run time: we clone the actual contents of the vtables from libjvm.so 642 // into our own tables. 643 644 // Currently, the archive contain ONLY the following types of objects that have C++ vtables. 645 #define CPP_VTABLE_PATCH_TYPES_DO(f) \ 646 f(ConstantPool) \ 647 f(InstanceKlass) \ 648 f(InstanceClassLoaderKlass) \ 649 f(InstanceMirrorKlass) \ 650 f(InstanceRefKlass) \ 651 f(Method) \ 652 f(ObjArrayKlass) \ 653 f(TypeArrayKlass) 654 655 class CppVtableInfo { 656 intptr_t _vtable_size; 657 intptr_t _cloned_vtable[1]; 658 public: 659 static int num_slots(int vtable_size) { 660 return 1 + vtable_size; // Need to add the space occupied by _vtable_size; 661 } 662 int vtable_size() { return int(uintx(_vtable_size)); } 663 void set_vtable_size(int n) { _vtable_size = intptr_t(n); } 664 intptr_t* cloned_vtable() { return &_cloned_vtable[0]; } 665 void zero() { memset(_cloned_vtable, 0, sizeof(intptr_t) * vtable_size()); } 666 // Returns the address of the next CppVtableInfo that can be placed immediately after this CppVtableInfo 667 static size_t byte_size(int vtable_size) { 668 CppVtableInfo i; 669 return pointer_delta(&i._cloned_vtable[vtable_size], &i, sizeof(u1)); 670 } 671 }; 672 673 template <class T> class CppVtableCloner : public T { 674 static intptr_t* vtable_of(Metadata& m) { 675 return *((intptr_t**)&m); 676 } 677 static CppVtableInfo* _info; 678 679 static int get_vtable_length(const char* name); 680 681 public: 682 // Allocate and initialize the C++ vtable, starting from top, but do not go past end. 683 static intptr_t* allocate(const char* name); 684 685 // Clone the vtable to ... 686 static intptr_t* clone_vtable(const char* name, CppVtableInfo* info); 687 688 static void zero_vtable_clone() { 689 assert(DumpSharedSpaces, "dump-time only"); 690 _info->zero(); 691 } 692 693 // Switch the vtable pointer to point to the cloned vtable. 694 static void patch(Metadata* obj) { 695 assert(DumpSharedSpaces, "dump-time only"); 696 *(void**)obj = (void*)(_info->cloned_vtable()); 697 } 698 699 static bool is_valid_shared_object(const T* obj) { 700 intptr_t* vptr = *(intptr_t**)obj; 701 return vptr == _info->cloned_vtable(); 702 } 703 }; 704 705 template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL; 706 707 template <class T> 708 intptr_t* CppVtableCloner<T>::allocate(const char* name) { 709 assert(is_aligned(_md_region.top(), sizeof(intptr_t)), "bad alignment"); 710 int n = get_vtable_length(name); 711 _info = (CppVtableInfo*)_md_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t)); 712 _info->set_vtable_size(n); 713 714 intptr_t* p = clone_vtable(name, _info); 715 assert((char*)p == _md_region.top(), "must be"); 716 717 return p; 718 } 719 720 template <class T> 721 intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) { 722 if (!DumpSharedSpaces) { 723 assert(_info == 0, "_info is initialized only at dump time"); 724 _info = info; // Remember it -- it will be used by MetaspaceShared::is_valid_shared_method() 725 } 726 T tmp; // Allocate temporary dummy metadata object to get to the original vtable. 727 int n = info->vtable_size(); 728 intptr_t* srcvtable = vtable_of(tmp); 729 intptr_t* dstvtable = info->cloned_vtable(); 730 731 // We already checked (and, if necessary, adjusted n) when the vtables were allocated, so we are 732 // safe to do memcpy. 733 log_debug(cds, vtables)("Copying %3d vtable entries for %s", n, name); 734 memcpy(dstvtable, srcvtable, sizeof(intptr_t) * n); 735 return dstvtable + n; 736 } 737 738 // To determine the size of the vtable for each type, we use the following 739 // trick by declaring 2 subclasses: 740 // 741 // class CppVtableTesterA: public InstanceKlass {virtual int last_virtual_method() {return 1;} }; 742 // class CppVtableTesterB: public InstanceKlass {virtual void* last_virtual_method() {return NULL}; }; 743 // 744 // CppVtableTesterA and CppVtableTesterB's vtables have the following properties: 745 // - Their size (N+1) is exactly one more than the size of InstanceKlass's vtable (N) 746 // - The first N entries have are exactly the same as in InstanceKlass's vtable. 747 // - Their last entry is different. 748 // 749 // So to determine the value of N, we just walk CppVtableTesterA and CppVtableTesterB's tables 750 // and find the first entry that's different. 751 // 752 // This works on all C++ compilers supported by Oracle, but you may need to tweak it for more 753 // esoteric compilers. 754 755 template <class T> class CppVtableTesterB: public T { 756 public: 757 virtual int last_virtual_method() {return 1;} 758 }; 759 760 template <class T> class CppVtableTesterA : public T { 761 public: 762 virtual void* last_virtual_method() { 763 // Make this different than CppVtableTesterB::last_virtual_method so the C++ 764 // compiler/linker won't alias the two functions. 765 return NULL; 766 } 767 }; 768 769 template <class T> 770 int CppVtableCloner<T>::get_vtable_length(const char* name) { 771 CppVtableTesterA<T> a; 772 CppVtableTesterB<T> b; 773 774 intptr_t* avtable = vtable_of(a); 775 intptr_t* bvtable = vtable_of(b); 776 777 // Start at slot 1, because slot 0 may be RTTI (on Solaris/Sparc) 778 int vtable_len = 1; 779 for (; ; vtable_len++) { 780 if (avtable[vtable_len] != bvtable[vtable_len]) { 781 break; 782 } 783 } 784 log_debug(cds, vtables)("Found %3d vtable entries for %s", vtable_len, name); 785 786 return vtable_len; 787 } 788 789 #define ALLOC_CPP_VTABLE_CLONE(c) \ 790 CppVtableCloner<c>::allocate(#c); 791 792 #define CLONE_CPP_VTABLE(c) \ 793 p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p); 794 795 #define ZERO_CPP_VTABLE(c) \ 796 CppVtableCloner<c>::zero_vtable_clone(); 797 798 // This can be called at both dump time and run time. 799 intptr_t* MetaspaceShared::clone_cpp_vtables(intptr_t* p) { 800 assert(DumpSharedSpaces || UseSharedSpaces, "sanity"); 801 CPP_VTABLE_PATCH_TYPES_DO(CLONE_CPP_VTABLE); 802 return p; 803 } 804 805 void MetaspaceShared::zero_cpp_vtable_clones_for_writing() { 806 assert(DumpSharedSpaces, "dump-time only"); 807 CPP_VTABLE_PATCH_TYPES_DO(ZERO_CPP_VTABLE); 808 } 809 810 // Allocate and initialize the C++ vtables, starting from top, but do not go past end. 811 void MetaspaceShared::allocate_cpp_vtable_clones() { 812 assert(DumpSharedSpaces, "dump-time only"); 813 // Layout (each slot is a intptr_t): 814 // [number of slots in the first vtable = n1] 815 // [ <n1> slots for the first vtable] 816 // [number of slots in the first second = n2] 817 // [ <n2> slots for the second vtable] 818 // ... 819 // The order of the vtables is the same as the CPP_VTAB_PATCH_TYPES_DO macro. 820 CPP_VTABLE_PATCH_TYPES_DO(ALLOC_CPP_VTABLE_CLONE); 821 } 822 823 // Switch the vtable pointer to point to the cloned vtable. We assume the 824 // vtable pointer is in first slot in object. 825 void MetaspaceShared::patch_cpp_vtable_pointers() { 826 int n = _global_klass_objects->length(); 827 for (int i = 0; i < n; i++) { 828 Klass* obj = _global_klass_objects->at(i); 829 if (obj->is_instance_klass()) { 830 InstanceKlass* ik = InstanceKlass::cast(obj); 831 if (ik->is_class_loader_instance_klass()) { 832 CppVtableCloner<InstanceClassLoaderKlass>::patch(ik); 833 } else if (ik->is_reference_instance_klass()) { 834 CppVtableCloner<InstanceRefKlass>::patch(ik); 835 } else if (ik->is_mirror_instance_klass()) { 836 CppVtableCloner<InstanceMirrorKlass>::patch(ik); 837 } else { 838 CppVtableCloner<InstanceKlass>::patch(ik); 839 } 840 ConstantPool* cp = ik->constants(); 841 CppVtableCloner<ConstantPool>::patch(cp); 842 for (int j = 0; j < ik->methods()->length(); j++) { 843 Method* m = ik->methods()->at(j); 844 CppVtableCloner<Method>::patch(m); 845 assert(CppVtableCloner<Method>::is_valid_shared_object(m), "must be"); 846 } 847 } else if (obj->is_objArray_klass()) { 848 CppVtableCloner<ObjArrayKlass>::patch(obj); 849 } else { 850 assert(obj->is_typeArray_klass(), "sanity"); 851 CppVtableCloner<TypeArrayKlass>::patch(obj); 852 } 853 } 854 } 855 856 bool MetaspaceShared::is_valid_shared_method(const Method* m) { 857 assert(is_in_shared_metaspace(m), "must be"); 858 return CppVtableCloner<Method>::is_valid_shared_object(m); 859 } 860 861 // Closure for serializing initialization data out to a data area to be 862 // written to the shared file. 863 864 class WriteClosure : public SerializeClosure { 865 private: 866 DumpRegion* _dump_region; 867 868 public: 869 WriteClosure(DumpRegion* r) { 870 _dump_region = r; 871 } 872 873 void do_ptr(void** p) { 874 _dump_region->append_intptr_t((intptr_t)*p); 875 } 876 877 void do_u4(u4* p) { 878 void* ptr = (void*)(uintx(*p)); 879 do_ptr(&ptr); 880 } 881 882 void do_tag(int tag) { 883 _dump_region->append_intptr_t((intptr_t)tag); 884 } 885 886 void do_oop(oop* o) { 887 if (*o == NULL) { 888 _dump_region->append_intptr_t(0); 889 } else { 890 assert(MetaspaceShared::is_heap_object_archiving_allowed(), 891 "Archiving heap object is not allowed"); 892 _dump_region->append_intptr_t( 893 (intptr_t)CompressedOops::encode_not_null(*o)); 894 } 895 } 896 897 void do_region(u_char* start, size_t size) { 898 assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment"); 899 assert(size % sizeof(intptr_t) == 0, "bad size"); 900 do_tag((int)size); 901 while (size > 0) { 902 _dump_region->append_intptr_t(*(intptr_t*)start); 903 start += sizeof(intptr_t); 904 size -= sizeof(intptr_t); 905 } 906 } 907 908 bool reading() const { return false; } 909 }; 910 911 // This is for dumping detailed statistics for the allocations 912 // in the shared spaces. 913 class DumpAllocStats : public ResourceObj { 914 public: 915 916 // Here's poor man's enum inheritance 917 #define SHAREDSPACE_OBJ_TYPES_DO(f) \ 918 METASPACE_OBJ_TYPES_DO(f) \ 919 f(SymbolHashentry) \ 920 f(SymbolBucket) \ 921 f(StringHashentry) \ 922 f(StringBucket) \ 923 f(Other) 924 925 enum Type { 926 // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc 927 SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE) 928 _number_of_types 929 }; 930 931 static const char * type_name(Type type) { 932 switch(type) { 933 SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE) 934 default: 935 ShouldNotReachHere(); 936 return NULL; 937 } 938 } 939 940 public: 941 enum { RO = 0, RW = 1 }; 942 943 int _counts[2][_number_of_types]; 944 int _bytes [2][_number_of_types]; 945 946 DumpAllocStats() { 947 memset(_counts, 0, sizeof(_counts)); 948 memset(_bytes, 0, sizeof(_bytes)); 949 }; 950 951 void record(MetaspaceObj::Type type, int byte_size, bool read_only) { 952 assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity"); 953 int which = (read_only) ? RO : RW; 954 _counts[which][type] ++; 955 _bytes [which][type] += byte_size; 956 } 957 958 void record_other_type(int byte_size, bool read_only) { 959 int which = (read_only) ? RO : RW; 960 _bytes [which][OtherType] += byte_size; 961 } 962 void print_stats(int ro_all, int rw_all, int mc_all, int md_all); 963 }; 964 965 void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all, int md_all) { 966 // Calculate size of data that was not allocated by Metaspace::allocate() 967 MetaspaceSharedStats *stats = MetaspaceShared::stats(); 968 969 // symbols 970 _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count; 971 _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes; 972 973 _counts[RO][SymbolBucketType] = stats->symbol.bucket_count; 974 _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes; 975 976 // strings 977 _counts[RO][StringHashentryType] = stats->string.hashentry_count; 978 _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes; 979 980 _counts[RO][StringBucketType] = stats->string.bucket_count; 981 _bytes [RO][StringBucketType] = stats->string.bucket_bytes; 982 983 // TODO: count things like dictionary, vtable, etc 984 _bytes[RW][OtherType] += mc_all + md_all; 985 rw_all += mc_all + md_all; // mc/md are mapped Read/Write 986 987 // prevent divide-by-zero 988 if (ro_all < 1) { 989 ro_all = 1; 990 } 991 if (rw_all < 1) { 992 rw_all = 1; 993 } 994 995 int all_ro_count = 0; 996 int all_ro_bytes = 0; 997 int all_rw_count = 0; 998 int all_rw_bytes = 0; 999 1000 // To make fmt_stats be a syntactic constant (for format warnings), use #define. 1001 #define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f" 1002 const char *sep = "--------------------+---------------------------+---------------------------+--------------------------"; 1003 const char *hdr = " ro_cnt ro_bytes % | rw_cnt rw_bytes % | all_cnt all_bytes %"; 1004 1005 LogMessage(cds) msg; 1006 1007 msg.info("Detailed metadata info (excluding od/st regions; rw stats include md/mc regions):"); 1008 msg.info("%s", hdr); 1009 msg.info("%s", sep); 1010 for (int type = 0; type < int(_number_of_types); type ++) { 1011 const char *name = type_name((Type)type); 1012 int ro_count = _counts[RO][type]; 1013 int ro_bytes = _bytes [RO][type]; 1014 int rw_count = _counts[RW][type]; 1015 int rw_bytes = _bytes [RW][type]; 1016 int count = ro_count + rw_count; 1017 int bytes = ro_bytes + rw_bytes; 1018 1019 double ro_perc = percent_of(ro_bytes, ro_all); 1020 double rw_perc = percent_of(rw_bytes, rw_all); 1021 double perc = percent_of(bytes, ro_all + rw_all); 1022 1023 msg.info(fmt_stats, name, 1024 ro_count, ro_bytes, ro_perc, 1025 rw_count, rw_bytes, rw_perc, 1026 count, bytes, perc); 1027 1028 all_ro_count += ro_count; 1029 all_ro_bytes += ro_bytes; 1030 all_rw_count += rw_count; 1031 all_rw_bytes += rw_bytes; 1032 } 1033 1034 int all_count = all_ro_count + all_rw_count; 1035 int all_bytes = all_ro_bytes + all_rw_bytes; 1036 1037 double all_ro_perc = percent_of(all_ro_bytes, ro_all); 1038 double all_rw_perc = percent_of(all_rw_bytes, rw_all); 1039 double all_perc = percent_of(all_bytes, ro_all + rw_all); 1040 1041 msg.info("%s", sep); 1042 msg.info(fmt_stats, "Total", 1043 all_ro_count, all_ro_bytes, all_ro_perc, 1044 all_rw_count, all_rw_bytes, all_rw_perc, 1045 all_count, all_bytes, all_perc); 1046 1047 assert(all_ro_bytes == ro_all, "everything should have been counted"); 1048 assert(all_rw_bytes == rw_all, "everything should have been counted"); 1049 1050 #undef fmt_stats 1051 } 1052 1053 // Populate the shared space. 1054 1055 class VM_PopulateDumpSharedSpace: public VM_Operation { 1056 private: 1057 GrowableArray<MemRegion> *_closed_archive_heap_regions; 1058 GrowableArray<MemRegion> *_open_archive_heap_regions; 1059 1060 void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN; 1061 void dump_symbols(); 1062 char* dump_read_only_tables(); 1063 void print_region_stats(); 1064 void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem, 1065 const char *name, const size_t total_size); 1066 public: 1067 1068 VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; } 1069 void doit(); // outline because gdb sucks 1070 static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only, bool allow_exec); 1071 bool allow_nested_vm_operations() const { return true; } 1072 }; // class VM_PopulateDumpSharedSpace 1073 1074 class SortedSymbolClosure: public SymbolClosure { 1075 GrowableArray<Symbol*> _symbols; 1076 virtual void do_symbol(Symbol** sym) { 1077 assert((*sym)->is_permanent(), "archived symbols must be permanent"); 1078 _symbols.append(*sym); 1079 } 1080 static int compare_symbols_by_address(Symbol** a, Symbol** b) { 1081 if (a[0] < b[0]) { 1082 return -1; 1083 } else if (a[0] == b[0]) { 1084 return 0; 1085 } else { 1086 return 1; 1087 } 1088 } 1089 1090 public: 1091 SortedSymbolClosure() { 1092 SymbolTable::symbols_do(this); 1093 _symbols.sort(compare_symbols_by_address); 1094 } 1095 GrowableArray<Symbol*>* get_sorted_symbols() { 1096 return &_symbols; 1097 } 1098 }; 1099 1100 // ArchiveCompactor -- 1101 // 1102 // This class is the central piece of shared archive compaction -- all metaspace data are 1103 // initially allocated outside of the shared regions. ArchiveCompactor copies the 1104 // metaspace data into their final location in the shared regions. 1105 1106 class ArchiveCompactor : AllStatic { 1107 static DumpAllocStats* _alloc_stats; 1108 static SortedSymbolClosure* _ssc; 1109 1110 static unsigned my_hash(const address& a) { 1111 return primitive_hash<address>(a); 1112 } 1113 static bool my_equals(const address& a0, const address& a1) { 1114 return primitive_equals<address>(a0, a1); 1115 } 1116 typedef ResourceHashtable< 1117 address, address, 1118 ArchiveCompactor::my_hash, // solaris compiler doesn't like: primitive_hash<address> 1119 ArchiveCompactor::my_equals, // solaris compiler doesn't like: primitive_equals<address> 1120 16384, ResourceObj::C_HEAP> RelocationTable; 1121 static RelocationTable* _new_loc_table; 1122 1123 public: 1124 static void initialize() { 1125 _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats; 1126 _new_loc_table = new(ResourceObj::C_HEAP, mtInternal)RelocationTable; 1127 } 1128 static DumpAllocStats* alloc_stats() { 1129 return _alloc_stats; 1130 } 1131 1132 static void allocate(MetaspaceClosure::Ref* ref, bool read_only) { 1133 address obj = ref->obj(); 1134 int bytes = ref->size() * BytesPerWord; 1135 char* p; 1136 size_t alignment = BytesPerWord; 1137 char* oldtop; 1138 char* newtop; 1139 1140 if (read_only) { 1141 oldtop = _ro_region.top(); 1142 p = _ro_region.allocate(bytes, alignment); 1143 newtop = _ro_region.top(); 1144 } else { 1145 oldtop = _rw_region.top(); 1146 p = _rw_region.allocate(bytes, alignment); 1147 newtop = _rw_region.top(); 1148 } 1149 memcpy(p, obj, bytes); 1150 bool isnew = _new_loc_table->put(obj, (address)p); 1151 log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes); 1152 assert(isnew, "must be"); 1153 1154 _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only); 1155 if (ref->msotype() == MetaspaceObj::SymbolType) { 1156 uintx delta = MetaspaceShared::object_delta(p); 1157 if (delta > MAX_SHARED_DELTA) { 1158 // This is just a sanity check and should not appear in any real world usage. This 1159 // happens only if you allocate more than 2GB of Symbols and would require 1160 // millions of shared classes. 1161 vm_exit_during_initialization("Too many Symbols in the CDS archive", 1162 "Please reduce the number of shared classes."); 1163 } 1164 } 1165 } 1166 1167 static address get_new_loc(MetaspaceClosure::Ref* ref) { 1168 address* pp = _new_loc_table->get(ref->obj()); 1169 assert(pp != NULL, "must be"); 1170 return *pp; 1171 } 1172 1173 private: 1174 // Makes a shallow copy of visited MetaspaceObj's 1175 class ShallowCopier: public UniqueMetaspaceClosure { 1176 bool _read_only; 1177 public: 1178 ShallowCopier(bool read_only) : _read_only(read_only) {} 1179 1180 virtual void do_unique_ref(Ref* ref, bool read_only) { 1181 if (read_only == _read_only) { 1182 allocate(ref, read_only); 1183 } 1184 } 1185 }; 1186 1187 // Relocate embedded pointers within a MetaspaceObj's shallow copy 1188 class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure { 1189 public: 1190 virtual void do_unique_ref(Ref* ref, bool read_only) { 1191 address new_loc = get_new_loc(ref); 1192 RefRelocator refer; 1193 ref->metaspace_pointers_do_at(&refer, new_loc); 1194 } 1195 }; 1196 1197 // Relocate a reference to point to its shallow copy 1198 class RefRelocator: public MetaspaceClosure { 1199 public: 1200 virtual bool do_ref(Ref* ref, bool read_only) { 1201 if (ref->not_null()) { 1202 ref->update(get_new_loc(ref)); 1203 } 1204 return false; // Do not recurse. 1205 } 1206 }; 1207 1208 #ifdef ASSERT 1209 class IsRefInArchiveChecker: public MetaspaceClosure { 1210 public: 1211 virtual bool do_ref(Ref* ref, bool read_only) { 1212 if (ref->not_null()) { 1213 char* obj = (char*)ref->obj(); 1214 assert(_ro_region.contains(obj) || _rw_region.contains(obj), 1215 "must be relocated to point to CDS archive"); 1216 } 1217 return false; // Do not recurse. 1218 } 1219 }; 1220 #endif 1221 1222 public: 1223 static void copy_and_compact() { 1224 // We should no longer allocate anything from the metaspace, so that 1225 // we can have a stable set of MetaspaceObjs to work with. 1226 Metaspace::freeze(); 1227 1228 ResourceMark rm; 1229 SortedSymbolClosure the_ssc; // StackObj 1230 _ssc = &the_ssc; 1231 1232 tty->print_cr("Scanning all metaspace objects ... "); 1233 { 1234 // allocate and shallow-copy RW objects, immediately following the MC region 1235 tty->print_cr("Allocating RW objects ... "); 1236 _mc_region.pack(&_rw_region); 1237 1238 ResourceMark rm; 1239 ShallowCopier rw_copier(false); 1240 iterate_roots(&rw_copier); 1241 } 1242 { 1243 // allocate and shallow-copy of RO object, immediately following the RW region 1244 tty->print_cr("Allocating RO objects ... "); 1245 _rw_region.pack(&_ro_region); 1246 1247 ResourceMark rm; 1248 ShallowCopier ro_copier(true); 1249 iterate_roots(&ro_copier); 1250 } 1251 { 1252 tty->print_cr("Relocating embedded pointers ... "); 1253 ResourceMark rm; 1254 ShallowCopyEmbeddedRefRelocator emb_reloc; 1255 iterate_roots(&emb_reloc); 1256 } 1257 { 1258 tty->print_cr("Relocating external roots ... "); 1259 ResourceMark rm; 1260 RefRelocator ext_reloc; 1261 iterate_roots(&ext_reloc); 1262 } 1263 1264 #ifdef ASSERT 1265 { 1266 tty->print_cr("Verifying external roots ... "); 1267 ResourceMark rm; 1268 IsRefInArchiveChecker checker; 1269 iterate_roots(&checker); 1270 } 1271 #endif 1272 1273 1274 // cleanup 1275 _ssc = NULL; 1276 } 1277 1278 // We must relocate the System::_well_known_klasses only after we have copied the 1279 // java objects in during dump_java_heap_objects(): during the object copy, we operate on 1280 // old objects which assert that their klass is the original klass. 1281 static void relocate_well_known_klasses() { 1282 { 1283 tty->print_cr("Relocating SystemDictionary::_well_known_klasses[] ... "); 1284 ResourceMark rm; 1285 RefRelocator ext_reloc; 1286 SystemDictionary::well_known_klasses_do(&ext_reloc); 1287 } 1288 // NOTE: after this point, we shouldn't have any globals that can reach the old 1289 // objects. 1290 1291 // We cannot use any of the objects in the heap anymore (except for the objects 1292 // in the CDS shared string regions) because their headers no longer point to 1293 // valid Klasses. 1294 } 1295 1296 static void iterate_roots(MetaspaceClosure* it) { 1297 GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols(); 1298 for (int i=0; i<symbols->length(); i++) { 1299 it->push(symbols->adr_at(i)); 1300 } 1301 if (_global_klass_objects != NULL) { 1302 // Need to fix up the pointers 1303 for (int i = 0; i < _global_klass_objects->length(); i++) { 1304 // NOTE -- this requires that the vtable is NOT yet patched, or else we are hosed. 1305 it->push(_global_klass_objects->adr_at(i)); 1306 } 1307 } 1308 FileMapInfo::metaspace_pointers_do(it); 1309 SystemDictionary::classes_do(it); 1310 Universe::metaspace_pointers_do(it); 1311 SymbolTable::metaspace_pointers_do(it); 1312 vmSymbols::metaspace_pointers_do(it); 1313 } 1314 1315 static Klass* get_relocated_klass(Klass* orig_klass) { 1316 assert(DumpSharedSpaces, "dump time only"); 1317 address* pp = _new_loc_table->get((address)orig_klass); 1318 assert(pp != NULL, "must be"); 1319 Klass* klass = (Klass*)(*pp); 1320 assert(klass->is_klass(), "must be"); 1321 return klass; 1322 } 1323 }; 1324 1325 DumpAllocStats* ArchiveCompactor::_alloc_stats; 1326 SortedSymbolClosure* ArchiveCompactor::_ssc; 1327 ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table; 1328 1329 void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx, 1330 DumpRegion* dump_region, bool read_only, bool allow_exec) { 1331 mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec); 1332 } 1333 1334 void VM_PopulateDumpSharedSpace::dump_symbols() { 1335 tty->print_cr("Dumping symbol table ..."); 1336 1337 NOT_PRODUCT(SymbolTable::verify()); 1338 SymbolTable::write_to_archive(); 1339 } 1340 1341 char* VM_PopulateDumpSharedSpace::dump_read_only_tables() { 1342 char* oldtop = _ro_region.top(); 1343 // Reorder the system dictionary. Moving the symbols affects 1344 // how the hash table indices are calculated. 1345 SystemDictionary::reorder_dictionary_for_sharing(); 1346 1347 tty->print("Removing java_mirror ... "); 1348 if (!MetaspaceShared::is_heap_object_archiving_allowed()) { 1349 clear_basic_type_mirrors(); 1350 } 1351 remove_java_mirror_in_classes(); 1352 tty->print_cr("done. "); 1353 NOT_PRODUCT(SystemDictionary::verify();) 1354 1355 size_t buckets_bytes = SystemDictionary::count_bytes_for_buckets(); 1356 char* buckets_top = _ro_region.allocate(buckets_bytes, sizeof(intptr_t)); 1357 SystemDictionary::copy_buckets(buckets_top, _ro_region.top()); 1358 1359 size_t table_bytes = SystemDictionary::count_bytes_for_table(); 1360 char* table_top = _ro_region.allocate(table_bytes, sizeof(intptr_t)); 1361 SystemDictionary::copy_table(table_top, _ro_region.top()); 1362 1363 // Write the archived object sub-graph infos. For each klass with sub-graphs, 1364 // the info includes the static fields (sub-graph entry points) and Klasses 1365 // of objects included in the sub-graph. 1366 HeapShared::write_archived_subgraph_infos(); 1367 1368 // Write the other data to the output array. 1369 WriteClosure wc(&_ro_region); 1370 MetaspaceShared::serialize(&wc); 1371 1372 char* newtop = _ro_region.top(); 1373 ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - oldtop), true); 1374 return buckets_top; 1375 } 1376 1377 void VM_PopulateDumpSharedSpace::doit() { 1378 Thread* THREAD = VMThread::vm_thread(); 1379 1380 FileMapInfo::check_nonempty_dir_in_shared_path_table(); 1381 1382 NOT_PRODUCT(SystemDictionary::verify();) 1383 // The following guarantee is meant to ensure that no loader constraints 1384 // exist yet, since the constraints table is not shared. This becomes 1385 // more important now that we don't re-initialize vtables/itables for 1386 // shared classes at runtime, where constraints were previously created. 1387 guarantee(SystemDictionary::constraints()->number_of_entries() == 0, 1388 "loader constraints are not saved"); 1389 guarantee(SystemDictionary::placeholders()->number_of_entries() == 0, 1390 "placeholders are not saved"); 1391 // Revisit and implement this if we prelink method handle call sites: 1392 guarantee(SystemDictionary::invoke_method_table() == NULL || 1393 SystemDictionary::invoke_method_table()->number_of_entries() == 0, 1394 "invoke method table is not saved"); 1395 1396 // At this point, many classes have been loaded. 1397 // Gather systemDictionary classes in a global array and do everything to 1398 // that so we don't have to walk the SystemDictionary again. 1399 _global_klass_objects = new GrowableArray<Klass*>(1000); 1400 CollectClassesClosure collect_classes; 1401 ClassLoaderDataGraph::loaded_classes_do(&collect_classes); 1402 1403 tty->print_cr("Number of classes %d", _global_klass_objects->length()); 1404 { 1405 int num_type_array = 0, num_obj_array = 0, num_inst = 0; 1406 for (int i = 0; i < _global_klass_objects->length(); i++) { 1407 Klass* k = _global_klass_objects->at(i); 1408 if (k->is_instance_klass()) { 1409 num_inst ++; 1410 } else if (k->is_objArray_klass()) { 1411 num_obj_array ++; 1412 } else { 1413 assert(k->is_typeArray_klass(), "sanity"); 1414 num_type_array ++; 1415 } 1416 } 1417 tty->print_cr(" instance classes = %5d", num_inst); 1418 tty->print_cr(" obj array classes = %5d", num_obj_array); 1419 tty->print_cr(" type array classes = %5d", num_type_array); 1420 } 1421 1422 // Ensure the ConstMethods won't be modified at run-time 1423 tty->print("Updating ConstMethods ... "); 1424 rewrite_nofast_bytecodes_and_calculate_fingerprints(); 1425 tty->print_cr("done. "); 1426 1427 // Move classes from platform/system dictionaries into the boot dictionary 1428 SystemDictionary::combine_shared_dictionaries(); 1429 1430 // Make sure all classes have a correct loader type. 1431 ClassLoaderData::the_null_class_loader_data()->dictionary()->classes_do(MetaspaceShared::check_shared_class_loader_type); 1432 1433 // Remove all references outside the metadata 1434 tty->print("Removing unshareable information ... "); 1435 remove_unshareable_in_classes(); 1436 tty->print_cr("done. "); 1437 1438 // We don't support archiving anonymous classes. Verify that they are not stored in 1439 // the any dictionaries. 1440 NOT_PRODUCT(assert_no_anonymoys_classes_in_dictionaries()); 1441 1442 SystemDictionaryShared::finalize_verification_constraints(); 1443 1444 ArchiveCompactor::initialize(); 1445 ArchiveCompactor::copy_and_compact(); 1446 1447 dump_symbols(); 1448 1449 // Dump supported java heap objects 1450 _closed_archive_heap_regions = NULL; 1451 _open_archive_heap_regions = NULL; 1452 dump_java_heap_objects(); 1453 1454 ArchiveCompactor::relocate_well_known_klasses(); 1455 1456 char* read_only_tables_start = dump_read_only_tables(); 1457 _ro_region.pack(&_md_region); 1458 1459 char* vtbl_list = _md_region.top(); 1460 MetaspaceShared::allocate_cpp_vtable_clones(); 1461 _md_region.pack(&_od_region); 1462 1463 // Relocate the archived class file data into the od region 1464 relocate_cached_class_file(); 1465 _od_region.pack(); 1466 1467 // The 5 core spaces are allocated consecutively mc->rw->ro->md->od, so there total size 1468 // is just the spaces between the two ends. 1469 size_t core_spaces_size = _od_region.end() - _mc_region.base(); 1470 assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()), 1471 "should already be aligned"); 1472 1473 // During patching, some virtual methods may be called, so at this point 1474 // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate). 1475 MetaspaceShared::patch_cpp_vtable_pointers(); 1476 1477 // The vtable clones contain addresses of the current process. 1478 // We don't want to write these addresses into the archive. 1479 MetaspaceShared::zero_cpp_vtable_clones_for_writing(); 1480 1481 // Create and write the archive file that maps the shared spaces. 1482 1483 FileMapInfo* mapinfo = new FileMapInfo(); 1484 mapinfo->populate_header(os::vm_allocation_granularity()); 1485 mapinfo->set_read_only_tables_start(read_only_tables_start); 1486 mapinfo->set_misc_data_patching_start(vtbl_list); 1487 mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers()); 1488 mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size()); 1489 mapinfo->set_core_spaces_size(core_spaces_size); 1490 1491 for (int pass=1; pass<=2; pass++) { 1492 if (pass == 1) { 1493 // The first pass doesn't actually write the data to disk. All it 1494 // does is to update the fields in the mapinfo->_header. 1495 } else { 1496 // After the first pass, the contents of mapinfo->_header are finalized, 1497 // so we can compute the header's CRC, and write the contents of the header 1498 // and the regions into disk. 1499 mapinfo->open_for_write(); 1500 mapinfo->set_header_crc(mapinfo->compute_header_crc()); 1501 } 1502 mapinfo->write_header(); 1503 1504 // NOTE: md contains the trampoline code for method entries, which are patched at run time, 1505 // so it needs to be read/write. 1506 write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true); 1507 write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false); 1508 write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false); 1509 write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false); 1510 write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false); 1511 1512 _total_string_region_size = mapinfo->write_archive_heap_regions( 1513 _closed_archive_heap_regions, 1514 MetaspaceShared::first_string, 1515 MetaspaceShared::max_strings); 1516 _total_open_archive_region_size = mapinfo->write_archive_heap_regions( 1517 _open_archive_heap_regions, 1518 MetaspaceShared::first_open_archive_heap_region, 1519 MetaspaceShared::max_open_archive_heap_region); 1520 } 1521 1522 mapinfo->close(); 1523 1524 // Restore the vtable in case we invoke any virtual methods. 1525 MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list); 1526 1527 print_region_stats(); 1528 1529 if (log_is_enabled(Info, cds)) { 1530 ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()), 1531 int(_mc_region.used()), int(_md_region.used())); 1532 } 1533 1534 if (PrintSystemDictionaryAtExit) { 1535 SystemDictionary::print(); 1536 } 1537 // There may be other pending VM operations that operate on the InstanceKlasses, 1538 // which will fail because InstanceKlasses::remove_unshareable_info() 1539 // has been called. Forget these operations and exit the VM directly. 1540 vm_direct_exit(0); 1541 } 1542 1543 void VM_PopulateDumpSharedSpace::print_region_stats() { 1544 // Print statistics of all the regions 1545 const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() + 1546 _mc_region.reserved() + _md_region.reserved() + 1547 _od_region.reserved() + 1548 _total_string_region_size + 1549 _total_open_archive_region_size; 1550 const size_t total_bytes = _ro_region.used() + _rw_region.used() + 1551 _mc_region.used() + _md_region.used() + 1552 _od_region.used() + 1553 _total_string_region_size + 1554 _total_open_archive_region_size; 1555 const double total_u_perc = percent_of(total_bytes, total_reserved); 1556 1557 _mc_region.print(total_reserved); 1558 _rw_region.print(total_reserved); 1559 _ro_region.print(total_reserved); 1560 _md_region.print(total_reserved); 1561 _od_region.print(total_reserved); 1562 print_heap_region_stats(_closed_archive_heap_regions, "st", total_reserved); 1563 print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved); 1564 1565 tty->print_cr("total : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]", 1566 total_bytes, total_reserved, total_u_perc); 1567 } 1568 1569 void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem, 1570 const char *name, const size_t total_size) { 1571 int arr_len = heap_mem == NULL ? 0 : heap_mem->length(); 1572 for (int i = 0; i < arr_len; i++) { 1573 char* start = (char*)heap_mem->at(i).start(); 1574 size_t size = heap_mem->at(i).byte_size(); 1575 char* top = start + size; 1576 tty->print_cr("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT, 1577 name, i, size, size/double(total_size)*100.0, size, p2i(start)); 1578 1579 } 1580 } 1581 1582 // Update a Java object to point its Klass* to the new location after 1583 // shared archive has been compacted. 1584 void MetaspaceShared::relocate_klass_ptr(oop o) { 1585 assert(DumpSharedSpaces, "sanity"); 1586 Klass* k = ArchiveCompactor::get_relocated_klass(o->klass()); 1587 o->set_klass(k); 1588 } 1589 1590 Klass* MetaspaceShared::get_relocated_klass(Klass *k) { 1591 assert(DumpSharedSpaces, "sanity"); 1592 return ArchiveCompactor::get_relocated_klass(k); 1593 } 1594 1595 class LinkSharedClassesClosure : public KlassClosure { 1596 Thread* THREAD; 1597 bool _made_progress; 1598 public: 1599 LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {} 1600 1601 void reset() { _made_progress = false; } 1602 bool made_progress() const { return _made_progress; } 1603 1604 void do_klass(Klass* k) { 1605 if (k->is_instance_klass()) { 1606 InstanceKlass* ik = InstanceKlass::cast(k); 1607 // Link the class to cause the bytecodes to be rewritten and the 1608 // cpcache to be created. Class verification is done according 1609 // to -Xverify setting. 1610 _made_progress |= MetaspaceShared::try_link_class(ik, THREAD); 1611 guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class"); 1612 1613 ik->constants()->resolve_class_constants(THREAD); 1614 } 1615 } 1616 }; 1617 1618 class CheckSharedClassesClosure : public KlassClosure { 1619 bool _made_progress; 1620 public: 1621 CheckSharedClassesClosure() : _made_progress(false) {} 1622 1623 void reset() { _made_progress = false; } 1624 bool made_progress() const { return _made_progress; } 1625 void do_klass(Klass* k) { 1626 if (k->is_instance_klass() && InstanceKlass::cast(k)->check_sharing_error_state()) { 1627 _made_progress = true; 1628 } 1629 } 1630 }; 1631 1632 void MetaspaceShared::check_shared_class_loader_type(InstanceKlass* ik) { 1633 ResourceMark rm; 1634 if (ik->shared_classpath_index() == UNREGISTERED_INDEX) { 1635 guarantee(ik->loader_type() == 0, 1636 "Class loader type must not be set for this class %s", ik->name()->as_C_string()); 1637 } else { 1638 guarantee(ik->loader_type() != 0, 1639 "Class loader type must be set for this class %s", ik->name()->as_C_string()); 1640 } 1641 } 1642 1643 void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) { 1644 // We need to iterate because verification may cause additional classes 1645 // to be loaded. 1646 LinkSharedClassesClosure link_closure(THREAD); 1647 do { 1648 link_closure.reset(); 1649 ClassLoaderDataGraph::loaded_classes_do(&link_closure); 1650 guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class"); 1651 } while (link_closure.made_progress()); 1652 1653 if (_has_error_classes) { 1654 // Mark all classes whose super class or interfaces failed verification. 1655 CheckSharedClassesClosure check_closure; 1656 do { 1657 // Not completely sure if we need to do this iteratively. Anyway, 1658 // we should come here only if there are unverifiable classes, which 1659 // shouldn't happen in normal cases. So better safe than sorry. 1660 check_closure.reset(); 1661 ClassLoaderDataGraph::loaded_classes_do(&check_closure); 1662 } while (check_closure.made_progress()); 1663 1664 if (IgnoreUnverifiableClassesDuringDump) { 1665 // This is useful when running JCK or SQE tests. You should not 1666 // enable this when running real apps. 1667 SystemDictionary::remove_classes_in_error_state(); 1668 } else { 1669 tty->print_cr("Please remove the unverifiable classes from your class list and try again"); 1670 exit(1); 1671 } 1672 } 1673 } 1674 1675 void MetaspaceShared::prepare_for_dumping() { 1676 Arguments::check_unsupported_dumping_properties(); 1677 ClassLoader::initialize_shared_path(); 1678 } 1679 1680 // Preload classes from a list, populate the shared spaces and dump to a 1681 // file. 1682 void MetaspaceShared::preload_and_dump(TRAPS) { 1683 { TraceTime timer("Dump Shared Spaces", TRACETIME_LOG(Info, startuptime)); 1684 ResourceMark rm; 1685 char class_list_path_str[JVM_MAXPATHLEN]; 1686 // Preload classes to be shared. 1687 // Should use some os:: method rather than fopen() here. aB. 1688 const char* class_list_path; 1689 if (SharedClassListFile == NULL) { 1690 // Construct the path to the class list (in jre/lib) 1691 // Walk up two directories from the location of the VM and 1692 // optionally tack on "lib" (depending on platform) 1693 os::jvm_path(class_list_path_str, sizeof(class_list_path_str)); 1694 for (int i = 0; i < 3; i++) { 1695 char *end = strrchr(class_list_path_str, *os::file_separator()); 1696 if (end != NULL) *end = '\0'; 1697 } 1698 int class_list_path_len = (int)strlen(class_list_path_str); 1699 if (class_list_path_len >= 3) { 1700 if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) { 1701 if (class_list_path_len < JVM_MAXPATHLEN - 4) { 1702 jio_snprintf(class_list_path_str + class_list_path_len, 1703 sizeof(class_list_path_str) - class_list_path_len, 1704 "%slib", os::file_separator()); 1705 class_list_path_len += 4; 1706 } 1707 } 1708 } 1709 if (class_list_path_len < JVM_MAXPATHLEN - 10) { 1710 jio_snprintf(class_list_path_str + class_list_path_len, 1711 sizeof(class_list_path_str) - class_list_path_len, 1712 "%sclasslist", os::file_separator()); 1713 } 1714 class_list_path = class_list_path_str; 1715 } else { 1716 class_list_path = SharedClassListFile; 1717 } 1718 1719 tty->print_cr("Loading classes to share ..."); 1720 _has_error_classes = false; 1721 int class_count = preload_classes(class_list_path, THREAD); 1722 if (ExtraSharedClassListFile) { 1723 class_count += preload_classes(ExtraSharedClassListFile, THREAD); 1724 } 1725 tty->print_cr("Loading classes to share: done."); 1726 1727 log_info(cds)("Shared spaces: preloaded %d classes", class_count); 1728 1729 // Rewrite and link classes 1730 tty->print_cr("Rewriting and linking classes ..."); 1731 1732 // Link any classes which got missed. This would happen if we have loaded classes that 1733 // were not explicitly specified in the classlist. E.g., if an interface implemented by class K 1734 // fails verification, all other interfaces that were not specified in the classlist but 1735 // are implemented by K are not verified. 1736 link_and_cleanup_shared_classes(CATCH); 1737 tty->print_cr("Rewriting and linking classes: done"); 1738 1739 SystemDictionary::clear_invoke_method_table(); 1740 1741 VM_PopulateDumpSharedSpace op; 1742 VMThread::execute(&op); 1743 } 1744 } 1745 1746 1747 int MetaspaceShared::preload_classes(const char* class_list_path, TRAPS) { 1748 ClassListParser parser(class_list_path); 1749 int class_count = 0; 1750 1751 while (parser.parse_one_line()) { 1752 Klass* klass = ClassLoaderExt::load_one_class(&parser, THREAD); 1753 if (HAS_PENDING_EXCEPTION) { 1754 if (klass == NULL && 1755 (PENDING_EXCEPTION->klass()->name() == vmSymbols::java_lang_ClassNotFoundException())) { 1756 // print a warning only when the pending exception is class not found 1757 tty->print_cr("Preload Warning: Cannot find %s", parser.current_class_name()); 1758 } 1759 CLEAR_PENDING_EXCEPTION; 1760 } 1761 if (klass != NULL) { 1762 if (log_is_enabled(Trace, cds)) { 1763 ResourceMark rm; 1764 log_trace(cds)("Shared spaces preloaded: %s", klass->external_name()); 1765 } 1766 1767 if (klass->is_instance_klass()) { 1768 InstanceKlass* ik = InstanceKlass::cast(klass); 1769 1770 // Link the class to cause the bytecodes to be rewritten and the 1771 // cpcache to be created. The linking is done as soon as classes 1772 // are loaded in order that the related data structures (klass and 1773 // cpCache) are located together. 1774 try_link_class(ik, THREAD); 1775 guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class"); 1776 } 1777 1778 class_count++; 1779 } 1780 } 1781 1782 return class_count; 1783 } 1784 1785 // Returns true if the class's status has changed 1786 bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) { 1787 assert(DumpSharedSpaces, "should only be called during dumping"); 1788 if (ik->init_state() < InstanceKlass::linked) { 1789 bool saved = BytecodeVerificationLocal; 1790 if (ik->loader_type() == 0 && ik->class_loader() == NULL) { 1791 // The verification decision is based on BytecodeVerificationRemote 1792 // for non-system classes. Since we are using the NULL classloader 1793 // to load non-system classes for customized class loaders during dumping, 1794 // we need to temporarily change BytecodeVerificationLocal to be the same as 1795 // BytecodeVerificationRemote. Note this can cause the parent system 1796 // classes also being verified. The extra overhead is acceptable during 1797 // dumping. 1798 BytecodeVerificationLocal = BytecodeVerificationRemote; 1799 } 1800 ik->link_class(THREAD); 1801 if (HAS_PENDING_EXCEPTION) { 1802 ResourceMark rm; 1803 tty->print_cr("Preload Warning: Verification failed for %s", 1804 ik->external_name()); 1805 CLEAR_PENDING_EXCEPTION; 1806 ik->set_in_error_state(); 1807 _has_error_classes = true; 1808 } 1809 BytecodeVerificationLocal = saved; 1810 return true; 1811 } else { 1812 return false; 1813 } 1814 } 1815 1816 #if INCLUDE_CDS_JAVA_HEAP 1817 void VM_PopulateDumpSharedSpace::dump_java_heap_objects() { 1818 if (!MetaspaceShared::is_heap_object_archiving_allowed()) { 1819 if (log_is_enabled(Info, cds)) { 1820 log_info(cds)( 1821 "Archived java heap is not supported as UseG1GC, " 1822 "UseCompressedOops and UseCompressedClassPointers are required." 1823 "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.", 1824 BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops), 1825 BOOL_TO_STR(UseCompressedClassPointers)); 1826 } 1827 return; 1828 } 1829 1830 { 1831 NoSafepointVerifier nsv; 1832 1833 // Cache for recording where the archived objects are copied to 1834 MetaspaceShared::create_archive_object_cache(); 1835 1836 tty->print_cr("Dumping objects to closed archive heap region ..."); 1837 NOT_PRODUCT(StringTable::verify()); 1838 // The closed space has maximum two regions. See FileMapInfo::write_archive_heap_regions() for details. 1839 _closed_archive_heap_regions = new GrowableArray<MemRegion>(2); 1840 MetaspaceShared::dump_closed_archive_heap_objects(_closed_archive_heap_regions); 1841 1842 tty->print_cr("Dumping objects to open archive heap region ..."); 1843 _open_archive_heap_regions = new GrowableArray<MemRegion>(2); 1844 MetaspaceShared::dump_open_archive_heap_objects(_open_archive_heap_regions); 1845 1846 MetaspaceShared::destroy_archive_object_cache(); 1847 } 1848 1849 G1HeapVerifier::verify_archive_regions(); 1850 } 1851 1852 void MetaspaceShared::dump_closed_archive_heap_objects( 1853 GrowableArray<MemRegion> * closed_archive) { 1854 assert(is_heap_object_archiving_allowed(), "Cannot dump java heap objects"); 1855 1856 Thread* THREAD = Thread::current(); 1857 G1CollectedHeap::heap()->begin_archive_alloc_range(); 1858 1859 // Archive interned string objects 1860 StringTable::write_to_archive(); 1861 1862 G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive, 1863 os::vm_allocation_granularity()); 1864 } 1865 1866 void MetaspaceShared::dump_open_archive_heap_objects( 1867 GrowableArray<MemRegion> * open_archive) { 1868 assert(UseG1GC, "Only support G1 GC"); 1869 assert(UseCompressedOops && UseCompressedClassPointers, 1870 "Only support UseCompressedOops and UseCompressedClassPointers enabled"); 1871 1872 Thread* THREAD = Thread::current(); 1873 G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */); 1874 1875 java_lang_Class::archive_basic_type_mirrors(THREAD); 1876 1877 MetaspaceShared::archive_klass_objects(THREAD); 1878 1879 HeapShared::archive_module_graph_objects(THREAD); 1880 1881 G1CollectedHeap::heap()->end_archive_alloc_range(open_archive, 1882 os::vm_allocation_granularity()); 1883 } 1884 1885 unsigned MetaspaceShared::obj_hash(oop const& p) { 1886 assert(!p->mark()->has_bias_pattern(), 1887 "this object should never have been locked"); // so identity_hash won't safepoin 1888 unsigned hash = (unsigned)p->identity_hash(); 1889 return hash; 1890 } 1891 1892 MetaspaceShared::ArchivedObjectCache* MetaspaceShared::_archive_object_cache = NULL; 1893 oop MetaspaceShared::find_archived_heap_object(oop obj) { 1894 assert(DumpSharedSpaces, "dump-time only"); 1895 ArchivedObjectCache* cache = MetaspaceShared::archive_object_cache(); 1896 oop* p = cache->get(obj); 1897 if (p != NULL) { 1898 return *p; 1899 } else { 1900 return NULL; 1901 } 1902 } 1903 1904 oop MetaspaceShared::archive_heap_object(oop obj, Thread* THREAD) { 1905 assert(DumpSharedSpaces, "dump-time only"); 1906 1907 oop ao = find_archived_heap_object(obj); 1908 if (ao != NULL) { 1909 // already archived 1910 return ao; 1911 } 1912 1913 int len = obj->size(); 1914 if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) { 1915 return NULL; 1916 } 1917 1918 int hash = obj->identity_hash(); 1919 oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len); 1920 if (archived_oop != NULL) { 1921 Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)archived_oop, len); 1922 relocate_klass_ptr(archived_oop); 1923 ArchivedObjectCache* cache = MetaspaceShared::archive_object_cache(); 1924 cache->put(obj, archived_oop); 1925 } 1926 log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT, 1927 p2i(obj), p2i(archived_oop)); 1928 return archived_oop; 1929 } 1930 1931 oop MetaspaceShared::materialize_archived_object(oop obj) { 1932 if (obj != NULL) { 1933 return G1CollectedHeap::heap()->materialize_archived_object(obj); 1934 } 1935 return NULL; 1936 } 1937 1938 void MetaspaceShared::archive_klass_objects(Thread* THREAD) { 1939 int i; 1940 for (i = 0; i < _global_klass_objects->length(); i++) { 1941 Klass* k = _global_klass_objects->at(i); 1942 1943 // archive mirror object 1944 java_lang_Class::archive_mirror(k, CHECK); 1945 1946 // archive the resolved_referenes array 1947 if (k->is_instance_klass()) { 1948 InstanceKlass* ik = InstanceKlass::cast(k); 1949 ik->constants()->archive_resolved_references(THREAD); 1950 } 1951 } 1952 } 1953 1954 bool MetaspaceShared::is_archive_object(oop p) { 1955 return (p == NULL) ? false : G1ArchiveAllocator::is_archive_object(p); 1956 } 1957 1958 void MetaspaceShared::fixup_mapped_heap_regions() { 1959 FileMapInfo *mapinfo = FileMapInfo::current_info(); 1960 mapinfo->fixup_mapped_heap_regions(); 1961 } 1962 #endif // INCLUDE_CDS_JAVA_HEAP 1963 1964 // Closure for serializing initialization data in from a data area 1965 // (ptr_array) read from the shared file. 1966 1967 class ReadClosure : public SerializeClosure { 1968 private: 1969 intptr_t** _ptr_array; 1970 1971 inline intptr_t nextPtr() { 1972 return *(*_ptr_array)++; 1973 } 1974 1975 public: 1976 ReadClosure(intptr_t** ptr_array) { _ptr_array = ptr_array; } 1977 1978 void do_ptr(void** p) { 1979 assert(*p == NULL, "initializing previous initialized pointer."); 1980 intptr_t obj = nextPtr(); 1981 assert((intptr_t)obj >= 0 || (intptr_t)obj < -100, 1982 "hit tag while initializing ptrs."); 1983 *p = (void*)obj; 1984 } 1985 1986 void do_u4(u4* p) { 1987 intptr_t obj = nextPtr(); 1988 *p = (u4)(uintx(obj)); 1989 } 1990 1991 void do_tag(int tag) { 1992 int old_tag; 1993 old_tag = (int)(intptr_t)nextPtr(); 1994 // do_int(&old_tag); 1995 assert(tag == old_tag, "old tag doesn't match"); 1996 FileMapInfo::assert_mark(tag == old_tag); 1997 } 1998 1999 void do_oop(oop *p) { 2000 narrowOop o = (narrowOop)nextPtr(); 2001 if (o == 0 || !MetaspaceShared::open_archive_heap_region_mapped()) { 2002 p = NULL; 2003 } else { 2004 assert(MetaspaceShared::is_heap_object_archiving_allowed(), 2005 "Archived heap object is not allowed"); 2006 assert(MetaspaceShared::open_archive_heap_region_mapped(), 2007 "Open archive heap region is not mapped"); 2008 *p = CompressedOops::decode_not_null(o); 2009 } 2010 } 2011 2012 void do_region(u_char* start, size_t size) { 2013 assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment"); 2014 assert(size % sizeof(intptr_t) == 0, "bad size"); 2015 do_tag((int)size); 2016 while (size > 0) { 2017 *(intptr_t*)start = nextPtr(); 2018 start += sizeof(intptr_t); 2019 size -= sizeof(intptr_t); 2020 } 2021 } 2022 2023 bool reading() const { return true; } 2024 }; 2025 2026 // Return true if given address is in the misc data region 2027 bool MetaspaceShared::is_in_shared_region(const void* p, int idx) { 2028 return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx); 2029 } 2030 2031 bool MetaspaceShared::is_in_trampoline_frame(address addr) { 2032 if (UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc)) { 2033 return true; 2034 } 2035 return false; 2036 } 2037 2038 void MetaspaceShared::print_shared_spaces() { 2039 if (UseSharedSpaces) { 2040 FileMapInfo::current_info()->print_shared_spaces(); 2041 } 2042 } 2043 2044 2045 // Map shared spaces at requested addresses and return if succeeded. 2046 bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) { 2047 size_t image_alignment = mapinfo->alignment(); 2048 2049 #ifndef _WINDOWS 2050 // Map in the shared memory and then map the regions on top of it. 2051 // On Windows, don't map the memory here because it will cause the 2052 // mappings of the regions to fail. 2053 ReservedSpace shared_rs = mapinfo->reserve_shared_memory(); 2054 if (!shared_rs.is_reserved()) return false; 2055 #endif 2056 2057 assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces"); 2058 2059 char* ro_base = NULL; char* ro_top; 2060 char* rw_base = NULL; char* rw_top; 2061 char* mc_base = NULL; char* mc_top; 2062 char* md_base = NULL; char* md_top; 2063 char* od_base = NULL; char* od_top; 2064 2065 // Map each shared region 2066 if ((mc_base = mapinfo->map_region(mc, &mc_top)) != NULL && 2067 (rw_base = mapinfo->map_region(rw, &rw_top)) != NULL && 2068 (ro_base = mapinfo->map_region(ro, &ro_top)) != NULL && 2069 (md_base = mapinfo->map_region(md, &md_top)) != NULL && 2070 (od_base = mapinfo->map_region(od, &od_top)) != NULL && 2071 (image_alignment == (size_t)os::vm_allocation_granularity()) && 2072 mapinfo->validate_shared_path_table()) { 2073 // Success -- set up MetaspaceObj::_shared_metaspace_{base,top} for 2074 // fast checking in MetaspaceShared::is_in_shared_metaspace() and 2075 // MetaspaceObj::is_shared(). 2076 // 2077 // We require that mc->rw->ro->md->od to be laid out consecutively, with no 2078 // gaps between them. That way, we can ensure that the OS won't be able to 2079 // allocate any new memory spaces inside _shared_metaspace_{base,top}, which 2080 // would mess up the simple comparision in MetaspaceShared::is_in_shared_metaspace(). 2081 assert(mc_base < ro_base && mc_base < rw_base && mc_base < md_base && mc_base < od_base, "must be"); 2082 assert(od_top > ro_top && od_top > rw_top && od_top > md_top && od_top > mc_top , "must be"); 2083 assert(mc_top == rw_base, "must be"); 2084 assert(rw_top == ro_base, "must be"); 2085 assert(ro_top == md_base, "must be"); 2086 assert(md_top == od_base, "must be"); 2087 2088 MetaspaceObj::_shared_metaspace_base = (void*)mc_base; 2089 MetaspaceObj::_shared_metaspace_top = (void*)od_top; 2090 return true; 2091 } else { 2092 // If there was a failure in mapping any of the spaces, unmap the ones 2093 // that succeeded 2094 if (ro_base != NULL) mapinfo->unmap_region(ro); 2095 if (rw_base != NULL) mapinfo->unmap_region(rw); 2096 if (mc_base != NULL) mapinfo->unmap_region(mc); 2097 if (md_base != NULL) mapinfo->unmap_region(md); 2098 if (od_base != NULL) mapinfo->unmap_region(od); 2099 #ifndef _WINDOWS 2100 // Release the entire mapped region 2101 shared_rs.release(); 2102 #endif 2103 // If -Xshare:on is specified, print out the error message and exit VM, 2104 // otherwise, set UseSharedSpaces to false and continue. 2105 if (RequireSharedSpaces || PrintSharedArchiveAndExit) { 2106 vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on."); 2107 } else { 2108 FLAG_SET_DEFAULT(UseSharedSpaces, false); 2109 } 2110 return false; 2111 } 2112 } 2113 2114 // Read the miscellaneous data from the shared file, and 2115 // serialize it out to its various destinations. 2116 2117 void MetaspaceShared::initialize_shared_spaces() { 2118 FileMapInfo *mapinfo = FileMapInfo::current_info(); 2119 _cds_i2i_entry_code_buffers = mapinfo->cds_i2i_entry_code_buffers(); 2120 _cds_i2i_entry_code_buffers_size = mapinfo->cds_i2i_entry_code_buffers_size(); 2121 _core_spaces_size = mapinfo->core_spaces_size(); 2122 char* buffer = mapinfo->misc_data_patching_start(); 2123 clone_cpp_vtables((intptr_t*)buffer); 2124 2125 // The rest of the data is now stored in the RW region 2126 buffer = mapinfo->read_only_tables_start(); 2127 int sharedDictionaryLen = *(intptr_t*)buffer; 2128 buffer += sizeof(intptr_t); 2129 int number_of_entries = *(intptr_t*)buffer; 2130 buffer += sizeof(intptr_t); 2131 SystemDictionary::set_shared_dictionary((HashtableBucket<mtClass>*)buffer, 2132 sharedDictionaryLen, 2133 number_of_entries); 2134 buffer += sharedDictionaryLen; 2135 2136 // The following data are the linked list elements 2137 // (HashtableEntry objects) for the shared dictionary table. 2138 2139 int len = *(intptr_t*)buffer; // skip over shared dictionary entries 2140 buffer += sizeof(intptr_t); 2141 buffer += len; 2142 2143 // The table of archived java heap object sub-graph infos 2144 buffer = HeapShared::read_archived_subgraph_infos(buffer); 2145 2146 // Verify various attributes of the archive, plus initialize the 2147 // shared string/symbol tables 2148 intptr_t* array = (intptr_t*)buffer; 2149 ReadClosure rc(&array); 2150 serialize(&rc); 2151 2152 // Initialize the run-time symbol table. 2153 SymbolTable::create_table(); 2154 2155 // Close the mapinfo file 2156 mapinfo->close(); 2157 2158 if (PrintSharedArchiveAndExit) { 2159 if (PrintSharedDictionary) { 2160 tty->print_cr("\nShared classes:\n"); 2161 SystemDictionary::print_shared(tty); 2162 } 2163 if (_archive_loading_failed) { 2164 tty->print_cr("archive is invalid"); 2165 vm_exit(1); 2166 } else { 2167 tty->print_cr("archive is valid"); 2168 vm_exit(0); 2169 } 2170 } 2171 } 2172 2173 // JVM/TI RedefineClasses() support: 2174 bool MetaspaceShared::remap_shared_readonly_as_readwrite() { 2175 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 2176 2177 if (UseSharedSpaces) { 2178 // remap the shared readonly space to shared readwrite, private 2179 FileMapInfo* mapinfo = FileMapInfo::current_info(); 2180 if (!mapinfo->remap_shared_readonly_as_readwrite()) { 2181 return false; 2182 } 2183 _remapped_readwrite = true; 2184 } 2185 return true; 2186 } 2187 2188 void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) { 2189 // This is highly unlikely to happen on 64-bits because we have reserved a 4GB space. 2190 // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes 2191 // or so. 2192 _mc_region.print_out_of_space_msg(name, needed_bytes); 2193 _rw_region.print_out_of_space_msg(name, needed_bytes); 2194 _ro_region.print_out_of_space_msg(name, needed_bytes); 2195 _md_region.print_out_of_space_msg(name, needed_bytes); 2196 _od_region.print_out_of_space_msg(name, needed_bytes); 2197 2198 vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name), 2199 "Please reduce the number of shared classes."); 2200 }