/*
 * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "logging/log.hpp"
#include "memory/archiveUtils.inline.hpp"
#include "memory/dynamicArchive.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.hpp"
#include "oops/objArrayKlass.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/bitMap.inline.hpp"

#ifndef O_BINARY        // if defined (Win32) use binary files.
#define O_BINARY 0      // otherwise do nothing.
#endif

class DynamicArchiveBuilder : ResourceObj {
  static unsigned my_hash(const address& a) {
    return primitive_hash<address>(a);
  }
  static bool my_equals(const address& a0, const address& a1) {
    return primitive_equals<address>(a0, a1);
  }
  typedef ResourceHashtable<
      address, address,
      DynamicArchiveBuilder::my_hash,   // solaris compiler doesn't like: primitive_hash<address>
      DynamicArchiveBuilder::my_equals, // solaris compiler doesn't like: primitive_equals<address>
      16384, ResourceObj::C_HEAP> RelocationTable;
  RelocationTable _new_loc_table;

  static intx _buffer_to_target_delta;

  DumpRegion* _current_dump_space;

  static size_t reserve_alignment() {
    return Metaspace::reserve_alignment();
  }

  static const int _total_dump_regions = 3;
  int _num_dump_regions_used;

public:
  void mark_pointer(address* ptr_loc) {
    ArchivePtrMarker::mark_pointer(ptr_loc);
  }

  DumpRegion* current_dump_space() const {
    return _current_dump_space;
  }

  bool is_in_buffer_space(address p) const {
    return (_alloc_bottom <= p && p < (address)current_dump_space()->top());
  }

  template <typename T> bool is_in_target_space(T target_obj) const {
    address buff_obj = address(target_obj) - _buffer_to_target_delta;
    return is_in_buffer_space(buff_obj);
  }

  template <typename T> bool is_in_buffer_space(T obj) const {
    return is_in_buffer_space(address(obj));
  }

  template <typename T> T to_target_no_check(T obj) const {
    return (T)(address(obj) + _buffer_to_target_delta);
  }

  template <typename T> T to_target(T obj) const {
    assert(is_in_buffer_space(obj), "must be");
    return (T)(address(obj) + _buffer_to_target_delta);
  }

  template <typename T> T get_new_loc(T obj) {
    address* pp = _new_loc_table.get((address)obj);
    if (pp == NULL) {
      // Excluded klasses are not copied
      return NULL;
    } else {
      return (T)*pp;
    }
  }

  address get_new_loc(MetaspaceClosure::Ref* ref) {
    return get_new_loc(ref->obj());
  }

  template <typename T> bool has_new_loc(T obj) {
    address* pp = _new_loc_table.get((address)obj);
    return pp != NULL;
  }

  static int dynamic_dump_method_comparator(Method* a, Method* b) {
    Symbol* a_name = a->name();
    Symbol* b_name = b->name();

    if (a_name == b_name) {
      return 0;
    }

    if (!MetaspaceShared::is_in_shared_metaspace(a_name)) {
      // a_name points to a Symbol in the top archive.
      // When this method is called, a_name is still pointing into the output buffer.
      // Translate it to the corresponding target-space address, so that it can be
      // compared with Symbols in the base archive.
      a_name = (Symbol*)(address(a_name) + _buffer_to_target_delta);
    }
    if (!MetaspaceShared::is_in_shared_metaspace(b_name)) {
      b_name = (Symbol*)(address(b_name) + _buffer_to_target_delta);
    }

    return a_name->fast_compare(b_name);
  }

protected:
  enum FollowMode {
    make_a_copy, point_to_it, set_to_null
  };

public:
  void copy(MetaspaceClosure::Ref* ref, bool read_only) {
    int bytes = ref->size() * BytesPerWord;
    address old_obj = ref->obj();
    address new_obj = copy_impl(ref, read_only, bytes);

    assert(new_obj != NULL, "must be");
    assert(new_obj != old_obj, "must be");
    bool isnew = _new_loc_table.put(old_obj, new_obj);
    assert(isnew, "must be");
  }

  // Make a shallow copy of each eligible MetaspaceObj into the buffer.
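  // ShallowCopier is run twice from doit(): first with read_only == false to lay out
  // the RW objects, then with read_only == true for the RO objects, so that each
  // copy lands in the dump region that is current at the time of that pass.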
  class ShallowCopier: public UniqueMetaspaceClosure {
    DynamicArchiveBuilder* _builder;
    bool _read_only;
  public:
    ShallowCopier(DynamicArchiveBuilder* shuffler, bool read_only)
      : _builder(shuffler), _read_only(read_only) {}

    virtual bool do_unique_ref(Ref* orig_obj, bool read_only) {
      // This method is called exactly once for each *original* object
      // reachable from _builder->iterate_roots().
      FollowMode mode = _builder->follow_ref(orig_obj);

      if (mode == point_to_it) {
        if (read_only == _read_only) {
          log_debug(cds, dynamic)("ptr : " PTR_FORMAT " %s", p2i(orig_obj->obj()),
                                  MetaspaceObj::type_name(orig_obj->msotype()));
          address p = orig_obj->obj();
          bool isnew = _builder->_new_loc_table.put(p, p);
          assert(isnew, "must be");
        }
        return false;
      }

      if (mode == set_to_null) {
        log_debug(cds, dynamic)("nul : " PTR_FORMAT " %s", p2i(orig_obj->obj()),
                                MetaspaceObj::type_name(orig_obj->msotype()));
        return false;
      }

      if (read_only == _read_only) {
        // Make a shallow copy of orig_obj in a buffer (maintained
        // by DynamicArchiveBuilder::copy_impl()).
        _builder->copy(orig_obj, read_only);
      }
      return true;
    }
  };

  // Relocate all embedded pointer fields within a MetaspaceObj's shallow copy
  class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
    DynamicArchiveBuilder* _builder;
  public:
    ShallowCopyEmbeddedRefRelocator(DynamicArchiveBuilder* shuffler)
      : _builder(shuffler) {}

    // This method is called exactly once for each *original* object
    // reachable from _builder->iterate_roots().
    virtual bool do_unique_ref(Ref* orig_ref, bool read_only) {
      FollowMode mode = _builder->follow_ref(orig_ref);

      if (mode == point_to_it) {
        // We did not make a copy of this object
        // and we have nothing to update
        assert(_builder->get_new_loc(orig_ref) == NULL ||
               _builder->get_new_loc(orig_ref) == orig_ref->obj(), "must be");
        return false;
      }

      if (mode == set_to_null) {
        // We did not make a copy of this object
        // and we have nothing to update
        assert(!_builder->has_new_loc(orig_ref->obj()), "must not be copied or pointed to");
        return false;
      }

      // - orig_obj points to the original object.
      // - new_obj points to the shallow copy (created by ShallowCopier)
      //   of orig_obj. new_obj is NULL if the orig_obj is excluded
      address orig_obj = orig_ref->obj();
      address new_obj = _builder->get_new_loc(orig_ref);

      assert(new_obj != orig_obj, "must be");
#ifdef ASSERT
      if (new_obj == NULL) {
        if (orig_ref->msotype() == MetaspaceObj::ClassType) {
          Klass* k = (Klass*)orig_obj;
          assert(k->is_instance_klass() &&
                 SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(k)),
                 "orig_obj must be excluded Class");
        }
      }
#endif

      log_debug(cds, dynamic)("Relocating " PTR_FORMAT " %s", p2i(new_obj),
                              MetaspaceObj::type_name(orig_ref->msotype()));
      if (new_obj != NULL) {
        EmbeddedRefUpdater updater(_builder, orig_obj, new_obj);
        orig_ref->metaspace_pointers_do(&updater);
      }

      return true; // keep recursing until every object is visited exactly once.
    }
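
    // push_special() is invoked for "special" references registered by an object's
    // metaspace_pointers_do() -- currently only Method entry points (_method_entry_ref).
    // Nothing is relocated here; we only check that the copied value matches the
    // original and mark the pointer's location in the copy so ArchivePtrMarker can
    // relocate it along with everything else.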
    virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) {
      assert(type == _method_entry_ref, "only special type allowed for now");
      address obj = ref->obj();
      address new_obj = _builder->get_new_loc(ref);
      size_t offset = pointer_delta(p, obj, sizeof(u1));
      intptr_t* new_p = (intptr_t*)(new_obj + offset);
      assert(*p == *new_p, "must be a copy");
      ArchivePtrMarker::mark_pointer((address*)new_p);
    }
  };

  class EmbeddedRefUpdater: public MetaspaceClosure {
    DynamicArchiveBuilder* _builder;
    address _orig_obj;
    address _new_obj;
  public:
    EmbeddedRefUpdater(DynamicArchiveBuilder* shuffler, address orig_obj, address new_obj) :
      _builder(shuffler), _orig_obj(orig_obj), _new_obj(new_obj) {}

    // This method gets called once for each pointer field F of orig_obj.
    // We update new_obj->F to point to the new location of orig_obj->F.
    //
    // Example: Klass*  0x100 is copied to 0x400
    //          Symbol* 0x200 is copied to 0x500
    //
    // Let orig_obj == 0x100; and
    //     new_obj  == 0x400; and
    //     ((Klass*)orig_obj)->_name == 0x200;
    // Then this function effectively assigns
    //     ((Klass*)new_obj)->_name = 0x500;
    virtual bool do_ref(Ref* ref, bool read_only) {
      address new_pointee = NULL;

      if (ref->not_null()) {
        address old_pointee = ref->obj();

        FollowMode mode = _builder->follow_ref(ref);
        if (mode == point_to_it) {
          new_pointee = old_pointee;
        } else if (mode == set_to_null) {
          new_pointee = NULL;
        } else {
          new_pointee = _builder->get_new_loc(old_pointee);
        }
      }

      const char* kind = MetaspaceObj::type_name(ref->msotype());
      // offset of this field inside the original object
      intx offset = (address)ref->addr() - _orig_obj;
      _builder->update_pointer((address*)(_new_obj + offset), new_pointee, kind, offset);

      // We can't mark the pointer here, because DynamicArchiveBuilder::sort_methods
      // may re-layout the [iv]tables, which would change the offset(s) in an InstanceKlass
      // that would contain pointers. Therefore, we must mark the pointers after
      // sort_methods(), using PointerMarker.
      return false; // Do not recurse.
    }
  };

  class ExternalRefUpdater: public MetaspaceClosure {
    DynamicArchiveBuilder* _builder;

  public:
    ExternalRefUpdater(DynamicArchiveBuilder* shuffler) : _builder(shuffler) {}

    virtual bool do_ref(Ref* ref, bool read_only) {
      // ref is a pointer that lives OUTSIDE of the buffer, but points to an object inside the buffer
      if (ref->not_null()) {
        address new_loc = _builder->get_new_loc(ref);
        const char* kind = MetaspaceObj::type_name(ref->msotype());
        _builder->update_pointer(ref->addr(), new_loc, kind, 0);
        _builder->mark_pointer(ref->addr());
      }
      return false; // Do not recurse.
    }
  };
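
  // PointerMarker runs as the last pass over the object graph, after sort_methods()
  // may have re-laid-out the [iv]tables (see the note in EmbeddedRefUpdater::do_ref).
  // For every object that lives in the buffer it applies EmbeddedRefMarker, which
  // records the location of each non-NULL pointer field in ArchivePtrMarker's bitmap,
  // so that relocate_buffer_to_target() can patch exactly those locations.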
  class PointerMarker: public UniqueMetaspaceClosure {
    DynamicArchiveBuilder* _builder;

  public:
    PointerMarker(DynamicArchiveBuilder* shuffler) : _builder(shuffler) {}

    virtual bool do_unique_ref(Ref* ref, bool read_only) {
      if (_builder->is_in_buffer_space(ref->obj())) {
        EmbeddedRefMarker ref_marker(_builder);
        ref->metaspace_pointers_do(&ref_marker);
        return true; // keep recursing until every buffered object is visited exactly once.
      } else {
        return false;
      }
    }
  };

  class EmbeddedRefMarker: public MetaspaceClosure {
    DynamicArchiveBuilder* _builder;

  public:
    EmbeddedRefMarker(DynamicArchiveBuilder* shuffler) : _builder(shuffler) {}
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        _builder->mark_pointer(ref->addr());
      }
      return false; // Do not recurse.
    }
  };

  void update_pointer(address* addr, address value, const char* kind, uintx offset, bool is_mso_pointer=true) {
    // Propagate the mask bits to the new value -- see comments above MetaspaceClosure::obj()
    if (is_mso_pointer) {
      const uintx FLAG_MASK = 0x03;
      uintx mask_bits = uintx(*addr) & FLAG_MASK;
      value = (address)(uintx(value) | mask_bits);
    }

    if (*addr != value) {
      log_debug(cds, dynamic)("Update (%18s*) %3d [" PTR_FORMAT "] " PTR_FORMAT " -> " PTR_FORMAT,
                              kind, int(offset), p2i(addr), p2i(*addr), p2i(value));
      *addr = value;
    }
  }

private:
  GrowableArray<Symbol*>* _symbols;        // symbols to dump
  GrowableArray<InstanceKlass*>* _klasses; // klasses to dump

  void append(InstanceKlass* k) { _klasses->append(k); }
  void append(Symbol* s)        { _symbols->append(s); }

  class GatherKlassesAndSymbols : public UniqueMetaspaceClosure {
    DynamicArchiveBuilder* _builder;
    bool _read_only;

  public:
    GatherKlassesAndSymbols(DynamicArchiveBuilder* builder)
      : _builder(builder) {}

    virtual bool do_unique_ref(Ref* ref, bool read_only) {
      if (_builder->follow_ref(ref) != make_a_copy) {
        return false;
      }
      if (ref->msotype() == MetaspaceObj::ClassType) {
        Klass* klass = (Klass*)ref->obj();
        assert(klass->is_klass(), "must be");
        if (klass->is_instance_klass()) {
          InstanceKlass* ik = InstanceKlass::cast(klass);
          assert(!SystemDictionaryShared::is_excluded_class(ik), "must be");
          _builder->append(ik);
          _builder->_estimated_metsapceobj_bytes += BytesPerWord; // See RunTimeSharedClassInfo::get_for()
        }
      } else if (ref->msotype() == MetaspaceObj::SymbolType) {
        _builder->append((Symbol*)ref->obj());
      }

      int bytes = ref->size() * BytesPerWord;
      _builder->_estimated_metsapceobj_bytes += bytes;

      return true;
    }
  };
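
  // Decide what to do with an object reached during root iteration:
  //
  //   point_to_it -- the object is already in the base archive (shared metaspace);
  //                  don't copy it, just keep pointing to the existing copy.
  //   set_to_null -- the object must not be archived (MethodData, excluded classes,
  //                  array klasses); references to it are cleared.
  //   make_a_copy -- everything else is shallow-copied into the dump buffer.
  //
  // For example, an InstanceKlass loaded at dump time is make_a_copy, while the
  // Symbol for its name may be point_to_it if that Symbol already exists in the
  // base archive.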
  FollowMode follow_ref(MetaspaceClosure::Ref *ref) {
    address obj = ref->obj();
    if (MetaspaceShared::is_in_shared_metaspace(obj)) {
      // Don't dump existing shared metadata again.
      return point_to_it;
    } else if (ref->msotype() == MetaspaceObj::MethodDataType) {
      return set_to_null;
    } else {
      if (ref->msotype() == MetaspaceObj::ClassType) {
        Klass* klass = (Klass*)ref->obj();
        assert(klass->is_klass(), "must be");
        if (klass->is_instance_klass()) {
          InstanceKlass* ik = InstanceKlass::cast(klass);
          if (SystemDictionaryShared::is_excluded_class(ik)) {
            ResourceMark rm;
            log_debug(cds, dynamic)("Skipping class (excluded): %s", klass->external_name());
            return set_to_null;
          }
        } else if (klass->is_array_klass()) {
          // Don't support archiving of array klasses for now.
          ResourceMark rm;
          log_debug(cds, dynamic)("Skipping class (array): %s", klass->external_name());
          return set_to_null;
        }
      }

      return make_a_copy;
    }
  }

  address copy_impl(MetaspaceClosure::Ref* ref, bool read_only, int bytes) {
    if (ref->msotype() == MetaspaceObj::ClassType) {
      // Save a pointer immediately in front of an InstanceKlass, so
      // we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo*
      // without building another hashtable. See RunTimeSharedClassInfo::get_for()
      // in systemDictionaryShared.cpp.
      address obj = ref->obj();
      Klass* klass = (Klass*)obj;
      if (klass->is_instance_klass()) {
        SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
        current_dump_space()->allocate(sizeof(address), BytesPerWord);
      }
    }
    address p = (address)current_dump_space()->allocate(bytes);
    address obj = ref->obj();
    log_debug(cds, dynamic)("COPY: " PTR_FORMAT " ==> " PTR_FORMAT " %5d %s",
                            p2i(obj), p2i(p), bytes,
                            MetaspaceObj::type_name(ref->msotype()));
    memcpy(p, obj, bytes);
    intptr_t* cloned_vtable = MetaspaceShared::fix_cpp_vtable_for_dynamic_archive(ref->msotype(), p);
    if (cloned_vtable != NULL) {
      update_pointer((address*)p, (address)cloned_vtable, "vtb", 0, /*is_mso_pointer*/false);
      mark_pointer((address*)p);
    }

    return (address)p;
  }

  DynamicArchiveHeader *_header;
  address _alloc_bottom;
  address _last_verified_top;
  size_t _other_region_used_bytes;

  // Conservative estimate for number of bytes needed for:
  size_t _estimated_metsapceobj_bytes;   // all archived MetaspaceObj's.
  size_t _estimated_hashtable_bytes;     // symbol table and dictionaries
  size_t _estimated_trampoline_bytes;    // method entry trampolines

  size_t estimate_archive_size();
  size_t estimate_trampoline_size();
  size_t estimate_class_file_size();
  address reserve_space_and_init_buffer_to_target_delta();
  void init_header(address addr);
  void release_header();
  void make_trampolines();
  void make_klasses_shareable();
  void sort_methods(InstanceKlass* ik) const;
  void set_symbols_permanent();
  void relocate_buffer_to_target();
  void write_archive(char* serialized_data);
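
  // The dynamic archive is laid out in three dump regions, in the same MC->RW->RO
  // order as the base archive (_total_dump_regions == 3): MC holds the method entry
  // trampolines, RW holds metadata that may be written at run time, and RO holds
  // read-only metadata plus the serialized symbol table and dictionaries.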
  void init_first_dump_space(address reserved_bottom) {
    address first_space_base = reserved_bottom;
    DumpRegion* mc_space = MetaspaceShared::misc_code_dump_space();
    DumpRegion* rw_space = MetaspaceShared::read_write_dump_space();

    // Use the same MC->RW->RO ordering as in the base archive.
    MetaspaceShared::init_shared_dump_space(mc_space, first_space_base);
    _current_dump_space = mc_space;
    _last_verified_top = first_space_base;
    _num_dump_regions_used = 1;
  }

  void reserve_buffers_for_trampolines() {
    size_t n = _estimated_trampoline_bytes;
    assert(n >= SharedRuntime::trampoline_size(), "don't want to be empty");
    MetaspaceShared::misc_code_space_alloc(n);
  }

public:
  DynamicArchiveBuilder() {
    _klasses = new (ResourceObj::C_HEAP, mtClass) GrowableArray<InstanceKlass*>(100, true, mtInternal);
    _symbols = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Symbol*>(1000, true, mtInternal);

    _estimated_metsapceobj_bytes = 0;
    _estimated_hashtable_bytes = 0;
    _estimated_trampoline_bytes = 0;

    _num_dump_regions_used = 0;
  }

  void start_dump_space(DumpRegion* next) {
    address bottom = _last_verified_top;
    address top = (address)(current_dump_space()->top());
    _other_region_used_bytes += size_t(top - bottom);

    MetaspaceShared::pack_dump_space(current_dump_space(), next, MetaspaceShared::shared_rs());
    _current_dump_space = next;
    _num_dump_regions_used++;

    _last_verified_top = (address)(current_dump_space()->top());
  }

  void verify_estimate_size(size_t estimate, const char* which) {
    address bottom = _last_verified_top;
    address top = (address)(current_dump_space()->top());
    size_t used = size_t(top - bottom) + _other_region_used_bytes;
    int diff = int(estimate) - int(used);

    log_info(cds)("%s estimate = " SIZE_FORMAT " used = " SIZE_FORMAT "; diff = %d bytes", which, estimate, used, diff);
    assert(diff >= 0, "Estimate is too small");

    _last_verified_top = top;
    _other_region_used_bytes = 0;
  }

  // Do this before and after the archive dump to see if any corruption
  // is caused by dynamic dumping.
  void verify_universe(const char* info) {
    if (VerifyBeforeExit) {
      log_info(cds)("Verify %s", info);
      HandleMark hm;
      // Among other things, this ensures that Eden top is correct.
      Universe::heap()->prepare_for_verify();
      Universe::verify(info);
    }
  }

  void doit() {
    verify_universe("Before CDS dynamic dump");
    DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);
    SystemDictionaryShared::check_excluded_classes();

    {
      ResourceMark rm;
      GatherKlassesAndSymbols gatherer(this);

      SystemDictionaryShared::dumptime_classes_do(&gatherer);
      SymbolTable::metaspace_pointers_do(&gatherer);
      FileMapInfo::metaspace_pointers_do(&gatherer);

      gatherer.finish();
    }

    // rw space starts ...
    address reserved_bottom = reserve_space_and_init_buffer_to_target_delta();
    init_header(reserved_bottom);

    CHeapBitMap ptrmap;
    ArchivePtrMarker::initialize(&ptrmap, (address*)reserved_bottom, (address*)current_dump_space()->top());

    reserve_buffers_for_trampolines();
    verify_estimate_size(_estimated_trampoline_bytes, "Trampolines");

    start_dump_space(MetaspaceShared::read_write_dump_space());

    log_info(cds, dynamic)("Copying %d klasses and %d symbols",
                           _klasses->length(), _symbols->length());

    {
      assert(current_dump_space() == MetaspaceShared::read_write_dump_space(),
             "Current dump space is not rw space");
      // shallow-copy RW objects, if necessary
      ResourceMark rm;
      ShallowCopier rw_copier(this, false);
      iterate_roots(&rw_copier);
    }

    // ro space starts ...
    DumpRegion* ro_space = MetaspaceShared::read_only_dump_space();
    {
      start_dump_space(ro_space);

      // shallow-copy RO objects, if necessary
      ResourceMark rm;
      ShallowCopier ro_copier(this, true);
      iterate_roots(&ro_copier);
    }

    {
      log_info(cds)("Relocating embedded pointers ... ");
      ResourceMark rm;
      ShallowCopyEmbeddedRefRelocator emb_reloc(this);
      iterate_roots(&emb_reloc);
    }

    {
      log_info(cds)("Relocating external roots ... ");
      ResourceMark rm;
      ExternalRefUpdater ext_reloc(this);
      iterate_roots(&ext_reloc);
    }

    verify_estimate_size(_estimated_metsapceobj_bytes, "MetaspaceObjs");

    char* serialized_data;
    {
      set_symbols_permanent();

      // Write the symbol table and system dictionaries to the RO space.
      // Note that these tables still point to the *original* objects
      // (because they were not processed by ExternalRefUpdater), so
      // they would need to call DynamicArchive::original_to_target() to
      // get the correct addresses.
      assert(current_dump_space() == ro_space, "Must be RO space");
      SymbolTable::write_to_archive(false);
      SystemDictionaryShared::write_to_archive(false);

      serialized_data = ro_space->top();
      WriteClosure wc(ro_space);
      SymbolTable::serialize_shared_table_header(&wc, false);
      SystemDictionaryShared::serialize_dictionary_headers(&wc, false);
    }

    verify_estimate_size(_estimated_hashtable_bytes, "Hashtables");

    make_trampolines();
    make_klasses_shareable();

    {
      log_info(cds)("Final relocation of pointers ... ");
      ResourceMark rm;
      PointerMarker marker(this);
      iterate_roots(&marker);
      relocate_buffer_to_target();
    }

    write_archive(serialized_data);
    release_header();

    assert(_num_dump_regions_used == _total_dump_regions, "must be");
    verify_universe("After CDS dynamic dump");
  }
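
  // Called once per pass in doit(), each time with a different closure. The roots
  // are the klasses and symbols collected by GatherKlassesAndSymbols, plus the
  // pointers inside FileMapInfo; see the comment at the end of this method for why
  // the dictionary and symbol table are not scanned again here.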
  void iterate_roots(MetaspaceClosure* it) {
    int i;
    int num_klasses = _klasses->length();
    for (i = 0; i < num_klasses; i++) {
      it->push(&_klasses->at(i));
    }

    int num_symbols = _symbols->length();
    for (i = 0; i < num_symbols; i++) {
      it->push(&_symbols->at(i));
    }

    FileMapInfo::metaspace_pointers_do(it);

    // Do not call these again, as we have already collected all the classes and symbols
    // that we want to archive. Also, these calls would corrupt the tables when
    // ExternalRefUpdater is used.
    //
    // SystemDictionaryShared::dumptime_classes_do(it);
    // SymbolTable::metaspace_pointers_do(it);

    it->finish();
  }
};

intx DynamicArchiveBuilder::_buffer_to_target_delta;


size_t DynamicArchiveBuilder::estimate_archive_size() {
  // size of the symbol table and two dictionaries, plus the RunTimeSharedClassInfo's
  _estimated_hashtable_bytes = 0;
  _estimated_hashtable_bytes += SymbolTable::estimate_size_for_archive();
  _estimated_hashtable_bytes += SystemDictionaryShared::estimate_size_for_archive();

  _estimated_trampoline_bytes = estimate_trampoline_size();

  size_t total = 0;

  total += _estimated_metsapceobj_bytes;
  total += _estimated_hashtable_bytes;
  total += _estimated_trampoline_bytes;

  // allow fragmentation at the end of each dump region
  total += _total_dump_regions * reserve_alignment();

  return align_up(total, reserve_alignment());
}

address DynamicArchiveBuilder::reserve_space_and_init_buffer_to_target_delta() {
  size_t total = estimate_archive_size();
  ReservedSpace rs = MetaspaceShared::reserve_shared_space(total);
  if (!rs.is_reserved()) {
    log_error(cds, dynamic)("Failed to reserve %d bytes of output buffer.", (int)total);
    vm_direct_exit(0);
  }

  address buffer_base = (address)rs.base();
  log_info(cds, dynamic)("Reserved output buffer space at : " PTR_FORMAT " [%d bytes]",
                         p2i(buffer_base), (int)total);
  MetaspaceShared::set_shared_rs(rs);

  // At run time, we will mmap the dynamic archive at target_space_bottom.
  // However, at dump time, we may not be able to write into the target_space,
  // as it's occupied by dynamically loaded Klasses. So we allocate a buffer
  // at an arbitrary location chosen by the OS. We will write all the dynamically
  // archived classes into this buffer. At the final stage of dumping, we relocate
  // all pointers that are inside the buffer_space to point to their (runtime)
  // target location inside the target_space.
  address target_space_bottom =
    (address)align_up(MetaspaceShared::shared_metaspace_top(), reserve_alignment());
  _buffer_to_target_delta = intx(target_space_bottom) - intx(buffer_base);

  log_info(cds, dynamic)("Target archive space at : " PTR_FORMAT, p2i(target_space_bottom));
  log_info(cds, dynamic)("Buffer-space to target-space delta : " PTR_FORMAT, p2i((address)_buffer_to_target_delta));

  return buffer_base;
}

void DynamicArchiveBuilder::init_header(address reserved_bottom) {
  _alloc_bottom = reserved_bottom;
  _last_verified_top = reserved_bottom;
  _other_region_used_bytes = 0;

  init_first_dump_space(reserved_bottom);

  FileMapInfo* mapinfo = new FileMapInfo(false);
  assert(FileMapInfo::dynamic_info() == mapinfo, "must be");
  _header = mapinfo->dynamic_header();

  Thread* THREAD = Thread::current();
  FileMapInfo* base_info = FileMapInfo::current_info();
  _header->set_base_header_crc(base_info->crc());
  for (int i = 0; i < MetaspaceShared::n_regions; i++) {
    _header->set_base_region_crc(i, base_info->space_crc(i));
  }
  _header->populate(base_info, os::vm_allocation_granularity());
}

void DynamicArchiveBuilder::release_header() {
  // We temporarily allocated a dynamic FileMapInfo for dumping, which makes it appear we
  // have mapped a dynamic archive, but we actually have not. We are in a safepoint now.
  // Let's free it so that if class loading happens after we leave the safepoint, nothing
  // bad will happen.
  assert(SafepointSynchronize::is_at_safepoint(), "must be");
  FileMapInfo *mapinfo = FileMapInfo::dynamic_info();
  assert(mapinfo != NULL && _header == mapinfo->dynamic_header(), "must be");
  delete mapinfo;
  assert(!DynamicArchive::is_mapped(), "must be");
  _header = NULL;
}

size_t DynamicArchiveBuilder::estimate_trampoline_size() {
  size_t total = 0;
  size_t each_method_bytes =
    align_up(SharedRuntime::trampoline_size(), BytesPerWord) +
    align_up(sizeof(AdapterHandlerEntry*), BytesPerWord);

  for (int i = 0; i < _klasses->length(); i++) {
    InstanceKlass* ik = _klasses->at(i);
    Array<Method*>* methods = ik->methods();
    total += each_method_bytes * methods->length();
  }
  if (total == 0) {
    // We have nothing to archive, but let's avoid having an empty region.
    total = SharedRuntime::trampoline_size();
  }
  return total;
}

void DynamicArchiveBuilder::make_trampolines() {
  DumpRegion* mc_space = MetaspaceShared::misc_code_dump_space();
  char* p = mc_space->base();
  for (int i = 0; i < _klasses->length(); i++) {
    InstanceKlass* ik = _klasses->at(i);
    Array<Method*>* methods = ik->methods();
    for (int j = 0; j < methods->length(); j++) {
      Method* m = methods->at(j);
      address c2i_entry_trampoline = (address)p;
      p += SharedRuntime::trampoline_size();
      assert(p >= mc_space->base() && p <= mc_space->top(), "must be");
      m->set_from_compiled_entry(to_target(c2i_entry_trampoline));

      AdapterHandlerEntry** adapter_trampoline = (AdapterHandlerEntry**)p;
      p += sizeof(AdapterHandlerEntry*);
      assert(p >= mc_space->base() && p <= mc_space->top(), "must be");
      *adapter_trampoline = NULL;
      m->set_adapter_trampoline(to_target(adapter_trampoline));
    }
  }

  guarantee(p <= mc_space->top(), "Estimate of trampoline size is insufficient");
}

void DynamicArchiveBuilder::make_klasses_shareable() {
  int i, count = _klasses->length();

  InstanceKlass::disable_method_binary_search();
  for (i = 0; i < count; i++) {
    InstanceKlass* ik = _klasses->at(i);
    sort_methods(ik);
  }

  for (i = 0; i < count; i++) {
    InstanceKlass* ik = _klasses->at(i);
    ClassLoaderData *cld = ik->class_loader_data();
    if (cld->is_boot_class_loader_data()) {
      ik->set_shared_class_loader_type(ClassLoader::BOOT_LOADER);
    }
    else if (cld->is_platform_class_loader_data()) {
      ik->set_shared_class_loader_type(ClassLoader::PLATFORM_LOADER);
    }
    else if (cld->is_system_class_loader_data()) {
      ik->set_shared_class_loader_type(ClassLoader::APP_LOADER);
    }

    MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread::current(), ik);
    ik->remove_unshareable_info();

    assert(ik->array_klasses() == NULL, "sanity");

    if (log_is_enabled(Debug, cds, dynamic)) {
      ResourceMark rm;
      log_debug(cds, dynamic)("klasses[%4i] = " PTR_FORMAT " %s", i, p2i(to_target(ik)), ik->external_name());
    }
  }
}

// The address order of the copied Symbols may be different than when the original
// klasses were created. Re-sort all the tables. See Method::sort_methods().
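// The comparator used here (dynamic_dump_method_comparator) compares the *target*
// addresses of the name Symbols, so that at run time the methods appear sorted by
// Symbol address and name-based binary search keeps working; this is also why
// make_klasses_shareable() calls InstanceKlass::disable_method_binary_search()
// before re-sorting, since the dump-time address order no longer matches.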
void DynamicArchiveBuilder::sort_methods(InstanceKlass* ik) const {
  assert(ik != NULL, "DynamicArchiveBuilder currently doesn't support dumping the base archive");
  if (MetaspaceShared::is_in_shared_metaspace(ik)) {
    // We have reached a supertype that's already in the base archive
    return;
  }

  if (ik->java_mirror() == NULL) {
    // NULL mirror means this class has already been visited and methods are already sorted
    return;
  }
  ik->remove_java_mirror();

  if (log_is_enabled(Debug, cds, dynamic)) {
    ResourceMark rm;
    log_debug(cds, dynamic)("sorting methods for " PTR_FORMAT " %s", p2i(to_target(ik)), ik->external_name());
  }

  // Make sure all supertypes have been sorted
  sort_methods(ik->java_super());
  Array<InstanceKlass*>* interfaces = ik->local_interfaces();
  int len = interfaces->length();
  for (int i = 0; i < len; i++) {
    sort_methods(interfaces->at(i));
  }

#ifdef ASSERT
  if (ik->methods() != NULL) {
    for (int m = 0; m < ik->methods()->length(); m++) {
      Symbol* name = ik->methods()->at(m)->name();
      assert(MetaspaceShared::is_in_shared_metaspace(name) || is_in_buffer_space(name), "must be");
    }
  }
  if (ik->default_methods() != NULL) {
    for (int m = 0; m < ik->default_methods()->length(); m++) {
      Symbol* name = ik->default_methods()->at(m)->name();
      assert(MetaspaceShared::is_in_shared_metaspace(name) || is_in_buffer_space(name), "must be");
    }
  }
#endif

  Thread* THREAD = Thread::current();
  Method::sort_methods(ik->methods(), /*set_idnums=*/true, dynamic_dump_method_comparator);
  if (ik->default_methods() != NULL) {
    Method::sort_methods(ik->default_methods(), /*set_idnums=*/false, dynamic_dump_method_comparator);
  }
  ik->vtable().initialize_vtable(true, THREAD); assert(!HAS_PENDING_EXCEPTION, "cannot fail");
  ik->itable().initialize_itable(true, THREAD); assert(!HAS_PENDING_EXCEPTION, "cannot fail");
}

void DynamicArchiveBuilder::set_symbols_permanent() {
  int count = _symbols->length();
  for (int i = 0; i < count; i++) {
    Symbol* s = _symbols->at(i);
    s->set_permanent();

    if (log_is_enabled(Trace, cds, dynamic)) {
      ResourceMark rm;
      log_trace(cds, dynamic)("symbols[%4i] = " PTR_FORMAT " %s", i, p2i(to_target(s)), s->as_quoted_ascii());
    }
  }
}

class RelocateBufferToTarget: public BitMapClosure {
  DynamicArchiveBuilder *_builder;
  address* _buffer_bottom;
  intx _buffer_to_target_delta;
public:
  RelocateBufferToTarget(DynamicArchiveBuilder* builder, address* bottom, intx delta) :
    _builder(builder), _buffer_bottom(bottom), _buffer_to_target_delta(delta) {}

  bool do_bit(size_t offset) {
    address* p = _buffer_bottom + offset;
    assert(_builder->is_in_buffer_space(p), "pointer must live in buffer space");

    address old_ptr = *p;
    if (_builder->is_in_buffer_space(old_ptr)) {
      address new_ptr = old_ptr + _buffer_to_target_delta;
      log_trace(cds, dynamic)("Final patch: @%6d [" PTR_FORMAT " -> " PTR_FORMAT "] " PTR_FORMAT " => " PTR_FORMAT,
                              (int)offset, p2i(p), p2i(_builder->to_target(p)),
                              p2i(old_ptr), p2i(new_ptr));
      *p = new_ptr;
    }

    return true; // keep iterating
  }
};

void DynamicArchiveBuilder::relocate_buffer_to_target() {
  RelocateBufferToTarget patcher(this, (address*)_alloc_bottom, _buffer_to_target_delta);
  ArchivePtrMarker::ptrmap()->iterate(&patcher);

  Array<u8>* table = FileMapInfo::saved_shared_path_table().table();
  SharedPathTable runtime_table(to_target(table), FileMapInfo::shared_path_table().size());
  _header->set_shared_path_table(runtime_table);

  address relocatable_base = (address)SharedBaseAddress;
  address relocatable_end = (address)(current_dump_space()->top()) + _buffer_to_target_delta;

  intx addr_delta = MetaspaceShared::final_delta();
  if (addr_delta == 0) {
    ArchivePtrMarker::compact(relocatable_base, relocatable_end);
  } else {
    // The base archive is NOT mapped at MetaspaceShared::default_base_address() (due to ASLR).
    // This means that the current content of the dynamic archive is based on a random
    // address. Let's relocate all the pointers, so that it can be mapped to
    // MetaspaceShared::default_base_address() without runtime relocation.
    //
    // Note: both the base and dynamic archive are written with
    // FileMapHeader::_shared_base_address == MetaspaceShared::default_base_address()

    // Patch all pointers that are marked by ptrmap within this region,
    // where we have just dumped all the metaspace data.
    address patch_base = (address)_alloc_bottom;
    address patch_end  = (address)current_dump_space()->top();

    // the current value of the pointers to be patched must be within this
    // range (i.e., must point to either the top archive (as currently mapped), or to the
    // (targeted address of) the top archive)
    address valid_old_base = relocatable_base;
    address valid_old_end  = relocatable_end;
    size_t base_plus_top_size = valid_old_end - valid_old_base;
    size_t top_size = patch_end - patch_base;
    size_t base_size = base_plus_top_size - top_size;
    assert(base_plus_top_size > base_size, "no overflow");
    assert(base_plus_top_size > top_size, "no overflow");

    // after patching, the pointers must point inside this range
    // (the requested location of the archive, as mapped at runtime).
    address valid_new_base = (address)MetaspaceShared::default_base_address();
    address valid_new_end  = valid_new_base + base_plus_top_size;

    log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT "] to "
                   "[" INTPTR_FORMAT " - " INTPTR_FORMAT "], delta = " INTX_FORMAT " bytes",
                   p2i(patch_base + base_size), p2i(patch_end),
                   p2i(valid_new_base + base_size), p2i(valid_new_end), addr_delta);

    SharedDataRelocator<true> patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
                                      valid_new_base, valid_new_end, addr_delta, ArchivePtrMarker::ptrmap());
    ArchivePtrMarker::ptrmap()->iterate(&patcher);
    ArchivePtrMarker::compact(patcher.max_non_null_offset());
  }
}
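
// Write the finished archive to the dynamic archive file (the path returned by
// Arguments::GetSharedDynamicArchivePath()): the core dump regions, the final
// requested base address, and a header CRC. The addresses printed in the log are
// the final requested addresses, i.e., buffer addresses shifted by
// _buffer_to_target_delta and MetaspaceShared::final_delta().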
void DynamicArchiveBuilder::write_archive(char* serialized_data) {
  int num_klasses = _klasses->length();
  int num_symbols = _symbols->length();

  _header->set_serialized_data(to_target(serialized_data));

  FileMapInfo* dynamic_info = FileMapInfo::dynamic_info();
  assert(dynamic_info != NULL, "Sanity");

  // Now write the archived data including the file offsets.
  const char* archive_name = Arguments::GetSharedDynamicArchivePath();
  dynamic_info->open_for_write(archive_name);
  MetaspaceShared::write_core_archive_regions(dynamic_info, NULL, NULL);
  dynamic_info->set_final_requested_base((char*)MetaspaceShared::default_base_address());
  dynamic_info->set_header_crc(dynamic_info->compute_header_crc());
  dynamic_info->write_header();
  dynamic_info->close();

  address base = to_target(_alloc_bottom);
  address top  = address(current_dump_space()->top()) + _buffer_to_target_delta;
  size_t file_size = pointer_delta(top, base, sizeof(char));

  base += MetaspaceShared::final_delta();
  top  += MetaspaceShared::final_delta();
  log_info(cds, dynamic)("Written dynamic archive " PTR_FORMAT " - " PTR_FORMAT
                         " [" SIZE_FORMAT " bytes header, " SIZE_FORMAT " bytes total]",
                         p2i(base), p2i(top), _header->header_size(), file_size);
  log_info(cds, dynamic)("%d klasses; %d symbols", num_klasses, num_symbols);
}


class VM_PopulateDynamicDumpSharedSpace: public VM_Operation {
  DynamicArchiveBuilder* _builder;
public:
  VM_PopulateDynamicDumpSharedSpace(DynamicArchiveBuilder* builder) : _builder(builder) {}
  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit() {
    ResourceMark rm;
    if (SystemDictionaryShared::empty_dumptime_table()) {
      log_warning(cds, dynamic)("There is no class to be included in the dynamic archive.");
      return;
    }
    if (AllowArchivingWithJavaAgent) {
      warning("This archive was created with AllowArchivingWithJavaAgent. It should be used "
              "for testing purposes only and should not be used in a production environment");
    }
    FileMapInfo::check_nonempty_dir_in_shared_path_table();

    _builder->doit();
  }
};


void DynamicArchive::dump() {
  if (Arguments::GetSharedDynamicArchivePath() == NULL) {
    log_warning(cds, dynamic)("SharedDynamicArchivePath is not specified");
    return;
  }

  DynamicArchiveBuilder builder;
  _builder = &builder;
  VM_PopulateDynamicDumpSharedSpace op(&builder);
  VMThread::execute(&op);
  _builder = NULL;
}

address DynamicArchive::original_to_buffer_impl(address orig_obj) {
  assert(DynamicDumpSharedSpaces, "must be");
  address buff_obj = _builder->get_new_loc(orig_obj);
  assert(buff_obj != NULL, "orig_obj must be used by the dynamic archive");
  assert(buff_obj != orig_obj, "call this only when you know orig_obj must be copied and not just referenced");
  assert(_builder->is_in_buffer_space(buff_obj), "must be");
  return buff_obj;
}

address DynamicArchive::buffer_to_target_impl(address buff_obj) {
  assert(DynamicDumpSharedSpaces, "must be");
  assert(_builder->is_in_buffer_space(buff_obj), "must be");
  return _builder->to_target(buff_obj);
}

address DynamicArchive::original_to_target_impl(address orig_obj) {
  assert(DynamicDumpSharedSpaces, "must be");
  if (MetaspaceShared::is_in_shared_metaspace(orig_obj)) {
    // This happens when the top archive points to a Symbol* in the base archive.
    return orig_obj;
  }
  address buff_obj = _builder->get_new_loc(orig_obj);
  assert(buff_obj != NULL, "orig_obj must be used by the dynamic archive");
  if (buff_obj == orig_obj) {
    // We are storing a pointer to an original object into the dynamic buffer. E.g.,
    // a Symbol* that is used by both the base and top archives.
    assert(MetaspaceShared::is_in_shared_metaspace(orig_obj), "must be");
    return orig_obj;
  } else {
    return _builder->to_target(buff_obj);
  }
}

uintx DynamicArchive::object_delta_uintx(void* buff_obj) {
  assert(DynamicDumpSharedSpaces, "must be");
  address target_obj = _builder->to_target_no_check(address(buff_obj));
  assert(uintx(target_obj) >= SharedBaseAddress, "must be");
  return uintx(target_obj) - SharedBaseAddress;
}

bool DynamicArchive::is_in_target_space(void *obj) {
  assert(DynamicDumpSharedSpaces, "must be");
  return _builder->is_in_target_space(obj);
}


DynamicArchiveBuilder* DynamicArchive::_builder = NULL;


bool DynamicArchive::validate(FileMapInfo* dynamic_info) {
  // Check if the recorded base archive matches with the current one
  FileMapInfo* base_info = FileMapInfo::current_info();
  DynamicArchiveHeader* dynamic_header = dynamic_info->dynamic_header();

  // Check the header crc
  if (dynamic_header->base_header_crc() != base_info->crc()) {
    FileMapInfo::fail_continue("Archive header checksum verification failed.");
    return false;
  }

  // Check each space's crc
  for (int i = 0; i < MetaspaceShared::n_regions; i++) {
    if (dynamic_header->base_region_crc(i) != base_info->space_crc(i)) {
      FileMapInfo::fail_continue("Archive region #%d checksum verification failed.", i);
      return false;
    }
  }

  // Validate the dynamic archived shared path table, and set the global
  // _shared_path_table to that.
  if (!dynamic_info->validate_shared_path_table()) {
    return false;
  }
  return true;
}