/*
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/loaderConstraints.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/classify.hpp"
#include "memory/filemap.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodDataOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/signature.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "utilities/copy.hpp"


// Closure to set up the fingerprint field for all methods.

class FingerprintMethodsClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    if (obj->is_method()) {
      methodOop mobj = (methodOop)obj;
      ResourceMark rm;
      (new Fingerprinter(mobj))->fingerprint();
    }
  }
};



// Closure to set the hash value (String.hash field) in all of the
// String objects in the heap.  Setting the hash value is not required.
// However, setting the value in advance prevents the value from being
// written later, increasing the likelihood that the shared page containing
// the hash can be shared.
//
// NOTE THAT the algorithm in StringTable::hash_string() MUST MATCH the
// algorithm in java.lang.String.hashCode().

class StringHashCodeClosure: public OopClosure {
private:
  Thread* THREAD;
  int hash_offset;
public:
  StringHashCodeClosure(Thread* t) {
    THREAD = t;
    hash_offset = java_lang_String::hash_offset_in_bytes();
  }

  void do_oop(oop* p) {
    if (p != NULL) {
      oop obj = *p;
      if (obj->klass() == SystemDictionary::String_klass()) {

        int hash = java_lang_String::hash_string(obj);
        obj->int_field_put(hash_offset, hash);
      }
    }
  }
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};

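// For reference, both StringTable::hash_string() and java.lang.String.hashCode()
// are expected to compute the standard Java polynomial string hash (illustrative
// sketch, not a copy of either implementation):
//
//   int h = 0;
//   for (int i = 0; i < length; i++) {
//     h = 31 * h + value[i];
//   }
//
// Precomputing the value here means the String.hash field is already final in
// the archive, so the page holding it need not be dirtied at runtime.
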

// Remove data from objects which should not appear in the shared file
// (as it pertains only to the current JVM).

class RemoveUnshareableInfoClosure : public ObjectClosure {
public:
  void do_object(oop obj) {
    // Zap data from the objects which pertains only to this JVM.  We
    // want that data recreated in new JVMs when the shared file is used.
    if (obj->is_method()) {
      ((methodOop)obj)->remove_unshareable_info();
    }
    else if (obj->is_klass()) {
      Klass::cast((klassOop)obj)->remove_unshareable_info();
    }

    // Don't save compiler related special oops (shouldn't be any yet).
    if (obj->is_methodData() || obj->is_compiledICHolder()) {
      ShouldNotReachHere();
    }
  }
};


static bool mark_object(oop obj) {
  if (obj != NULL &&
      !obj->is_shared() &&
      !obj->is_forwarded() &&
      !obj->is_gc_marked()) {
    obj->set_mark(markOopDesc::prototype()->set_marked());
    return true;
  }

  return false;
}


class MoveSymbols : public SymbolClosure {
private:
  char* _start;
  char* _end;
  char* _top;
  int   _count;

  bool in_shared_space(Symbol* sym) const {
    return (char*)sym >= _start && (char*)sym < _end;
  }

  Symbol* get_shared_copy(Symbol* sym) {
    return sym->refcount() > 0 ? NULL : (Symbol*)(_start - sym->refcount());
  }

  Symbol* make_shared_copy(Symbol* sym) {
    Symbol* new_sym = (Symbol*)_top;
    int size = sym->object_size();
    _top += size * HeapWordSize;
    if (_top <= _end) {
      Copy::disjoint_words((HeapWord*)sym, (HeapWord*)new_sym, size);
      // Encode a reference to the copy as a negative distance from _start.
      // When a symbol is being copied to a shared space
      // during CDS archive creation, the original symbol is marked
      // as relocated by storing a negative value in its _refcount field.
      // This value is also used to find where exactly the shared copy is
      // (see MoveSymbols::get_shared_copy), so that the other references
      // to this symbol can be changed to point to the shared copy.
      sym->_refcount = (int)(_start - (char*)new_sym);
      // Mark the symbol in the shared archive as immortal so it is read only
      // and not refcounted.
      new_sym->_refcount = -1;
      _count++;
    } else {
      report_out_of_shared_space(SharedMiscData);
    }
    return new_sym;
  }

public:
  MoveSymbols(char* top, char* end) :
    _start(top), _end(end), _top(top), _count(0) { }

  char* get_top() const { return _top; }
  int count() const { return _count; }

  void do_symbol(Symbol** p) {
    Symbol* sym = load_symbol(p);
    if (sym != NULL && !in_shared_space(sym)) {
      Symbol* new_sym = get_shared_copy(sym);
      if (new_sym == NULL) {
        // The symbol has not been relocated yet; copy it to _top address
        assert(sym->refcount() > 0, "should have positive reference count");
        new_sym = make_shared_copy(sym);
      }
      // Make the reference point to the shared copy of the symbol
      store_symbol(p, new_sym);
    }
  }
};

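// Worked example of the relocation encoding above (addresses are illustrative):
// with _start == 0x1000, a symbol copied to new_sym == 0x1040 gets its original
// _refcount set to (int)(0x1000 - 0x1040) == -64.  A later get_shared_copy()
// call then recovers the copy as (Symbol*)(_start - (-64)) == (Symbol*)0x1040,
// while a still-positive refcount means the symbol has not been relocated yet.
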
// Closure:  mark objects closure.

class MarkObjectsOopClosure : public OopClosure {
public:
  void do_oop(oop* p)       { mark_object(*p); }
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};


class MarkObjectsSkippingKlassesOopClosure : public OopClosure {
public:
  void do_oop(oop* pobj) {
    oop obj = *pobj;
    if (obj != NULL &&
        !obj->is_klass()) {
      mark_object(obj);
    }
  }
  void do_oop(narrowOop* pobj) { ShouldNotReachHere(); }
};


static void mark_object_recursive_skipping_klasses(oop obj) {
  mark_object(obj);
  if (obj != NULL) {
    MarkObjectsSkippingKlassesOopClosure mark_all;
    obj->oop_iterate(&mark_all);
  }
}


// Closure:  mark common read-only objects

class MarkCommonReadOnly : public ObjectClosure {
private:
  MarkObjectsOopClosure mark_all;
public:
  void do_object(oop obj) {

    // Mark all constMethod objects.

    if (obj->is_constMethod()) {
      mark_object(obj);
      mark_object(constMethodOop(obj)->stackmap_data());
      // Exception tables are needed by ci code during compilation.
      mark_object(constMethodOop(obj)->exception_table());
    }

    // Mark objects referenced by klass objects which are read-only.

    else if (obj->is_klass()) {
      Klass* k = Klass::cast((klassOop)obj);
      mark_object(k->secondary_supers());

      // The METHODS() OBJARRAYS CANNOT BE MADE READ-ONLY, even though
      // they are never modified.  Otherwise, they will be pre-marked; the
      // GC marking phase will skip them; and by skipping them it will fail
      // to mark the method objects referenced by the array.

      if (obj->blueprint()->oop_is_instanceKlass()) {
        instanceKlass* ik = instanceKlass::cast((klassOop)obj);
        mark_object(ik->method_ordering());
        mark_object(ik->local_interfaces());
        mark_object(ik->transitive_interfaces());
        mark_object(ik->fields());

        mark_object(ik->class_annotations());

        mark_object_recursive_skipping_klasses(ik->fields_annotations());
        mark_object_recursive_skipping_klasses(ik->methods_annotations());
        mark_object_recursive_skipping_klasses(ik->methods_parameter_annotations());
        mark_object_recursive_skipping_klasses(ik->methods_default_annotations());

        typeArrayOop inner_classes = ik->inner_classes();
        if (inner_classes != NULL) {
          mark_object(inner_classes);
        }
      }
    }
  }
};


// Closure:  find symbol references in Java Heap objects

class CommonSymbolsClosure : public ObjectClosure {
private:
  SymbolClosure* _closure;
public:
  CommonSymbolsClosure(SymbolClosure* closure) : _closure(closure) { }

  void do_object(oop obj) {

    // Traverse symbols referenced by method objects.

    if (obj->is_method()) {
      methodOop m = methodOop(obj);
      constantPoolOop constants = m->constants();
      _closure->do_symbol(constants->symbol_at_addr(m->name_index()));
      _closure->do_symbol(constants->symbol_at_addr(m->signature_index()));
    }

    // Traverse symbols referenced by klass objects which are read-only.
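    // (The inner_classes array handled below is a flat sequence of fixed-size
    //  records; the loop steps by instanceKlass::inner_class_next_offset and
    //  reads each record's inner-name constant-pool index at
    //  instanceKlass::inner_class_inner_name_offset.)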

    else if (obj->is_klass()) {
      Klass* k = Klass::cast((klassOop)obj);
      k->shared_symbols_iterate(_closure);

      if (obj->blueprint()->oop_is_instanceKlass()) {
        instanceKlass* ik = instanceKlass::cast((klassOop)obj);
        typeArrayOop inner_classes = ik->inner_classes();
        if (inner_classes != NULL) {
          constantPoolOop constants = ik->constants();
          int n = inner_classes->length();
          for (int i = 0; i < n; i += instanceKlass::inner_class_next_offset) {
            int ioff = i + instanceKlass::inner_class_inner_name_offset;
            int index = inner_classes->ushort_at(ioff);
            if (index != 0) {
              _closure->do_symbol(constants->symbol_at_addr(index));
            }
          }
        }
      }
    }

    // Traverse symbols referenced by other constantpool entries.

    else if (obj->is_constantPool()) {
      constantPoolOop(obj)->shared_symbols_iterate(_closure);
    }
  }
};


// Closure:  mark char arrays used by strings

class MarkStringValues : public ObjectClosure {
private:
  MarkObjectsOopClosure mark_all;
public:
  void do_object(oop obj) {

    // Character arrays referenced by String objects are read-only.

    if (java_lang_String::is_instance(obj)) {
      mark_object(java_lang_String::value(obj));
    }
  }
};


#ifdef DEBUG
// Closure:  Check for objects left in the heap which have not been moved.

class CheckRemainingObjects : public ObjectClosure {
private:
  int count;

public:
  CheckRemainingObjects() {
    count = 0;
  }

  void do_object(oop obj) {
    if (!obj->is_shared() &&
        !obj->is_forwarded()) {
      ++count;
      if (Verbose) {
        tty->print("Unreferenced object: ");
        obj->print_on(tty);
      }
    }
  }

  void status() {
    tty->print_cr("%d objects no longer referenced, not shared.", count);
  }
};
#endif


// Closure:  Mark remaining objects read-write, except Strings.

class MarkReadWriteObjects : public ObjectClosure {
private:
  MarkObjectsOopClosure mark_objects;
public:
  void do_object(oop obj) {

    // The METHODS() OBJARRAYS CANNOT BE MADE READ-ONLY, even though
    // they are never modified.  Otherwise, they will be pre-marked; the
    // GC marking phase will skip them; and by skipping them it will fail
    // to mark the method objects referenced by the array.

    if (obj->is_klass()) {
      mark_object(obj);
      Klass* k = klassOop(obj)->klass_part();
      mark_object(k->java_mirror());
      if (obj->blueprint()->oop_is_instanceKlass()) {
        instanceKlass* ik = (instanceKlass*)k;
        mark_object(ik->methods());
        mark_object(ik->constants());
      }
      if (obj->blueprint()->oop_is_javaArray()) {
        arrayKlass* ak = (arrayKlass*)k;
        mark_object(ak->component_mirror());
      }
      return;
    }

    // Mark constantPool tags and the constantPoolCache.

    else if (obj->is_constantPool()) {
      constantPoolOop pool = constantPoolOop(obj);
      mark_object(pool->cache());
      pool->shared_tags_iterate(&mark_objects);
      return;
    }

    // Mark all method objects.

    if (obj->is_method()) {
      mark_object(obj);
    }
  }
};


// Closure:  Mark String objects read-write.

class MarkStringObjects : public ObjectClosure {
private:
  MarkObjectsOopClosure mark_objects;
public:
  void do_object(oop obj) {

    // Mark String objects referenced by constant pool entries.

    if (obj->is_constantPool()) {
      constantPoolOop pool = constantPoolOop(obj);
      pool->shared_strings_iterate(&mark_objects);
      return;
    }
  }
};


// Move objects matching specified type (i.e., lock_bits) to the specified
// space.

class MoveMarkedObjects : public ObjectClosure {
private:
  OffsetTableContigSpace* _space;
  bool _read_only;

public:
  MoveMarkedObjects(OffsetTableContigSpace* space, bool read_only) {
    _space = space;
    _read_only = read_only;
  }

  void do_object(oop obj) {
    if (obj->is_shared()) {
      return;
    }
    if (obj->is_gc_marked() && obj->forwardee() == NULL) {
      int s = obj->size();
      oop sh_obj = (oop)_space->allocate(s);
      if (sh_obj == NULL) {
        report_out_of_shared_space(_read_only ? SharedReadOnly : SharedReadWrite);
      }
      if (PrintSharedSpaces && Verbose && WizardMode) {
        tty->print_cr("\nMoveMarkedObjects: " PTR_FORMAT " -> " PTR_FORMAT " %s", obj, sh_obj,
                      (_read_only ? "ro" : "rw"));
      }
      Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)sh_obj, s);
      obj->forward_to(sh_obj);
      if (_read_only) {
        // Readonly objects: set hash value to self pointer and make gc_marked.
        sh_obj->forward_to(sh_obj);
      } else {
        sh_obj->init_mark();
      }
    }
  }
};

static void mark_and_move(oop obj, MoveMarkedObjects* move) {
  if (mark_object(obj)) move->do_object(obj);
}

enum order_policy {
  OP_favor_startup = 0,
  OP_balanced      = 1,
  OP_favor_runtime = 2
};

// Move an object eagerly (in class-promotion order) only if the configured
// SharedOptimizeColdStartPolicy is at least as aggressive as the requested
// policy; anything skipped here is still picked up by the later bulk
// mark/move passes over the whole heap.
static void mark_and_move_for_policy(order_policy policy, oop obj, MoveMarkedObjects* move) {
  if (SharedOptimizeColdStartPolicy >= policy) mark_and_move(obj, move);
}

class MarkAndMoveOrderedReadOnly : public ObjectClosure {
private:
  MoveMarkedObjects *_move_ro;

public:
  MarkAndMoveOrderedReadOnly(MoveMarkedObjects *move_ro) : _move_ro(move_ro) {}

  void do_object(oop obj) {
    if (obj->is_klass() && obj->blueprint()->oop_is_instanceKlass()) {
      instanceKlass* ik = instanceKlass::cast((klassOop)obj);
      int i;

      if (ik->super() != NULL) {
        do_object(ik->super());
      }

      objArrayOop interfaces = ik->local_interfaces();
      mark_and_move_for_policy(OP_favor_startup, interfaces, _move_ro);
      for(i = 0; i < interfaces->length(); i++) {
        klassOop k = klassOop(interfaces->obj_at(i));
        do_object(k);
      }

      objArrayOop methods = ik->methods();
      for(i = 0; i < methods->length(); i++) {
        methodOop m = methodOop(methods->obj_at(i));
        mark_and_move_for_policy(OP_favor_startup, m->constMethod(), _move_ro);
        mark_and_move_for_policy(OP_favor_runtime, m->constMethod()->exception_table(), _move_ro);
        mark_and_move_for_policy(OP_favor_runtime, m->constMethod()->stackmap_data(), _move_ro);
      }

      mark_and_move_for_policy(OP_favor_startup, ik->transitive_interfaces(), _move_ro);
      mark_and_move_for_policy(OP_favor_startup, ik->fields(), _move_ro);

      mark_and_move_for_policy(OP_favor_runtime, ik->secondary_supers(), _move_ro);
      mark_and_move_for_policy(OP_favor_runtime, ik->method_ordering(), _move_ro);
      mark_and_move_for_policy(OP_favor_runtime, ik->class_annotations(), _move_ro);
      mark_and_move_for_policy(OP_favor_runtime, ik->fields_annotations(), _move_ro);
      mark_and_move_for_policy(OP_favor_runtime, ik->methods_annotations(), _move_ro);
      mark_and_move_for_policy(OP_favor_runtime, ik->methods_parameter_annotations(), _move_ro);
      mark_and_move_for_policy(OP_favor_runtime, ik->methods_default_annotations(), _move_ro);
      mark_and_move_for_policy(OP_favor_runtime, ik->inner_classes(), _move_ro);
      mark_and_move_for_policy(OP_favor_runtime, ik->secondary_supers(), _move_ro);
    }
  }
};

class MarkAndMoveOrderedReadWrite: public ObjectClosure {
private:
  MoveMarkedObjects *_move_rw;

public:
  MarkAndMoveOrderedReadWrite(MoveMarkedObjects *move_rw) : _move_rw(move_rw) {}

  void do_object(oop obj) {
    if (obj->is_klass() && obj->blueprint()->oop_is_instanceKlass()) {
      instanceKlass* ik = instanceKlass::cast((klassOop)obj);
      int i;

      mark_and_move_for_policy(OP_favor_startup, ik->as_klassOop(), _move_rw);

      if (ik->super() != NULL) {
        do_object(ik->super());
      }

      objArrayOop interfaces = ik->local_interfaces();
      for(i = 0; i < interfaces->length(); i++) {
        klassOop k = klassOop(interfaces->obj_at(i));
        mark_and_move_for_policy(OP_favor_startup, k, _move_rw);
        do_object(k);
      }

      objArrayOop methods = ik->methods();
      mark_and_move_for_policy(OP_favor_startup, methods, _move_rw);
      for(i = 0; i < methods->length(); i++) {
        methodOop m = methodOop(methods->obj_at(i));
        mark_and_move_for_policy(OP_favor_startup, m, _move_rw);
        mark_and_move_for_policy(OP_favor_startup, ik->constants(), _move_rw);       // idempotent
        mark_and_move_for_policy(OP_balanced, ik->constants()->cache(), _move_rw);   // idempotent
        mark_and_move_for_policy(OP_balanced, ik->constants()->tags(), _move_rw);    // idempotent
      }

      mark_and_move_for_policy(OP_favor_startup, ik->as_klassOop()->klass(), _move_rw);
      mark_and_move_for_policy(OP_favor_startup, ik->constants()->klass(), _move_rw);

      // Although Java mirrors are marked in MarkReadWriteObjects,
      // apparently they were never moved into shared spaces since
      // MoveMarkedObjects skips marked instance oops.  This may
      // be a bug in the original implementation or simply the vestige
      // of an abandoned experiment.  Nevertheless we leave a hint
      // here in case this capability is ever correctly implemented.
      //
      // mark_and_move_for_policy(OP_favor_runtime, ik->java_mirror(), _move_rw);
    }
  }

};

// Adjust references in oops to refer to shared spaces.

class ResolveForwardingClosure: public OopClosure {
public:
  void do_oop(oop* p) {
    oop obj = *p;
    if (!obj->is_shared()) {
      if (obj != NULL) {
        oop f = obj->forwardee();
        guarantee(f->is_shared(), "Oop doesn't refer to shared space.");
        *p = f;
      }
    }
  }
  void do_oop(narrowOop* pobj) { ShouldNotReachHere(); }
};


// The methods array must be reordered by Symbol* address.
// (See classFileParser.cpp where methods in a class are originally
// sorted).  The addresses of symbols have been changed as a result
// of moving to the shared space.

class SortMethodsClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    if (obj->blueprint()->oop_is_instanceKlass()) {
      instanceKlass* ik = instanceKlass::cast((klassOop)obj);
      methodOopDesc::sort_methods(ik->methods(),
                                  ik->methods_annotations(),
                                  ik->methods_parameter_annotations(),
                                  ik->methods_default_annotations(),
                                  true /* idempotent, slow */);
    }
  }
};

// Vtable and Itable indices are calculated based on methods array
// order (see klassItable::compute_itable_index()).  Must reinitialize
// after ALL methods of ALL classes have been reordered.
// We assume that since checkconstraints is false, this method
// cannot throw an exception.  An exception here would be
// problematic since this is the VMThread, not a JavaThread.

class ReinitializeTables: public ObjectClosure {
private:
  Thread* _thread;

public:
  ReinitializeTables(Thread* thread) : _thread(thread) {}

  // Initialize super vtable first, check if already initialized to avoid
  // quadratic behavior.  The vtable is cleared in remove_unshareable_info.
  void reinitialize_vtables(klassOop k) {
    if (k->blueprint()->oop_is_instanceKlass()) {
      instanceKlass* ik = instanceKlass::cast(k);
      if (ik->vtable()->is_initialized()) return;
      if (ik->super() != NULL) {
        reinitialize_vtables(ik->super());
      }
      ik->vtable()->initialize_vtable(false, _thread);
    }
  }

  void do_object(oop obj) {
    if (obj->blueprint()->oop_is_instanceKlass()) {
      instanceKlass* ik = instanceKlass::cast((klassOop)obj);
      ResourceMark rm(_thread);
      ik->itable()->initialize_itable(false, _thread);
      reinitialize_vtables((klassOop)obj);
#ifdef ASSERT
      ik->vtable()->verify(tty, true);
#endif // ASSERT
    } else if (obj->blueprint()->oop_is_arrayKlass()) {
      // The vtable for array klasses is that of their super class,
      // i.e., java.lang.Object.
      arrayKlass* ak = arrayKlass::cast((klassOop)obj);
      if (ak->vtable()->is_initialized()) return;
      ak->vtable()->initialize_vtable(false, _thread);
    }
  }
};


// Adjust references in oops to refer to shared spaces.

class PatchOopsClosure: public ObjectClosure {
private:
  Thread* _thread;
  ResolveForwardingClosure resolve;

public:
  PatchOopsClosure(Thread* thread) : _thread(thread) {}

  void do_object(oop obj) {
    obj->oop_iterate_header(&resolve);
    obj->oop_iterate(&resolve);

    assert(obj->klass()->is_shared(), "Klass not pointing into shared space.");

    // If the object is a Java object or class which might (in the
    // future) contain a reference to a young gen object, add it to the
    // list.

    if (obj->is_klass() || obj->is_instance()) {
      if (obj->is_klass() ||
          obj->is_a(SystemDictionary::Class_klass()) ||
          obj->is_a(SystemDictionary::Throwable_klass())) {
        // Do nothing
      }
      else if (obj->is_a(SystemDictionary::String_klass())) {
        // immutable objects.
      } else {
        // someone added an object we hadn't accounted for.
        ShouldNotReachHere();
      }
    }
  }
};


// Empty the young and old generations.

class ClearSpaceClosure : public SpaceClosure {
public:
  void do_space(Space* s) {
    s->clear(SpaceDecorator::Mangle);
  }
};


// Closure for serializing initialization data out to a data area to be
// written to the shared file.
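// (Restating the closure below for the reader: each primitive it writes is
//  widened to one oop-sized slot in the misc-data stream, and do_region()
//  emits a length tag followed by the raw words of the region.)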

class WriteClosure : public SerializeOopClosure {
private:
  oop* top;
  char* end;

  inline void check_space() {
    if ((char*)top + sizeof(oop) > end) {
      report_out_of_shared_space(SharedMiscData);
    }
  }


public:
  WriteClosure(char* md_top, char* md_end) {
    top = (oop*)md_top;
    end = md_end;
  }

  char* get_top() { return (char*)top; }

  void do_oop(oop* p) {
    check_space();
    oop obj = *p;
    assert(obj->is_oop_or_null(), "invalid oop");
    assert(obj == NULL || obj->is_shared(),
           "Oop in shared space not pointing into shared space.");
    *top = obj;
    ++top;
  }

  void do_oop(narrowOop* pobj) { ShouldNotReachHere(); }

  void do_int(int* p) {
    check_space();
    *top = (oop)(intptr_t)*p;
    ++top;
  }

  void do_size_t(size_t* p) {
    check_space();
    *top = (oop)(intptr_t)*p;
    ++top;
  }

  void do_ptr(void** p) {
    check_space();
    *top = (oop)*p;
    ++top;
  }

  void do_ptr(HeapWord** p) { do_ptr((void **) p); }

  void do_tag(int tag) {
    check_space();
    *top = (oop)(intptr_t)tag;
    ++top;
  }

  void do_region(u_char* start, size_t size) {
    if ((char*)top + size > end) {
      report_out_of_shared_space(SharedMiscData);
    }
    assert((intptr_t)start % sizeof(oop) == 0, "bad alignment");
    assert(size % sizeof(oop) == 0, "bad size");
    do_tag((int)size);
    while (size > 0) {
      *top = *(oop*)start;
      ++top;
      start += sizeof(oop);
      size -= sizeof(oop);
    }
  }

  bool reading() const { return false; }
};


class ResolveConstantPoolsClosure : public ObjectClosure {
private:
  TRAPS;
public:
  ResolveConstantPoolsClosure(Thread *t) {
    __the_thread__ = t;
  }
  void do_object(oop obj) {
    if (obj->is_constantPool()) {
      constantPoolOop cpool = (constantPoolOop)obj;
      int unresolved = cpool->pre_resolve_shared_klasses(THREAD);
    }
  }
};


// Print a summary of the contents of the read/write spaces to help
// identify objects which might be able to be made read-only.  At this
// point, the objects have been written, and we can trash them as
// needed.
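// (The summary below is printed only when the VM runs with
//  -XX:+PrintSharedSpaces; a dump run would typically be started with
//  something like "java -Xshare:dump -XX:+PrintSharedSpaces" -- an
//  illustrative invocation, not taken from this file.)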

static void print_contents() {
  if (PrintSharedSpaces) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    CompactingPermGenGen* gen = (CompactingPermGenGen*)gch->perm_gen();

    // High level summary of the read-only space:

    ClassifyObjectClosure coc;
    tty->cr(); tty->print_cr("ReadOnly space:");
    gen->ro_space()->object_iterate(&coc);
    coc.print();

    // High level summary of the read-write space:

    coc.reset();
    tty->cr(); tty->print_cr("ReadWrite space:");
    gen->rw_space()->object_iterate(&coc);
    coc.print();

    // Reset counters

    ClearAllocCountClosure cacc;
    gen->ro_space()->object_iterate(&cacc);
    gen->rw_space()->object_iterate(&cacc);
    coc.reset();

    // Lower level summary of the read-only space:

    gen->ro_space()->object_iterate(&coc);
    tty->cr(); tty->print_cr("ReadOnly space:");
    ClassifyInstanceKlassClosure cikc;
    gen->rw_space()->object_iterate(&cikc);
    cikc.print();

    // Reset counters

    gen->ro_space()->object_iterate(&cacc);
    gen->rw_space()->object_iterate(&cacc);
    coc.reset();

    // Lower level summary of the read-write space:

    gen->rw_space()->object_iterate(&coc);
    cikc.reset();
    tty->cr(); tty->print_cr("ReadWrite space:");
    gen->rw_space()->object_iterate(&cikc);
    cikc.print();
  }
}


// Patch C++ vtable pointer in klass oops.

// Klass objects contain references to C++ vtables in the JVM library.
// Fix them to point to our constructed vtables.  However, don't iterate
// across the space while doing this, as that causes the vtables to be
// patched, undoing our useful work.  Instead, iterate to make a list,
// then use the list to do the fixing.
//
// Our constructed vtables:
// Dump time:
//  1. init_self_patching_vtbl_list: table of pointers to current virtual method addrs
//  2. generate_vtable_methods: create jump table, appended to above vtbl_list
//  3. PatchKlassVtables: for Klass list, patch the vtable entry to point to jump table
//     rather than to current vtbl
// Table layout: NOTE FIXED SIZE
//   1. vtbl pointers
//   2. #Klass X #virtual methods per Klass
//      1 entry for each, in the order:
//      Klass1:method1 entry, Klass1:method2 entry, ... Klass1:method<num_virtuals> entry
//      Klass2:method1 entry, Klass2:method2 entry, ... Klass2:method<num_virtuals> entry
//      ...
//      Klass<vtbl_list_size>:method1 entry, Klass<vtbl_list_size>:method2 entry,
//        ... Klass<vtbl_list_size>:method<num_virtuals> entry
// Sample entry (Sparc):
//   save(sp, -256, sp)
//   ba,pt common_code
//   mov XXX, %L0       %L0 gets: Klass index <<8 + method index (note: max method index 255)
//
// Restore time:
//   1. initialize_oops: reserve space for table
//   2. init_self_patching_vtbl_list: update pointers to NEW virtual method addrs in text
//
// Execution time:
//   First virtual method call for any object of these Klass types:
//   1. object->klass->klass_part
//   2. vtable entry for that klass_part points to the jump table entries
//   3. branches to common_code with %O0/klass_part, %L0: Klass index <<8 + method index
//   4. common_code:
//      Get address of new vtbl pointer for this Klass from updated table
//      Update new vtbl pointer in the Klass: future virtual calls go direct
//      Jump to method, using new vtbl pointer and method index

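// Concretely (illustrative indices): a Klass whose current C++ vtable pointer
// equals vtbl_list[i] has that pointer redirected to the i-th block of
// generated entries, i.e. to
//
//   (void**)new_vtable_start + i * num_virtuals
//
// (this is what CompactingPermGenGen::find_matching_vtbl_ptr() computes later
// in this file), and each stub in that block encodes "Klass index << 8 | method
// index" as described above so that common_code can reach the real method.
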
class PatchKlassVtables: public ObjectClosure {
private:
  GrowableArray<klassOop>* _klass_objects;

public:
  PatchKlassVtables() {
    _klass_objects = new GrowableArray<klassOop>();
  }

  void do_object(oop obj) {
    if (obj->is_klass()) {
      _klass_objects->append(klassOop(obj));
    }
  }

  void patch(void** vtbl_list, void* new_vtable_start) {
    int n = _klass_objects->length();
    for (int i = 0; i < n; i++) {
      klassOop obj = (klassOop)_klass_objects->at(i);
      Klass* k = obj->klass_part();
      *(void**)k = CompactingPermGenGen::find_matching_vtbl_ptr(
                     vtbl_list, new_vtable_start, k);
    }
  }
};

// Walk through all symbols and patch their vtable pointers.
// Note that symbols have vtable pointers only in non-product builds
// (see allocation.hpp).

#ifndef PRODUCT
class PatchSymbolVtables: public SymbolClosure {
private:
  void* _new_vtbl_ptr;

public:
  PatchSymbolVtables(void** vtbl_list, void* new_vtable_start) {
    Symbol s;
    _new_vtbl_ptr = CompactingPermGenGen::find_matching_vtbl_ptr(
                      vtbl_list, new_vtable_start, &s);
  }

  void do_symbol(Symbol** p) {
    Symbol* sym = load_symbol(p);
    *(void**)sym = _new_vtbl_ptr;
  }
};
#endif


// Populate the shared space.

class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  GrowableArray<oop> *_class_promote_order;
  OffsetTableContigSpace* _ro_space;
  OffsetTableContigSpace* _rw_space;
  VirtualSpace* _md_vs;
  VirtualSpace* _mc_vs;

public:
  VM_PopulateDumpSharedSpace(GrowableArray<oop> *class_promote_order,
                             OffsetTableContigSpace* ro_space,
                             OffsetTableContigSpace* rw_space,
                             VirtualSpace* md_vs, VirtualSpace* mc_vs) {
    _class_promote_order = class_promote_order;
    _ro_space = ro_space;
    _rw_space = rw_space;
    _md_vs = md_vs;
    _mc_vs = mc_vs;
  }

  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit() {
    Thread* THREAD = VMThread::vm_thread();
    NOT_PRODUCT(SystemDictionary::verify();)
    // The following guarantee is meant to ensure that no loader constraints
    // exist yet, since the constraints table is not shared.  This becomes
    // more important now that we don't re-initialize vtables/itables for
    // shared classes at runtime, where constraints were previously created.
    guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
              "loader constraints are not saved");
    // Revisit and implement this if we prelink method handle call sites:
    guarantee(SystemDictionary::invoke_method_table() == NULL ||
              SystemDictionary::invoke_method_table()->number_of_entries() == 0,
              "invoke method table is not saved");
    GenCollectedHeap* gch = GenCollectedHeap::heap();

    // At this point, many classes have been loaded.

    // Update all the fingerprints in the shared methods.

    tty->print("Calculating fingerprints ... ");
    FingerprintMethodsClosure fpmc;
    gch->object_iterate(&fpmc);
    tty->print_cr("done. ");

    // Remove all references outside the heap.

    tty->print("Removing unshareable information ... ");
    RemoveUnshareableInfoClosure ruic;
    gch->object_iterate(&ruic);
    tty->print_cr("done. ");

    // Move the objects in three passes.

    MarkObjectsOopClosure mark_all;
    MarkCommonReadOnly mark_common_ro;
    MarkStringValues mark_string_values;
    MarkReadWriteObjects mark_rw;
    MarkStringObjects mark_strings;
    MoveMarkedObjects move_ro(_ro_space, true);
    MoveMarkedObjects move_rw(_rw_space, false);

    // The SharedOptimizeColdStart VM option governs the new layout
    // algorithm for promoting classes into the shared archive.
    // The general idea is to minimize cold start time by laying
    // out the objects in the order they are accessed at startup time.
    // By doing this we are trying to eliminate out-of-order accesses
    // in the shared archive.  This benefits cold startup time by making
    // disk reads as sequential as possible during class loading and
    // bootstrapping activities.  There may also be a small secondary
    // effect of better "packing" of more commonly used data on a smaller
    // number of pages, although no direct benefit has been measured from
    // this effect.
    //
    // At the class level of granularity, the promotion order is dictated
    // by the classlist file whose generation is discussed elsewhere.
    //
    // At smaller granularity, optimal ordering was determined by an
    // offline analysis of object access order in the shared archive.
    // The dbx watchpoint facility, combined with SA post-processing,
    // was used to observe common access patterns primarily during
    // classloading.  This information was used to craft the promotion
    // order seen in the following closures.
    //
    // The observed access order is mostly governed by what happens
    // in SystemDictionary::load_shared_class().  NOTE WELL - care
    // should be taken when making changes to this method, because it
    // may invalidate assumptions made about access order!
    //
    // (Ideally, there would be a better way to manage changes to
    //  the access order.  Unfortunately a generic in-VM solution for
    //  dynamically observing access order and optimizing shared
    //  archive layout is pretty difficult.  We go with the static
    //  analysis because the code is fairly mature at this point
    //  and we're betting that the access order won't change much.)

    MarkAndMoveOrderedReadOnly  mark_and_move_ordered_ro(&move_ro);
    MarkAndMoveOrderedReadWrite mark_and_move_ordered_rw(&move_rw);

    // Set up the shared data and shared code segments.

    char* md_top = _md_vs->low();
    char* md_end = _md_vs->high();
    char* mc_top = _mc_vs->low();
    char* mc_end = _mc_vs->high();

    // Reserve space for the list of klassOops whose vtables are used
    // for patching others as needed.

    void** vtbl_list = (void**)md_top;
    int vtbl_list_size = CompactingPermGenGen::vtbl_list_size;
    Universe::init_self_patching_vtbl_list(vtbl_list, vtbl_list_size);

    md_top += vtbl_list_size * sizeof(void*);
    void* vtable = md_top;

    // Reserve space for a new dummy vtable for klass objects in the
    // heap.  Generate self-patching vtable entries.

    CompactingPermGenGen::generate_vtable_methods(vtbl_list,
                                                  &vtable,
                                                  &md_top, md_end,
                                                  &mc_top, mc_end);

    // Reserve space for the total size and the number of stored symbols.

    // (These two slots are filled in after the symbols have been copied:
    //  ((intptr_t*)md_top)[-2] gets the total size in bytes of the copied
    //  symbols and ((intptr_t*)md_top)[-1] gets the symbol count; see below.)
    md_top += sizeof(intptr_t) * 2;

    MoveSymbols move_symbols(md_top, md_end);
    CommonSymbolsClosure traverse_common_symbols(&move_symbols);

    // Phase 1a: remove symbols with _refcount == 0

    SymbolTable::unlink();

    // Phase 1b: move commonly used symbols referenced by oop fields.

    tty->print("Moving common symbols to metadata section at " PTR_FORMAT " ... ",
               move_symbols.get_top());
    gch->object_iterate(&traverse_common_symbols);
    tty->print_cr("done. ");

    // Phase 1c: move known names and signatures.

    tty->print("Moving vmSymbols to metadata section at " PTR_FORMAT " ... ",
               move_symbols.get_top());
    vmSymbols::symbols_do(&move_symbols);
    tty->print_cr("done. ");

    // Phase 1d: move the remaining symbols by scanning the whole SymbolTable.

    void* extra_symbols = move_symbols.get_top();
    tty->print("Moving the remaining symbols to metadata section at " PTR_FORMAT " ... ",
               move_symbols.get_top());
    SymbolTable::symbols_do(&move_symbols);
    tty->print_cr("done. ");

    // Record the total length of all symbols at the beginning of the block.
    ((intptr_t*)md_top)[-2] = move_symbols.get_top() - md_top;
    ((intptr_t*)md_top)[-1] = move_symbols.count();
    tty->print_cr("Moved %d symbols, %d bytes.",
                  move_symbols.count(), move_symbols.get_top() - md_top);
    // Advance the pointer to the end of symbol store.
    md_top = move_symbols.get_top();


    // Phase 2: move commonly used read-only objects to the read-only space.

    if (SharedOptimizeColdStart) {
      tty->print("Moving pre-ordered read-only objects to shared space at " PTR_FORMAT " ... ",
                 _ro_space->top());
      for (int i = 0; i < _class_promote_order->length(); i++) {
        oop obj = _class_promote_order->at(i);
        mark_and_move_ordered_ro.do_object(obj);
      }
      tty->print_cr("done. ");
    }

    tty->print("Moving read-only objects to shared space at " PTR_FORMAT " ... ",
               _ro_space->top());
    gch->object_iterate(&mark_common_ro);
    gch->object_iterate(&move_ro);
    tty->print_cr("done. ");

    // Phase 3: move String character arrays to the read-only space.

    tty->print("Moving string char arrays to shared space at " PTR_FORMAT " ... ",
               _ro_space->top());
    gch->object_iterate(&mark_string_values);
    gch->object_iterate(&move_ro);
    tty->print_cr("done. ");

    // Phase 4: move read-write objects to the read-write space, except
    // Strings.

    if (SharedOptimizeColdStart) {
      tty->print("Moving pre-ordered read-write objects to shared space at " PTR_FORMAT " ... ",
                 _rw_space->top());
      for (int i = 0; i < _class_promote_order->length(); i++) {
        oop obj = _class_promote_order->at(i);
        mark_and_move_ordered_rw.do_object(obj);
      }
      tty->print_cr("done. ");
    }
    tty->print("Moving read-write objects to shared space at " PTR_FORMAT " ... ",
               _rw_space->top());
    Universe::oops_do(&mark_all, true);
    SystemDictionary::oops_do(&mark_all);
    oop tmp = Universe::arithmetic_exception_instance();
    mark_object(java_lang_Throwable::message(tmp));
    gch->object_iterate(&mark_rw);
    gch->object_iterate(&move_rw);
    tty->print_cr("done. ");

    // Phase 5: move String objects to the read-write space.

    tty->print("Moving String objects to shared space at " PTR_FORMAT " ... ",
               _rw_space->top());
    StringTable::oops_do(&mark_all);
    gch->object_iterate(&mark_strings);
    gch->object_iterate(&move_rw);
    tty->print_cr("done. ");
    tty->print_cr("Read-write space ends at " PTR_FORMAT ", %d bytes.",
                  _rw_space->top(), _rw_space->used());

#ifdef DEBUG
    // Check: scan for objects which were not moved.

    CheckRemainingObjects check_objects;
    gch->object_iterate(&check_objects);
    check_objects.status();
#endif

    // Resolve forwarding in objects and saved C++ structures
    tty->print("Updating references to shared objects ... ");
    ResolveForwardingClosure resolve;
    Universe::oops_do(&resolve);
    SystemDictionary::oops_do(&resolve);
    StringTable::oops_do(&resolve);

    // Fix (forward) all of the references in these shared objects (which
    // are required to point ONLY to objects in the shared spaces).
    // Also, create a list of all objects which might later contain a
    // reference to a younger generation object.

    CompactingPermGenGen* gen = (CompactingPermGenGen*)gch->perm_gen();
    PatchOopsClosure patch(THREAD);
    gen->ro_space()->object_iterate(&patch);
    gen->rw_space()->object_iterate(&patch);

    // Previously method sorting was done concurrently with forwarding
    // pointer resolution in the shared spaces.  This imposed an ordering
    // restriction in that methods were required to be promoted/patched
    // before their holder classes.  (Because constant pool pointers in
    // methodKlasses are required to be resolved before their holder class
    // is visited for sorting, otherwise methods are sorted by incorrect,
    // pre-forwarding addresses.)
    //
    // Now, we reorder methods as a separate step after ALL forwarding
    // pointer resolution, so that methods can be promoted in any order
    // with respect to their holder classes.

    SortMethodsClosure sort;
    gen->ro_space()->object_iterate(&sort);
    gen->rw_space()->object_iterate(&sort);

    ReinitializeTables reinit_tables(THREAD);
    gen->ro_space()->object_iterate(&reinit_tables);
    gen->rw_space()->object_iterate(&reinit_tables);
    tty->print_cr("done. ");
    tty->cr();

    // Reorder the system dictionary.  (Moving the symbols affects
    // how the hash table indices are calculated.)

    SystemDictionary::reorder_dictionary();

    // Empty the non-shared heap (because most of the objects were
    // copied out, and the remainder cannot be considered valid oops).

    ClearSpaceClosure csc;
    for (int i = 0; i < gch->n_gens(); ++i) {
      gch->get_gen(i)->space_iterate(&csc);
    }
    csc.do_space(gen->the_space());
    NOT_PRODUCT(SystemDictionary::verify();)

    // Copy the String table, the symbol table, and the system
    // dictionary to the shared space in usable form.  Copy the hashtable
    // buckets first [read-write], then copy the linked lists of entries
    // [read-only].

    SymbolTable::reverse(extra_symbols);
    NOT_PRODUCT(SymbolTable::verify());
    SymbolTable::copy_buckets(&md_top, md_end);

    StringTable::reverse();
    NOT_PRODUCT(StringTable::verify());
    StringTable::copy_buckets(&md_top, md_end);

    SystemDictionary::reverse();
    SystemDictionary::copy_buckets(&md_top, md_end);

    ClassLoader::verify();
    ClassLoader::copy_package_info_buckets(&md_top, md_end);
    ClassLoader::verify();

    SymbolTable::copy_table(&md_top, md_end);
    StringTable::copy_table(&md_top, md_end);
    SystemDictionary::copy_table(&md_top, md_end);
    ClassLoader::verify();
    ClassLoader::copy_package_info_table(&md_top, md_end);
    ClassLoader::verify();

    // Print debug data.

    if (PrintSharedSpaces) {
      const char* fmt = "%s space: " PTR_FORMAT " out of " PTR_FORMAT " bytes allocated at " PTR_FORMAT ".";
      tty->print_cr(fmt, "ro", _ro_space->used(), _ro_space->capacity(),
                    _ro_space->bottom());
      tty->print_cr(fmt, "rw", _rw_space->used(), _rw_space->capacity(),
                    _rw_space->bottom());
    }

    // Write the oop data to the output array.

    WriteClosure wc(md_top, md_end);
    CompactingPermGenGen::serialize_oops(&wc);
    md_top = wc.get_top();

    // Update the vtable pointers in all of the Klass objects in the
    // heap.  They should point to newly generated vtable.

    PatchKlassVtables pkvt;
    _rw_space->object_iterate(&pkvt);
    pkvt.patch(vtbl_list, vtable);

#ifndef PRODUCT
    // Update the vtable pointers in all symbols,
    // but only in non-product builds where symbols DO have virtual methods.
    PatchSymbolVtables psvt(vtbl_list, vtable);
    SymbolTable::symbols_do(&psvt);
#endif

    char* saved_vtbl = (char*)malloc(vtbl_list_size * sizeof(void*));
    memmove(saved_vtbl, vtbl_list, vtbl_list_size * sizeof(void*));
    memset(vtbl_list, 0, vtbl_list_size * sizeof(void*));

    // Create and write the archive file that maps the shared spaces.

    FileMapInfo* mapinfo = new FileMapInfo();
    mapinfo->populate_header(gch->gen_policy()->max_alignment());

    // Pass 1 - update file offsets in header.
    mapinfo->write_header();
    mapinfo->write_space(CompactingPermGenGen::ro, _ro_space, true);
    _ro_space->set_saved_mark();
    mapinfo->write_space(CompactingPermGenGen::rw, _rw_space, false);
    _rw_space->set_saved_mark();
    mapinfo->write_region(CompactingPermGenGen::md, _md_vs->low(),
                          pointer_delta(md_top, _md_vs->low(), sizeof(char)),
                          SharedMiscDataSize,
                          false, false);
    mapinfo->write_region(CompactingPermGenGen::mc, _mc_vs->low(),
                          pointer_delta(mc_top, _mc_vs->low(), sizeof(char)),
                          SharedMiscCodeSize,
                          true, true);

    // Pass 2 - write data.
    mapinfo->open_for_write();
    mapinfo->write_header();
    mapinfo->write_space(CompactingPermGenGen::ro, _ro_space, true);
    mapinfo->write_space(CompactingPermGenGen::rw, _rw_space, false);
    mapinfo->write_region(CompactingPermGenGen::md, _md_vs->low(),
                          pointer_delta(md_top, _md_vs->low(), sizeof(char)),
                          SharedMiscDataSize,
                          false, false);
    mapinfo->write_region(CompactingPermGenGen::mc, _mc_vs->low(),
                          pointer_delta(mc_top, _mc_vs->low(), sizeof(char)),
                          SharedMiscCodeSize,
                          true, true);
    mapinfo->close();

    // Summarize heap.
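    // (First restore the vtbl_list entries that were saved and zeroed before
    //  the regions were written out -- so the archived copy holds zeros rather
    //  than process-specific pointers while the still-running VM keeps a usable
    //  list -- then print the summary.)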
    memmove(vtbl_list, saved_vtbl, vtbl_list_size * sizeof(void*));
    print_contents();
  }
}; // class VM_PopulateDumpSharedSpace


// Populate the shared spaces and dump to a file.

jint CompactingPermGenGen::dump_shared(GrowableArray<oop>* class_promote_order, TRAPS) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Calculate hash values for all of the (interned) strings to avoid
  // writes to shared pages in the future.

  tty->print("Calculating hash values for String objects .. ");
  StringHashCodeClosure shcc(THREAD);
  StringTable::oops_do(&shcc);
  tty->print_cr("done. ");

  CompactingPermGenGen* gen = (CompactingPermGenGen*)gch->perm_gen();
  VM_PopulateDumpSharedSpace op(class_promote_order,
                                gen->ro_space(), gen->rw_space(),
                                gen->md_space(), gen->mc_space());
  VMThread::execute(&op);
  return JNI_OK;
}

void* CompactingPermGenGen::find_matching_vtbl_ptr(void** vtbl_list,
                                                   void* new_vtable_start,
                                                   void* obj) {
  void* old_vtbl_ptr = *(void**)obj;
  for (int i = 0; i < vtbl_list_size; i++) {
    if (vtbl_list[i] == old_vtbl_ptr) {
      return (void**)new_vtable_start + i * num_virtuals;
    }
  }
  ShouldNotReachHere();
  return NULL;
}


class LinkClassesClosure : public ObjectClosure {
private:
  Thread* THREAD;

public:
  LinkClassesClosure(Thread* thread) : THREAD(thread) {}

  void do_object(oop obj) {
    if (obj->is_klass()) {
      Klass* k = Klass::cast((klassOop) obj);
      if (k->oop_is_instance()) {
        instanceKlass* ik = (instanceKlass*) k;
        // Link the class to cause the bytecodes to be rewritten and the
        // cpcache to be created.
        if (ik->init_state() < instanceKlass::linked) {
          ik->link_class(THREAD);
          guarantee(!HAS_PENDING_EXCEPTION, "exception in class rewriting");
        }

        // Create String objects from string initializer symbols.
        ik->constants()->resolve_string_constants(THREAD);
        guarantee(!HAS_PENDING_EXCEPTION, "exception resolving string constants");
      }
    }
  }
};


// Support for a simple checksum of the contents of the class list
// file to prevent trivial tampering.  The algorithm matches that in
// the MakeClassList program used by the J2SE build process.
#define JSUM_SEED ((jlong)CONST64(0xcafebabebabecafe))
static jlong
jsum(jlong start, const char *buf, const int len)
{
    jlong h = start;
    char *p = (char *)buf, *e = p + len;
    while (p < e) {
        char c = *p++;
        if (c <= ' ') {
            /* Skip spaces and control characters */
            continue;
        }
        h = 31 * h + c;
    }
    return h;
}




// Preload classes from a list, populate the shared spaces and dump to a
// file.

void GenCollectedHeap::preload_and_dump(TRAPS) {
  TraceTime timer("Dump Shared Spaces", TraceStartupTime);
  ResourceMark rm;

  // Preload classes to be shared.
  // Should use some os:: method rather than fopen() here. aB.
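  // (The class list is expected to be a plain text file with one class name
  //  per line in internal form, e.g. java/lang/Object, optionally ending with
  //  a line of the form "# <hi32><lo32>" that carries the jsum() checksum
  //  written by the MakeClassList tool; this description is inferred from the
  //  parsing loop below rather than from a separate specification.)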
  // Construct the path to the class list (in jre/lib)
  // Walk up two directories from the location of the VM and
  // optionally tack on "lib" (depending on platform)
  char class_list_path[JVM_MAXPATHLEN];
  os::jvm_path(class_list_path, sizeof(class_list_path));
  for (int i = 0; i < 3; i++) {
    char *end = strrchr(class_list_path, *os::file_separator());
    if (end != NULL) *end = '\0';
  }
  int class_list_path_len = (int)strlen(class_list_path);
  if (class_list_path_len >= 3) {
    if (strcmp(class_list_path + class_list_path_len - 3, "lib") != 0) {
      strcat(class_list_path, os::file_separator());
      strcat(class_list_path, "lib");
    }
  }
  strcat(class_list_path, os::file_separator());
  strcat(class_list_path, "classlist");

  FILE* file = fopen(class_list_path, "r");
  if (file != NULL) {
    jlong computed_jsum = JSUM_SEED;
    jlong file_jsum = 0;

    char class_name[256];
    int class_count = 0;
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    gch->_preloading_shared_classes = true;
    GrowableArray<oop>* class_promote_order = new GrowableArray<oop>();

    // Preload (and intern) strings which will be used later.

    StringTable::intern("main", THREAD);
    StringTable::intern("([Ljava/lang/String;)V", THREAD);
    StringTable::intern("Ljava/lang/Class;", THREAD);

    StringTable::intern("I", THREAD);   // Needed for StringBuffer persistence?
    StringTable::intern("Z", THREAD);   // Needed for StringBuffer persistence?

    // sun.io.Converters
    static const char obj_array_sig[] = "[[Ljava/lang/Object;";
    SymbolTable::lookup(obj_array_sig, (int)strlen(obj_array_sig), THREAD);

    // java.util.HashMap
    static const char map_entry_array_sig[] = "[Ljava/util/Map$Entry;";
    SymbolTable::lookup(map_entry_array_sig, (int)strlen(map_entry_array_sig),
                        THREAD);

    tty->print("Loading classes to share ... ");
    while ((fgets(class_name, sizeof class_name, file)) != NULL) {
      if (*class_name == '#') {
        jint fsh, fsl;
        if (sscanf(class_name, "# %8x%8x\n", &fsh, &fsl) == 2) {
          file_jsum = ((jlong)(fsh) << 32) | (fsl & 0xffffffff);
        }

        continue;
      }
      // Remove trailing newline
      size_t name_len = strlen(class_name);
      class_name[name_len-1] = '\0';

      computed_jsum = jsum(computed_jsum, class_name, (const int)name_len - 1);

      // Got a class name - load it.
      TempNewSymbol class_name_symbol = SymbolTable::new_symbol(class_name, THREAD);
      guarantee(!HAS_PENDING_EXCEPTION, "Exception creating a symbol.");
      klassOop klass = SystemDictionary::resolve_or_null(class_name_symbol,
                                                         THREAD);
      guarantee(!HAS_PENDING_EXCEPTION, "Exception resolving a class.");
      if (klass != NULL) {
        if (PrintSharedSpaces) {
          tty->print_cr("Shared spaces preloaded: %s", class_name);
        }


        instanceKlass* ik = instanceKlass::cast(klass);

        // Should be class load order as per -XX:+TraceClassLoadingPreorder
        class_promote_order->append(ik->as_klassOop());

        // Link the class to cause the bytecodes to be rewritten and the
        // cpcache to be created.  The linking is done as soon as classes
        // are loaded in order that the related data structures (klass,
        // cpCache, String constants) are located together.

        if (ik->init_state() < instanceKlass::linked) {
          ik->link_class(THREAD);
          guarantee(!(HAS_PENDING_EXCEPTION), "exception in class rewriting");
        }

        // Create String objects from string initializer symbols.

        ik->constants()->resolve_string_constants(THREAD);

        class_count++;
      } else {
        if (PrintSharedSpaces) {
          tty->cr();
          tty->print_cr(" Preload failed: %s", class_name);
        }
      }
      file_jsum = 0; // Checksum must be on last line of file
    }
    if (computed_jsum != file_jsum) {
      tty->cr();
      tty->print_cr("Preload failed: checksum of class list was incorrect.");
      exit(1);
    }

    tty->print_cr("done. ");

    if (PrintSharedSpaces) {
      tty->print_cr("Shared spaces: preloaded %d classes", class_count);
    }

    // Rewrite and unlink classes.
    tty->print("Rewriting and unlinking classes ... ");
    // Make heap parsable
    ensure_parsability(false); // arg is actually don't care

    // Link any classes which got missed.  (It's not quite clear why
    // they got missed.)  This iteration would be unsafe if we weren't
    // single-threaded at this point; however we can't do it on the VM
    // thread because it requires object allocation.
    LinkClassesClosure lcc(Thread::current());
    object_iterate(&lcc);
    ensure_parsability(false); // arg is actually don't care
    tty->print_cr("done. ");

    // Create and dump the shared spaces.
    jint err = CompactingPermGenGen::dump_shared(class_promote_order, THREAD);
    if (err != JNI_OK) {
      fatal("Dumping shared spaces failed.");
    }

  } else {
    char errmsg[JVM_MAXPATHLEN];
    os::lasterror(errmsg, JVM_MAXPATHLEN);
    tty->print_cr("Loading classlist failed: %s", errmsg);
    exit(1);
  }

  // Since various initialization steps have been undone by this process,
  // it is not reasonable to continue running a java process.
  exit(0);
}