< prev index next >

src/hotspot/share/memory/metaspaceShared.cpp

Print this page




  58 #include "runtime/os.hpp"
  59 #include "runtime/safepointVerifiers.hpp"
  60 #include "runtime/signature.hpp"
  61 #include "runtime/timerTrace.hpp"
  62 #include "runtime/vmThread.hpp"
  63 #include "runtime/vm_operations.hpp"
  64 #include "utilities/align.hpp"
  65 #include "utilities/bitMap.hpp"
  66 #include "utilities/defaultStream.hpp"
  67 #include "utilities/hashtable.inline.hpp"
  68 #if INCLUDE_G1GC
  69 #include "gc/g1/g1CollectedHeap.hpp"
  70 #endif
  71 
  72 ReservedSpace MetaspaceShared::_shared_rs;
  73 VirtualSpace MetaspaceShared::_shared_vs;
  74 MetaspaceSharedStats MetaspaceShared::_stats;
  75 bool MetaspaceShared::_has_error_classes;
  76 bool MetaspaceShared::_archive_loading_failed = false;
  77 bool MetaspaceShared::_remapped_readwrite = false;
  78 bool MetaspaceShared::_open_archive_heap_region_mapped = false;
  79 bool MetaspaceShared::_archive_heap_region_fixed = false;
  80 address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
  81 size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
  82 size_t MetaspaceShared::_core_spaces_size = 0;
  83 
  84 // The CDS archive is divided into the following regions:
  85 //     mc  - misc code (the method entry trampolines)
  86 //     rw  - read-write metadata
  87 //     ro  - read-only metadata and read-only tables
  88 //     md  - misc data (the c++ vtables)
  89 //     od  - optional data (original class files)
  90 //
  91 //     s0  - shared strings(closed archive heap space) #0
  92 //     s1  - shared strings(closed archive heap space) #1 (may be empty)
  93 //     oa0 - open archive heap space #0
  94 //     oa1 - open archive heap space #1 (may be empty)
  95 //
  96 // The mc, rw, ro, md and od regions are linearly allocated, starting from
  97 // SharedBaseAddress, in the order of mc->rw->ro->md->od. The size of these 5 regions
  98 // are page-aligned, and there's no gap between any consecutive regions.
  99 //
 100 // These 5 regions are populated in the following steps:
 101 // [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
 102 //     temporarily allocated outside of the shared regions. Only the method entry
 103 //     trampolines are written into the mc region.
 104 // [2] ArchiveCompactor copies RW metadata into the rw region.
 105 // [3] ArchiveCompactor copies RO metadata into the ro region.
 106 // [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
 107 //     are copied into the ro region as read-only tables.
 108 // [5] C++ vtables are copied into the md region.
 109 // [6] Original class files are copied into the od region.
 110 //
 111 // The s0/s1 and oa0/oa1 regions are populated inside MetaspaceShared::dump_java_heap_objects.
 112 // Their layout is independent of the other 5 regions.
 113 
 114 class DumpRegion {
 115 private:
 116   const char* _name;
 117   char* _base;
 118   char* _top;
 119   char* _end;
 120   bool _is_packed;
 121 
 122   char* expand_top_to(char* newtop) {
 123     assert(is_allocatable(), "must be initialized and not packed");
 124     assert(newtop >= _top, "must not grow backwards");
 125     if (newtop > _end) {
 126       MetaspaceShared::report_out_of_space(_name, newtop - _top);
 127       ShouldNotReachHere();
 128     }
 129     MetaspaceShared::commit_shared_space_to(newtop);
 130     _top = newtop;
 131     return _top;


 437     if (_cds_i2i_entry_code_buffers == NULL) {
 438       _cds_i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
 439       _cds_i2i_entry_code_buffers_size = total_size;
 440     }
 441   } else if (UseSharedSpaces) {
 442     assert(_cds_i2i_entry_code_buffers != NULL, "must already been initialized");
 443   } else {
 444     return NULL;
 445   }
 446 
 447   assert(_cds_i2i_entry_code_buffers_size == total_size, "must not change");
 448   return _cds_i2i_entry_code_buffers;
 449 }
 450 
 451 // CDS code for dumping shared archive.
 452 
 453 // Global object for holding classes that have been loaded.  Since this
 454 // is run at a safepoint just before exit, this is the entire set of classes.
 455 static GrowableArray<Klass*>* _global_klass_objects;
 456 




 457 static void collect_array_classes(Klass* k) {
 458   _global_klass_objects->append_if_missing(k);
 459   if (k->is_array_klass()) {
 460     // Add in the array classes too
 461     ArrayKlass* ak = ArrayKlass::cast(k);
 462     Klass* h = ak->higher_dimension();
 463     if (h != NULL) {
 464       h->array_klasses_do(collect_array_classes);
 465     }
 466   }
 467 }
 468 
 469 class CollectClassesClosure : public KlassClosure {
 470   void do_klass(Klass* k) {
 471     if (!(k->is_instance_klass() && InstanceKlass::cast(k)->is_in_error_state())) {
 472       if (k->is_instance_klass() && InstanceKlass::cast(k)->signers() != NULL) {
 473         // Mark any class with signers and don't add to the _global_klass_objects
 474         k->set_has_signer_and_not_archived();
 475       } else {
 476         _global_klass_objects->append_if_missing(k);


 495       // on their array classes.
 496       assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
 497       k->remove_unshareable_info();
 498     }
 499   }
 500 }
 501 
 502 static void remove_java_mirror_in_classes() {
 503   for (int i = 0; i < _global_klass_objects->length(); i++) {
 504     Klass* k = _global_klass_objects->at(i);
 505     if (!k->is_objArray_klass()) {
 506       // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
 507       // on their array classes.
 508       assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
 509       k->remove_java_mirror();
 510     }
 511   }
 512 }
 513 
// Null out Universe's basic-type mirror oops. Only legal when heap
// object archiving is disabled (see assert) — presumably so the dumped
// data does not reference heap objects that are not archived; confirm
// against the callers.
static void clear_basic_type_mirrors() {
  assert(!MetaspaceShared::is_heap_object_archiving_allowed(), "Sanity");
  Universe::set_int_mirror(NULL);
  Universe::set_float_mirror(NULL);
  Universe::set_double_mirror(NULL);
  Universe::set_byte_mirror(NULL);
  Universe::set_bool_mirror(NULL);
  Universe::set_char_mirror(NULL);
  Universe::set_long_mirror(NULL);
  Universe::set_short_mirror(NULL);
  Universe::set_void_mirror(NULL);
}
 526 
 527 static void rewrite_nofast_bytecode(Method* method) {
 528   BytecodeStream bcs(method);
 529   while (!bcs.is_last_bytecode()) {
 530     Bytecodes::Code opcode = bcs.next();
 531     switch (opcode) {
 532     case Bytecodes::_getfield:      *bcs.bcp() = Bytecodes::_nofast_getfield;      break;
 533     case Bytecodes::_putfield:      *bcs.bcp() = Bytecodes::_nofast_putfield;      break;
 534     case Bytecodes::_aload_0:       *bcs.bcp() = Bytecodes::_nofast_aload_0;       break;
 535     case Bytecodes::_iload: {


 833     _dump_region = r;
 834   }
 835 
  // Append the pointer value itself as one word in the dump region.
  void do_ptr(void** p) {
    _dump_region->append_intptr_t((intptr_t)*p);
  }
 839 
  // Widen the 32-bit value to a full word and write it via do_ptr, so
  // the read side consumes slots uniformly one word at a time.
  void do_u4(u4* p) {
    void* ptr = (void*)(uintx(*p));
    do_ptr(&ptr);
  }
 844 
  // Write a sanity tag word; ReadClosure::do_tag verifies it on restore.
  void do_tag(int tag) {
    _dump_region->append_intptr_t((intptr_t)tag);
  }
 848 
  // Write an oop slot as one word: 0 for NULL, otherwise the
  // narrowOop encoding of the object.
  void do_oop(oop* o) {
    if (*o == NULL) {
      _dump_region->append_intptr_t(0);
    } else {
      assert(MetaspaceShared::is_heap_object_archiving_allowed(),
             "Archiving heap object is not allowed");
      _dump_region->append_intptr_t(
        (intptr_t)CompressedOops::encode_not_null(*o));
    }
  }
 859 
  // Write a word-aligned raw region: a size tag followed by the data,
  // one word per slot.
  void do_region(u_char* start, size_t size) {
    assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
    assert(size % sizeof(intptr_t) == 0, "bad size");
    do_tag((int)size);
    while (size > 0) {
      _dump_region->append_intptr_t(*(intptr_t*)start);
      start += sizeof(intptr_t);
      size -= sizeof(intptr_t);
    }
  }
 870 
  bool reading() const { return false; }  // this closure writes the archive
 872 };
 873 


1297 
// Thin wrapper: hand a DumpRegion's used bytes to the FileMapInfo
// writer under the given region index and permissions.
void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx,
                                              DumpRegion* dump_region, bool read_only,  bool allow_exec) {
  mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
}
1302 
// Verify (non-product builds only) and archive the SymbolTable.
void VM_PopulateDumpSharedSpace::dump_symbols() {
  tty->print_cr("Dumping symbol table ...");

  NOT_PRODUCT(SymbolTable::verify());
  SymbolTable::write_to_archive();
}
1309 
1310 char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
1311   char* oldtop = _ro_region.top();
1312   // Reorder the system dictionary. Moving the symbols affects
1313   // how the hash table indices are calculated.
1314   SystemDictionary::reorder_dictionary_for_sharing();
1315 
1316   tty->print("Removing java_mirror ... ");
1317   if (!MetaspaceShared::is_heap_object_archiving_allowed()) {
1318     clear_basic_type_mirrors();
1319   }
1320   remove_java_mirror_in_classes();
1321   tty->print_cr("done. ");
1322   NOT_PRODUCT(SystemDictionary::verify();)
1323 
1324   size_t buckets_bytes = SystemDictionary::count_bytes_for_buckets();
1325   char* buckets_top = _ro_region.allocate(buckets_bytes, sizeof(intptr_t));
1326   SystemDictionary::copy_buckets(buckets_top, _ro_region.top());
1327 
1328   size_t table_bytes = SystemDictionary::count_bytes_for_table();
1329   char* table_top = _ro_region.allocate(table_bytes, sizeof(intptr_t));
1330   SystemDictionary::copy_table(table_top, _ro_region.top());
1331 
1332   // Write the archived object sub-graph infos. For each klass with sub-graphs,
1333   // the info includes the static fields (sub-graph entry points) and Klasses
1334   // of objects included in the sub-graph.
1335   HeapShared::write_archived_subgraph_infos();
1336 
1337   // Write the other data to the output array.


1773       BytecodeVerificationLocal = BytecodeVerificationRemote;
1774     }
1775     ik->link_class(THREAD);
1776     if (HAS_PENDING_EXCEPTION) {
1777       ResourceMark rm;
1778       tty->print_cr("Preload Warning: Verification failed for %s",
1779                     ik->external_name());
1780       CLEAR_PENDING_EXCEPTION;
1781       ik->set_in_error_state();
1782       _has_error_classes = true;
1783     }
1784     BytecodeVerificationLocal = saved;
1785     return true;
1786   } else {
1787     return false;
1788   }
1789 }
1790 
1791 #if INCLUDE_CDS_JAVA_HEAP
// Dump-time entry point for copying java heap objects into the closed
// and open archive heap regions. No-op (with an informational log)
// when heap object archiving is not supported in this configuration.
void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
  if (!MetaspaceShared::is_heap_object_archiving_allowed()) {
    if (log_is_enabled(Info, cds)) {
      log_info(cds)(
        "Archived java heap is not supported as UseG1GC, "
        "UseCompressedOops and UseCompressedClassPointers are required."
        "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
        BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
        BOOL_TO_STR(UseCompressedClassPointers));
    }
    return;
  }

  {
    // Guard the whole copy against safepoints — the cache created below
    // maps original oops to archived oops, which presumably must not be
    // moved by a GC while it is live; confirm against ArchivedObjectCache.
    NoSafepointVerifier nsv;

    // Cache for recording where the archived objects are copied to
    MetaspaceShared::create_archive_object_cache();

    tty->print_cr("Dumping objects to closed archive heap region ...");
    NOT_PRODUCT(StringTable::verify());
    // The closed space has maximum two regions. See FileMapInfo::write_archive_heap_regions() for details.
    _closed_archive_heap_regions = new GrowableArray<MemRegion>(2);
    MetaspaceShared::dump_closed_archive_heap_objects(_closed_archive_heap_regions);

    tty->print_cr("Dumping objects to open archive heap region ...");
    _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
    MetaspaceShared::dump_open_archive_heap_objects(_open_archive_heap_regions);

    MetaspaceShared::destroy_archive_object_cache();
  }

  G1HeapVerifier::verify_archive_regions();
}
1826 
1827 void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps() {
1828   if (MetaspaceShared::is_heap_object_archiving_allowed()) {
1829     _closed_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
1830     dump_archive_heap_oopmaps(_closed_archive_heap_regions, _closed_archive_heap_oopmaps);
1831 
1832     _open_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
1833     dump_archive_heap_oopmaps(_open_archive_heap_regions, _open_archive_heap_oopmaps);
1834   }
1835 }
1836 
// For each heap region: compute a bitmap of the oop slots in the
// region, store the bitmap bytes in the ro dump region, and record
// the bitmap's address/size in 'oopmaps'.
void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                                           GrowableArray<ArchiveHeapOopmapInfo>* oopmaps) {
  for (int i=0; i<regions->length(); i++) {
    ResourceBitMap oopmap = HeapShared::calculate_oopmap(regions->at(i));
    size_t size_in_bits = oopmap.size();
    size_t size_in_bytes = oopmap.size_in_bytes();
    // The bitmap lives in the read-only dump region, word-aligned.
    uintptr_t* buffer = (uintptr_t*)_ro_region.allocate(size_in_bytes, sizeof(intptr_t));
    oopmap.write_to(buffer, size_in_bytes);
    log_info(cds)("Oopmap = " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for heap region "
                  INTPTR_FORMAT " (" SIZE_FORMAT_W(8) " bytes)",
                  p2i(buffer), size_in_bytes,
                  p2i(regions->at(i).start()), regions->at(i).byte_size());

    ArchiveHeapOopmapInfo info;
    info._oopmap = (address)buffer;
    info._oopmap_size_in_bits = size_in_bits;
    oopmaps->append(info);
  }
}
1856 
1857 void MetaspaceShared::dump_closed_archive_heap_objects(
1858                                     GrowableArray<MemRegion> * closed_archive) {
1859   assert(is_heap_object_archiving_allowed(), "Cannot dump java heap objects");
1860 
1861   Thread* THREAD = Thread::current();
1862   G1CollectedHeap::heap()->begin_archive_alloc_range();
1863 
1864   // Archive interned string objects
1865   StringTable::write_to_archive();
1866 
1867   G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive,
1868                                                    os::vm_allocation_granularity());
1869 }
1870 
// Dump basic-type mirrors, klass mirrors/resolved_references, and the
// archived static-field sub-graphs into 'open_archive' using G1's
// open archive allocator.
void MetaspaceShared::dump_open_archive_heap_objects(
                                    GrowableArray<MemRegion> * open_archive) {
  assert(UseG1GC, "Only support G1 GC");
  assert(UseCompressedOops && UseCompressedClassPointers,
         "Only support UseCompressedOops and UseCompressedClassPointers enabled");

  Thread* THREAD = Thread::current();
  G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);

  // Mirrors of the primitive types (int, float, ...).
  java_lang_Class::archive_basic_type_mirrors(THREAD);

  // Mirrors and resolved_references of the collected klasses.
  MetaspaceShared::archive_klass_objects(THREAD);

  // Object sub-graphs reachable from the designated static fields.
  HeapShared::archive_static_fields(THREAD);

  G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
                                                   os::vm_allocation_granularity());
}
1889 
// Hash function for the ArchivedObjectCache: the object's identity hash.
unsigned MetaspaceShared::obj_hash(oop const& p) {
  assert(!p->mark()->has_bias_pattern(),
         "this object should never have been locked");  // so identity_hash won't safepoint
  unsigned hash = (unsigned)p->identity_hash();
  return hash;
}
1896 
1897 MetaspaceShared::ArchivedObjectCache* MetaspaceShared::_archive_object_cache = NULL;
1898 oop MetaspaceShared::find_archived_heap_object(oop obj) {
1899   assert(DumpSharedSpaces, "dump-time only");
1900   ArchivedObjectCache* cache = MetaspaceShared::archive_object_cache();
1901   oop* p = cache->get(obj);
1902   if (p != NULL) {
1903     return *p;
1904   } else {
1905     return NULL;
1906   }
1907 }
1908 
// Copy obj into the currently open G1 archive range and remember the
// original->archived mapping in the cache. Returns the archived copy,
// or NULL if the object is too large to archive; exits the VM if the
// archive range is out of space.
oop MetaspaceShared::archive_heap_object(oop obj, Thread* THREAD) {
  assert(DumpSharedSpaces, "dump-time only");

  oop ao = find_archived_heap_object(obj);
  if (ao != NULL) {
    // already archived
    return ao;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), (size_t)obj->size());
    return NULL;
  }

  // NOTE(review): 'hash' is unused — presumably called for its side
  // effect of installing the identity hash in obj's header before the
  // word copy below, so the archived copy keeps the same hash. Confirm.
  int hash = obj->identity_hash();
  oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
  if (archived_oop != NULL) {
    // Copy the payload, then fix the copy's klass pointer (see
    // relocate_klass_ptr) and record the mapping.
    Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)archived_oop, len);
    relocate_klass_ptr(archived_oop);
    ArchivedObjectCache* cache = MetaspaceShared::archive_object_cache();
    cache->put(obj, archived_oop);
    log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT,
                         p2i(obj), p2i(archived_oop));
  } else {
    log_error(cds, heap)(
      "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
      p2i(obj));
    vm_exit(1);
  }
  return archived_oop;
}
1942 
1943 oop MetaspaceShared::materialize_archived_object(narrowOop v) {
1944   assert(archive_heap_region_fixed(),
1945          "must be called after archive heap regions are fixed");
1946   if (!CompressedOops::is_null(v)) {
1947     oop obj = HeapShared::decode_from_archive(v);
1948     return G1CollectedHeap::heap()->materialize_archived_object(obj);
1949   }
1950   return NULL;
1951 }
1952 
// Archive each collected klass's java mirror and, for instance
// klasses, the resolved_references array of its constant pool.
// NOTE: CHECK makes archive_mirror return early on a pending exception.
void MetaspaceShared::archive_klass_objects(Thread* THREAD) {
  int i;
  for (i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);

    // archive mirror object
    java_lang_Class::archive_mirror(k, CHECK);

    // archive the resolved_references array
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->archive_resolved_references(THREAD);
    }
  }
}
1968 
// Runtime side: patch up the mapped archive heap regions via the
// current FileMapInfo, then mark the regions as fixed.
void MetaspaceShared::fixup_mapped_heap_regions() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();
  mapinfo->fixup_mapped_heap_regions();
  set_archive_heap_region_fixed();
}
1974 #endif // INCLUDE_CDS_JAVA_HEAP
1975 
1976 // Closure for serializing initialization data in from a data area
1977 // (ptr_array) read from the shared file.
1978 
1979 class ReadClosure : public SerializeClosure {
1980 private:
1981   intptr_t** _ptr_array;
1982 
1983   inline intptr_t nextPtr() {
1984     return *(*_ptr_array)++;
1985   }
1986 
1987 public:
1988   ReadClosure(intptr_t** ptr_array) { _ptr_array = ptr_array; }
1989 
1990   void do_ptr(void** p) {
1991     assert(*p == NULL, "initializing previous initialized pointer.");
1992     intptr_t obj = nextPtr();
1993     assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
1994            "hit tag while initializing ptrs.");
1995     *p = (void*)obj;
1996   }
1997 
1998   void do_u4(u4* p) {
1999     intptr_t obj = nextPtr();
2000     *p = (u4)(uintx(obj));
2001   }
2002 
2003   void do_tag(int tag) {
2004     int old_tag;
2005     old_tag = (int)(intptr_t)nextPtr();
2006     // do_int(&old_tag);
2007     assert(tag == old_tag, "old tag doesn't match");
2008     FileMapInfo::assert_mark(tag == old_tag);
2009   }
2010 
2011   void do_oop(oop *p) {
2012     narrowOop o = (narrowOop)nextPtr();
2013     if (o == 0 || !MetaspaceShared::open_archive_heap_region_mapped()) {
2014       p = NULL;
2015     } else {
2016       assert(MetaspaceShared::is_heap_object_archiving_allowed(),
2017              "Archived heap object is not allowed");
2018       assert(MetaspaceShared::open_archive_heap_region_mapped(),
2019              "Open archive heap region is not mapped");
2020       *p = HeapShared::decode_from_archive(o);
2021     }
2022   }
2023 
2024   void do_region(u_char* start, size_t size) {
2025     assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
2026     assert(size % sizeof(intptr_t) == 0, "bad size");
2027     do_tag((int)size);
2028     while (size > 0) {
2029       *(intptr_t*)start = nextPtr();
2030       start += sizeof(intptr_t);
2031       size -= sizeof(intptr_t);
2032     }
2033   }
2034 
2035   bool reading() const { return true; }
2036 };
2037 
2038 // Return true if given address is in the misc data region




  58 #include "runtime/os.hpp"
  59 #include "runtime/safepointVerifiers.hpp"
  60 #include "runtime/signature.hpp"
  61 #include "runtime/timerTrace.hpp"
  62 #include "runtime/vmThread.hpp"
  63 #include "runtime/vm_operations.hpp"
  64 #include "utilities/align.hpp"
  65 #include "utilities/bitMap.hpp"
  66 #include "utilities/defaultStream.hpp"
  67 #include "utilities/hashtable.inline.hpp"
  68 #if INCLUDE_G1GC
  69 #include "gc/g1/g1CollectedHeap.hpp"
  70 #endif
  71 
  72 ReservedSpace MetaspaceShared::_shared_rs;
  73 VirtualSpace MetaspaceShared::_shared_vs;
  74 MetaspaceSharedStats MetaspaceShared::_stats;
  75 bool MetaspaceShared::_has_error_classes;
  76 bool MetaspaceShared::_archive_loading_failed = false;
  77 bool MetaspaceShared::_remapped_readwrite = false;


  78 address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
  79 size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
  80 size_t MetaspaceShared::_core_spaces_size = 0;
  81 
  82 // The CDS archive is divided into the following regions:
  83 //     mc  - misc code (the method entry trampolines)
  84 //     rw  - read-write metadata
  85 //     ro  - read-only metadata and read-only tables
  86 //     md  - misc data (the c++ vtables)
  87 //     od  - optional data (original class files)
  88 //
  89 //     s0  - shared strings(closed archive heap space) #0
  90 //     s1  - shared strings(closed archive heap space) #1 (may be empty)
  91 //     oa0 - open archive heap space #0
  92 //     oa1 - open archive heap space #1 (may be empty)
  93 //
  94 // The mc, rw, ro, md and od regions are linearly allocated, starting from
  95 // SharedBaseAddress, in the order of mc->rw->ro->md->od. The size of these 5 regions
  96 // are page-aligned, and there's no gap between any consecutive regions.
  97 //
  98 // These 5 regions are populated in the following steps:
  99 // [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
 100 //     temporarily allocated outside of the shared regions. Only the method entry
 101 //     trampolines are written into the mc region.
 102 // [2] ArchiveCompactor copies RW metadata into the rw region.
 103 // [3] ArchiveCompactor copies RO metadata into the ro region.
 104 // [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
 105 //     are copied into the ro region as read-only tables.
 106 // [5] C++ vtables are copied into the md region.
 107 // [6] Original class files are copied into the od region.
 108 //
 109 // The s0/s1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
 110 // Their layout is independent of the other 5 regions.
 111 
 112 class DumpRegion {
 113 private:
 114   const char* _name;
 115   char* _base;
 116   char* _top;
 117   char* _end;
 118   bool _is_packed;
 119 
 120   char* expand_top_to(char* newtop) {
 121     assert(is_allocatable(), "must be initialized and not packed");
 122     assert(newtop >= _top, "must not grow backwards");
 123     if (newtop > _end) {
 124       MetaspaceShared::report_out_of_space(_name, newtop - _top);
 125       ShouldNotReachHere();
 126     }
 127     MetaspaceShared::commit_shared_space_to(newtop);
 128     _top = newtop;
 129     return _top;


 435     if (_cds_i2i_entry_code_buffers == NULL) {
 436       _cds_i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
 437       _cds_i2i_entry_code_buffers_size = total_size;
 438     }
 439   } else if (UseSharedSpaces) {
 440     assert(_cds_i2i_entry_code_buffers != NULL, "must already been initialized");
 441   } else {
 442     return NULL;
 443   }
 444 
 445   assert(_cds_i2i_entry_code_buffers_size == total_size, "must not change");
 446   return _cds_i2i_entry_code_buffers;
 447 }
 448 
 449 // CDS code for dumping shared archive.
 450 
 451 // Global object for holding classes that have been loaded.  Since this
 452 // is run at a safepoint just before exit, this is the entire set of classes.
 453 static GrowableArray<Klass*>* _global_klass_objects;
 454 
// Accessor for the dump-time list of collected klasses.
GrowableArray<Klass*>* MetaspaceShared::collected_klasses() {
  return _global_klass_objects;
}
 458 
 459 static void collect_array_classes(Klass* k) {
 460   _global_klass_objects->append_if_missing(k);
 461   if (k->is_array_klass()) {
 462     // Add in the array classes too
 463     ArrayKlass* ak = ArrayKlass::cast(k);
 464     Klass* h = ak->higher_dimension();
 465     if (h != NULL) {
 466       h->array_klasses_do(collect_array_classes);
 467     }
 468   }
 469 }
 470 
 471 class CollectClassesClosure : public KlassClosure {
 472   void do_klass(Klass* k) {
 473     if (!(k->is_instance_klass() && InstanceKlass::cast(k)->is_in_error_state())) {
 474       if (k->is_instance_klass() && InstanceKlass::cast(k)->signers() != NULL) {
 475         // Mark any class with signers and don't add to the _global_klass_objects
 476         k->set_has_signer_and_not_archived();
 477       } else {
 478         _global_klass_objects->append_if_missing(k);


 497       // on their array classes.
 498       assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
 499       k->remove_unshareable_info();
 500     }
 501   }
 502 }
 503 
 504 static void remove_java_mirror_in_classes() {
 505   for (int i = 0; i < _global_klass_objects->length(); i++) {
 506     Klass* k = _global_klass_objects->at(i);
 507     if (!k->is_objArray_klass()) {
 508       // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
 509       // on their array classes.
 510       assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
 511       k->remove_java_mirror();
 512     }
 513   }
 514 }
 515 
// Null out Universe's basic-type mirror oops. Only legal when heap
// object archiving is disabled (see assert) — presumably so the dumped
// data does not reference heap objects that are not archived; confirm
// against the callers.
static void clear_basic_type_mirrors() {
  assert(!HeapShared::is_heap_object_archiving_allowed(), "Sanity");
  Universe::set_int_mirror(NULL);
  Universe::set_float_mirror(NULL);
  Universe::set_double_mirror(NULL);
  Universe::set_byte_mirror(NULL);
  Universe::set_bool_mirror(NULL);
  Universe::set_char_mirror(NULL);
  Universe::set_long_mirror(NULL);
  Universe::set_short_mirror(NULL);
  Universe::set_void_mirror(NULL);
}
 528 
 529 static void rewrite_nofast_bytecode(Method* method) {
 530   BytecodeStream bcs(method);
 531   while (!bcs.is_last_bytecode()) {
 532     Bytecodes::Code opcode = bcs.next();
 533     switch (opcode) {
 534     case Bytecodes::_getfield:      *bcs.bcp() = Bytecodes::_nofast_getfield;      break;
 535     case Bytecodes::_putfield:      *bcs.bcp() = Bytecodes::_nofast_putfield;      break;
 536     case Bytecodes::_aload_0:       *bcs.bcp() = Bytecodes::_nofast_aload_0;       break;
 537     case Bytecodes::_iload: {


 835     _dump_region = r;
 836   }
 837 
  // Append the pointer value itself as one word in the dump region.
  void do_ptr(void** p) {
    _dump_region->append_intptr_t((intptr_t)*p);
  }
 841 
  // Widen the 32-bit value to a full word and write it via do_ptr, so
  // the read side consumes slots uniformly one word at a time.
  void do_u4(u4* p) {
    void* ptr = (void*)(uintx(*p));
    do_ptr(&ptr);
  }
 846 
  // Write a sanity tag word; ReadClosure::do_tag verifies it on restore.
  void do_tag(int tag) {
    _dump_region->append_intptr_t((intptr_t)tag);
  }
 850 
  // Write an oop slot as one word: 0 for NULL, otherwise the
  // narrowOop encoding of the object.
  void do_oop(oop* o) {
    if (*o == NULL) {
      _dump_region->append_intptr_t(0);
    } else {
      assert(HeapShared::is_heap_object_archiving_allowed(),
             "Archiving heap object is not allowed");
      _dump_region->append_intptr_t(
        (intptr_t)CompressedOops::encode_not_null(*o));
    }
  }
 861 
  // Write a word-aligned raw region: a size tag followed by the data,
  // one word per slot.
  void do_region(u_char* start, size_t size) {
    assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
    assert(size % sizeof(intptr_t) == 0, "bad size");
    do_tag((int)size);
    while (size > 0) {
      _dump_region->append_intptr_t(*(intptr_t*)start);
      start += sizeof(intptr_t);
      size -= sizeof(intptr_t);
    }
  }
 872 
  bool reading() const { return false; }  // this closure writes the archive
 874 };
 875 


1299 
// Thin wrapper: hand a DumpRegion's used bytes to the FileMapInfo
// writer under the given region index and permissions.
void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx,
                                              DumpRegion* dump_region, bool read_only,  bool allow_exec) {
  mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
}
1304 
// Verify (non-product builds only) and archive the SymbolTable.
void VM_PopulateDumpSharedSpace::dump_symbols() {
  tty->print_cr("Dumping symbol table ...");

  NOT_PRODUCT(SymbolTable::verify());
  SymbolTable::write_to_archive();
}
1311 
1312 char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
1313   char* oldtop = _ro_region.top();
1314   // Reorder the system dictionary. Moving the symbols affects
1315   // how the hash table indices are calculated.
1316   SystemDictionary::reorder_dictionary_for_sharing();
1317 
1318   tty->print("Removing java_mirror ... ");
1319   if (!HeapShared::is_heap_object_archiving_allowed()) {
1320     clear_basic_type_mirrors();
1321   }
1322   remove_java_mirror_in_classes();
1323   tty->print_cr("done. ");
1324   NOT_PRODUCT(SystemDictionary::verify();)
1325 
1326   size_t buckets_bytes = SystemDictionary::count_bytes_for_buckets();
1327   char* buckets_top = _ro_region.allocate(buckets_bytes, sizeof(intptr_t));
1328   SystemDictionary::copy_buckets(buckets_top, _ro_region.top());
1329 
1330   size_t table_bytes = SystemDictionary::count_bytes_for_table();
1331   char* table_top = _ro_region.allocate(table_bytes, sizeof(intptr_t));
1332   SystemDictionary::copy_table(table_top, _ro_region.top());
1333 
1334   // Write the archived object sub-graph infos. For each klass with sub-graphs,
1335   // the info includes the static fields (sub-graph entry points) and Klasses
1336   // of objects included in the sub-graph.
1337   HeapShared::write_archived_subgraph_infos();
1338 
1339   // Write the other data to the output array.


1775       BytecodeVerificationLocal = BytecodeVerificationRemote;
1776     }
1777     ik->link_class(THREAD);
1778     if (HAS_PENDING_EXCEPTION) {
1779       ResourceMark rm;
1780       tty->print_cr("Preload Warning: Verification failed for %s",
1781                     ik->external_name());
1782       CLEAR_PENDING_EXCEPTION;
1783       ik->set_in_error_state();
1784       _has_error_classes = true;
1785     }
1786     BytecodeVerificationLocal = saved;
1787     return true;
1788   } else {
1789     return false;
1790   }
1791 }
1792 
1793 #if INCLUDE_CDS_JAVA_HEAP
// Dump-time entry point: allocate the closed/open region lists and
// delegate the actual heap-object archiving to HeapShared.
void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
  // The closed and open archive heap space has maximum two regions.
  // See FileMapInfo::write_archive_heap_regions() for details.
  _closed_archive_heap_regions = new GrowableArray<MemRegion>(2);
  _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
  HeapShared::archive_java_heap_objects(_closed_archive_heap_regions,
                                        _open_archive_heap_regions);
}
1802 
1803 void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps() {
1804   if (HeapShared::is_heap_object_archiving_allowed()) {
1805     _closed_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
1806     dump_archive_heap_oopmaps(_closed_archive_heap_regions, _closed_archive_heap_oopmaps);
1807 
1808     _open_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
1809     dump_archive_heap_oopmaps(_open_archive_heap_regions, _open_archive_heap_oopmaps);
1810   }
1811 }
1812 
// For each heap region: compute a bitmap of the oop slots in the
// region, store the bitmap bytes in the ro dump region, and record
// the bitmap's address/size in 'oopmaps'.
void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                                           GrowableArray<ArchiveHeapOopmapInfo>* oopmaps) {
  for (int i=0; i<regions->length(); i++) {
    ResourceBitMap oopmap = HeapShared::calculate_oopmap(regions->at(i));
    size_t size_in_bits = oopmap.size();
    size_t size_in_bytes = oopmap.size_in_bytes();
    // The bitmap lives in the read-only dump region, word-aligned.
    uintptr_t* buffer = (uintptr_t*)_ro_region.allocate(size_in_bytes, sizeof(intptr_t));
    oopmap.write_to(buffer, size_in_bytes);
    log_info(cds)("Oopmap = " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for heap region "
                  INTPTR_FORMAT " (" SIZE_FORMAT_W(8) " bytes)",
                  p2i(buffer), size_in_bytes,
                  p2i(regions->at(i).start()), regions->at(i).byte_size());

    ArchiveHeapOopmapInfo info;
    info._oopmap = (address)buffer;
    info._oopmap_size_in_bits = size_in_bits;
    oopmaps->append(info);
  }
}






















































































































1832 #endif // INCLUDE_CDS_JAVA_HEAP
1833 
1834 // Closure for serializing initialization data in from a data area
1835 // (ptr_array) read from the shared file.
1836 
1837 class ReadClosure : public SerializeClosure {
1838 private:
1839   intptr_t** _ptr_array;
1840 
1841   inline intptr_t nextPtr() {
1842     return *(*_ptr_array)++;
1843   }
1844 
1845 public:
1846   ReadClosure(intptr_t** ptr_array) { _ptr_array = ptr_array; }
1847 
1848   void do_ptr(void** p) {
1849     assert(*p == NULL, "initializing previous initialized pointer.");
1850     intptr_t obj = nextPtr();
1851     assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
1852            "hit tag while initializing ptrs.");
1853     *p = (void*)obj;
1854   }
1855 
1856   void do_u4(u4* p) {
1857     intptr_t obj = nextPtr();
1858     *p = (u4)(uintx(obj));
1859   }
1860 
1861   void do_tag(int tag) {
1862     int old_tag;
1863     old_tag = (int)(intptr_t)nextPtr();
1864     // do_int(&old_tag);
1865     assert(tag == old_tag, "old tag doesn't match");
1866     FileMapInfo::assert_mark(tag == old_tag);
1867   }
1868 
1869   void do_oop(oop *p) {
1870     narrowOop o = (narrowOop)nextPtr();
1871     if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) {
1872       p = NULL;
1873     } else {
1874       assert(HeapShared::is_heap_object_archiving_allowed(),
1875              "Archived heap object is not allowed");
1876       assert(HeapShared::open_archive_heap_region_mapped(),
1877              "Open archive heap region is not mapped");
1878       *p = HeapShared::decode_from_archive(o);
1879     }
1880   }
1881 
1882   void do_region(u_char* start, size_t size) {
1883     assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
1884     assert(size % sizeof(intptr_t) == 0, "bad size");
1885     do_tag((int)size);
1886     while (size > 0) {
1887       *(intptr_t*)start = nextPtr();
1888       start += sizeof(intptr_t);
1889       size -= sizeof(intptr_t);
1890     }
1891   }
1892 
1893   bool reading() const { return true; }
1894 };
1895 
1896 // Return true if given address is in the misc data region


< prev index next >