22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "jvm.h"
27 #include "classfile/classListParser.hpp"
28 #include "classfile/classLoaderExt.hpp"
29 #include "classfile/dictionary.hpp"
30 #include "classfile/loaderConstraints.hpp"
31 #include "classfile/placeholders.hpp"
32 #include "classfile/symbolTable.hpp"
33 #include "classfile/stringTable.hpp"
34 #include "classfile/systemDictionary.hpp"
35 #include "classfile/systemDictionaryShared.hpp"
36 #include "code/codeCache.hpp"
37 #include "interpreter/bytecodeStream.hpp"
38 #include "interpreter/bytecodes.hpp"
39 #include "logging/log.hpp"
40 #include "logging/logMessage.hpp"
41 #include "memory/filemap.hpp"
42 #include "memory/heapShared.hpp"
43 #include "memory/metaspace.hpp"
44 #include "memory/metaspaceClosure.hpp"
45 #include "memory/metaspaceShared.hpp"
46 #include "memory/resourceArea.hpp"
47 #include "oops/compressedOops.inline.hpp"
48 #include "oops/instanceClassLoaderKlass.hpp"
49 #include "oops/instanceMirrorKlass.hpp"
50 #include "oops/instanceRefKlass.hpp"
51 #include "oops/objArrayKlass.hpp"
52 #include "oops/objArrayOop.hpp"
53 #include "oops/oop.inline.hpp"
54 #include "oops/typeArrayKlass.hpp"
55 #include "prims/jvmtiRedefineClasses.hpp"
56 #include "runtime/handles.inline.hpp"
57 #include "runtime/os.hpp"
58 #include "runtime/safepointVerifiers.hpp"
59 #include "runtime/signature.hpp"
60 #include "runtime/timerTrace.hpp"
61 #include "runtime/vmThread.hpp"
62 #include "runtime/vmOperations.hpp"
63 #include "utilities/align.hpp"
64 #include "utilities/defaultStream.hpp"
65 #include "utilities/hashtable.inline.hpp"
66 #if INCLUDE_G1GC
67 #include "gc/g1/g1Allocator.inline.hpp"
68 #include "gc/g1/g1CollectedHeap.hpp"
69 #endif
70
71 ReservedSpace MetaspaceShared::_shared_rs;
72 VirtualSpace MetaspaceShared::_shared_vs;
73 MetaspaceSharedStats MetaspaceShared::_stats;
74 bool MetaspaceShared::_has_error_classes;
75 bool MetaspaceShared::_archive_loading_failed = false;
76 bool MetaspaceShared::_remapped_readwrite = false;
77 bool MetaspaceShared::_open_archive_heap_region_mapped = false;
78 address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
79 size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
80 size_t MetaspaceShared::_core_spaces_size = 0;
81
82 // The CDS archive is divided into the following regions:
83 // mc - misc code (the method entry trampolines)
210
// Returns the current top (first free byte) of the read-only ("ro") dump region.
char* MetaspaceShared::read_only_space_top() {
  return _ro_region.top();
}
214
// Runtime (UseSharedSpaces) setup: open and map the CDS archive, then position
// the compressed-class metaspace directly above it so narrow-klass encodings
// stay compatible with the dump-time layout.
void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
  assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");

  // If using shared space, open the file that contains the shared space
  // and map in the memory before initializing the rest of metaspace (so
  // the addresses don't conflict)
  address cds_address = NULL;
  FileMapInfo* mapinfo = new FileMapInfo();

  // Open the shared archive file, read and validate the header. If
  // initialization fails, shared spaces [UseSharedSpaces] are
  // disabled and the file is closed.
  // Map in spaces now also
  if (mapinfo->initialize() && map_shared_spaces(mapinfo)) {
    size_t cds_total = core_spaces_size();
    // Region 0 is the lowest-addressed core region of the mapped archive.
    cds_address = (address)mapinfo->header()->region_addr(0);
#ifdef _LP64
    if (Metaspace::using_class_space()) {
      char* cds_end = (char*)(cds_address + cds_total);
      cds_end = (char *)align_up(cds_end, Metaspace::reserve_alignment());
      // If UseCompressedClassPointers is set then allocate the metaspace area
      // above the heap and above the CDS area (if it exists).
      Metaspace::allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
      // map_heap_regions() compares the current narrow oop and klass encodings
      // with the archived ones, so it must be done after all encodings are determined.
      mapinfo->map_heap_regions();
    }
    Universe::set_narrow_klass_range(CompressedClassSpaceSize);
#endif // _LP64
  } else {
    // On failure, FileMapInfo is expected to have closed the file and turned
    // off UseSharedSpaces already; the VM then falls back to non-shared mode.
    assert(!mapinfo->is_open() && !UseSharedSpaces,
           "archive file not closed or shared spaces not disabled.");
  }
}
249
250 void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
292 // ArchiveCompactor will copy the class metadata into this space, first the RW parts,
293 // then the RO parts.
294
295 assert(UseCompressedOops && UseCompressedClassPointers,
296 "UseCompressedOops and UseCompressedClassPointers must be set");
297
298 size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment);
299 ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
300 CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment);
301 _shared_rs = _shared_rs.first_part(max_archive_size);
302
303 // Set up compress class pointers.
304 Universe::set_narrow_klass_base((address)_shared_rs.base());
305 // Set narrow_klass_shift to be LogKlassAlignmentInBytes. This is consistent
306 // with AOT.
307 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
308 // Set the range of klass addresses to 4GB.
309 Universe::set_narrow_klass_range(cds_total);
310
311 Metaspace::initialize_class_space(tmp_class_space);
312 tty->print_cr("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
313 p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
314
315 tty->print_cr("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
316 CompressedClassSpaceSize, p2i(tmp_class_space.base()));
317 #endif
318
319 // Start with 0 committed bytes. The memory will be committed as needed by
320 // MetaspaceShared::commit_shared_space_to().
321 if (!_shared_vs.initialize(_shared_rs, 0)) {
322 vm_exit_during_initialization("Unable to allocate memory for shared space");
323 }
324
325 _mc_region.init(&_shared_rs);
326 tty->print_cr("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
327 _shared_rs.size(), p2i(_shared_rs.base()));
328 }
329
330 // Called by universe_post_init()
331 void MetaspaceShared::post_initialize(TRAPS) {
332 if (UseSharedSpaces) {
333 int size = FileMapInfo::get_number_of_shared_paths();
334 if (size > 0) {
335 SystemDictionaryShared::allocate_shared_data_arrays(size, THREAD);
407 soc->do_tag(sizeof(ConstantPool));
408 soc->do_tag(sizeof(ConstantPoolCache));
409 soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
410 soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
411 soc->do_tag(sizeof(Symbol));
412
413 // Dump/restore miscellaneous metadata.
414 Universe::serialize(soc, true);
415 soc->do_tag(--tag);
416
417 // Dump/restore references to commonly used names and signatures.
418 vmSymbols::serialize(soc);
419 soc->do_tag(--tag);
420
421 // Dump/restore the symbol and string tables
422 SymbolTable::serialize(soc);
423 StringTable::serialize(soc);
424 soc->do_tag(--tag);
425
426 JavaClasses::serialize_offsets(soc);
427 soc->do_tag(--tag);
428
429 soc->do_tag(666);
430 }
431
432 address MetaspaceShared::cds_i2i_entry_code_buffers(size_t total_size) {
433 if (DumpSharedSpaces) {
434 if (_cds_i2i_entry_code_buffers == NULL) {
435 _cds_i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
436 _cds_i2i_entry_code_buffers_size = total_size;
437 }
438 } else if (UseSharedSpaces) {
439 assert(_cds_i2i_entry_code_buffers != NULL, "must already been initialized");
440 } else {
441 return NULL;
442 }
443
444 assert(_cds_i2i_entry_code_buffers_size == total_size, "must not change");
445 return _cds_i2i_entry_code_buffers;
446 }
1001
1002 msg.info("%s", sep);
1003 msg.info(fmt_stats, "Total",
1004 all_ro_count, all_ro_bytes, all_ro_perc,
1005 all_rw_count, all_rw_bytes, all_rw_perc,
1006 all_count, all_bytes, all_perc);
1007
1008 assert(all_ro_bytes == ro_all, "everything should have been counted");
1009 assert(all_rw_bytes == rw_all, "everything should have been counted");
1010
1011 #undef fmt_stats
1012 }
1013
1014 // Populate the shared space.
1015
// VM operation that performs the actual CDS dump: copies class metadata and
// (optionally) java heap objects into the dump regions and writes the archive.
class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  // Archived java heap regions: "closed" and "open" archive heap region sets.
  GrowableArray<MemRegion> *_closed_archive_heap_regions;
  GrowableArray<MemRegion> *_open_archive_heap_regions;

  void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_symbols();
  char* dump_read_only_tables();
  void print_region_stats();
  void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                               const char *name, const size_t total_size);
public:

  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit();   // outline because gdb sucks
  static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only, bool allow_exec);
  // Dumping may trigger nested VM operations (e.g. verification passes).
  bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace
1034
1035 class SortedSymbolClosure: public SymbolClosure {
1036 GrowableArray<Symbol*> _symbols;
1037 virtual void do_symbol(Symbol** sym) {
1038 assert((*sym)->is_permanent(), "archived symbols must be permanent");
1039 _symbols.append(*sym);
1040 }
1041 static int compare_symbols_by_address(Symbol** a, Symbol** b) {
1313 tty->print_cr("done. ");
1314 NOT_PRODUCT(SystemDictionary::verify();)
1315
1316 size_t buckets_bytes = SystemDictionary::count_bytes_for_buckets();
1317 char* buckets_top = _ro_region.allocate(buckets_bytes, sizeof(intptr_t));
1318 SystemDictionary::copy_buckets(buckets_top, _ro_region.top());
1319
1320 size_t table_bytes = SystemDictionary::count_bytes_for_table();
1321 char* table_top = _ro_region.allocate(table_bytes, sizeof(intptr_t));
1322 SystemDictionary::copy_table(table_top, _ro_region.top());
1323
1324 // Write the archived object sub-graph infos. For each klass with sub-graphs,
1325 // the info includes the static fields (sub-graph entry points) and Klasses
1326 // of objects included in the sub-graph.
1327 HeapShared::write_archived_subgraph_infos();
1328
1329 // Write the other data to the output array.
1330 WriteClosure wc(&_ro_region);
1331 MetaspaceShared::serialize(&wc);
1332
1333 char* newtop = _ro_region.top();
1334 ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - oldtop), true);
1335 return buckets_top;
1336 }
1337
1338 void VM_PopulateDumpSharedSpace::doit() {
1339 Thread* THREAD = VMThread::vm_thread();
1340
1341 FileMapInfo::check_nonempty_dir_in_shared_path_table();
1342
1343 NOT_PRODUCT(SystemDictionary::verify();)
1344 // The following guarantee is meant to ensure that no loader constraints
1345 // exist yet, since the constraints table is not shared. This becomes
1346 // more important now that we don't re-initialize vtables/itables for
1347 // shared classes at runtime, where constraints were previously created.
1348 guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
1349 "loader constraints are not saved");
1350 guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
1351 "placeholders are not saved");
1352 // Revisit and implement this if we prelink method handle call sites:
1455 // does is to update the fields in the mapinfo->_header.
1456 } else {
1457 // After the first pass, the contents of mapinfo->_header are finalized,
1458 // so we can compute the header's CRC, and write the contents of the header
1459 // and the regions into disk.
1460 mapinfo->open_for_write();
1461 mapinfo->set_header_crc(mapinfo->compute_header_crc());
1462 }
1463 mapinfo->write_header();
1464
1465 // NOTE: md contains the trampoline code for method entries, which are patched at run time,
1466 // so it needs to be read/write.
1467 write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
1468 write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
1469 write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
1470 write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
1471 write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);
1472
1473 _total_string_region_size = mapinfo->write_archive_heap_regions(
1474 _closed_archive_heap_regions,
1475 MetaspaceShared::first_string,
1476 MetaspaceShared::max_strings);
1477 _total_open_archive_region_size = mapinfo->write_archive_heap_regions(
1478 _open_archive_heap_regions,
1479 MetaspaceShared::first_open_archive_heap_region,
1480 MetaspaceShared::max_open_archive_heap_region);
1481 }
1482
1483 mapinfo->close();
1484
1485 // Restore the vtable in case we invoke any virtual methods.
1486 MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);
1487
1488 print_region_stats();
1489
1490 if (log_is_enabled(Info, cds)) {
1491 ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
1492 int(_mc_region.used()), int(_md_region.used()));
1493 }
1494
1495 if (PrintSystemDictionaryAtExit) {
1496 SystemDictionary::print();
1497 }
1498 // There may be other pending VM operations that operate on the InstanceKlasses,
1794
1795 // Cache for recording where the archived objects are copied to
1796 MetaspaceShared::create_archive_object_cache();
1797
1798 tty->print_cr("Dumping objects to closed archive heap region ...");
1799 NOT_PRODUCT(StringTable::verify());
1800 // The closed space has maximum two regions. See FileMapInfo::write_archive_heap_regions() for details.
1801 _closed_archive_heap_regions = new GrowableArray<MemRegion>(2);
1802 MetaspaceShared::dump_closed_archive_heap_objects(_closed_archive_heap_regions);
1803
1804 tty->print_cr("Dumping objects to open archive heap region ...");
1805 _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
1806 MetaspaceShared::dump_open_archive_heap_objects(_open_archive_heap_regions);
1807
1808 MetaspaceShared::destroy_archive_object_cache();
1809 }
1810
1811 G1HeapVerifier::verify_archive_regions();
1812 }
1813
// Copies java heap objects (currently the interned strings) into the "closed"
// archive heap region(s) using G1's archive allocation range protocol:
// begin_archive_alloc_range .. end_archive_alloc_range brackets the copying.
void MetaspaceShared::dump_closed_archive_heap_objects(
                                    GrowableArray<MemRegion> * closed_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot dump java heap objects");

  // NOTE(review): THREAD appears unused in the visible body — confirm whether
  // a macro below expands to use it before removing.
  Thread* THREAD = Thread::current();
  G1CollectedHeap::heap()->begin_archive_alloc_range();

  // Archive interned string objects
  StringTable::write_to_archive();

  // Regions are reported aligned to the OS allocation granularity, presumably
  // so they can be mapped directly at runtime — see write_archive_heap_regions.
  G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive,
                                                   os::vm_allocation_granularity());
}
1827
1828 void MetaspaceShared::dump_open_archive_heap_objects(
1829 GrowableArray<MemRegion> * open_archive) {
1830 assert(UseG1GC, "Only support G1 GC");
1831 assert(UseCompressedOops && UseCompressedClassPointers,
1832 "Only support UseCompressedOops and UseCompressedClassPointers enabled");
1833
1880 }
1881
1882 int hash = obj->identity_hash();
1883 oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
1884 if (archived_oop != NULL) {
1885 Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)archived_oop, len);
1886 relocate_klass_ptr(archived_oop);
1887 ArchivedObjectCache* cache = MetaspaceShared::archive_object_cache();
1888 cache->put(obj, archived_oop);
1889 log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT,
1890 p2i(obj), p2i(archived_oop));
1891 } else {
1892 log_error(cds, heap)(
1893 "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
1894 p2i(obj));
1895 vm_exit(1);
1896 }
1897 return archived_oop;
1898 }
1899
1900 oop MetaspaceShared::materialize_archived_object(oop obj) {
1901 if (obj != NULL) {
1902 return G1CollectedHeap::heap()->materialize_archived_object(obj);
1903 }
1904 return NULL;
1905 }
1906
1907 void MetaspaceShared::archive_klass_objects(Thread* THREAD) {
1908 int i;
1909 for (i = 0; i < _global_klass_objects->length(); i++) {
1910 Klass* k = _global_klass_objects->at(i);
1911
1912 // archive mirror object
1913 java_lang_Class::archive_mirror(k, CHECK);
1914
1915 // archive the resolved_referenes array
1916 if (k->is_instance_klass()) {
1917 InstanceKlass* ik = InstanceKlass::cast(k);
1918 ik->constants()->archive_resolved_references(THREAD);
1919 }
1920 }
1921 }
1957 *p = (u4)(uintx(obj));
1958 }
1959
// Consumes one word from the stream and checks it against the expected tag,
// catching dump/run layout or ordering skew early.
void do_tag(int tag) {
  int old_tag;
  old_tag = (int)(intptr_t)nextPtr();
  // do_int(&old_tag);
  assert(tag == old_tag, "old tag doesn't match");
  // assert_mark presumably reports the mismatch even when plain asserts are
  // compiled out — see FileMapInfo::assert_mark.
  FileMapInfo::assert_mark(tag == old_tag);
}
1967
1968 void do_oop(oop *p) {
1969 narrowOop o = (narrowOop)nextPtr();
1970 if (o == 0 || !MetaspaceShared::open_archive_heap_region_mapped()) {
1971 p = NULL;
1972 } else {
1973 assert(MetaspaceShared::is_heap_object_archiving_allowed(),
1974 "Archived heap object is not allowed");
1975 assert(MetaspaceShared::open_archive_heap_region_mapped(),
1976 "Open archive heap region is not mapped");
1977 *p = CompressedOops::decode_not_null(o);
1978 }
1979 }
1980
1981 void do_region(u_char* start, size_t size) {
1982 assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
1983 assert(size % sizeof(intptr_t) == 0, "bad size");
1984 do_tag((int)size);
1985 while (size > 0) {
1986 *(intptr_t*)start = nextPtr();
1987 start += sizeof(intptr_t);
1988 size -= sizeof(intptr_t);
1989 }
1990 }
1991
1992 bool reading() const { return true; }
1993 };
1994
// Returns true if p falls within the shared region identified by idx
// (e.g. MetaspaceShared::mc/rw/ro/md/od). Always false when CDS is off.
bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {
  return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx);
}
1999
2000 bool MetaspaceShared::is_in_trampoline_frame(address addr) {
2001 if (UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc)) {
2002 return true;
2003 }
2004 return false;
2005 }
2006
// Prints information about the mapped shared regions; no-op when CDS is off.
void MetaspaceShared::print_shared_spaces() {
  if (UseSharedSpaces) {
    FileMapInfo::current_info()->print_shared_spaces();
  }
}
2012
2013
2014 // Map shared spaces at requested addresses and return if succeeded.
2015 bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
2016 size_t image_alignment = mapinfo->alignment();
2017
2018 #ifndef _WINDOWS
2019 // Map in the shared memory and then map the regions on top of it.
2020 // On Windows, don't map the memory here because it will cause the
2021 // mappings of the regions to fail.
2022 ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
2023 if (!shared_rs.is_reserved()) return false;
2024 #endif
2025
2026 assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
2027
2028 char* ro_base = NULL; char* ro_top;
2029 char* rw_base = NULL; char* rw_top;
2030 char* mc_base = NULL; char* mc_top;
2031 char* md_base = NULL; char* md_top;
2032 char* od_base = NULL; char* od_top;
2033
2102 buffer += sharedDictionaryLen;
2103
2104 // The following data are the linked list elements
2105 // (HashtableEntry objects) for the shared dictionary table.
2106
2107 int len = *(intptr_t*)buffer; // skip over shared dictionary entries
2108 buffer += sizeof(intptr_t);
2109 buffer += len;
2110
2111 // The table of archived java heap object sub-graph infos
2112 buffer = HeapShared::read_archived_subgraph_infos(buffer);
2113
2114 // Verify various attributes of the archive, plus initialize the
2115 // shared string/symbol tables
2116 intptr_t* array = (intptr_t*)buffer;
2117 ReadClosure rc(&array);
2118 serialize(&rc);
2119
2120 // Initialize the run-time symbol table.
2121 SymbolTable::create_table();
2122
2123 // Close the mapinfo file
2124 mapinfo->close();
2125
2126 if (PrintSharedArchiveAndExit) {
2127 if (PrintSharedDictionary) {
2128 tty->print_cr("\nShared classes:\n");
2129 SystemDictionary::print_shared(tty);
2130 }
2131 if (_archive_loading_failed) {
2132 tty->print_cr("archive is invalid");
2133 vm_exit(1);
2134 } else {
2135 tty->print_cr("archive is valid");
2136 vm_exit(0);
2137 }
2138 }
2139 }
2140
2141 // JVM/TI RedefineClasses() support:
|
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "jvm.h"
27 #include "classfile/classListParser.hpp"
28 #include "classfile/classLoaderExt.hpp"
29 #include "classfile/dictionary.hpp"
30 #include "classfile/loaderConstraints.hpp"
31 #include "classfile/placeholders.hpp"
32 #include "classfile/symbolTable.hpp"
33 #include "classfile/stringTable.hpp"
34 #include "classfile/systemDictionary.hpp"
35 #include "classfile/systemDictionaryShared.hpp"
36 #include "code/codeCache.hpp"
37 #include "interpreter/bytecodeStream.hpp"
38 #include "interpreter/bytecodes.hpp"
39 #include "logging/log.hpp"
40 #include "logging/logMessage.hpp"
41 #include "memory/filemap.hpp"
42 #include "memory/heapShared.inline.hpp"
43 #include "memory/metaspace.hpp"
44 #include "memory/metaspaceClosure.hpp"
45 #include "memory/metaspaceShared.hpp"
46 #include "memory/resourceArea.hpp"
47 #include "oops/compressedOops.inline.hpp"
48 #include "oops/instanceClassLoaderKlass.hpp"
49 #include "oops/instanceMirrorKlass.hpp"
50 #include "oops/instanceRefKlass.hpp"
51 #include "oops/objArrayKlass.hpp"
52 #include "oops/objArrayOop.hpp"
53 #include "oops/oop.inline.hpp"
54 #include "oops/typeArrayKlass.hpp"
55 #include "prims/jvmtiRedefineClasses.hpp"
56 #include "runtime/handles.inline.hpp"
57 #include "runtime/os.hpp"
58 #include "runtime/safepointVerifiers.hpp"
59 #include "runtime/signature.hpp"
60 #include "runtime/timerTrace.hpp"
61 #include "runtime/vmThread.hpp"
62 #include "runtime/vmOperations.hpp"
63 #include "utilities/align.hpp"
64 #include "utilities/bitMap.hpp"
65 #include "utilities/defaultStream.hpp"
66 #include "utilities/hashtable.inline.hpp"
67 #if INCLUDE_G1GC
68 #include "gc/g1/g1Allocator.inline.hpp"
69 #include "gc/g1/g1CollectedHeap.hpp"
70 #endif
71
72 ReservedSpace MetaspaceShared::_shared_rs;
73 VirtualSpace MetaspaceShared::_shared_vs;
74 MetaspaceSharedStats MetaspaceShared::_stats;
75 bool MetaspaceShared::_has_error_classes;
76 bool MetaspaceShared::_archive_loading_failed = false;
77 bool MetaspaceShared::_remapped_readwrite = false;
78 bool MetaspaceShared::_open_archive_heap_region_mapped = false;
79 address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
80 size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
81 size_t MetaspaceShared::_core_spaces_size = 0;
82
83 // The CDS archive is divided into the following regions:
84 // mc - misc code (the method entry trampolines)
211
// Returns the current top (first free byte) of the read-only ("ro") dump region.
char* MetaspaceShared::read_only_space_top() {
  return _ro_region.top();
}
215
// Runtime (UseSharedSpaces) setup: open and map the CDS archive, then position
// the compressed-class metaspace directly above it so narrow-klass encodings
// stay compatible with the dump-time layout.
void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
  assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");

  // If using shared space, open the file that contains the shared space
  // and map in the memory before initializing the rest of metaspace (so
  // the addresses don't conflict)
  address cds_address = NULL;
  FileMapInfo* mapinfo = new FileMapInfo();

  // Open the shared archive file, read and validate the header. If
  // initialization fails, shared spaces [UseSharedSpaces] are
  // disabled and the file is closed.
  // Map in spaces now also
  if (mapinfo->initialize() && map_shared_spaces(mapinfo)) {
    size_t cds_total = core_spaces_size();
    // Region 0 is the lowest-addressed core region of the mapped archive.
    cds_address = (address)mapinfo->region_addr(0);
#ifdef _LP64
    if (Metaspace::using_class_space()) {
      char* cds_end = (char*)(cds_address + cds_total);
      cds_end = (char *)align_up(cds_end, Metaspace::reserve_alignment());
      // If UseCompressedClassPointers is set then allocate the metaspace area
      // above the heap and above the CDS area (if it exists).
      Metaspace::allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
      // map_heap_regions() compares the current narrow oop and klass encodings
      // with the archived ones, so it must be done after all encodings are determined.
      mapinfo->map_heap_regions();
    }
    Universe::set_narrow_klass_range(CompressedClassSpaceSize);
#endif // _LP64
  } else {
    // On failure, FileMapInfo is expected to have closed the file and turned
    // off UseSharedSpaces already; the VM then falls back to non-shared mode.
    assert(!mapinfo->is_open() && !UseSharedSpaces,
           "archive file not closed or shared spaces not disabled.");
  }
}
250
251 void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
293 // ArchiveCompactor will copy the class metadata into this space, first the RW parts,
294 // then the RO parts.
295
296 assert(UseCompressedOops && UseCompressedClassPointers,
297 "UseCompressedOops and UseCompressedClassPointers must be set");
298
299 size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment);
300 ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
301 CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment);
302 _shared_rs = _shared_rs.first_part(max_archive_size);
303
304 // Set up compress class pointers.
305 Universe::set_narrow_klass_base((address)_shared_rs.base());
306 // Set narrow_klass_shift to be LogKlassAlignmentInBytes. This is consistent
307 // with AOT.
308 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
309 // Set the range of klass addresses to 4GB.
310 Universe::set_narrow_klass_range(cds_total);
311
312 Metaspace::initialize_class_space(tmp_class_space);
313 log_info(cds)("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
314 p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
315
316 log_info(cds)("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
317 CompressedClassSpaceSize, p2i(tmp_class_space.base()));
318 #endif
319
320 // Start with 0 committed bytes. The memory will be committed as needed by
321 // MetaspaceShared::commit_shared_space_to().
322 if (!_shared_vs.initialize(_shared_rs, 0)) {
323 vm_exit_during_initialization("Unable to allocate memory for shared space");
324 }
325
326 _mc_region.init(&_shared_rs);
327 tty->print_cr("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
328 _shared_rs.size(), p2i(_shared_rs.base()));
329 }
330
331 // Called by universe_post_init()
332 void MetaspaceShared::post_initialize(TRAPS) {
333 if (UseSharedSpaces) {
334 int size = FileMapInfo::get_number_of_shared_paths();
335 if (size > 0) {
336 SystemDictionaryShared::allocate_shared_data_arrays(size, THREAD);
408 soc->do_tag(sizeof(ConstantPool));
409 soc->do_tag(sizeof(ConstantPoolCache));
410 soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
411 soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
412 soc->do_tag(sizeof(Symbol));
413
414 // Dump/restore miscellaneous metadata.
415 Universe::serialize(soc, true);
416 soc->do_tag(--tag);
417
418 // Dump/restore references to commonly used names and signatures.
419 vmSymbols::serialize(soc);
420 soc->do_tag(--tag);
421
422 // Dump/restore the symbol and string tables
423 SymbolTable::serialize(soc);
424 StringTable::serialize(soc);
425 soc->do_tag(--tag);
426
427 JavaClasses::serialize_offsets(soc);
428 InstanceMirrorKlass::serialize_offsets(soc);
429 soc->do_tag(--tag);
430
431 soc->do_tag(666);
432 }
433
// Returns the buffer holding the CDS i2i entry trampolines.
// While dumping, the buffer is lazily carved out of the misc-code space; at
// runtime with a mapped archive it is expected to have been initialized
// already. Returns NULL when neither dumping nor sharing is active.
// total_size may not change between calls.
address MetaspaceShared::cds_i2i_entry_code_buffers(size_t total_size) {
  if (DumpSharedSpaces) {
    if (_cds_i2i_entry_code_buffers == NULL) {
      // First request during dumping: allocate from the misc-code region.
      _cds_i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
      _cds_i2i_entry_code_buffers_size = total_size;
    }
  } else if (UseSharedSpaces) {
    assert(_cds_i2i_entry_code_buffers != NULL, "must already been initialized");
  } else {
    return NULL;
  }

  assert(_cds_i2i_entry_code_buffers_size == total_size, "must not change");
  return _cds_i2i_entry_code_buffers;
}
1003
1004 msg.info("%s", sep);
1005 msg.info(fmt_stats, "Total",
1006 all_ro_count, all_ro_bytes, all_ro_perc,
1007 all_rw_count, all_rw_bytes, all_rw_perc,
1008 all_count, all_bytes, all_perc);
1009
1010 assert(all_ro_bytes == ro_all, "everything should have been counted");
1011 assert(all_rw_bytes == rw_all, "everything should have been counted");
1012
1013 #undef fmt_stats
1014 }
1015
1016 // Populate the shared space.
1017
// VM operation that performs the actual CDS dump: copies class metadata and
// (optionally) java heap objects into the dump regions and writes the archive.
class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  // Archived java heap regions: "closed" and "open" archive heap region sets.
  GrowableArray<MemRegion> *_closed_archive_heap_regions;
  GrowableArray<MemRegion> *_open_archive_heap_regions;

  // Oopmaps (bitmaps of embedded oop locations) for each heap region set,
  // written alongside the regions so the oops can be patched at load time.
  GrowableArray<ArchiveHeapOopmapInfo> *_closed_archive_heap_oopmaps;
  GrowableArray<ArchiveHeapOopmapInfo> *_open_archive_heap_oopmaps;

  void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                 GrowableArray<ArchiveHeapOopmapInfo>* oopmaps);
  void dump_symbols();
  char* dump_read_only_tables();
  void print_region_stats();
  void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                               const char *name, const size_t total_size);
public:

  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit();   // outline because gdb sucks
  static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only, bool allow_exec);
  // Dumping may trigger nested VM operations (e.g. verification passes).
  bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace
1042
1043 class SortedSymbolClosure: public SymbolClosure {
1044 GrowableArray<Symbol*> _symbols;
1045 virtual void do_symbol(Symbol** sym) {
1046 assert((*sym)->is_permanent(), "archived symbols must be permanent");
1047 _symbols.append(*sym);
1048 }
1049 static int compare_symbols_by_address(Symbol** a, Symbol** b) {
1321 tty->print_cr("done. ");
1322 NOT_PRODUCT(SystemDictionary::verify();)
1323
1324 size_t buckets_bytes = SystemDictionary::count_bytes_for_buckets();
1325 char* buckets_top = _ro_region.allocate(buckets_bytes, sizeof(intptr_t));
1326 SystemDictionary::copy_buckets(buckets_top, _ro_region.top());
1327
1328 size_t table_bytes = SystemDictionary::count_bytes_for_table();
1329 char* table_top = _ro_region.allocate(table_bytes, sizeof(intptr_t));
1330 SystemDictionary::copy_table(table_top, _ro_region.top());
1331
1332 // Write the archived object sub-graph infos. For each klass with sub-graphs,
1333 // the info includes the static fields (sub-graph entry points) and Klasses
1334 // of objects included in the sub-graph.
1335 HeapShared::write_archived_subgraph_infos();
1336
1337 // Write the other data to the output array.
1338 WriteClosure wc(&_ro_region);
1339 MetaspaceShared::serialize(&wc);
1340
1341 // Write the bitmaps for patching the archive heap regions
1342 dump_archive_heap_oopmaps();
1343
1344 char* newtop = _ro_region.top();
1345 ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - oldtop), true);
1346 return buckets_top;
1347 }
1348
1349 void VM_PopulateDumpSharedSpace::doit() {
1350 Thread* THREAD = VMThread::vm_thread();
1351
1352 FileMapInfo::check_nonempty_dir_in_shared_path_table();
1353
1354 NOT_PRODUCT(SystemDictionary::verify();)
1355 // The following guarantee is meant to ensure that no loader constraints
1356 // exist yet, since the constraints table is not shared. This becomes
1357 // more important now that we don't re-initialize vtables/itables for
1358 // shared classes at runtime, where constraints were previously created.
1359 guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
1360 "loader constraints are not saved");
1361 guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
1362 "placeholders are not saved");
1363 // Revisit and implement this if we prelink method handle call sites:
1466 // does is to update the fields in the mapinfo->_header.
1467 } else {
1468 // After the first pass, the contents of mapinfo->_header are finalized,
1469 // so we can compute the header's CRC, and write the contents of the header
1470 // and the regions into disk.
1471 mapinfo->open_for_write();
1472 mapinfo->set_header_crc(mapinfo->compute_header_crc());
1473 }
1474 mapinfo->write_header();
1475
1476 // NOTE: md contains the trampoline code for method entries, which are patched at run time,
1477 // so it needs to be read/write.
1478 write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
1479 write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
1480 write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
1481 write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
1482 write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);
1483
1484 _total_string_region_size = mapinfo->write_archive_heap_regions(
1485 _closed_archive_heap_regions,
1486 _closed_archive_heap_oopmaps,
1487 MetaspaceShared::first_string,
1488 MetaspaceShared::max_strings);
1489 _total_open_archive_region_size = mapinfo->write_archive_heap_regions(
1490 _open_archive_heap_regions,
1491 _open_archive_heap_oopmaps,
1492 MetaspaceShared::first_open_archive_heap_region,
1493 MetaspaceShared::max_open_archive_heap_region);
1494 }
1495
1496 mapinfo->close();
1497
1498 // Restore the vtable in case we invoke any virtual methods.
1499 MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);
1500
1501 print_region_stats();
1502
1503 if (log_is_enabled(Info, cds)) {
1504 ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
1505 int(_mc_region.used()), int(_md_region.used()));
1506 }
1507
1508 if (PrintSystemDictionaryAtExit) {
1509 SystemDictionary::print();
1510 }
1511 // There may be other pending VM operations that operate on the InstanceKlasses,
1807
1808 // Cache for recording where the archived objects are copied to
1809 MetaspaceShared::create_archive_object_cache();
1810
1811 tty->print_cr("Dumping objects to closed archive heap region ...");
1812 NOT_PRODUCT(StringTable::verify());
1813 // The closed space has maximum two regions. See FileMapInfo::write_archive_heap_regions() for details.
1814 _closed_archive_heap_regions = new GrowableArray<MemRegion>(2);
1815 MetaspaceShared::dump_closed_archive_heap_objects(_closed_archive_heap_regions);
1816
1817 tty->print_cr("Dumping objects to open archive heap region ...");
1818 _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
1819 MetaspaceShared::dump_open_archive_heap_objects(_open_archive_heap_regions);
1820
1821 MetaspaceShared::destroy_archive_object_cache();
1822 }
1823
1824 G1HeapVerifier::verify_archive_regions();
1825 }
1826
1827 void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps() {
1828 if (MetaspaceShared::is_heap_object_archiving_allowed()) {
1829 _closed_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
1830 dump_archive_heap_oopmaps(_closed_archive_heap_regions, _closed_archive_heap_oopmaps);
1831
1832 _open_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
1833 dump_archive_heap_oopmaps(_open_archive_heap_regions, _open_archive_heap_oopmaps);
1834 }
1835 }
1836
1837 void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
1838 GrowableArray<ArchiveHeapOopmapInfo>* oopmaps) {
1839 for (int i=0; i<regions->length(); i++) {
1840 ResourceBitMap oopmap = HeapShared::calculate_oopmap(regions->at(i));
1841 size_t size_in_bits = oopmap.size();
1842 size_t size_in_bytes = oopmap.size_in_bytes();
1843 uintptr_t* buffer = (uintptr_t*)_ro_region.allocate(size_in_bytes, sizeof(intptr_t));
1844 oopmap.write_to(buffer, size_in_bytes);
1845 log_info(cds)("Oopmap = " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for heap region "
1846 INTPTR_FORMAT " (" SIZE_FORMAT_W(8) " bytes)",
1847 p2i(buffer), size_in_bytes,
1848 p2i(regions->at(i).start()), regions->at(i).byte_size());
1849
1850 ArchiveHeapOopmapInfo info;
1851 info._oopmap = (address)buffer;
1852 info._oopmap_size_in_bits = size_in_bits;
1853 oopmaps->append(info);
1854 }
1855 }
1856
1857 void MetaspaceShared::dump_closed_archive_heap_objects(
1858 GrowableArray<MemRegion> * closed_archive) {
1859 assert(is_heap_object_archiving_allowed(), "Cannot dump java heap objects");
1860
1861 Thread* THREAD = Thread::current();
1862 G1CollectedHeap::heap()->begin_archive_alloc_range();
1863
1864 // Archive interned string objects
1865 StringTable::write_to_archive();
1866
1867 G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive,
1868 os::vm_allocation_granularity());
1869 }
1870
1871 void MetaspaceShared::dump_open_archive_heap_objects(
1872 GrowableArray<MemRegion> * open_archive) {
1873 assert(UseG1GC, "Only support G1 GC");
1874 assert(UseCompressedOops && UseCompressedClassPointers,
1875 "Only support UseCompressedOops and UseCompressedClassPointers enabled");
1876
1923 }
1924
1925 int hash = obj->identity_hash();
1926 oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
1927 if (archived_oop != NULL) {
1928 Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)archived_oop, len);
1929 relocate_klass_ptr(archived_oop);
1930 ArchivedObjectCache* cache = MetaspaceShared::archive_object_cache();
1931 cache->put(obj, archived_oop);
1932 log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT,
1933 p2i(obj), p2i(archived_oop));
1934 } else {
1935 log_error(cds, heap)(
1936 "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
1937 p2i(obj));
1938 vm_exit(1);
1939 }
1940 return archived_oop;
1941 }
1942
1943 oop MetaspaceShared::materialize_archived_object(narrowOop v) {
1944 if (!CompressedOops::is_null(v)) {
1945 oop obj = HeapShared::decode_with_archived_oop_encoding_mode(v);
1946 return G1CollectedHeap::heap()->materialize_archived_object(obj);
1947 }
1948 return NULL;
1949 }
1950
1951 void MetaspaceShared::archive_klass_objects(Thread* THREAD) {
1952 int i;
1953 for (i = 0; i < _global_klass_objects->length(); i++) {
1954 Klass* k = _global_klass_objects->at(i);
1955
1956 // archive mirror object
1957 java_lang_Class::archive_mirror(k, CHECK);
1958
1959 // archive the resolved_referenes array
1960 if (k->is_instance_klass()) {
1961 InstanceKlass* ik = InstanceKlass::cast(k);
1962 ik->constants()->archive_resolved_references(THREAD);
1963 }
1964 }
1965 }
2001 *p = (u4)(uintx(obj));
2002 }
2003
2004 void do_tag(int tag) {
2005 int old_tag;
2006 old_tag = (int)(intptr_t)nextPtr();
2007 // do_int(&old_tag);
2008 assert(tag == old_tag, "old tag doesn't match");
2009 FileMapInfo::assert_mark(tag == old_tag);
2010 }
2011
2012 void do_oop(oop *p) {
2013 narrowOop o = (narrowOop)nextPtr();
2014 if (o == 0 || !MetaspaceShared::open_archive_heap_region_mapped()) {
2015 p = NULL;
2016 } else {
2017 assert(MetaspaceShared::is_heap_object_archiving_allowed(),
2018 "Archived heap object is not allowed");
2019 assert(MetaspaceShared::open_archive_heap_region_mapped(),
2020 "Open archive heap region is not mapped");
2021 *p = HeapShared::decode_with_archived_oop_encoding_mode(o);
2022 }
2023 }
2024
2025 void do_region(u_char* start, size_t size) {
2026 assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
2027 assert(size % sizeof(intptr_t) == 0, "bad size");
2028 do_tag((int)size);
2029 while (size > 0) {
2030 *(intptr_t*)start = nextPtr();
2031 start += sizeof(intptr_t);
2032 size -= sizeof(intptr_t);
2033 }
2034 }
2035
  // This closure deserializes (reads) the stream; see do_region()/do_oop().
  bool reading() const { return true; }
2037 };
2038
// Return true if the given address is within the shared region identified by idx
2040 bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {
2041 return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx);
2042 }
2043
2044 bool MetaspaceShared::is_in_trampoline_frame(address addr) {
2045 if (UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc)) {
2046 return true;
2047 }
2048 return false;
2049 }
2050
2051 // Map shared spaces at requested addresses and return if succeeded.
2052 bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
2053 size_t image_alignment = mapinfo->alignment();
2054
2055 #ifndef _WINDOWS
2056 // Map in the shared memory and then map the regions on top of it.
2057 // On Windows, don't map the memory here because it will cause the
2058 // mappings of the regions to fail.
2059 ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
2060 if (!shared_rs.is_reserved()) return false;
2061 #endif
2062
2063 assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
2064
2065 char* ro_base = NULL; char* ro_top;
2066 char* rw_base = NULL; char* rw_top;
2067 char* mc_base = NULL; char* mc_top;
2068 char* md_base = NULL; char* md_top;
2069 char* od_base = NULL; char* od_top;
2070
2139 buffer += sharedDictionaryLen;
2140
2141 // The following data are the linked list elements
2142 // (HashtableEntry objects) for the shared dictionary table.
2143
2144 int len = *(intptr_t*)buffer; // skip over shared dictionary entries
2145 buffer += sizeof(intptr_t);
2146 buffer += len;
2147
2148 // The table of archived java heap object sub-graph infos
2149 buffer = HeapShared::read_archived_subgraph_infos(buffer);
2150
2151 // Verify various attributes of the archive, plus initialize the
2152 // shared string/symbol tables
2153 intptr_t* array = (intptr_t*)buffer;
2154 ReadClosure rc(&array);
2155 serialize(&rc);
2156
2157 // Initialize the run-time symbol table.
2158 SymbolTable::create_table();
2159
2160 mapinfo->patch_archived_heap_embedded_pointers();
2161
2162 // Close the mapinfo file
2163 mapinfo->close();
2164
2165 if (PrintSharedArchiveAndExit) {
2166 if (PrintSharedDictionary) {
2167 tty->print_cr("\nShared classes:\n");
2168 SystemDictionary::print_shared(tty);
2169 }
2170 if (_archive_loading_failed) {
2171 tty->print_cr("archive is invalid");
2172 vm_exit(1);
2173 } else {
2174 tty->print_cr("archive is valid");
2175 vm_exit(0);
2176 }
2177 }
2178 }
2179
2180 // JVM/TI RedefineClasses() support:
|