54 #include "runtime/timerTrace.hpp"
55 #include "runtime/os.hpp"
56 #include "runtime/signature.hpp"
57 #include "runtime/vmThread.hpp"
58 #include "runtime/vm_operations.hpp"
59 #include "utilities/align.hpp"
60 #include "utilities/defaultStream.hpp"
61 #include "utilities/hashtable.inline.hpp"
62 #include "memory/metaspaceClosure.hpp"
63
64 ReservedSpace MetaspaceShared::_shared_rs;
65 VirtualSpace MetaspaceShared::_shared_vs;
66 MetaspaceSharedStats MetaspaceShared::_stats;
67 bool MetaspaceShared::_has_error_classes;
68 bool MetaspaceShared::_archive_loading_failed = false;
69 bool MetaspaceShared::_remapped_readwrite = false;
70 address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
71 size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
72 size_t MetaspaceShared::_core_spaces_size = 0;
73
74 // The CDS archive is divided into 6 regions:
75 // mc - misc code (the method entry trampolines)
76 // rw - read-write metadata
77 // ro - read-only metadata and read-only tables
78 // md - misc data (the c++ vtables)
79 // od - other data (original class files)
80 // st - shared strings
81 //
82 // Except for the st region, the other 5 regions are linearly allocated, starting from
83 // SharedBaseAddress, in the order of mc->rw->ro->md->od. The size of these 5 regions
84 // are page-aligned, and there's no gap between any consecutive regions.
85 //
86 // These 5 regions are populated in the following steps:
87 // [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
88 // temporarily allocated outside of the shared regions. Only the method entry
89 // trampolines are written into the mc region.
90 // [2] ArchiveCompactor copies RW metadata into the rw region.
91 // [3] ArchiveCompactor copies RO metadata into the ro region.
92 // [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
93 // are copied into the ro region as read-only tables.
94 // [5] C++ vtables are copied into the md region.
95 // [6] Original class files are copied into the od region.
96 //
97 // The st region is populated inside MetaspaceShared::dump_string_and_symbols. Its
98 // layout is independent of the other 5 regions.
99
100 class DumpRegion {
101 private:
102 const char* _name;
103 char* _base;
104 char* _top;
105 char* _end;
106 bool _is_packed;
107
108 char* expand_top_to(char* newtop) {
109 assert(is_allocatable(), "must be initialized and not packed");
110 assert(newtop >= _top, "must not grow backwards");
111 if (newtop > _end) {
112 MetaspaceShared::report_out_of_space(_name, newtop - _top);
113 ShouldNotReachHere();
114 }
115 MetaspaceShared::commit_shared_space_to(newtop);
116 _top = newtop;
117 return _top;
118 }
119
120 public:
121 DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}
122
123 char* allocate(size_t num_bytes, size_t alignment=BytesPerWord) {
124 char* p = (char*)align_ptr_up(_top, alignment);
125 char* newtop = p + align_size_up(num_bytes, alignment);
126 expand_top_to(newtop);
127 memset(p, 0, newtop - p);
128 return p;
129 }
130
131 void append_intptr_t(intptr_t n) {
132 assert(is_ptr_aligned(_top, sizeof(intptr_t)), "bad alignment");
133 intptr_t *p = (intptr_t*)_top;
134 char* newtop = _top + sizeof(intptr_t);
135 expand_top_to(newtop);
136 *p = n;
137 }
138
139 char* base() const { return _base; }
140 char* top() const { return _top; }
141 char* end() const { return _end; }
142 size_t reserved() const { return _end - _base; }
143 size_t used() const { return _top - _base; }
144 bool is_packed() const { return _is_packed; }
145 bool is_allocatable() const {
146 return !is_packed() && _base != NULL;
147 }
148
149 double perc(size_t used, size_t total) const {
150 if (total == 0) {total = 1;}
151 return used / double(total) * 100.0;
152 }
153
154 void print(size_t total_bytes) const {
155 tty->print_cr("%s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
156 _name, used(), perc(used(), total_bytes), reserved(), perc(used(), reserved()), p2i(_base));
157 }
158 void print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
159 tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
160 _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
161 if (strcmp(_name, failing_region) == 0) {
162 tty->print_cr(" required = %d", int(needed_bytes));
163 } else {
164 tty->cr();
165 }
166 }
167
168 void init(const ReservedSpace* rs) {
169 _base = _top = rs->base();
170 _end = rs->end();
171 }
172 void init(char* b, char* t, char* e) {
173 _base = b;
174 _top = t;
175 _end = e;
176 }
177
178 void pack(DumpRegion* next = NULL) {
179 assert(!is_packed(), "sanity");
180 _end = (char*)align_ptr_up(_top, Metaspace::reserve_alignment());
181 _is_packed = true;
182 if (next != NULL) {
183 next->_base = next->_top = this->_end;
184 next->_end = MetaspaceShared::shared_rs()->end();
185 }
186 }
187 bool contains(char* p) {
188 return base() <= p && p < top();
189 }
190 };
191
192 DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md");
193 DumpRegion _st_region("st"), _od_region("od");
194
195 char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
196 return _mc_region.allocate(num_bytes);
197 }
198
199 char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
200 return _ro_region.allocate(num_bytes);
201 }
202
203 void MetaspaceShared::initialize_shared_rs() {
204 const size_t reserve_alignment = Metaspace::reserve_alignment();
205 bool large_pages = false; // No large pages when dumping the CDS archive.
206 char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, reserve_alignment);
207
208 #ifdef _LP64
209 const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
210 const size_t cds_total = align_size_down(UnscaledClassSpaceMax, reserve_alignment);
211 #else
212 // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
213 size_t cds_total = align_size_down(256*M, reserve_alignment);
214 #endif
215
216 // First try to reserve the space at the specified SharedBaseAddress.
217 _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
218 if (_shared_rs.is_reserved()) {
219 assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
220 } else {
221 // Get a mmap region anywhere if the SharedBaseAddress fails.
222 _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages);
223 }
224 if (!_shared_rs.is_reserved()) {
225 vm_exit_during_initialization("Unable to reserve memory for shared space",
226 err_msg(SIZE_FORMAT " bytes.", cds_total));
227 }
228
229 #ifdef _LP64
230 // During dump time, we allocate 4GB (UnscaledClassSpaceMax) of space and split it up:
231 // + The upper 1 GB is used as the "temporary compressed class space" -- preload_classes()
232 // will store Klasses into this space.
233 // + The lower 3 GB is used for the archive -- when preload_classes() is done,
234 // ArchiveCompactor will copy the class metadata into this space, first the RW parts,
235 // then the RO parts.
236
237 assert(UseCompressedOops && UseCompressedClassPointers,
238 "UseCompressedOops and UseCompressedClassPointers must be set");
239
240 size_t max_archive_size = align_size_down(cds_total * 3 / 4, reserve_alignment);
241 ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
242 CompressedClassSpaceSize = align_size_down(tmp_class_space.size(), reserve_alignment);
243 _shared_rs = _shared_rs.first_part(max_archive_size);
244
245 // Set up compress class pointers.
246 Universe::set_narrow_klass_base((address)_shared_rs.base());
247 if (UseAOT || cds_total > UnscaledClassSpaceMax) {
248 // AOT forces narrow_klass_shift=LogKlassAlignmentInBytes
249 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
250 } else {
251 Universe::set_narrow_klass_shift(0);
252 }
253
254 Metaspace::initialize_class_space(tmp_class_space);
255 tty->print_cr("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
256 p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
257
258 tty->print_cr("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
259 CompressedClassSpaceSize, p2i(tmp_class_space.base()));
260 #endif
261
262 // Start with 0 committed bytes. The memory will be committed as needed by
270 _shared_rs.size(), p2i(_shared_rs.base()));
271 }
272
273 void MetaspaceShared::commit_shared_space_to(char* newtop) {
274 assert(DumpSharedSpaces, "dump-time only");
275 char* base = _shared_rs.base();
276 size_t need_committed_size = newtop - base;
277 size_t has_committed_size = _shared_vs.committed_size();
278 if (need_committed_size < has_committed_size) {
279 return;
280 }
281
282 size_t min_bytes = need_committed_size - has_committed_size;
283 size_t preferred_bytes = 1 * M;
284 size_t uncommitted = _shared_vs.reserved_size() - has_committed_size;
285
286 size_t commit = MAX2(min_bytes, preferred_bytes);
287 assert(commit <= uncommitted, "sanity");
288
289 bool result = _shared_vs.expand_by(commit, false);
290 assert(result, "Failed to commit memory");
291
292 log_info(cds)("Expanding shared spaces by %7d bytes [total %8d bytes ending at %p]",
293 int(commit), int(_shared_vs.actual_committed_size()), _shared_vs.high());
294 }
295
296 // Read/write a data stream for restoring/preserving metadata pointers and
297 // miscellaneous data from/to the shared archive file.
298
299 void MetaspaceShared::serialize(SerializeClosure* soc) {
300 int tag = 0;
301 soc->do_tag(--tag);
302
303 // Verify the sizes of various metadata in the system.
304 soc->do_tag(sizeof(Method));
305 soc->do_tag(sizeof(ConstMethod));
306 soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
307 soc->do_tag(sizeof(ConstantPool));
308 soc->do_tag(sizeof(ConstantPoolCache));
309 soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
310 soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
311 soc->do_tag(sizeof(Symbol));
312
313 // Dump/restore miscellaneous metadata.
480 assert(DumpSharedSpaces, "dump-time only");
481 _info->zero();
482 }
483
484 // Switch the vtable pointer to point to the cloned vtable.
485 static void patch(Metadata* obj) {
486 assert(DumpSharedSpaces, "dump-time only");
487 *(void**)obj = (void*)(_info->cloned_vtable());
488 }
489
490 static bool is_valid_shared_object(const T* obj) {
491 intptr_t* vptr = *(intptr_t**)obj;
492 return vptr == _info->cloned_vtable();
493 }
494 };
495
496 template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;
497
498 template <class T>
499 intptr_t* CppVtableCloner<T>::allocate(const char* name) {
500 assert(is_ptr_aligned(_md_region.top(), sizeof(intptr_t)), "bad alignment");
501 int n = get_vtable_length(name);
502 _info = (CppVtableInfo*)_md_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
503 _info->set_vtable_size(n);
504
505 intptr_t* p = clone_vtable(name, _info);
506 assert((char*)p == _md_region.top(), "must be");
507
508 return p;
509 }
510
511 template <class T>
512 intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) {
513 if (!DumpSharedSpaces) {
514 assert(_info == 0, "_info is initialized only at dump time");
515 _info = info; // Remember it -- it will be used by MetaspaceShared::is_valid_shared_method()
516 }
517 T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
518 int n = info->vtable_size();
519 intptr_t* srcvtable = vtable_of(tmp);
520 intptr_t* dstvtable = info->cloned_vtable();
718 }
719
720 public:
721 enum { RO = 0, RW = 1 };
722
723 int _counts[2][_number_of_types];
724 int _bytes [2][_number_of_types];
725
726 DumpAllocStats() {
727 memset(_counts, 0, sizeof(_counts));
728 memset(_bytes, 0, sizeof(_bytes));
729 };
730
731 void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
732 assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
733 int which = (read_only) ? RO : RW;
734 _counts[which][type] ++;
735 _bytes [which][type] += byte_size;
736 }
737
738 void print_stats(int ro_all, int rw_all, int mc_all, int md_all);
739 };
740
741 void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all, int md_all) {
742 // Calculate size of data that was not allocated by Metaspace::allocate()
743 MetaspaceSharedStats *stats = MetaspaceShared::stats();
744
745 // symbols
746 _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
747 _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;
748
749 _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
750 _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;
751
752 // strings
753 _counts[RO][StringHashentryType] = stats->string.hashentry_count;
754 _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;
755
756 _counts[RO][StringBucketType] = stats->string.bucket_count;
757 _bytes [RO][StringBucketType] = stats->string.bucket_bytes;
765 ro_all = 1;
766 }
767 if (rw_all < 1) {
768 rw_all = 1;
769 }
770
771 int all_ro_count = 0;
772 int all_ro_bytes = 0;
773 int all_rw_count = 0;
774 int all_rw_bytes = 0;
775
776 // To make fmt_stats be a syntactic constant (for format warnings), use #define.
777 #define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
778 const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
779 const char *hdr = " ro_cnt ro_bytes % | rw_cnt rw_bytes % | all_cnt all_bytes %";
780
781 ResourceMark rm;
782 LogMessage(cds) msg;
783 stringStream info_stream;
784
785 info_stream.print_cr("Detailed metadata info (rw includes md and mc):");
786 info_stream.print_cr("%s", hdr);
787 info_stream.print_cr("%s", sep);
788 for (int type = 0; type < int(_number_of_types); type ++) {
789 const char *name = type_name((Type)type);
790 int ro_count = _counts[RO][type];
791 int ro_bytes = _bytes [RO][type];
792 int rw_count = _counts[RW][type];
793 int rw_bytes = _bytes [RW][type];
794 int count = ro_count + rw_count;
795 int bytes = ro_bytes + rw_bytes;
796
797 double ro_perc = 100.0 * double(ro_bytes) / double(ro_all);
798 double rw_perc = 100.0 * double(rw_bytes) / double(rw_all);
799 double perc = 100.0 * double(bytes) / double(ro_all + rw_all);
800
801 info_stream.print_cr(fmt_stats, name,
802 ro_count, ro_bytes, ro_perc,
803 rw_count, rw_bytes, rw_perc,
804 count, bytes, perc);
805
806 all_ro_count += ro_count;
807 all_ro_bytes += ro_bytes;
808 all_rw_count += rw_count;
809 all_rw_bytes += rw_bytes;
810 }
811
812 int all_count = all_ro_count + all_rw_count;
813 int all_bytes = all_ro_bytes + all_rw_bytes;
814
815 double all_ro_perc = 100.0 * double(all_ro_bytes) / double(ro_all);
816 double all_rw_perc = 100.0 * double(all_rw_bytes) / double(rw_all);
817 double all_perc = 100.0 * double(all_bytes) / double(ro_all + rw_all);
818
819 info_stream.print_cr("%s", sep);
820 info_stream.print_cr(fmt_stats, "Total",
821 all_ro_count, all_ro_bytes, all_ro_perc,
822 all_rw_count, all_rw_bytes, all_rw_perc,
823 all_count, all_bytes, all_perc);
824
825 //assert(all_ro_bytes == ro_all, "everything should have been counted");
826 assert(all_rw_bytes == rw_all, "everything should have been counted");
827
828 msg.info("%s", info_stream.as_string());
829 #undef fmt_stats
830 }
831
832 // Populate the shared space.
833
834 class VM_PopulateDumpSharedSpace: public VM_Operation {
835 private:
836 GrowableArray<MemRegion> *_string_regions;
837
838 void dump_string_and_symbols();
839 char* dump_read_only_tables();
840 public:
841
842 VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
843 void doit(); // outline because gdb sucks
844 static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only, bool allow_exec);
845 }; // class VM_PopulateDumpSharedSpace
846
847 class SortedSymbolClosure: public SymbolClosure {
848 GrowableArray<Symbol*> _symbols;
849 virtual void do_symbol(Symbol** sym) {
850 assert((*sym)->is_permanent(), "archived symbols must be permanent");
851 _symbols.append(*sym);
852 }
853 static int compare_symbols_by_address(Symbol** a, Symbol** b) {
854 if (a[0] < b[0]) {
855 return -1;
856 } else if (a[0] == b[0]) {
857 return 0;
858 } else {
859 return 1;
873 // ArchiveCompactor --
874 //
875 // This class is the central piece of shared archive compaction -- all metaspace data are
876 // initially allocated outside of the shared regions. ArchiveCompactor copies the
877 // metaspace data into their final location in the shared regions.
878
879 class ArchiveCompactor : AllStatic {
880 static DumpAllocStats* _alloc_stats;
881 static SortedSymbolClosure* _ssc;
882
883 static unsigned my_hash(const address& a) {
884 return primitive_hash<address>(a);
885 }
886 static bool my_equals(const address& a0, const address& a1) {
887 return primitive_equals<address>(a0, a1);
888 }
889 typedef ResourceHashtable<
890 address, address,
891 ArchiveCompactor::my_hash, // solaris compiler doesn't like: primitive_hash<address>
892 ArchiveCompactor::my_equals, // solaris compiler doesn't like: primitive_equals<address>
893 16384, ResourceObj::C_HEAP> MyTable;
894 static MyTable* _new_loc_table;
895
896 public:
897 static void initialize() {
898 _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats;
899 _new_loc_table = new(ResourceObj::C_HEAP, mtInternal)MyTable;
900 }
901 static DumpAllocStats* alloc_stats() {
902 return _alloc_stats;
903 }
904
905 static void allocate(MetaspaceClosure::Ref* ref, bool read_only) {
906 address obj = ref->obj();
907 int bytes = ref->size() * BytesPerWord;
908 char* p;
909 size_t alignment = BytesPerWord;
910 if (read_only) {
911 p = _ro_region.allocate(bytes, alignment);
912 } else {
913 p = _rw_region.allocate(bytes, alignment);
914 }
915 memcpy(p, obj, bytes);
916 bool isnew = _new_loc_table->put(obj, (address)p);
917 assert(isnew, "must be");
918 log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
919
920 _alloc_stats->record(ref->msotype(), bytes, read_only);
921 if (ref->msotype() == MetaspaceObj::SymbolType) {
922 uintx delta = MetaspaceShared::object_delta(p);
923 if (delta > MAX_SHARED_DELTA) {
924 // This is just a sanity check and should not appear in any real world usage. This
925 // happens only if you allocate more than 2GB of Symbols and would require
926 // millions of shared classes.
927 vm_exit_during_initialization("Too many Symbols in the CDS archive",
928 "Please reduce the number of shared classes.");
929 }
930 }
931 }
932
933 static address get_new_loc(MetaspaceClosure::Ref* ref) {
934 address* pp = _new_loc_table->get(ref->obj());
935 assert(pp != NULL, "must be");
936 return *pp;
937 }
938
939 private:
940 // Makes a shallow copy of visited MetaspaceObj's
1025 ResourceMark rm;
1026 RefRelocator ext_reloc;
1027 iterate_roots(&ext_reloc);
1028 }
1029
1030 #ifdef ASSERT
1031 {
1032 tty->print_cr("Verifying external roots ... ");
1033 ResourceMark rm;
1034 IsRefInArchiveChecker checker;
1035 iterate_roots(&checker);
1036 }
1037 #endif
1038
1039
1040 // cleanup
1041 _ssc = NULL;
1042 }
1043
1044 // We must relocate the System::_well_known_klasses only after we have copied the
1045 // strings in during dump_string_and_symbols(): during the copy, we operate on old
1046 // String objects which assert that their klass is the old
1047 // SystemDictionary::String_klass().
1048 static void relocate_well_known_klasses() {
1049 {
1050 tty->print_cr("Relocating _well_known_klasses[] ... ");
1051 ResourceMark rm;
1052 RefRelocator ext_reloc;
1053 SystemDictionary::well_known_klasses_do(&ext_reloc);
1054 }
1055 // NOTE: after this point, we shouldn't have any globals that can reach the old
1056 // objects.
1057
1058 // We cannot use any of the objects in the heap anymore (except for the objects
1059 // in the CDS shared string regions) because their headers no longer point to
1060 // valid Klasses.
1061 }
1062
1063 static void iterate_roots(MetaspaceClosure* it) {
1064 GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols();
1065 for (int i=0; i<symbols->length(); i++) {
1066 it->push(symbols->adr_at(i));
1067 }
1068 if (_global_klass_objects != NULL) {
1069 // Need to fix up the pointers
1070 for (int i = 0; i < _global_klass_objects->length(); i++) {
1073 }
1074 }
1075 FileMapInfo::metaspace_pointers_do(it);
1076 SystemDictionary::classes_do(it);
1077 Universe::metaspace_pointers_do(it);
1078 SymbolTable::metaspace_pointers_do(it);
1079 vmSymbols::metaspace_pointers_do(it);
1080 }
1081
1082 static Klass* get_relocated_klass(Klass* orig_klass) {
1083 address* pp = _new_loc_table->get((address)orig_klass);
1084 assert(pp != NULL, "must be");
1085 Klass* klass = (Klass*)(*pp);
1086 assert(klass->is_klass(), "must be");
1087 return klass;
1088 }
1089 };
1090
1091 DumpAllocStats* ArchiveCompactor::_alloc_stats;
1092 SortedSymbolClosure* ArchiveCompactor::_ssc;
1093 ArchiveCompactor::MyTable* ArchiveCompactor::_new_loc_table;
1094
1095 void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx,
1096 DumpRegion* dump_region, bool read_only, bool allow_exec) {
1097 mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
1098 }
1099
1100 void VM_PopulateDumpSharedSpace::dump_string_and_symbols() {
1101 tty->print_cr("Dumping string and symbol tables ...");
1102
1103 NOT_PRODUCT(SymbolTable::verify());
1104 NOT_PRODUCT(StringTable::verify());
1105 SymbolTable::write_to_archive();
1106
1107 // The string space has maximum two regions. See FileMapInfo::write_string_regions() for details.
1108 _string_regions = new GrowableArray<MemRegion>(2);
1109 size_t shared_string_bytes = 0;
1110 StringTable::write_to_archive(_string_regions, &shared_string_bytes);
1111 char* st_base = _string_regions->is_empty() ? NULL : (char*)_string_regions->first().start();
1112 char* st_top = st_base + shared_string_bytes;
1113 _st_region.init(st_base, st_top, st_top);
1114 _st_region.pack();
1115 }
1116
1117 char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
1118 // Reorder the system dictionary. Moving the symbols affects
1119 // how the hash table indices are calculated.
1120 SystemDictionary::reorder_dictionary_for_sharing();
1121 NOT_PRODUCT(SystemDictionary::verify();)
1122
1123 size_t buckets_bytes = SystemDictionary::count_bytes_for_buckets();
1124 char* buckets_top = _ro_region.allocate(buckets_bytes, sizeof(intptr_t));
1125 SystemDictionary::copy_buckets(buckets_top, _ro_region.top());
1126
1127 size_t table_bytes = SystemDictionary::count_bytes_for_table();
1128 char* table_top = _ro_region.allocate(table_bytes, sizeof(intptr_t));
1129 SystemDictionary::copy_table(table_top, _ro_region.top());
1130
1131 // Write the other data to the output array.
1132 WriteClosure wc(&_ro_region);
1133 MetaspaceShared::serialize(&wc);
1134
1135 return buckets_top;
1136 }
1137
1138 void VM_PopulateDumpSharedSpace::doit() {
1139 Thread* THREAD = VMThread::vm_thread();
1140
1141 NOT_PRODUCT(SystemDictionary::verify();)
1142 // The following guarantee is meant to ensure that no loader constraints
1143 // exist yet, since the constraints table is not shared. This becomes
1144 // more important now that we don't re-initialize vtables/itables for
1145 // shared classes at runtime, where constraints were previously created.
1146 guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
1147 "loader constraints are not saved");
1148 guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
1149 "placeholders are not saved");
1150 // Revisit and implement this if we prelink method handle call sites:
1151 guarantee(SystemDictionary::invoke_method_table() == NULL ||
1152 SystemDictionary::invoke_method_table()->number_of_entries() == 0,
1153 "invoke method table is not saved");
1154
1192 ArchiveCompactor::initialize();
1193 ArchiveCompactor::copy_and_compact();
1194
1195 dump_string_and_symbols();
1196 ArchiveCompactor::relocate_well_known_klasses();
1197
1198 char* read_only_tables_start = dump_read_only_tables();
1199 _ro_region.pack(&_md_region);
1200
1201 char* vtbl_list = _md_region.top();
1202 MetaspaceShared::allocate_cpp_vtable_clones();
1203 _md_region.pack(&_od_region);
1204
1205 // Relocate the archived class file data into the od region
1206 relocate_cached_class_file();
1207 _od_region.pack();
1208
1209 // The 5 core spaces are allocated consecutively mc->rw->ro->md->od, so there total size
1210 // is just the spaces between the two ends.
1211 size_t core_spaces_size = _od_region.end() - _mc_region.base();
1212 assert(core_spaces_size == (size_t)align_size_up(core_spaces_size, Metaspace::reserve_alignment()),
1213 "should already be aligned");
1214
1215 // Print statistics of all the regions
1216 const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
1217 _mc_region.reserved() + _md_region.reserved() +
1218 _st_region.reserved() + _od_region.reserved();
1219 const size_t total_bytes = _ro_region.used() + _rw_region.used() +
1220 _mc_region.used() + _md_region.used() +
1221 _st_region.used() + _od_region.used();
1222 const double total_u_perc = total_bytes / double(total_reserved) * 100.0;
1223
1224 _mc_region.print(total_reserved);
1225 _rw_region.print(total_reserved);
1226 _ro_region.print(total_reserved);
1227 _md_region.print(total_reserved);
1228 _st_region.print(total_reserved);
1229 _od_region.print(total_reserved);
1230
1231 tty->print_cr("total : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
1232 total_bytes, total_reserved, total_u_perc);
1233
1234 // During patching, some virtual methods may be called, so at this point
1235 // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
1236 MetaspaceShared::patch_cpp_vtable_pointers();
1237
1238 // The vtable clones contain addresses of the current process.
1239 // We don't want to write these addresses into the archive.
1240 MetaspaceShared::zero_cpp_vtable_clones_for_writing();
1241
1242 // Create and write the archive file that maps the shared spaces.
1243
1244 FileMapInfo* mapinfo = new FileMapInfo();
1245 mapinfo->populate_header(os::vm_allocation_granularity());
1246 mapinfo->set_read_only_tables_start(read_only_tables_start);
1247 mapinfo->set_misc_data_patching_start(vtbl_list);
1248 mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers());
1249 mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size());
1250 mapinfo->set_core_spaces_size(core_spaces_size);
1251
1252 for (int pass=1; pass<=2; pass++) {
1253 if (pass == 1) {
1254 // The first pass doesn't actually write the data to disk. All it
1255 // does is to update the fields in the mapinfo->_header.
1256 } else {
1257 // After the first pass, the contents of mapinfo->_header are finalized,
1258 // so we can compute the header's CRC, and write the contents of the header
1259 // and the regions into disk.
1260 mapinfo->open_for_write();
1261 mapinfo->set_header_crc(mapinfo->compute_header_crc());
1262 }
1263 mapinfo->write_header();
1264
1265 // NOTE: md contains the trampoline code for method entries, which are patched at run time,
1266 // so it needs to be read/write.
1267 write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
1268 write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
1269 write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
1270 write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
1271 write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);
1272 mapinfo->write_string_regions(_string_regions);
1273 }
1274
1275 mapinfo->close();
1276
1277 // Restore the vtable in case we invoke any virtual methods.
1278 MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);
1279
1280 if (log_is_enabled(Info, cds)) {
1281 ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
1282 int(_mc_region.used()), int(_md_region.used()));
1283 }
1284 }
1285
1286 // Update a Java object to point its Klass* to the new location after
1287 // shared archive has been compacted.
1288 void MetaspaceShared::relocate_klass_ptr(oop o) {
1289 assert(DumpSharedSpaces, "sanity");
1290 Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
1291 o->set_klass(k);
1292 }
1293
1294 class LinkSharedClassesClosure : public KlassClosure {
1295 Thread* THREAD;
1296 bool _made_progress;
1297 public:
1298 LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {}
1299
1300 void reset() { _made_progress = false; }
1301 bool made_progress() const { return _made_progress; }
1302
1303 void do_klass(Klass* k) {
1304 if (k->is_instance_klass()) {
1305 InstanceKlass* ik = InstanceKlass::cast(k);
1710
1711 if (UseSharedSpaces) {
1712 // remap the shared readonly space to shared readwrite, private
1713 FileMapInfo* mapinfo = FileMapInfo::current_info();
1714 if (!mapinfo->remap_shared_readonly_as_readwrite()) {
1715 return false;
1716 }
1717 _remapped_readwrite = true;
1718 }
1719 return true;
1720 }
1721
1722 void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
1723 // This is highly unlikely to happen on 64-bits because we have reserved a 4GB space.
1724 // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes
1725 // or so.
1726 _mc_region.print_out_of_space_msg(name, needed_bytes);
1727 _rw_region.print_out_of_space_msg(name, needed_bytes);
1728 _ro_region.print_out_of_space_msg(name, needed_bytes);
1729 _md_region.print_out_of_space_msg(name, needed_bytes);
1730 _st_region.print_out_of_space_msg(name, needed_bytes);
1731 _od_region.print_out_of_space_msg(name, needed_bytes);
1732
1733 vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
1734 "Please reduce the number of shared classes.");
1735 }
|
54 #include "runtime/timerTrace.hpp"
55 #include "runtime/os.hpp"
56 #include "runtime/signature.hpp"
57 #include "runtime/vmThread.hpp"
58 #include "runtime/vm_operations.hpp"
59 #include "utilities/align.hpp"
60 #include "utilities/defaultStream.hpp"
61 #include "utilities/hashtable.inline.hpp"
62 #include "memory/metaspaceClosure.hpp"
63
64 ReservedSpace MetaspaceShared::_shared_rs;
65 VirtualSpace MetaspaceShared::_shared_vs;
66 MetaspaceSharedStats MetaspaceShared::_stats;
67 bool MetaspaceShared::_has_error_classes;
68 bool MetaspaceShared::_archive_loading_failed = false;
69 bool MetaspaceShared::_remapped_readwrite = false;
70 address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
71 size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
72 size_t MetaspaceShared::_core_spaces_size = 0;

// The CDS archive is divided into the following regions:
//     mc - misc code (the method entry trampolines)
//     rw - read-write metadata
//     ro - read-only metadata and read-only tables
//     md - misc data (the c++ vtables)
//     od - optional data (original class files)
//
//     s0 - shared strings #0
//     s1 - shared strings #1 (may be empty)
//
// Except for the s0/s1 regions, the other 5 regions are linearly allocated, starting from
// SharedBaseAddress, in the order of mc->rw->ro->md->od. The sizes of these 5 regions
// are page-aligned, and there's no gap between any consecutive regions.
//
// These 5 regions are populated in the following steps:
// [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
//     temporarily allocated outside of the shared regions. Only the method entry
//     trampolines are written into the mc region.
// [2] ArchiveCompactor copies RW metadata into the rw region.
// [3] ArchiveCompactor copies RO metadata into the ro region.
// [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
//     are copied into the ro region as read-only tables.
// [5] C++ vtables are copied into the md region.
// [6] Original class files are copied into the od region.
//
// The s0/s1 regions are populated inside VM_PopulateDumpSharedSpace::dump_string_and_symbols.
// Their layout is independent of the other 5 regions.
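//
// For illustration, the dump-time address space then looks roughly like this
// (region widths are not to scale):
//
//   SharedBaseAddress
//   |
//   v
//   +----+----+----+----+----+
//   | mc | rw | ro | md | od |    <- contiguous, each region page-aligned
//   +----+----+----+----+----+
//
// while s0/s1 are laid out independently of this range.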

class DumpRegion {
private:
  const char* _name;
  char* _base;
  char* _top;
  char* _end;
  bool _is_packed;

  char* expand_top_to(char* newtop) {
    assert(is_allocatable(), "must be initialized and not packed");
    assert(newtop >= _top, "must not grow backwards");
    if (newtop > _end) {
      MetaspaceShared::report_out_of_space(_name, newtop - _top);
      ShouldNotReachHere();
    }
    MetaspaceShared::commit_shared_space_to(newtop);
    _top = newtop;
    return _top;
  }

public:
  DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}

  char* allocate(size_t num_bytes, size_t alignment=BytesPerWord) {
    char* p = (char*)align_up(_top, alignment);
    char* newtop = p + align_up(num_bytes, alignment);
    expand_top_to(newtop);
    memset(p, 0, newtop - p);
    return p;
  }
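
  // allocate() in action (illustrative values): with _top == 0x1004, a call to
  // allocate(13, 8) rounds _top up to p == 0x1008, grows the region to
  // newtop == p + align_up(13, 8) == 0x1018, zero-fills [p, newtop), and
  // returns p. The padding bytes introduced by the rounding are simply skipped.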

  void append_intptr_t(intptr_t n) {
    assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
    intptr_t *p = (intptr_t*)_top;
    char* newtop = _top + sizeof(intptr_t);
    expand_top_to(newtop);
    *p = n;
  }

  char* base() const { return _base; }
  char* top() const { return _top; }
  char* end() const { return _end; }
  size_t reserved() const { return _end - _base; }
  size_t used() const { return _top - _base; }
  bool is_packed() const { return _is_packed; }
  bool is_allocatable() const {
    return !is_packed() && _base != NULL;
  }

  double perc(size_t used, size_t total) const {
    if (total == 0) {
      total = 1;
    }
    return used / double(total) * 100.0;
  }

  void print(size_t total_bytes) const {
    tty->print_cr("%s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
                  _name, used(), perc(used(), total_bytes), reserved(), perc(used(), reserved()), p2i(_base));
  }
  void print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
    tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
               _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
    if (strcmp(_name, failing_region) == 0) {
      tty->print_cr(" required = %d", int(needed_bytes));
    } else {
      tty->cr();
    }
  }

  void init(const ReservedSpace* rs) {
    _base = _top = rs->base();
    _end = rs->end();
  }
  void init(char* b, char* t, char* e) {
    _base = b;
    _top = t;
    _end = e;
  }

  void pack(DumpRegion* next = NULL) {
    assert(!is_packed(), "sanity");
    _end = (char*)align_up(_top, Metaspace::reserve_alignment());
    _is_packed = true;
    if (next != NULL) {
      next->_base = next->_top = this->_end;
      next->_end = MetaspaceShared::shared_rs()->end();
    }
  }
  bool contains(char* p) {
    return base() <= p && p < top();
  }
};
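
// How the linear regions are chained: pack() seals a region at its aligned top
// and, when given a successor, starts that region right where this one ended.
// A sketch of the idiom (the mc/rw calls are made from code not shown in this
// excerpt; the ro/md/od calls appear in VM_PopulateDumpSharedSpace::doit()):
//
//   _ro_region.pack(&_md_region);   // ro sealed; md begins at ro's aligned end
//   _md_region.pack(&_od_region);   // md sealed; od begins at md's aligned end
//   _od_region.pack();              // od is last; nothing follows it
//
// which is how mc->rw->ro->md->od stay contiguous within _shared_rs.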

DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
DumpRegion _s0_region("s0"), _s1_region("s1");

char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
  return _mc_region.allocate(num_bytes);
}

char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
  return _ro_region.allocate(num_bytes);
}

void MetaspaceShared::initialize_shared_rs() {
  const size_t reserve_alignment = Metaspace::reserve_alignment();
  bool large_pages = false; // No large pages when dumping the CDS archive.
  char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);

#ifdef _LP64
  const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
  const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
#else
  // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
  size_t cds_total = align_down(256*M, reserve_alignment);
#endif

  // First try to reserve the space at the specified SharedBaseAddress.
  _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
  if (_shared_rs.is_reserved()) {
    assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
  } else {
    // Get a mmap region anywhere if reserving at SharedBaseAddress fails.
    _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages);
  }
  if (!_shared_rs.is_reserved()) {
    vm_exit_during_initialization("Unable to reserve memory for shared space",
                                  err_msg(SIZE_FORMAT " bytes.", cds_total));
  }

#ifdef _LP64
  // During dump time, we allocate 4GB (UnscaledClassSpaceMax) of space and split it up:
  // + The upper 1 GB is used as the "temporary compressed class space" -- preload_classes()
  //   will store Klasses into this space.
  // + The lower 3 GB is used for the archive -- when preload_classes() is done,
  //   ArchiveCompactor will copy the class metadata into this space, first the RW parts,
  //   then the RO parts.

  assert(UseCompressedOops && UseCompressedClassPointers,
         "UseCompressedOops and UseCompressedClassPointers must be set");

  size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment);
  ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
  CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment);
  _shared_rs = _shared_rs.first_part(max_archive_size);

  // Set up compressed class pointers.
  Universe::set_narrow_klass_base((address)_shared_rs.base());
  if (UseAOT || cds_total > UnscaledClassSpaceMax) {
    // AOT forces narrow_klass_shift=LogKlassAlignmentInBytes
    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
  } else {
    Universe::set_narrow_klass_shift(0);
  }

  Metaspace::initialize_class_space(tmp_class_space);
  tty->print_cr("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());

  tty->print_cr("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                CompressedClassSpaceSize, p2i(tmp_class_space.base()));
#endif

  // Start with 0 committed bytes. The memory will be committed as needed by
  // ...
                _shared_rs.size(), p2i(_shared_rs.base()));
}
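
// To visualize the _LP64 split above (a sketch, not to scale):
//
//   _shared_rs.base()                         base + 3GB             base + 4GB
//   |                                         |                      |
//   v                                         v                      v
//   +-----------------------------------------+----------------------+
//   |    archive (mc/rw/ro/md/od regions)     |   tmp_class_space    |
//   +-----------------------------------------+----------------------+
//   ^
//   narrow_klass_base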

void MetaspaceShared::commit_shared_space_to(char* newtop) {
  assert(DumpSharedSpaces, "dump-time only");
  char* base = _shared_rs.base();
  size_t need_committed_size = newtop - base;
  size_t has_committed_size = _shared_vs.committed_size();
  if (need_committed_size < has_committed_size) {
    return;
  }

  size_t min_bytes = need_committed_size - has_committed_size;
  size_t preferred_bytes = 1 * M;
  size_t uncommitted = _shared_vs.reserved_size() - has_committed_size;

  size_t commit = MAX2(min_bytes, preferred_bytes);
  assert(commit <= uncommitted, "sanity");

  bool result = _shared_vs.expand_by(commit, false);
  if (!result) {
    vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
                                          need_committed_size));
  }

  log_info(cds)("Expanding shared spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
                commit, _shared_vs.actual_committed_size(), _shared_vs.high());
}
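
// A worked example of the sizing above (illustrative numbers): if the regions
// need 3500 KB committed but only 3000 KB are committed so far, min_bytes is
// 500 KB and commit = MAX2(500 KB, 1 MB) = 1 MB -- growth happens in chunks of
// at least 1 MB so that small allocations don't each pay for an expand_by() call.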

// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.

void MetaspaceShared::serialize(SerializeClosure* soc) {
  int tag = 0;
  soc->do_tag(--tag);

  // Verify the sizes of various metadata in the system.
  soc->do_tag(sizeof(Method));
  soc->do_tag(sizeof(ConstMethod));
  soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(ConstantPool));
  soc->do_tag(sizeof(ConstantPoolCache));
  soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
  soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(Symbol));

  // Dump/restore miscellaneous metadata.
// ...
    assert(DumpSharedSpaces, "dump-time only");
    _info->zero();
  }

  // Switch the vtable pointer to point to the cloned vtable.
  static void patch(Metadata* obj) {
    assert(DumpSharedSpaces, "dump-time only");
    *(void**)obj = (void*)(_info->cloned_vtable());
  }

  static bool is_valid_shared_object(const T* obj) {
    intptr_t* vptr = *(intptr_t**)obj;
    return vptr == _info->cloned_vtable();
  }
};
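
// To make patch() concrete (m is a hypothetical Method* being archived): a C++
// object with virtual functions begins with a compiler-generated vtable pointer,
// so
//
//   CppVtableCloner<Method>::patch(m);   // i.e. *(void**)m = _info->cloned_vtable()
//
// overwrites that first word, making m dispatch through the clone stored in the
// md region rather than through a vtable in the current process image.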

template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;

template <class T>
intptr_t* CppVtableCloner<T>::allocate(const char* name) {
  assert(is_aligned(_md_region.top(), sizeof(intptr_t)), "bad alignment");
  int n = get_vtable_length(name);
  _info = (CppVtableInfo*)_md_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
  _info->set_vtable_size(n);

  intptr_t* p = clone_vtable(name, _info);
  assert((char*)p == _md_region.top(), "must be");

  return p;
}

template <class T>
intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) {
  if (!DumpSharedSpaces) {
    assert(_info == 0, "_info is initialized only at dump time");
    _info = info; // Remember it -- it will be used by MetaspaceShared::is_valid_shared_method()
  }
  T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
  int n = info->vtable_size();
  intptr_t* srcvtable = vtable_of(tmp);
  intptr_t* dstvtable = info->cloned_vtable();
// ...
  }

public:
  enum { RO = 0, RW = 1 };

  int _counts[2][_number_of_types];
  int _bytes [2][_number_of_types];

  DumpAllocStats() {
    memset(_counts, 0, sizeof(_counts));
    memset(_bytes, 0, sizeof(_bytes));
  }

  void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
    int which = (read_only) ? RO : RW;
    _counts[which][type] ++;
    _bytes [which][type] += byte_size;
  }

  void record_other_type(int byte_size, bool read_only) {
    int which = (read_only) ? RO : RW;
    _bytes [which][OtherType] += byte_size;
  }
  void print_stats(int ro_all, int rw_all, int mc_all, int md_all);
};

void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all, int md_all) {
  // Calculate size of data that was not allocated by Metaspace::allocate()
  MetaspaceSharedStats *stats = MetaspaceShared::stats();

  // symbols
  _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
  _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;

  _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
  _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;

  // strings
  _counts[RO][StringHashentryType] = stats->string.hashentry_count;
  _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;

  _counts[RO][StringBucketType] = stats->string.bucket_count;
  _bytes [RO][StringBucketType] = stats->string.bucket_bytes;
  // ...
    ro_all = 1;
  }
  if (rw_all < 1) {
    rw_all = 1;
  }

  int all_ro_count = 0;
  int all_ro_bytes = 0;
  int all_rw_count = 0;
  int all_rw_bytes = 0;

  // To make fmt_stats be a syntactic constant (for format warnings), use #define.
#define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
  const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
  const char *hdr = "                        ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";

  ResourceMark rm;
  LogMessage(cds) msg;
  stringStream info_stream;

  info_stream.print_cr("Detailed metadata info (excluding od/st regions; rw stats include md/mc regions):");
  info_stream.print_cr("%s", hdr);
  info_stream.print_cr("%s", sep);
  for (int type = 0; type < int(_number_of_types); type ++) {
    const char *name = type_name((Type)type);
    int ro_count = _counts[RO][type];
    int ro_bytes = _bytes [RO][type];
    int rw_count = _counts[RW][type];
    int rw_bytes = _bytes [RW][type];
    int count = ro_count + rw_count;
    int bytes = ro_bytes + rw_bytes;

    double ro_perc = 100.0 * double(ro_bytes) / double(ro_all);
    double rw_perc = 100.0 * double(rw_bytes) / double(rw_all);
    double perc = 100.0 * double(bytes) / double(ro_all + rw_all);

    info_stream.print_cr(fmt_stats, name,
                         ro_count, ro_bytes, ro_perc,
                         rw_count, rw_bytes, rw_perc,
                         count, bytes, perc);

    all_ro_count += ro_count;
    all_ro_bytes += ro_bytes;
    all_rw_count += rw_count;
    all_rw_bytes += rw_bytes;
  }

  int all_count = all_ro_count + all_rw_count;
  int all_bytes = all_ro_bytes + all_rw_bytes;

  double all_ro_perc = 100.0 * double(all_ro_bytes) / double(ro_all);
  double all_rw_perc = 100.0 * double(all_rw_bytes) / double(rw_all);
  double all_perc = 100.0 * double(all_bytes) / double(ro_all + rw_all);

  info_stream.print_cr("%s", sep);
  info_stream.print_cr(fmt_stats, "Total",
                       all_ro_count, all_ro_bytes, all_ro_perc,
                       all_rw_count, all_rw_bytes, all_rw_perc,
                       all_count, all_bytes, all_perc);

  assert(all_ro_bytes == ro_all, "everything should have been counted");
  assert(all_rw_bytes == rw_all, "everything should have been counted");

  msg.info("%s", info_stream.as_string());
#undef fmt_stats
}

// Populate the shared space.

class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  GrowableArray<MemRegion> *_string_regions;

  void dump_string_and_symbols();
  char* dump_read_only_tables();
  void print_region_stats();
public:

  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit();   // outline because gdb sucks
  static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only, bool allow_exec);
}; // class VM_PopulateDumpSharedSpace

class SortedSymbolClosure: public SymbolClosure {
  GrowableArray<Symbol*> _symbols;
  virtual void do_symbol(Symbol** sym) {
    assert((*sym)->is_permanent(), "archived symbols must be permanent");
    _symbols.append(*sym);
  }
  static int compare_symbols_by_address(Symbol** a, Symbol** b) {
    if (a[0] < b[0]) {
      return -1;
    } else if (a[0] == b[0]) {
      return 0;
    } else {
      return 1;
// ...

// ArchiveCompactor --
//
// This class is the central piece of shared archive compaction -- all metaspace data are
// initially allocated outside of the shared regions. ArchiveCompactor copies the
// metaspace data into their final location in the shared regions.
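//
// A sketch of the compaction flow as driven by VM_PopulateDumpSharedSpace::doit()
// (see below; names taken from this file):
//
//   ArchiveCompactor::initialize();        // fresh stats and old->new address table
//   ArchiveCompactor::copy_and_compact();  // shallow-copy each MetaspaceObj into
//                                          // rw/ro and record its new address
//   ArchiveCompactor::relocate_well_known_klasses();
//
// After copying, every recorded reference is updated to point at the new copies.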

class ArchiveCompactor : AllStatic {
  static DumpAllocStats* _alloc_stats;
  static SortedSymbolClosure* _ssc;

  static unsigned my_hash(const address& a) {
    return primitive_hash<address>(a);
  }
  static bool my_equals(const address& a0, const address& a1) {
    return primitive_equals<address>(a0, a1);
  }
  typedef ResourceHashtable<
      address, address,
      ArchiveCompactor::my_hash,   // solaris compiler doesn't like: primitive_hash<address>
      ArchiveCompactor::my_equals, // solaris compiler doesn't like: primitive_equals<address>
      16384, ResourceObj::C_HEAP> RelocationTable;
  static RelocationTable* _new_loc_table;

public:
  static void initialize() {
    _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats;
    _new_loc_table = new(ResourceObj::C_HEAP, mtInternal)RelocationTable;
  }
  static DumpAllocStats* alloc_stats() {
    return _alloc_stats;
  }

  static void allocate(MetaspaceClosure::Ref* ref, bool read_only) {
    address obj = ref->obj();
    int bytes = ref->size() * BytesPerWord;
    char* p;
    size_t alignment = BytesPerWord;
    char* oldtop;
    char* newtop;

    if (read_only) {
      oldtop = _ro_region.top();
      p = _ro_region.allocate(bytes, alignment);
      newtop = _ro_region.top();
    } else {
      oldtop = _rw_region.top();
      p = _rw_region.allocate(bytes, alignment);
      newtop = _rw_region.top();
    }
    memcpy(p, obj, bytes);
    bool isnew = _new_loc_table->put(obj, (address)p);
    assert(isnew, "must be");
    log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);

    _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
    if (ref->msotype() == MetaspaceObj::SymbolType) {
      uintx delta = MetaspaceShared::object_delta(p);
      if (delta > MAX_SHARED_DELTA) {
        // This is just a sanity check and should not appear in any real world usage. This
        // happens only if you allocate more than 2GB of Symbols and would require
        // millions of shared classes.
        vm_exit_during_initialization("Too many Symbols in the CDS archive",
                                      "Please reduce the number of shared classes.");
      }
    }
  }

  static address get_new_loc(MetaspaceClosure::Ref* ref) {
    address* pp = _new_loc_table->get(ref->obj());
    assert(pp != NULL, "must be");
    return *pp;
  }
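
  // The relocation table at work (hypothetical addresses): allocate() copies an
  // object living at 0x00007f88e1000040 into, say, the rw region at
  // 0x0000000800100020 and records that pair in _new_loc_table; when a reference
  // that still holds the old address is visited later, get_new_loc() returns
  // 0x0000000800100020 and the reference is rewritten to the archived copy.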

private:
  // Makes a shallow copy of visited MetaspaceObj's
// ...
      ResourceMark rm;
      RefRelocator ext_reloc;
      iterate_roots(&ext_reloc);
    }

#ifdef ASSERT
    {
      tty->print_cr("Verifying external roots ... ");
      ResourceMark rm;
      IsRefInArchiveChecker checker;
      iterate_roots(&checker);
    }
#endif

    // cleanup
    _ssc = NULL;
  }

  // We must relocate SystemDictionary::_well_known_klasses only after the strings
  // have been copied in dump_string_and_symbols(): during the string copy, we
  // operate on old String objects which assert that their klass is the old
  // SystemDictionary::String_klass().
  static void relocate_well_known_klasses() {
    {
      tty->print_cr("Relocating SystemDictionary::_well_known_klasses[] ... ");
      ResourceMark rm;
      RefRelocator ext_reloc;
      SystemDictionary::well_known_klasses_do(&ext_reloc);
    }
    // NOTE: after this point, we shouldn't have any globals that can reach the old
    // objects.

    // We cannot use any of the objects in the heap anymore (except for the objects
    // in the CDS shared string regions) because their headers no longer point to
    // valid Klasses.
  }

  static void iterate_roots(MetaspaceClosure* it) {
    GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols();
    for (int i=0; i<symbols->length(); i++) {
      it->push(symbols->adr_at(i));
    }
    if (_global_klass_objects != NULL) {
      // Need to fix up the pointers
      for (int i = 0; i < _global_klass_objects->length(); i++) {
      // ...
      }
    }
    FileMapInfo::metaspace_pointers_do(it);
    SystemDictionary::classes_do(it);
    Universe::metaspace_pointers_do(it);
    SymbolTable::metaspace_pointers_do(it);
    vmSymbols::metaspace_pointers_do(it);
  }

  static Klass* get_relocated_klass(Klass* orig_klass) {
    address* pp = _new_loc_table->get((address)orig_klass);
    assert(pp != NULL, "must be");
    Klass* klass = (Klass*)(*pp);
    assert(klass->is_klass(), "must be");
    return klass;
  }
};

DumpAllocStats* ArchiveCompactor::_alloc_stats;
SortedSymbolClosure* ArchiveCompactor::_ssc;
ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table;

void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx,
                                              DumpRegion* dump_region, bool read_only, bool allow_exec) {
  mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
}

void VM_PopulateDumpSharedSpace::dump_string_and_symbols() {
  tty->print_cr("Dumping string and symbol tables ...");

  NOT_PRODUCT(SymbolTable::verify());
  NOT_PRODUCT(StringTable::verify());
  SymbolTable::write_to_archive();

  // The string space has at most two regions. See FileMapInfo::write_string_regions() for details.
  _string_regions = new GrowableArray<MemRegion>(2);
  StringTable::write_to_archive(_string_regions);
}

char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
  char* oldtop = _ro_region.top();
  // Reorder the system dictionary. Moving the symbols affects
  // how the hash table indices are calculated.
  SystemDictionary::reorder_dictionary_for_sharing();
  NOT_PRODUCT(SystemDictionary::verify();)

  size_t buckets_bytes = SystemDictionary::count_bytes_for_buckets();
  char* buckets_top = _ro_region.allocate(buckets_bytes, sizeof(intptr_t));
  SystemDictionary::copy_buckets(buckets_top, _ro_region.top());

  size_t table_bytes = SystemDictionary::count_bytes_for_table();
  char* table_top = _ro_region.allocate(table_bytes, sizeof(intptr_t));
  SystemDictionary::copy_table(table_top, _ro_region.top());

  // Write the other data to the output array.
  WriteClosure wc(&_ro_region);
  MetaspaceShared::serialize(&wc);

  char* newtop = _ro_region.top();
  ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - oldtop), true);
  return buckets_top;
}
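
// After dump_read_only_tables() returns, the tail of the ro region holds (a
// sketch; actual sizes depend on the dictionary contents):
//
//   read_only_tables_start (== buckets_top)
//   |
//   v
//   +---------+-------+--------------------------+
//   | buckets | table | serialized misc metadata |   <- emitted by WriteClosure
//   +---------+-------+--------------------------+
//
// The returned pointer is stored in the FileMapInfo header (see
// set_read_only_tables_start() below) so the runtime can locate the shared
// dictionary when the archive is mapped.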

void VM_PopulateDumpSharedSpace::doit() {
  Thread* THREAD = VMThread::vm_thread();

  NOT_PRODUCT(SystemDictionary::verify();)
  // The following guarantee is meant to ensure that no loader constraints
  // exist yet, since the constraints table is not shared. This becomes
  // more important now that we don't re-initialize vtables/itables for
  // shared classes at runtime, where constraints were previously created.
  guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
            "loader constraints are not saved");
  guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
            "placeholders are not saved");
  // Revisit and implement this if we prelink method handle call sites:
  guarantee(SystemDictionary::invoke_method_table() == NULL ||
            SystemDictionary::invoke_method_table()->number_of_entries() == 0,
            "invoke method table is not saved");

  // ...

  ArchiveCompactor::initialize();
  ArchiveCompactor::copy_and_compact();

  dump_string_and_symbols();
  ArchiveCompactor::relocate_well_known_klasses();

  char* read_only_tables_start = dump_read_only_tables();
  _ro_region.pack(&_md_region);

  char* vtbl_list = _md_region.top();
  MetaspaceShared::allocate_cpp_vtable_clones();
  _md_region.pack(&_od_region);

  // Relocate the archived class file data into the od region
  relocate_cached_class_file();
  _od_region.pack();

  // The 5 core spaces are allocated consecutively mc->rw->ro->md->od, so their
  // total size is just the space between the two ends.
  size_t core_spaces_size = _od_region.end() - _mc_region.base();
  assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
         "should already be aligned");

  // During patching, some virtual methods may be called, so at this point
  // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
  MetaspaceShared::patch_cpp_vtable_pointers();

  // The vtable clones contain addresses of the current process.
  // We don't want to write these addresses into the archive.
  MetaspaceShared::zero_cpp_vtable_clones_for_writing();

  // Create and write the archive file that maps the shared spaces.

  FileMapInfo* mapinfo = new FileMapInfo();
  mapinfo->populate_header(os::vm_allocation_granularity());
  mapinfo->set_read_only_tables_start(read_only_tables_start);
  mapinfo->set_misc_data_patching_start(vtbl_list);
  mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers());
  mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size());
  mapinfo->set_core_spaces_size(core_spaces_size);

  char* s0_start, *s0_top, *s0_end;
  char* s1_start, *s1_top, *s1_end;

  for (int pass=1; pass<=2; pass++) {
    if (pass == 1) {
      // The first pass doesn't actually write the data to disk. All it
      // does is to update the fields in the mapinfo->_header.
    } else {
      // After the first pass, the contents of mapinfo->_header are finalized,
      // so we can compute the header's CRC, and write the contents of the header
      // and the regions into disk.
      mapinfo->open_for_write();
      mapinfo->set_header_crc(mapinfo->compute_header_crc());
    }
    mapinfo->write_header();

    // NOTE: md contains the trampoline code for method entries, which are patched at run time,
    // so it needs to be read/write.
    write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
    write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
    write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
    write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
    write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);

    mapinfo->write_string_regions(_string_regions,
                                  &s0_start, &s0_top, &s0_end,
                                  &s1_start, &s1_top, &s1_end);
  }

  mapinfo->close();

  // Restore the vtable in case we invoke any virtual methods.
  MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);

  _s0_region.init(s0_start, s0_top, s0_end);
  _s1_region.init(s1_start, s1_top, s1_end);
  print_region_stats();

  if (log_is_enabled(Info, cds)) {
    ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
                                                 int(_mc_region.used()), int(_md_region.used()));
  }
}

void VM_PopulateDumpSharedSpace::print_region_stats() {
  // Print statistics of all the regions
  const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
                                _mc_region.reserved() + _md_region.reserved() +
                                _od_region.reserved() +
                                _s0_region.reserved() + _s1_region.reserved();
  const size_t total_bytes = _ro_region.used() + _rw_region.used() +
                             _mc_region.used() + _md_region.used() +
                             _od_region.used() +
                             _s0_region.used() + _s1_region.used();
  const double total_u_perc = total_bytes / double(total_reserved) * 100.0;

  _mc_region.print(total_reserved);
  _rw_region.print(total_reserved);
  _ro_region.print(total_reserved);
  _md_region.print(total_reserved);
  _od_region.print(total_reserved);
  _s0_region.print(total_reserved);
  _s1_region.print(total_reserved);

  tty->print_cr("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
                total_bytes, total_reserved, total_u_perc);
}


// Update a Java object to point its Klass* to the new location after
// shared archive has been compacted.
void MetaspaceShared::relocate_klass_ptr(oop o) {
  assert(DumpSharedSpaces, "sanity");
  Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
  o->set_klass(k);
}

class LinkSharedClassesClosure : public KlassClosure {
  Thread* THREAD;
  bool _made_progress;
public:
  LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {}

  void reset() { _made_progress = false; }
  bool made_progress() const { return _made_progress; }

  void do_klass(Klass* k) {
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
// ...

  if (UseSharedSpaces) {
    // remap the shared readonly space to shared readwrite, private
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (!mapinfo->remap_shared_readonly_as_readwrite()) {
      return false;
    }
    _remapped_readwrite = true;
  }
  return true;
}

void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
  // This is highly unlikely to happen on 64-bit platforms, because we have reserved
  // a 4GB space. On 32-bit we reserve only 256MB, so you could run out of space with
  // 100,000 classes or so.
  _mc_region.print_out_of_space_msg(name, needed_bytes);
  _rw_region.print_out_of_space_msg(name, needed_bytes);
  _ro_region.print_out_of_space_msg(name, needed_bytes);
  _md_region.print_out_of_space_msg(name, needed_bytes);
  _od_region.print_out_of_space_msg(name, needed_bytes);

  vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
                                "Please reduce the number of shared classes.");
}