33 #include "memory/iterator.inline.hpp"
34 #include "memory/metadataFactory.hpp"
35 #include "memory/metaspaceClosure.hpp"
36 #include "memory/metaspaceShared.inline.hpp"
37 #include "memory/resourceArea.hpp"
38 #include "oops/compressedOops.inline.hpp"
39 #include "oops/oop.inline.hpp"
40 #include "runtime/fieldDescriptor.inline.hpp"
41 #include "utilities/bitMap.inline.hpp"
42
#if INCLUDE_CDS_JAVA_HEAP
// Presumably the base/shift used to encode/decode narrow oops in the archived
// heap region — TODO confirm against the code that assigns them (not in view).
address HeapShared::_narrow_oop_base;
int HeapShared::_narrow_oop_shift;
// Dump-time table (heap-allocated, lazily created) and run-time table
// (compact, serialized into the archive) mapping Klass* -> subgraph info.
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_table;
48
49 // Get the subgraph_info for Klass k. A new subgraph_info is created if
50 // there is no existing one for k. The subgraph_info records the relocated
51 // Klass* of the original k.
52 KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
53 Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
54 KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
55 if (info == NULL) {
56 _dump_time_subgraph_info_table->put(relocated_k, KlassSubGraphInfo(relocated_k));
57 info = _dump_time_subgraph_info_table->get(relocated_k);
58 ++ _dump_time_subgraph_info_table->_count;
59 }
60 return info;
61 }
62
63 // Add an entry field to the current KlassSubGraphInfo.
64 void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) {
65 assert(DumpSharedSpaces, "dump time only");
66 if (_subgraph_entry_fields == NULL) {
67 _subgraph_entry_fields =
68 new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, true);
69 }
70 _subgraph_entry_fields->append((juint)static_field_offset);
71 _subgraph_entry_fields->append(CompressedOops::encode(v));
72 }
154 _subgraph_object_klasses =
155 MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses);
156 for (int i = 0; i < num_subgraphs_klasses; i++) {
157 Klass* subgraph_k = subgraph_object_klasses->at(i);
158 if (log_is_enabled(Info, cds, heap)) {
159 ResourceMark rm;
160 log_info(cds, heap)(
161 "Archived object klass %s (%2d) => %s",
162 _k->external_name(), i, subgraph_k->external_name());
163 }
164 _subgraph_object_klasses->at_put(i, subgraph_k);
165 }
166 }
167 }
168
169 struct CopyKlassSubGraphInfoToArchive : StackObj {
170 CompactHashtableWriter* _writer;
171 CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}
172
173 bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
174
175 if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
176 ArchivedKlassSubGraphInfoRecord* record =
177 (ArchivedKlassSubGraphInfoRecord*)MetaspaceShared::read_only_space_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
178 record->init(&info);
179
180 unsigned int hash = primitive_hash<Klass*>(klass);
181 uintx deltax = MetaspaceShared::object_delta(record);
182 guarantee(deltax <= MAX_SHARED_DELTA, "must not be");
183 u4 delta = u4(deltax);
184 _writer->add(hash, delta);
185 }
186 return true; // keep on iterating
187 }
188 };
189
190 // Build the records of archived subgraph infos, which include:
191 // - Entry points to all subgraphs from the containing class mirror. The entry
192 // points are static fields in the mirror. For each entry point, the field
193 // offset and value are recorded in the sub-graph info. The value are stored
194 // back to the corresponding field at runtime.
195 // - A list of klasses that need to be loaded/initialized before archived
196 // java object sub-graph can be accessed at runtime.
197 void HeapShared::create_hashtables() {
198 // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
199 DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
200 CompactHashtableStats stats;
201
202 _run_time_subgraph_info_table.reset();
203
204 int num_buckets = CompactHashtableWriter::default_num_buckets(d_table->_count);
205 CompactHashtableWriter writer(num_buckets, &stats);
206 CopyKlassSubGraphInfoToArchive copy(&writer);
207 _dump_time_subgraph_info_table->iterate(©);
208
209 writer.dump(&_run_time_subgraph_info_table, "subgraphs");
210 }
211
// Read/write the headers of the hashtable(s) so they can be accessed quickly at runtime.
// Thin delegator: whether this reads or writes is determined by the concrete
// SerializeClosure passed in.
void HeapShared::serialize_hashtables(SerializeClosure* soc) {
  _run_time_subgraph_info_table.serialize(soc);
}
216
// Restore the archived object subgraph(s) rooted at static fields of k's
// mirror. Called at runtime (not dump time); a no-op when no archived heap
// region is mapped.
void HeapShared::initialize_from_archived_subgraph(Klass* k) {
  if (!MetaspaceShared::open_archive_heap_region_mapped()) {
    return; // nothing to do
  }

  // Look up k's record in the compact table serialized into the archive.
  unsigned int hash = primitive_hash<Klass*>(k);
  ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

  // Initialize from archived data. Currently this is done only
  // during VM initialization time. No lock is needed.
  if (record != NULL) {
    Thread* THREAD = Thread::current();
    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm;
      // NOTE(review): '%p' output is platform-dependent; HotSpot logging
      // convention is PTR_FORMAT with p2i() — confirm against logging style.
      log_info(cds, heap)("initialize_from_archived_subgraph %p %s", k,
                          k->external_name());
    }

    int i;
    // Load/link/initialize the klasses of the objects in the subgraph.
    // NULL class loader is used.
    Array<Klass*>* klasses = record->subgraph_object_klasses();
    if (klasses != NULL) {
      for (i = 0; i < klasses->length(); i++) {
        Klass* obj_k = klasses->at(i);
        Klass* resolved_k = SystemDictionary::resolve_or_null(
          (obj_k)->name(), THREAD);
        // If resolution yields a different Klass than the archived one, the
        // archived subgraph cannot be used; bail out without patching fields.
        if (resolved_k != obj_k) {
          return;
        }
        if ((obj_k)->is_instance_klass()) {
          InstanceKlass* ik = InstanceKlass::cast(obj_k);
          ik->initialize(THREAD);
        } else if ((obj_k)->is_objArray_klass()) {
          ObjArrayKlass* oak = ObjArrayKlass::cast(obj_k);
          // NOTE(review): the source listing jumps from original line 251 to
          // 262 here — the array-klass initialization / exception handling
          // appears to be missing from this excerpt.
          return;
        }

        // Load the subgraph entry fields from the record and store them back to
        // the corresponding fields within the mirror.
        oop m = k->java_mirror();
        Array<juint>* entry_field_records = record->entry_field_records();
        if (entry_field_records != NULL) {
          int efr_len = entry_field_records->length();
          // Records are (offset, encoded-value) pairs, hence even length.
          assert(efr_len % 2 == 0, "sanity");
          for (i = 0; i < efr_len;) {
            int field_offset = entry_field_records->at(i);
            // The object refereced by the field becomes 'known' by GC from this
            // point. All objects in the subgraph reachable from the object are
            // also 'known' by GC.
            oop v = MetaspaceShared::materialize_archived_object(
              entry_field_records->at(i+1));
            m->obj_field_put(field_offset, v);
            i += 2;

            log_debug(cds, heap)(" %p init field @ %2d = %p", k, field_offset, (address)v);
          }

          // Done. Java code can see the archived sub-graphs referenced from k's
          // mirror after this point.
        }
      }
    }
290
291 class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
292 int _level;
293 bool _record_klasses_only;
294 KlassSubGraphInfo* _subgraph_info;
295 oop _orig_referencing_obj;
296 oop _archived_referencing_obj;
297 Thread* _thread;
298 public:
299 WalkOopAndArchiveClosure(int level, bool record_klasses_only,
300 KlassSubGraphInfo* subgraph_info,
301 oop orig, oop archived, TRAPS) :
302 _level(level), _record_klasses_only(record_klasses_only),
|
33 #include "memory/iterator.inline.hpp"
34 #include "memory/metadataFactory.hpp"
35 #include "memory/metaspaceClosure.hpp"
36 #include "memory/metaspaceShared.inline.hpp"
37 #include "memory/resourceArea.hpp"
38 #include "oops/compressedOops.inline.hpp"
39 #include "oops/oop.inline.hpp"
40 #include "runtime/fieldDescriptor.inline.hpp"
41 #include "utilities/bitMap.inline.hpp"
42
#if INCLUDE_CDS_JAVA_HEAP
// Presumably the base/shift used to encode/decode narrow oops in the archived
// heap region — TODO confirm against the code that assigns them (not in view).
address HeapShared::_narrow_oop_base;
int HeapShared::_narrow_oop_shift;
// Dump-time table (heap-allocated, lazily created) and run-time table
// (compact, serialized into the archive) mapping Klass* -> subgraph info.
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_table;
48
49 // Get the subgraph_info for Klass k. A new subgraph_info is created if
50 // there is no existing one for k. The subgraph_info records the relocated
51 // Klass* of the original k.
52 KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
53 assert(DumpSharedSpaces, "dump time only");
54 Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
55 KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
56 if (info == NULL) {
57 _dump_time_subgraph_info_table->put(relocated_k, KlassSubGraphInfo(relocated_k));
58 info = _dump_time_subgraph_info_table->get(relocated_k);
59 ++ _dump_time_subgraph_info_table->_count;
60 }
61 return info;
62 }
63
64 // Add an entry field to the current KlassSubGraphInfo.
65 void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) {
66 assert(DumpSharedSpaces, "dump time only");
67 if (_subgraph_entry_fields == NULL) {
68 _subgraph_entry_fields =
69 new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, true);
70 }
71 _subgraph_entry_fields->append((juint)static_field_offset);
72 _subgraph_entry_fields->append(CompressedOops::encode(v));
73 }
155 _subgraph_object_klasses =
156 MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses);
157 for (int i = 0; i < num_subgraphs_klasses; i++) {
158 Klass* subgraph_k = subgraph_object_klasses->at(i);
159 if (log_is_enabled(Info, cds, heap)) {
160 ResourceMark rm;
161 log_info(cds, heap)(
162 "Archived object klass %s (%2d) => %s",
163 _k->external_name(), i, subgraph_k->external_name());
164 }
165 _subgraph_object_klasses->at_put(i, subgraph_k);
166 }
167 }
168 }
169
// Iteration functor used at dump time: for every KlassSubGraphInfo that
// actually recorded something, copy it into an ArchivedKlassSubGraphInfoRecord
// in the read-only region and add that record to the compact table.
struct CopyKlassSubGraphInfoToArchive : StackObj {
  CompactHashtableWriter* _writer;
  CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}

  bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
    // Skip klasses with nothing recorded; they need no runtime record.
    if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
      // The record lives in the RO space so it can be mapped read-only.
      ArchivedKlassSubGraphInfoRecord* record =
        (ArchivedKlassSubGraphInfoRecord*)MetaspaceShared::read_only_space_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
      record->init(&info);

      // Key: identity hash of the Klass*. Value: u4 delta of the record from
      // the shared base; guaranteed to fit in 32 bits.
      unsigned int hash = primitive_hash<Klass*>(klass);
      uintx deltax = MetaspaceShared::object_delta(record);
      guarantee(deltax <= MAX_SHARED_DELTA, "must not be");
      u4 delta = u4(deltax);
      _writer->add(hash, delta);
    }
    return true; // keep on iterating
  }
};
189
190 // Build the records of archived subgraph infos, which include:
191 // - Entry points to all subgraphs from the containing class mirror. The entry
192 // points are static fields in the mirror. For each entry point, the field
193 // offset and value are recorded in the sub-graph info. The value are stored
194 // back to the corresponding field at runtime.
195 // - A list of klasses that need to be loaded/initialized before archived
196 // java object sub-graph can be accessed at runtime.
197 void HeapShared::write_subgraph_info_table() {
198 // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
199 DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
200 CompactHashtableStats stats;
201
202 _run_time_subgraph_info_table.reset();
203
204 int num_buckets = CompactHashtableWriter::default_num_buckets(d_table->_count);
205 CompactHashtableWriter writer(num_buckets, &stats);
206 CopyKlassSubGraphInfoToArchive copy(&writer);
207 _dump_time_subgraph_info_table->iterate(©);
208
209 writer.dump(&_run_time_subgraph_info_table, "subgraphs");
210 }
211
// Read/write the header of the run-time subgraph-info hashtable so it can be
// accessed quickly at runtime. Direction (read vs. write) depends on the
// concrete SerializeClosure passed in.
void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) {
  _run_time_subgraph_info_table.serialize_header(soc);
}
215
// Restore the archived object subgraph(s) rooted at static fields of k's
// mirror. Runtime-only; a no-op when no archived heap region is mapped.
void HeapShared::initialize_from_archived_subgraph(Klass* k) {
  if (!MetaspaceShared::open_archive_heap_region_mapped()) {
    return; // nothing to do
  }
  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  // Look up k's record in the compact table serialized into the archive.
  unsigned int hash = primitive_hash<Klass*>(k);
  ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

  // Initialize from archived data. Currently this is done only
  // during VM initialization time. No lock is needed.
  if (record != NULL) {
    Thread* THREAD = Thread::current();
    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm;
      log_info(cds, heap)("initialize_from_archived_subgraph " PTR_FORMAT " %s", p2i(k),
                          k->external_name());
    }

    int i;
    // Load/link/initialize the klasses of the objects in the subgraph.
    // NULL class loader is used.
    Array<Klass*>* klasses = record->subgraph_object_klasses();
    if (klasses != NULL) {
      for (i = 0; i < klasses->length(); i++) {
        Klass* obj_k = klasses->at(i);
        Klass* resolved_k = SystemDictionary::resolve_or_null(
          (obj_k)->name(), THREAD);
        // If resolution yields a different Klass than the archived one, the
        // archived subgraph cannot be used; bail out without patching fields.
        if (resolved_k != obj_k) {
          return;
        }
        if ((obj_k)->is_instance_klass()) {
          InstanceKlass* ik = InstanceKlass::cast(obj_k);
          ik->initialize(THREAD);
        } else if ((obj_k)->is_objArray_klass()) {
          ObjArrayKlass* oak = ObjArrayKlass::cast(obj_k);
          // NOTE(review): the source listing jumps from original line 251 to
          // 262 here — the array-klass initialization / exception handling
          // appears to be missing from this excerpt.
          return;
        }

        // Load the subgraph entry fields from the record and store them back to
        // the corresponding fields within the mirror.
        oop m = k->java_mirror();
        Array<juint>* entry_field_records = record->entry_field_records();
        if (entry_field_records != NULL) {
          int efr_len = entry_field_records->length();
          // Records are (offset, encoded-value) pairs, hence even length.
          assert(efr_len % 2 == 0, "sanity");
          for (i = 0; i < efr_len;) {
            int field_offset = entry_field_records->at(i);
            // The object refereced by the field becomes 'known' by GC from this
            // point. All objects in the subgraph reachable from the object are
            // also 'known' by GC.
            oop v = MetaspaceShared::materialize_archived_object(
              entry_field_records->at(i+1));
            m->obj_field_put(field_offset, v);
            i += 2;

            log_debug(cds, heap)(" " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
          }

          // Done. Java code can see the archived sub-graphs referenced from k's
          // mirror after this point.
        }
      }
    }
290
291 class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
292 int _level;
293 bool _record_klasses_only;
294 KlassSubGraphInfo* _subgraph_info;
295 oop _orig_referencing_obj;
296 oop _archived_referencing_obj;
297 Thread* _thread;
298 public:
299 WalkOopAndArchiveClosure(int level, bool record_klasses_only,
300 KlassSubGraphInfo* subgraph_info,
301 oop orig, oop archived, TRAPS) :
302 _level(level), _record_klasses_only(record_klasses_only),
|