18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/classLoaderData.inline.hpp"
27 #include "classfile/classLoaderDataGraph.hpp"
28 #include "classfile/moduleEntry.hpp"
29 #include "classfile/systemDictionary.hpp"
30 #include "gc/shared/collectedHeap.hpp"
31 #include "logging/log.hpp"
32 #include "logging/logTag.hpp"
33 #include "memory/heapInspection.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "memory/universe.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "oops/reflectionAccessorImplKlassHelper.hpp"
38 #include "runtime/os.hpp"
39 #include "utilities/globalDefinitions.hpp"
40 #include "utilities/macros.hpp"
41 #include "utilities/stack.inline.hpp"
42
43 // HeapInspection
44
45 inline KlassInfoEntry::~KlassInfoEntry() {
46 if (_subclasses != NULL) {
47 delete _subclasses;
48 }
49 }
50
// Records cie as a direct subclass of this entry's klass.
// The backing array is created lazily on the C heap (mtServiceability)
// with an initial capacity of 4 and is deleted in ~KlassInfoEntry().
inline void KlassInfoEntry::add_subclass(KlassInfoEntry* cie) {
  if (_subclasses == NULL) {
    _subclasses = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<KlassInfoEntry*>(4, mtServiceability);
  }
  _subclasses->append(cie);
}
57
220 elt->set_count(elt->count() + 1);
221 elt->set_words(elt->words() + obj->size());
222 _size_of_instances_in_words += obj->size();
223 return true;
224 } else {
225 return false;
226 }
227 }
228
229 void KlassInfoTable::iterate(KlassInfoClosure* cic) {
230 assert(_buckets != NULL, "Allocation failure should have been caught");
231 for (int index = 0; index < _num_buckets; index++) {
232 _buckets[index].iterate(cic);
233 }
234 }
235
// Total size (in HeapWords) of all instances recorded so far.
size_t KlassInfoTable::size_of_instances_in_words() const {
  return _size_of_instances_in_words;
}
239
// qsort-style comparator for GrowableArray::sort(); delegates ordering
// to the entry's own compare().
int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
  return (*e1)->compare(*e1,*e2);
}
243
// Builds a histogram view over the given KlassInfoTable. The element
// array is C-heap allocated (mtServiceability) and freed in the destructor.
KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit) :
  _cit(cit) {
  _elements = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<KlassInfoEntry*>(_histo_initial_size, mtServiceability);
}
248
// Frees the element array only; the entries it points to are not owned
// by the histogram.
KlassInfoHisto::~KlassInfoHisto() {
  delete _elements;
}
252
// Appends one table entry to the histogram's element list.
void KlassInfoHisto::add(KlassInfoEntry* cie) {
  elements()->append(cie);
}
256
// Sorts the collected entries using sort_helper()'s ordering.
void KlassInfoHisto::sort() {
  elements()->sort(KlassInfoHisto::sort_helper);
}
// Prints the histogram header followed by one row per class
// (see print_elements()).
void KlassInfoHisto::print_histo_on(outputStream* st) {
  st->print_cr(" num #instances #bytes class name (module)");
  st->print_cr("-------------------------------------------------------");
  print_elements(st);
}
470
// KlassInfoClosure that funnels every table entry into a KlassInfoHisto;
// used to fill the histogram prior to sorting and printing.
class HistoClosure : public KlassInfoClosure {
 private:
  KlassInfoHisto* _cih;
 public:
  HistoClosure(KlassInfoHisto* cih) : _cih(cih) {}

  void do_cinfo(KlassInfoEntry* cie) {
    _cih->add(cie);
  }
};
481
482 class RecordInstanceClosure : public ObjectClosure {
483 private:
484 KlassInfoTable* _cit;
485 size_t _missed_count;
486 BoolObjectClosure* _filter;
487 public:
488 RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
489 _cit(cit), _missed_count(0), _filter(filter) {}
490
491 void do_object(oop obj) {
492 if (should_visit(obj)) {
493 if (!_cit->record_instance(obj)) {
494 _missed_count++;
495 }
496 }
497 }
498
499 size_t missed_count() { return _missed_count; }
500
501 private:
502 bool should_visit(oop obj) {
503 return _filter == NULL || _filter->do_object_b(obj);
504 }
505 };
506
// Walks the entire heap once, recording every (filter-accepted) object
// into *cit. Returns the number of objects that could not be recorded
// because entry allocation failed; 0 means the counts are exact.
size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter) {
  ResourceMark rm;

  RecordInstanceClosure ric(cit, filter);
  Universe::heap()->object_iterate(&ric);
  return ric.missed_count();
}
514
// Produces a class histogram of the entire heap on st.
// Logs a warning (gc, classhisto) if some instances could not be counted,
// and prints an error line if the table itself could not be allocated.
void HeapInspection::heap_inspection(outputStream* st) {
  ResourceMark rm;

  KlassInfoTable cit(false);
  if (!cit.allocation_failed()) {
    // populate table with object allocation info
    size_t missed_count = populate_table(&cit);
    if (missed_count != 0) {
      log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
                               " total instances in data below",
                               missed_count);
    }

    // Sort and print klass instance info
    KlassInfoHisto histo(&cit);
    HistoClosure hc(&histo);

    cit.iterate(&hc);

    histo.sort();
    histo.print_histo_on(st);
  } else {
    st->print_cr("ERROR: Ran out of C-heap; histogram not generated");
  }
  st->flush();
}
541
542 class FindInstanceClosure : public ObjectClosure {
543 private:
544 Klass* _klass;
545 GrowableArray<oop>* _result;
|
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/classLoaderData.inline.hpp"
27 #include "classfile/classLoaderDataGraph.hpp"
28 #include "classfile/moduleEntry.hpp"
29 #include "classfile/systemDictionary.hpp"
30 #include "gc/shared/collectedHeap.hpp"
31 #include "logging/log.hpp"
32 #include "logging/logTag.hpp"
33 #include "memory/heapInspection.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "memory/universe.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "oops/reflectionAccessorImplKlassHelper.hpp"
38 #include "runtime/atomic.hpp"
39 #include "runtime/os.hpp"
40 #include "utilities/globalDefinitions.hpp"
41 #include "utilities/macros.hpp"
42 #include "utilities/stack.inline.hpp"
43
44 // HeapInspection
45
46 inline KlassInfoEntry::~KlassInfoEntry() {
47 if (_subclasses != NULL) {
48 delete _subclasses;
49 }
50 }
51
// Records cie as a direct subclass of this entry's klass.
// The backing array is created lazily on the C heap (mtServiceability)
// with an initial capacity of 4 and is deleted in ~KlassInfoEntry().
inline void KlassInfoEntry::add_subclass(KlassInfoEntry* cie) {
  if (_subclasses == NULL) {
    _subclasses = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<KlassInfoEntry*>(4, mtServiceability);
  }
  _subclasses->append(cie);
}
58
221 elt->set_count(elt->count() + 1);
222 elt->set_words(elt->words() + obj->size());
223 _size_of_instances_in_words += obj->size();
224 return true;
225 } else {
226 return false;
227 }
228 }
229
230 void KlassInfoTable::iterate(KlassInfoClosure* cic) {
231 assert(_buckets != NULL, "Allocation failure should have been caught");
232 for (int index = 0; index < _num_buckets; index++) {
233 _buckets[index].iterate(cic);
234 }
235 }
236
// Total size (in HeapWords) of all instances recorded so far.
size_t KlassInfoTable::size_of_instances_in_words() const {
  return _size_of_instances_in_words;
}
240
241 // Return false if the entry could not be recorded on account
242 // of running out of space required to create a new entry.
243 bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) {
244 Klass* k = cie->klass();
245 KlassInfoEntry* elt = lookup(k);
246 // elt may be NULL if it's a new klass for which we
247 // could not allocate space for a new entry in the hashtable.
248 if (elt != NULL) {
249 elt->set_count(elt->count() + cie->count());
250 elt->set_words(elt->words() + cie->words());
251 _size_of_instances_in_words += cie->words();
252 return true;
253 }
254 return false;
255 }
256
257 class KlassInfoTableMergeClosure : public KlassInfoClosure {
258 private:
259 KlassInfoTable* _dest;
260 bool _success;
261 public:
262 KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {}
263 void do_cinfo(KlassInfoEntry* cie) {
264 _success &= _dest->merge_entry(cie);
265 }
266 bool success() { return _success; }
267 };
268
// Merges all entries of `table` into this table.
// Returns false if any single entry failed to merge (C-heap exhaustion
// while creating a new hashtable entry).
bool KlassInfoTable::merge(KlassInfoTable* table) {
  KlassInfoTableMergeClosure closure(this);
  table->iterate(&closure);
  return closure.success();
}
275
// qsort-style comparator for GrowableArray::sort(); delegates ordering
// to the entry's own compare().
int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
  return (*e1)->compare(*e1,*e2);
}
279
// Builds a histogram view over the given KlassInfoTable. The element
// array is C-heap allocated (mtServiceability) and freed in the destructor.
KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit) :
  _cit(cit) {
  _elements = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<KlassInfoEntry*>(_histo_initial_size, mtServiceability);
}
284
// Frees the element array only; the entries it points to are not owned
// by the histogram.
KlassInfoHisto::~KlassInfoHisto() {
  delete _elements;
}
288
// Appends one table entry to the histogram's element list.
void KlassInfoHisto::add(KlassInfoEntry* cie) {
  elements()->append(cie);
}
292
// Sorts the collected entries using sort_helper()'s ordering.
void KlassInfoHisto::sort() {
  elements()->sort(KlassInfoHisto::sort_helper);
}
// Prints the histogram header followed by one row per class
// (see print_elements()).
void KlassInfoHisto::print_histo_on(outputStream* st) {
  st->print_cr(" num #instances #bytes class name (module)");
  st->print_cr("-------------------------------------------------------");
  print_elements(st);
}
506
// KlassInfoClosure that funnels every table entry into a KlassInfoHisto;
// used to fill the histogram prior to sorting and printing.
class HistoClosure : public KlassInfoClosure {
 private:
  KlassInfoHisto* _cih;
 public:
  HistoClosure(KlassInfoHisto* cih) : _cih(cih) {}

  void do_cinfo(KlassInfoEntry* cie) {
    _cih->add(cie);
  }
};
517
518 class RecordInstanceClosure : public ObjectClosure {
519 private:
520 KlassInfoTable* _cit;
521 uint _missed_count;
522 BoolObjectClosure* _filter;
523 public:
524 RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
525 _cit(cit), _missed_count(0), _filter(filter) {}
526
527 void do_object(oop obj) {
528 if (should_visit(obj)) {
529 if (!_cit->record_instance(obj)) {
530 _missed_count++;
531 }
532 }
533 }
534
535 uint missed_count() { return _missed_count; }
536
537 private:
538 bool should_visit(oop obj) {
539 return _filter == NULL || _filter->do_object_b(obj);
540 }
541 };
542
// Heap inspection for every worker.
// Each worker fills a private KlassInfoTable from its share of the heap,
// then merges it into the shared table under _mutex. When native OOM
// happens for a KlassInfoTable (allocation or merge failure), _success is
// set to false so the caller can fall back to serial inspection.
void ParHeapInspectTask::work(uint worker_id) {
  uint missed_count = 0;
  bool merge_success = true;
  if (!Atomic::load(&_success)) {
    // other worker has failed on parallel iteration.
    return;
  }

  KlassInfoTable cit(false);
  if (cit.allocation_failed()) {
    // fail to allocate memory, stop parallel mode
    Atomic::store(&_success, false);
    return;
  }
  RecordInstanceClosure ric(&cit, _filter);
  _poi->object_iterate(&ric, worker_id);
  missed_count = ric.missed_count();
  {
    // Serialize merges of per-worker tables into the shared table.
    MutexLocker x(&_mutex);
    merge_success = _shared_cit->merge(&cit);
  }
  if (merge_success) {
    Atomic::add(&_missed_count, missed_count);
  } else {
    Atomic::store(&_success, false);
  }
}
572
// Populates *cit with allocation info for every (filter-accepted) heap
// object, using parallel_thread_num workers when the heap supports
// parallel object iteration. Returns the number of objects that could
// not be recorded. Falls back to a serial walk when parallel iteration
// is unavailable or any worker fails.
// NOTE(review): on a partial parallel failure the shared table may already
// contain some merged per-worker counts before the serial re-walk --
// confirm the serial fallback cannot double-count in that case.
uint HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, uint parallel_thread_num) {

  // Try parallel first.
  if (parallel_thread_num > 1) {
    ResourceMark rm;
    ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(parallel_thread_num);
    if (poi != NULL) {
      ParHeapInspectTask task(poi, cit, filter);
      Universe::heap()->run_task(&task);
      delete poi;
      if (task.success()) {
        return task.missed_count();
      }
    }
  }

  ResourceMark rm;
  // If no parallel iteration available, run serially.
  RecordInstanceClosure ric(cit, filter);
  Universe::heap()->object_iterate(&ric);
  return ric.missed_count();
}
595
// Produces a class histogram of the entire heap on st, optionally using
// parallel_thread_num workers for the heap walk.
// Logs a warning (gc, classhisto) if some instances could not be counted,
// and prints an error line if the table itself could not be allocated.
void HeapInspection::heap_inspection(outputStream* st, uint parallel_thread_num) {
  ResourceMark rm;

  KlassInfoTable cit(false);
  if (!cit.allocation_failed()) {
    // populate table with object allocation info
    uint missed_count = populate_table(&cit, NULL, parallel_thread_num);
    if (missed_count != 0) {
      // Widen to uintx to match UINTX_FORMAT in the log format string.
      log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " UINTX_FORMAT
                               " total instances in data below",
                               (uintx)missed_count);
    }

    // Sort and print klass instance info
    KlassInfoHisto histo(&cit);
    HistoClosure hc(&histo);

    cit.iterate(&hc);

    histo.sort();
    histo.print_histo_on(st);
  } else {
    st->print_cr("ERROR: Ran out of C-heap; histogram not generated");
  }
  st->flush();
}
622
623 class FindInstanceClosure : public ObjectClosure {
624 private:
625 Klass* _klass;
626 GrowableArray<oop>* _result;
|