18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/classLoaderData.inline.hpp"
27 #include "classfile/classLoaderDataGraph.hpp"
28 #include "classfile/moduleEntry.hpp"
29 #include "classfile/systemDictionary.hpp"
30 #include "gc/shared/collectedHeap.hpp"
31 #include "logging/log.hpp"
32 #include "logging/logTag.hpp"
33 #include "memory/heapInspection.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "memory/universe.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "oops/reflectionAccessorImplKlassHelper.hpp"
38 #include "runtime/os.hpp"
39 #include "utilities/globalDefinitions.hpp"
40 #include "utilities/macros.hpp"
41 #include "utilities/stack.inline.hpp"
42
43 // HeapInspection
44
45 inline KlassInfoEntry::~KlassInfoEntry() {
46 if (_subclasses != NULL) {
47 delete _subclasses;
48 }
49 }
50
51 inline void KlassInfoEntry::add_subclass(KlassInfoEntry* cie) {
52 if (_subclasses == NULL) {
53 _subclasses = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<KlassInfoEntry*>(4, mtServiceability);
54 }
55 _subclasses->append(cie);
56 }
57
220 elt->set_count(elt->count() + 1);
221 elt->set_words(elt->words() + obj->size());
222 _size_of_instances_in_words += obj->size();
223 return true;
224 } else {
225 return false;
226 }
227 }
228
229 void KlassInfoTable::iterate(KlassInfoClosure* cic) {
230 assert(_buckets != NULL, "Allocation failure should have been caught");
231 for (int index = 0; index < _num_buckets; index++) {
232 _buckets[index].iterate(cic);
233 }
234 }
235
// Total heap size (in words) recorded across all entries so far.
size_t KlassInfoTable::size_of_instances_in_words() const {
  return _size_of_instances_in_words;
}
239
// Comparator adapter for GrowableArray::sort(); delegates the ordering
// decision to the entry's own compare function.
int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
  return (*e1)->compare(*e1,*e2);
}
243
// Build an (initially empty) histogram over the given table.  The element
// array is C-heap allocated and released in the destructor.
KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit) :
  _cit(cit) {
  _elements = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<KlassInfoEntry*>(_histo_initial_size, mtServiceability);
}
248
KlassInfoHisto::~KlassInfoHisto() {
  // _elements was C-heap allocated in the constructor.
  delete _elements;
}
252
// Append an entry to the (not yet sorted) histogram.
void KlassInfoHisto::add(KlassInfoEntry* cie) {
  elements()->append(cie);
}
256
// Sort the histogram entries; the ordering is defined by
// KlassInfoEntry::compare (via sort_helper).
void KlassInfoHisto::sort() {
  elements()->sort(KlassInfoHisto::sort_helper);
}
// Print the class-histogram header followed by one line per class.
void KlassInfoHisto::print_histo_on(outputStream* st) {
  st->print_cr(" num #instances #bytes class name (module)");
  st->print_cr("-------------------------------------------------------");
  print_elements(st);
}
470
471 class HistoClosure : public KlassInfoClosure {
472 private:
473 KlassInfoHisto* _cih;
474 public:
475 HistoClosure(KlassInfoHisto* cih) : _cih(cih) {}
476
477 void do_cinfo(KlassInfoEntry* cie) {
478 _cih->add(cie);
479 }
480 };
481
482 class RecordInstanceClosure : public ObjectClosure {
483 private:
484 KlassInfoTable* _cit;
485 size_t _missed_count;
486 BoolObjectClosure* _filter;
487 public:
488 RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
489 _cit(cit), _missed_count(0), _filter(filter) {}
490
491 void do_object(oop obj) {
492 if (should_visit(obj)) {
493 if (!_cit->record_instance(obj)) {
494 _missed_count++;
495 }
496 }
497 }
498
499 size_t missed_count() { return _missed_count; }
500
501 private:
502 bool should_visit(oop obj) {
503 return _filter == NULL || _filter->do_object_b(obj);
504 }
505 };
506
507 size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter) {
508 ResourceMark rm;
509
510 RecordInstanceClosure ric(cit, filter);
511 Universe::heap()->object_iterate(&ric);
512 return ric.missed_count();
513 }
514
515 void HeapInspection::heap_inspection(outputStream* st) {
516 ResourceMark rm;
517
518 KlassInfoTable cit(false);
519 if (!cit.allocation_failed()) {
520 // populate table with object allocation info
521 size_t missed_count = populate_table(&cit);
522 if (missed_count != 0) {
523 log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
524 " total instances in data below",
525 missed_count);
526 }
527
528 // Sort and print klass instance info
529 KlassInfoHisto histo(&cit);
530 HistoClosure hc(&histo);
531
532 cit.iterate(&hc);
533
534 histo.sort();
535 histo.print_histo_on(st);
536 } else {
537 st->print_cr("ERROR: Ran out of C-heap; histogram not generated");
538 }
539 st->flush();
540 }
541
542 class FindInstanceClosure : public ObjectClosure {
543 private:
544 Klass* _klass;
545 GrowableArray<oop>* _result;
|
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/classLoaderData.inline.hpp"
27 #include "classfile/classLoaderDataGraph.hpp"
28 #include "classfile/moduleEntry.hpp"
29 #include "classfile/systemDictionary.hpp"
30 #include "gc/shared/collectedHeap.hpp"
31 #include "logging/log.hpp"
32 #include "logging/logTag.hpp"
33 #include "memory/heapInspection.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "memory/universe.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "oops/reflectionAccessorImplKlassHelper.hpp"
38 #include "runtime/atomic.hpp"
39 #include "runtime/os.hpp"
40 #include "utilities/globalDefinitions.hpp"
41 #include "utilities/macros.hpp"
42 #include "utilities/stack.inline.hpp"
43
44 // HeapInspection
45
46 inline KlassInfoEntry::~KlassInfoEntry() {
47 if (_subclasses != NULL) {
48 delete _subclasses;
49 }
50 }
51
52 inline void KlassInfoEntry::add_subclass(KlassInfoEntry* cie) {
53 if (_subclasses == NULL) {
54 _subclasses = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<KlassInfoEntry*>(4, mtServiceability);
55 }
56 _subclasses->append(cie);
57 }
58
221 elt->set_count(elt->count() + 1);
222 elt->set_words(elt->words() + obj->size());
223 _size_of_instances_in_words += obj->size();
224 return true;
225 } else {
226 return false;
227 }
228 }
229
230 void KlassInfoTable::iterate(KlassInfoClosure* cic) {
231 assert(_buckets != NULL, "Allocation failure should have been caught");
232 for (int index = 0; index < _num_buckets; index++) {
233 _buckets[index].iterate(cic);
234 }
235 }
236
// Total heap size (in words) recorded across all entries so far.
size_t KlassInfoTable::size_of_instances_in_words() const {
  return _size_of_instances_in_words;
}
240
241 // Return false if the entry could not be recorded on account
242 // of running out of space required to create a new entry.
243 bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) {
244 Klass* k = cie->klass();
245 KlassInfoEntry* elt = lookup(k);
246 // elt may be NULL if it's a new klass for which we
247 // could not allocate space for a new entry in the hashtable.
248 if (elt != NULL) {
249 elt->set_count(elt->count() + cie->count());
250 elt->set_words(elt->words() + cie->words());
251 _size_of_instances_in_words += cie->words();
252 return true;
253 }
254
255 return false;
256 }
257
258 class KlassInfoTableMergeClosure : public KlassInfoClosure {
259 private:
260 KlassInfoTable* _dest;
261 bool _success;
262 public:
263 KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {}
264 void do_cinfo(KlassInfoEntry* cie) {
265 _success &= _dest->merge_entry(cie);
266 }
267 bool success() { return _success; }
268 };
269
// Merge all entries of the given table into this one.  Returns false if
// any individual entry could not be merged (see merge_entry).
bool KlassInfoTable::merge(KlassInfoTable* table) {
  KlassInfoTableMergeClosure closure(this);
  table->iterate(&closure);
  return closure.success();
}
276
// Comparator adapter for GrowableArray::sort(); delegates the ordering
// decision to the entry's own compare function.
int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
  return (*e1)->compare(*e1,*e2);
}
280
// Build an (initially empty) histogram over the given table.  The element
// array is C-heap allocated and released in the destructor.
KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit) :
  _cit(cit) {
  _elements = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<KlassInfoEntry*>(_histo_initial_size, mtServiceability);
}
285
KlassInfoHisto::~KlassInfoHisto() {
  // _elements was C-heap allocated in the constructor.
  delete _elements;
}
289
// Append an entry to the (not yet sorted) histogram.
void KlassInfoHisto::add(KlassInfoEntry* cie) {
  elements()->append(cie);
}
293
// Sort the histogram entries; the ordering is defined by
// KlassInfoEntry::compare (via sort_helper).
void KlassInfoHisto::sort() {
  elements()->sort(KlassInfoHisto::sort_helper);
}
// Print the class-histogram header followed by one line per class.
void KlassInfoHisto::print_histo_on(outputStream* st) {
  st->print_cr(" num #instances #bytes class name (module)");
  st->print_cr("-------------------------------------------------------");
  print_elements(st);
}
507
508 class HistoClosure : public KlassInfoClosure {
509 private:
510 KlassInfoHisto* _cih;
511 public:
512 HistoClosure(KlassInfoHisto* cih) : _cih(cih) {}
513
514 void do_cinfo(KlassInfoEntry* cie) {
515 _cih->add(cie);
516 }
517 };
518
519 class RecordInstanceClosure : public ObjectClosure {
520 private:
521 KlassInfoTable* _cit;
522 uint _missed_count;
523 BoolObjectClosure* _filter;
524 public:
525 RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
526 _cit(cit), _missed_count(0), _filter(filter) {}
527
528 void do_object(oop obj) {
529 if (should_visit(obj)) {
530 if (!_cit->record_instance(obj)) {
531 _missed_count++;
532 }
533 }
534 }
535
536 uint missed_count() { return _missed_count; }
537
538 private:
539 bool should_visit(oop obj) {
540 return _filter == NULL || _filter->do_object_b(obj);
541 }
542 };
543
// Heap inspection for every worker.
// When a native OOM happens for KlassInfoTable, set _success to false.
void ParHeapInspectTask::work(uint worker_id) {
  uint missed_count = 0;
  bool merge_success = true;
  if (!Atomic::load(&_success)) {
    // other worker has failed on parallel iteration.
    return;
  }

  // Each worker builds a private table and later merges it into the
  // shared table under the mutex.
  KlassInfoTable cit(false);
  if (cit.allocation_failed()) {
    // fail to allocate memory, stop parallel mode
    Atomic::store(&_success, false);
    return;
  }
  RecordInstanceClosure ric(&cit, _filter);
  _poi->object_iterate(&ric, worker_id);
  missed_count = ric.missed_count();
  {
    // Only the merge into the shared table needs the lock.
    MutexLocker x(&_mutex);
    merge_success = _shared_cit->merge(&cit);
  }
  if (merge_success) {
    Atomic::add(&_missed_count, missed_count);
  } else {
    Atomic::store(&_success, false);
    return;
  }
}
574
// Walk the heap, recording every (optionally filtered) object in *cit.
// Attempts a parallel walk with parallel_thread_num workers first and
// falls back to a serial walk when parallel iteration is unavailable or
// failed.  Returns the number of objects that could not be recorded.
size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, uint parallel_thread_num) {

  // Try parallel first.
  if (parallel_thread_num > 1) {
    ResourceMark rm;
    ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(parallel_thread_num);
    if (poi != NULL) {
      ParHeapInspectTask task(poi, cit, filter);
      Universe::heap()->run_task(&task);
      delete poi;
      if (task.success()) {
        return task.missed_count();
      }
      // On failure, fall through to the serial walk below.
    }
  }

  ResourceMark rm;
  // If no parallel iteration available, run serially.
  RecordInstanceClosure ric(cit, filter);
  Universe::heap()->object_iterate(&ric);
  return ric.missed_count();
}
597
598 void HeapInspection::heap_inspection(outputStream* st, uint parallel_thread_num) {
599 ResourceMark rm;
600
601 KlassInfoTable cit(false);
602 if (!cit.allocation_failed()) {
603 // populate table with object allocation info
604 uint missed_count = populate_table(&cit, NULL, parallel_thread_num);
605 if (missed_count != 0) {
606 log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " UINTX_FORMAT
607 " total instances in data below",
608 (uintx)missed_count);
609 }
610
611 // Sort and print klass instance info
612 KlassInfoHisto histo(&cit);
613 HistoClosure hc(&histo);
614
615 cit.iterate(&hc);
616
617 histo.sort();
618 histo.print_histo_on(st);
619 } else {
620 st->print_cr("ERROR: Ran out of C-heap; histogram not generated");
621 }
622 st->flush();
623 }
624
625 class FindInstanceClosure : public ObjectClosure {
626 private:
627 Klass* _klass;
628 GrowableArray<oop>* _result;
|