18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/classLoaderData.inline.hpp"
27 #include "classfile/classLoaderDataGraph.hpp"
28 #include "classfile/moduleEntry.hpp"
29 #include "classfile/systemDictionary.hpp"
30 #include "gc/shared/collectedHeap.hpp"
31 #include "logging/log.hpp"
32 #include "logging/logTag.hpp"
33 #include "memory/heapInspection.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "memory/universe.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "oops/reflectionAccessorImplKlassHelper.hpp"
38 #include "runtime/os.hpp"
39 #include "utilities/globalDefinitions.hpp"
40 #include "utilities/macros.hpp"
41 #include "utilities/stack.inline.hpp"
42
43 // HeapInspection
44
45 inline KlassInfoEntry::~KlassInfoEntry() {
46 if (_subclasses != NULL) {
47 delete _subclasses;
48 }
49 }
50
// Registers 'cie' as a direct subclass of this entry's klass.
// The subclass list is allocated lazily on the C-heap the first time
// a subclass is recorded.
inline void KlassInfoEntry::add_subclass(KlassInfoEntry* cie) {
  if (_subclasses == NULL) {
    _subclasses = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(4, true);
  }
  _subclasses->append(cie);
}
57
220 elt->set_count(elt->count() + 1);
221 elt->set_words(elt->words() + obj->size());
222 _size_of_instances_in_words += obj->size();
223 return true;
224 } else {
225 return false;
226 }
227 }
228
// Applies 'cic' to every KlassInfoEntry in the table by walking all
// hash buckets in order.
void KlassInfoTable::iterate(KlassInfoClosure* cic) {
  assert(_buckets != NULL, "Allocation failure should have been caught");
  for (int index = 0; index < _num_buckets; index++) {
    _buckets[index].iterate(cic);
  }
}
235
// Total size (in HeapWords) of all instances recorded so far.
size_t KlassInfoTable::size_of_instances_in_words() const {
  return _size_of_instances_in_words;
}
239
// Comparator for GrowableArray::sort; delegates to the entry's own
// compare function.
int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
  return (*e1)->compare(*e1,*e2);
}
243
KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit) :
  _cit(cit) {
  // The element array lives on the C-heap so it survives resource scopes.
  _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(_histo_initial_size, true);
}
248
KlassInfoHisto::~KlassInfoHisto() {
  // Frees only the pointer array; the KlassInfoEntry objects themselves
  // are not owned by the histogram.
  delete _elements;
}
252
// Appends one table entry to the histogram's element list.
void KlassInfoHisto::add(KlassInfoEntry* cie) {
  elements()->append(cie);
}
256
// Orders the histogram's elements using the per-entry comparator.
void KlassInfoHisto::sort() {
  elements()->sort(KlassInfoHisto::sort_helper);
}
487 public:
  // 'filter' may be NULL, in which case every object is visited.
  RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
    _cit(cit), _missed_count(0), _filter(filter) {}
490
  // Records 'obj' in the table; counts it as missed when the table could
  // not allocate a new entry for the object's klass.
  void do_object(oop obj) {
    if (should_visit(obj)) {
      if (!_cit->record_instance(obj)) {
        _missed_count++;
      }
    }
  }
498
  // Number of visited objects that could not be recorded in the table.
  size_t missed_count() { return _missed_count; }
500
501 private:
  // An absent (NULL) filter accepts every object.
  bool should_visit(oop obj) {
    return _filter == NULL || _filter->do_object_b(obj);
  }
505 };
506
507 size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter) {
508 ResourceMark rm;
509
510 RecordInstanceClosure ric(cit, filter);
511 Universe::heap()->object_iterate(&ric);
512 return ric.missed_count();
513 }
514
515 void HeapInspection::heap_inspection(outputStream* st) {
516 ResourceMark rm;
517
518 KlassInfoTable cit(false);
519 if (!cit.allocation_failed()) {
520 // populate table with object allocation info
521 size_t missed_count = populate_table(&cit);
522 if (missed_count != 0) {
523 log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
524 " total instances in data below",
525 missed_count);
526 }
527
528 // Sort and print klass instance info
529 KlassInfoHisto histo(&cit);
530 HistoClosure hc(&histo);
531
532 cit.iterate(&hc);
533
534 histo.sort();
535 histo.print_histo_on(st);
536 } else {
537 st->print_cr("ERROR: Ran out of C-heap; histogram not generated");
538 }
539 st->flush();
540 }
541
|
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/classLoaderData.inline.hpp"
27 #include "classfile/classLoaderDataGraph.hpp"
28 #include "classfile/moduleEntry.hpp"
29 #include "classfile/systemDictionary.hpp"
30 #include "gc/shared/collectedHeap.hpp"
31 #include "logging/log.hpp"
32 #include "logging/logTag.hpp"
33 #include "memory/heapInspection.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "memory/universe.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "oops/reflectionAccessorImplKlassHelper.hpp"
38 #include "runtime/atomic.hpp"
39 #include "runtime/os.hpp"
40 #include "utilities/globalDefinitions.hpp"
41 #include "utilities/macros.hpp"
42 #include "utilities/stack.inline.hpp"
43
44 // HeapInspection
45
// Frees the lazily-allocated subclass list, if one was ever created.
inline KlassInfoEntry::~KlassInfoEntry() {
  if (_subclasses != NULL) {
    delete _subclasses;
  }
}
51
// Registers 'cie' as a direct subclass of this entry's klass.
// The subclass list is allocated lazily on the C-heap the first time
// a subclass is recorded.
inline void KlassInfoEntry::add_subclass(KlassInfoEntry* cie) {
  if (_subclasses == NULL) {
    _subclasses = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(4, true);
  }
  _subclasses->append(cie);
}
58
221 elt->set_count(elt->count() + 1);
222 elt->set_words(elt->words() + obj->size());
223 _size_of_instances_in_words += obj->size();
224 return true;
225 } else {
226 return false;
227 }
228 }
229
// Applies 'cic' to every KlassInfoEntry in the table by walking all
// hash buckets in order.
void KlassInfoTable::iterate(KlassInfoClosure* cic) {
  assert(_buckets != NULL, "Allocation failure should have been caught");
  for (int index = 0; index < _num_buckets; index++) {
    _buckets[index].iterate(cic);
  }
}
236
// Total size (in HeapWords) of all instances recorded so far.
size_t KlassInfoTable::size_of_instances_in_words() const {
  return _size_of_instances_in_words;
}
240
241 // Return false if the entry could not be recorded on account
242 // of running out of space required to create a new entry.
243 bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) {
244 Klass* k = cie->klass();
245 KlassInfoEntry* elt = lookup(k);
246 // elt may be NULL if it's a new klass for which we
247 // could not allocate space for a new entry in the hashtable.
248 if (elt != NULL) {
249 elt->set_count(elt->count() + cie->count());
250 elt->set_words(elt->words() + cie->words());
251 _size_of_instances_in_words += cie->words();
252 return true;
253 } else {
254 return false;
255 }
256 }
257
258 class KlassInfoTableMergeClosure : public KlassInfoClosure {
259 private:
260 KlassInfoTable* _dest;
261 bool _success;
262 public:
263 KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {}
264 void do_cinfo(KlassInfoEntry* cie) {
265 _success &= _dest->merge_entry(cie);
266 }
267 bool is_success() { return _success; }
268 };
269
// Merges all entries of 'table' into this table. Returns false if any
// single entry could not be merged (C-heap exhaustion in merge_entry).
bool KlassInfoTable::merge(KlassInfoTable* table) {
  KlassInfoTableMergeClosure closure(this);
  table->iterate(&closure);
  return closure.is_success();
}
276
// Comparator for GrowableArray::sort; delegates to the entry's own
// compare function.
int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
  return (*e1)->compare(*e1,*e2);
}
280
KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit) :
  _cit(cit) {
  // The element array lives on the C-heap so it survives resource scopes.
  _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(_histo_initial_size, true);
}
285
KlassInfoHisto::~KlassInfoHisto() {
  // Frees only the pointer array; the KlassInfoEntry objects themselves
  // are not owned by the histogram.
  delete _elements;
}
289
// Appends one table entry to the histogram's element list.
void KlassInfoHisto::add(KlassInfoEntry* cie) {
  elements()->append(cie);
}
293
// Orders the histogram's elements using the per-entry comparator.
void KlassInfoHisto::sort() {
  elements()->sort(KlassInfoHisto::sort_helper);
}
524 public:
  // 'filter' may be NULL, in which case every object is visited.
  RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
    _cit(cit), _missed_count(0), _filter(filter) {}
527
  // Records 'obj' in the table; counts it as missed when the table could
  // not allocate a new entry for the object's klass.
  void do_object(oop obj) {
    if (should_visit(obj)) {
      if (!_cit->record_instance(obj)) {
        _missed_count++;
      }
    }
  }
535
  // Number of visited objects that could not be recorded in the table.
  size_t missed_count() { return _missed_count; }
537
538 private:
  // An absent (NULL) filter accepts every object.
  bool should_visit(oop obj) {
    return _filter == NULL || _filter->do_object_b(obj);
  }
542 };
543
// Heap inspection work performed by each worker thread.
// If a native OOM happens while building a worker-local KlassInfoTable,
// _success is set to false so the caller can fall back to a serial
// heap inspection.
// NOTE(review): atomicity on _success may be stricter than required:
// it starts out true and is only ever stored false, so a racing worker
// at worst performs some redundant inspection before observing the
// failure. Atomic::load/store keeps the cross-thread intent explicit.
void ParHeapInspectTask::work(uint worker_id) {
  size_t missed_count = 0;
  if (!Atomic::load(&_success)) {
    // Another worker has already failed on the parallel iteration.
    return;
  }

  // Each worker fills a private table, then merges it into the shared one.
  KlassInfoTable cit(false);
  if (!cit.allocation_failed()) {
    RecordInstanceClosure ric(&cit, _filter);
    _poi->object_iterate(&ric, worker_id);
    missed_count = ric.missed_count();
  } else {
    // Failed to allocate the worker-local table; abandon parallel mode.
    Atomic::store(&_success, false);
    return;
  }
  {
    // Serialize the merge and missed-count update across workers.
    MutexLocker x(&_mutex);

    if (!_shared_cit->merge(&cit)) {
      Atomic::store(&_success, false);
      return;
    }
    _shared_missed_count += missed_count;
  }
}
580
581 size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, size_t parallel_thread_num) {
582 ResourceMark rm;
583
584 // Try parallel first.
585 if (parallel_thread_num > 1) {
586 ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(parallel_thread_num);
587 if (poi != NULL) {
588 ParHeapInspectTask task(poi, cit, filter);
589 Universe::heap()->run_task(&task);
590 delete poi;
591 if (task.success()) {
592 return task.missed_count();
593 }
594 }
595 }
596
597 // If no parallel iteration available, run serially.
598 RecordInstanceClosure ric(cit, filter);
599 Universe::heap()->object_iterate(&ric);
600 return ric.missed_count();
601 }
602
603 void HeapInspection::heap_inspection(outputStream* st, size_t parallel_thread_num) {
604 ResourceMark rm;
605
606 KlassInfoTable cit(false);
607 if (!cit.allocation_failed()) {
608 size_t missed_count = 0;;
609 // populate table with object allocation info
610 missed_count = populate_table(&cit, NULL, parallel_thread_num);
611 if (missed_count != 0) {
612 log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
613 " total instances in data below",
614 missed_count);
615 }
616
617 // Sort and print klass instance info
618 KlassInfoHisto histo(&cit);
619 HistoClosure hc(&histo);
620
621 cit.iterate(&hc);
622
623 histo.sort();
624 histo.print_histo_on(st);
625 } else {
626 st->print_cr("ERROR: Ran out of C-heap; histogram not generated");
627 }
628 st->flush();
629 }
630
|