--- old/src/hotspot/share/memory/heapInspection.cpp	2020-02-18 14:16:45.059474888 +0800
+++ new/src/hotspot/share/memory/heapInspection.cpp	2020-02-18 14:16:44.909477075 +0800
@@ -236,6 +236,42 @@
   return _size_of_instances_in_words;
 }
 
+// Return false if the entry could not be recorded on account
+// of running out of space required to create a new entry.
+bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) {
+  Klass* k = cie->klass();
+  KlassInfoEntry* elt = lookup(k);
+  // elt may be NULL if it's a new klass for which we
+  // could not allocate space for a new entry in the hashtable.
+  if (elt != NULL) {
+    elt->set_count(elt->count() + cie->count());
+    elt->set_words(elt->words() + cie->words());
+    _size_of_instances_in_words += cie->words();
+    return true;
+  } else {
+    return false;
+  }
+}
+
+class KlassInfoTableMergeClosure : public KlassInfoClosure {
+private:
+  KlassInfoTable* _dest;
+  bool _success;
+public:
+  KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {}
+  void do_cinfo(KlassInfoEntry* cie) {
+    _success &= _dest->merge_entry(cie);
+  }
+  bool is_success() { return _success; }
+};
+
+// Merge all entries from 'table' into this table; false if any entry could not be recorded.
+bool KlassInfoTable::merge(KlassInfoTable* table) {
+  KlassInfoTableMergeClosure closure(this);
+  table->iterate(&closure);
+  return closure.is_success();
+}
+
 int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
   return (*e1)->compare(*e1,*e2);
 }
@@ -503,21 +539,68 @@
   }
 };
 
-size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter) {
+void ParHeapInspectTask::work(uint worker_id) {
+  size_t missed_count = 0;
+  if (!_success) {
+    // Another worker has already failed; bail out to stop the parallel iteration.
+    return;
+  }
+
+  KlassInfoTable cit(false);
+  if (!cit.allocation_failed()) {
+    RecordInstanceClosure ric(&cit, _filter);
+    // Iterate this worker's share of the heap via the heap-specific parallel iterator.
+    do_object_iterate_parallel(&ric, worker_id);
+    missed_count = ric.missed_count();
+  } else {
+    // Failed to allocate memory; stop parallel mode.
+    _success = false;
+    return;
+  }
+  {
+    MutexLocker x(&_mutex);
+
+    if (!_shared_cit->merge(&cit)) {
+      _success = false;
+      return;
+    }
+    *_shared_missed_count += missed_count;
+  }
+}
+
+size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, size_t parallel_thread_num) {
   ResourceMark rm;
+  size_t missed_count = 0;
+  bool do_serial = (parallel_thread_num == 1);
+  // Try parallel first.
+  if (parallel_thread_num > 1) {
+    bool succ = Universe::heap()->run_par_heap_inspect_task(cit, filter, &missed_count, parallel_thread_num);
+    if (succ) {
+      do_serial = false;
+    } else {
+      // Heap does not support parallel iteration, or the parallel task failed from native memory OOM;
+      // fall back to the serial object_iterate below.
+      do_serial = true;
+      missed_count = 0;
+    }
+  }
 
-  RecordInstanceClosure ric(cit, filter);
-  Universe::heap()->object_iterate(&ric);
-  return ric.missed_count();
+  if (do_serial) {
+    RecordInstanceClosure ric(cit, filter);
+    Universe::heap()->object_iterate(&ric);
+    missed_count = ric.missed_count();
+  }
+  return missed_count;
 }
 
-void HeapInspection::heap_inspection(outputStream* st) {
+void HeapInspection::heap_inspection(outputStream* st, size_t parallel_thread_num) {
   ResourceMark rm;
   KlassInfoTable cit(false);
   if (!cit.allocation_failed()) {
+    size_t missed_count = 0;
     // populate table with object allocation info
-    size_t missed_count = populate_table(&cit);
+    missed_count = populate_table(&cit, NULL, parallel_thread_num);
     if (missed_count != 0) {
       log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT " total instances in data below",