--- old/src/hotspot/share/memory/heapInspection.cpp	2020-04-28 11:56:01.319664282 +0800
+++ new/src/hotspot/share/memory/heapInspection.cpp	2020-04-28 11:56:01.159665102 +0800
@@ -35,6 +35,7 @@
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/reflectionAccessorImplKlassHelper.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
@@ -237,6 +238,42 @@
   return _size_of_instances_in_words;
 }
 
+// Return false if the entry could not be recorded on account
+// of running out of space required to create a new entry.
+bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) {
+  Klass* k = cie->klass();
+  KlassInfoEntry* elt = lookup(k);
+  // elt may be NULL if it's a new klass for which we
+  // could not allocate space for a new entry in the hashtable.
+  if (elt != NULL) {
+    elt->set_count(elt->count() + cie->count());
+    elt->set_words(elt->words() + cie->words());
+    _size_of_instances_in_words += cie->words();
+    return true;
+  } else {
+    return false;
+  }
+}
+
+class KlassInfoTableMergeClosure : public KlassInfoClosure {
+private:
+  KlassInfoTable* _dest;
+  bool _success;
+public:
+  KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {}
+  void do_cinfo(KlassInfoEntry* cie) {
+    _success &= _dest->merge_entry(cie);
+  }
+  bool is_success() { return _success; }
+};
+
+// Merge all entries of the given table into this table.
+bool KlassInfoTable::merge(KlassInfoTable* table) {
+  KlassInfoTableMergeClosure closure(this);
+  table->iterate(&closure);
+  return closure.is_success();
+}
+
 int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
   return (*e1)->compare(*e1,*e2);
 }
@@ -504,21 +541,67 @@
   }
 };
 
-size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter) {
+// Heap inspection work done by each worker thread.
+// When a native OOM happens for the per-worker KlassInfoTable, set _success to false.
+void ParHeapInspectTask::work(uint worker_id) {
+  size_t missed_count = 0;
+  bool merge_success = true;
+  if (!Atomic::load(&_success)) {
+    // Another worker has already failed during parallel iteration.
+    return;
+  }
+
+  KlassInfoTable cit(false);
+  if (!cit.allocation_failed()) {
+    RecordInstanceClosure ric(&cit, _filter);
+    _poi->object_iterate(&ric, worker_id);
+    missed_count = ric.missed_count();
+  } else {
+    // Failed to allocate memory, stop parallel mode.
+    Atomic::store(&_success, false);
+    return;
+  }
+  {
+    MutexLocker x(&_mutex);
+    merge_success = _shared_cit->merge(&cit);
+  }
+  if (!merge_success) {
+    Atomic::store(&_success, false);
+    return;
+  }
+  Atomic::add(&_shared_missed_count, missed_count);
+}
+
+size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, size_t parallel_thread_num) {
   ResourceMark rm;
 
+  // Try parallel iteration first.
+  if (parallel_thread_num > 1) {
+    ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(parallel_thread_num);
+    if (poi != NULL) {
+      ParHeapInspectTask task(poi, cit, filter);
+      Universe::heap()->run_task(&task);
+      delete poi;
+      if (task.success()) {
+        return task.missed_count();
+      }
+    }
+  }
+
+  // If no parallel iteration is available, run serially.
   RecordInstanceClosure ric(cit, filter);
   Universe::heap()->object_iterate(&ric);
   return ric.missed_count();
 }
 
-void HeapInspection::heap_inspection(outputStream* st) {
+void HeapInspection::heap_inspection(outputStream* st, size_t parallel_thread_num) {
   ResourceMark rm;
   KlassInfoTable cit(false);
   if (!cit.allocation_failed()) {
+    size_t missed_count = 0;
     // populate table with object allocation info
-    size_t missed_count = populate_table(&cit);
+    missed_count = populate_table(&cit, NULL, parallel_thread_num);
     if (missed_count != 0) {
       log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT " total instances in data below",