--- old/src/hotspot/share/memory/heapInspection.cpp	2020-04-25 20:53:37.571321886 +0800
+++ new/src/hotspot/share/memory/heapInspection.cpp	2020-04-25 20:53:37.391322436 +0800
@@ -237,6 +237,42 @@
   return _size_of_instances_in_words;
 }
 
+// Return false if the entry could not be recorded on account
+// of running out of space required to create a new entry.
+bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) {
+  Klass* k = cie->klass();
+  KlassInfoEntry* elt = lookup(k);
+  // elt may be NULL if it's a new klass for which we
+  // could not allocate space for a new entry in the hashtable.
+  if (elt != NULL) {
+    elt->set_count(elt->count() + cie->count());
+    elt->set_words(elt->words() + cie->words());
+    _size_of_instances_in_words += cie->words();
+    return true;
+  } else {
+    return false;
+  }
+}
+
+class KlassInfoTableMergeClosure : public KlassInfoClosure {
+private:
+  KlassInfoTable* _dest;
+  bool _success;
+public:
+  KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {}
+  void do_cinfo(KlassInfoEntry* cie) {
+    _success &= _dest->merge_entry(cie);
+  }
+  bool is_success() { return _success; }
+};
+
+// merge from table
+bool KlassInfoTable::merge(KlassInfoTable* table) {
+  KlassInfoTableMergeClosure closure(this);
+  table->iterate(&closure);
+  return closure.is_success();
+}
+
 int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
   return (*e1)->compare(*e1,*e2);
 }
@@ -504,21 +540,72 @@
   }
 };
 
-size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter) {
+// Heap inspection for every worker.
+// When a native OOM happens for the KlassInfoTable, set _success to false.
+// TODO(?) it seems atomically setting/getting _success is unnecessary because it
+// is set to true in the constructor and can only be set to false here.
+// The only risk seems to be that a worker may continue inspecting the heap while
+// another worker has set _success to false, but this is OK because the current
+// worker does not change _success if everything is OK for its inspection work,
+// and _success will be false in the end, so serial heap inspection can be tried.
+void ParHeapInspectTask::work(uint worker_id) {
+  size_t missed_count = 0;
+  if (!_success) {
+    // other worker has failed on parallel iteration.
+    return;
+  }
+
+  KlassInfoTable cit(false);
+  if (!cit.allocation_failed()) {
+    RecordInstanceClosure ric(&cit, _filter);
+    _poi->object_iterate(&ric, worker_id);
+    missed_count = ric.missed_count();
+  } else {
+    // fail to allocate memory, stop parallel mode.
+    _success = false;
+    return;
+  }
+  {
+    MutexLocker x(&_mutex);
+
+    if (!_shared_cit->merge(&cit)) {
+      _success = false;
+      return;
+    }
+    _shared_missed_count += missed_count;
+  }
+}
+
+size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, size_t parallel_thread_num) {
   ResourceMark rm;
+  // Try parallel first.
+  if (parallel_thread_num > 1) {
+    ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(parallel_thread_num);
+    if (poi != NULL) {
+      ParHeapInspectTask task(poi, cit, filter);
+      Universe::heap()->run_task(&task);
+      delete poi;
+      if (task.success()) {
+        return task.missed_count();
+      }
+    }
+  }
+
+  // If no parallel iteration is available, run serially.
RecordInstanceClosure ric(cit, filter); Universe::heap()->object_iterate(&ric); return ric.missed_count(); } -void HeapInspection::heap_inspection(outputStream* st) { +void HeapInspection::heap_inspection(outputStream* st, size_t parallel_thread_num) { ResourceMark rm; KlassInfoTable cit(false); if (!cit.allocation_failed()) { + size_t missed_count = 0;; // populate table with object allocation info - size_t missed_count = populate_table(&cit); + missed_count = populate_table(&cit, NULL, parallel_thread_num); if (missed_count != 0) { log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT " total instances in data below",