--- old/src/hotspot/share/memory/heapInspection.cpp	2020-07-09 14:50:44.680350223 +0800
+++ new/src/hotspot/share/memory/heapInspection.cpp	2020-07-09 14:50:44.524350718 +0800
@@ -241,7 +241,7 @@
 // Return false if the entry could not be recorded on account
 // of running out of space required to create a new entry.
 bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) {
-  Klass* k = cie->klass();
+  Klass*          k = cie->klass();
   KlassInfoEntry* elt = lookup(k);
   // elt may be NULL if it's a new klass for which we
   // could not allocate space for a new entry in the hashtable.
@@ -250,9 +250,9 @@
     elt->set_words(elt->words() + cie->words());
     _size_of_instances_in_words += cie->words();
     return true;
-  } else {
-    return false;
   }
+
+  return false;
 }
 
 class KlassInfoTableMergeClosure : public KlassInfoClosure {
@@ -264,14 +264,14 @@
   void do_cinfo(KlassInfoEntry* cie) {
     _success &= _dest->merge_entry(cie);
   }
-  bool is_success() { return _success; }
+  bool success() { return _success; }
 };
 
 // merge from table
 bool KlassInfoTable::merge(KlassInfoTable* table) {
   KlassInfoTableMergeClosure closure(this);
   table->iterate(&closure);
-  return closure.is_success();
+  return closure.success();
 }
 
 int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
@@ -519,7 +519,7 @@
 class RecordInstanceClosure : public ObjectClosure {
  private:
   KlassInfoTable* _cit;
-  size_t _missed_count;
+  uint _missed_count;
   BoolObjectClosure* _filter;
  public:
   RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
@@ -533,7 +533,7 @@
     }
   }
 
-  size_t missed_count() { return _missed_count; }
+  uint missed_count() { return _missed_count; }
 
  private:
   bool should_visit(oop obj) {
@@ -544,7 +544,7 @@
 // Heap inspection for every worker.
 // When native OOM happens for KlassInfoTable, set _success to false.
 void ParHeapInspectTask::work(uint worker_id) {
-  size_t missed_count = 0;
+  uint missed_count = 0;
   bool merge_success = true;
   if (!Atomic::load(&_success)) {
     // another worker has failed on parallel iteration.
@@ -552,31 +552,31 @@
   }
   KlassInfoTable cit(false);
-  if (!cit.allocation_failed()) {
-    RecordInstanceClosure ric(&cit, _filter);
-    _poi->object_iterate(&ric, worker_id);
-    missed_count = ric.missed_count();
-  } else {
+  if (cit.allocation_failed()) {
     // failed to allocate memory, stop parallel mode
     Atomic::store(&_success, false);
     return;
   }
+  RecordInstanceClosure ric(&cit, _filter);
+  _poi->object_iterate(&ric, worker_id);
+  missed_count = ric.missed_count();
 
   {
     MutexLocker x(&_mutex);
     merge_success = _shared_cit->merge(&cit);
   }
 
-  if (!merge_success) {
+  if (merge_success) {
+    Atomic::add(&_missed_count, missed_count);
+  } else {
     Atomic::store(&_success, false);
     return;
   }
-  Atomic::add(&_shared_missed_count, missed_count);
 }
 
-size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, size_t parallel_thread_num) {
-  ResourceMark rm;
+size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, uint parallel_thread_num) {
  // Try parallel first.
   if (parallel_thread_num > 1) {
+    ResourceMark rm;
     ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(parallel_thread_num);
     if (poi != NULL) {
       ParHeapInspectTask task(poi, cit, filter);
@@ -588,13 +588,14 @@
     }
   }
 
+  ResourceMark rm;
   // If no parallel iteration available, run serially.
   RecordInstanceClosure ric(cit, filter);
   Universe::heap()->object_iterate(&ric);
   return ric.missed_count();
 }
 
-void HeapInspection::heap_inspection(outputStream* st, size_t parallel_thread_num) {
+void HeapInspection::heap_inspection(outputStream* st, uint parallel_thread_num) {
   ResourceMark rm;
   KlassInfoTable cit(false);
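
For reference, below is a minimal standalone sketch (plain C++11, not HotSpot code) of the synchronization pattern the reworked ParHeapInspectTask::work() follows: each worker fills a private table without any locking, merges it into the shared table under a single mutex, accumulates its missed count atomically on success, and publishes failure through a shared atomic flag so the remaining workers bail out early. Every name in the sketch (SharedState, merge_into_shared, worker) is a hypothetical stand-in for the HotSpot machinery (KlassInfoTable, MutexLocker, Atomic), not code from the patch.

// Standalone illustration of the per-worker-table pattern in this patch.
// Build with: g++ -std=c++11 -pthread sketch.cpp
#include <atomic>
#include <cstddef>
#include <cstdio>
#include <map>
#include <mutex>
#include <thread>
#include <vector>

struct SharedState {
  std::map<int, std::size_t> table;    // plays the role of the shared KlassInfoTable
  std::mutex merge_mutex;              // plays the role of ParHeapInspectTask::_mutex
  std::atomic<bool> success{true};     // plays the role of _success
  std::atomic<std::size_t> missed{0};  // plays the role of _missed_count
};

// Plays the role of KlassInfoTable::merge(). A std::map insert cannot fail
// the way HotSpot's fixed hashtable allocation can, so the failure path is
// only reachable in the real code.
static bool merge_into_shared(SharedState& shared,
                              const std::map<int, std::size_t>& local) {
  std::lock_guard<std::mutex> lock(shared.merge_mutex);
  for (const auto& entry : local) {
    shared.table[entry.first] += entry.second;  // merge_entry() per class
  }
  return true;
}

static void worker(SharedState& shared, const std::vector<int>& slice) {
  if (!shared.success.load()) {
    return;  // another worker has already failed; skip the iteration
  }
  std::map<int, std::size_t> local;  // private table: filled with no locking
  std::size_t missed_count = 0;      // in HotSpot this counts objects the table
                                     // failed to record; it stays 0 in this toy
  for (int klass_id : slice) {
    ++local[klass_id];               // "record one instance of this class"
  }
  if (merge_into_shared(shared, local)) {
    shared.missed.fetch_add(missed_count);  // cf. Atomic::add(&_missed_count, ...)
  } else {
    shared.success.store(false);            // cf. Atomic::store(&_success, false)
  }
}

int main() {
  SharedState shared;
  std::vector<int> slice_a{1, 1, 2};  // each slice stands in for one worker's
  std::vector<int> slice_b{2, 3, 3};  // portion of the heap
  std::thread t1(worker, std::ref(shared), std::cref(slice_a));
  std::thread t2(worker, std::ref(shared), std::cref(slice_b));
  t1.join();
  t2.join();
  for (const auto& entry : shared.table) {
    std::printf("class %d: %zu instances\n", entry.first, entry.second);
  }
  return shared.success.load() ? 0 : 1;
}

Keeping a private table per worker means the heap iteration itself runs lock-free; the mutex only guards the short merge step, which is why a single coarse lock suffices and why the patch can fall back to serial iteration unchanged when parallel iteration is unavailable.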