src/hotspot/share/memory/heapInspection.cpp
@@ -33,10 +33,11 @@
#include "memory/heapInspection.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "oops/reflectionAccessorImplKlassHelper.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
@@ -235,10 +236,46 @@
size_t KlassInfoTable::size_of_instances_in_words() const {
  return _size_of_instances_in_words;
}
+// Return false if the entry could not be recorded because
+// space could not be allocated for a new entry.
+bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) {
+  Klass* k = cie->klass();
+  KlassInfoEntry* elt = lookup(k);
+  // elt may be NULL if it's a new klass for which we
+  // could not allocate space for a new entry in the hashtable.
+  if (elt != NULL) {
+    elt->set_count(elt->count() + cie->count());
+    elt->set_words(elt->words() + cie->words());
+    _size_of_instances_in_words += cie->words();
+    return true;
+  }
+
+  return false;
+}
+
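+// Closure used by KlassInfoTable::merge(): adds each visited entry to the
+// destination table and tracks whether every entry could be merged.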
+class KlassInfoTableMergeClosure : public KlassInfoClosure {
+ private:
+  KlassInfoTable* _dest;
+  bool _success;
+ public:
+  KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {}
+  void do_cinfo(KlassInfoEntry* cie) {
+    _success &= _dest->merge_entry(cie);
+  }
+  bool success() { return _success; }
+};
+
+// Merge all entries from the given table into this table.
+bool KlassInfoTable::merge(KlassInfoTable* table) {
+  KlassInfoTableMergeClosure closure(this);
+  table->iterate(&closure);
+  return closure.success();
+}
+
int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
  return (*e1)->compare(*e1,*e2);
}
KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit) :
@@ -480,11 +517,11 @@
};
class RecordInstanceClosure : public ObjectClosure {
 private:
  KlassInfoTable* _cit;
-  size_t _missed_count;
+  uint _missed_count;
  BoolObjectClosure* _filter;
 public:
  RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
    _cit(cit), _missed_count(0), _filter(filter) {}
@@ -494,37 +531,83 @@
        _missed_count++;
      }
    }
  }
-  size_t missed_count() { return _missed_count; }
+  uint missed_count() { return _missed_count; }
 private:
  bool should_visit(oop obj) {
    return _filter == NULL || _filter->do_object_b(obj);
  }
};
-size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter) {
+// Heap inspection task run by each worker thread.
+// If a native OOM happens while allocating a KlassInfoTable, _success is set to false.
+void ParHeapInspectTask::work(uint worker_id) {
+  uint missed_count = 0;
+  bool merge_success = true;
+  if (!Atomic::load(&_success)) {
+    // Another worker has already failed the parallel iteration; bail out.
+    return;
+  }
+
+  KlassInfoTable cit(false);
+  if (cit.allocation_failed()) {
+    // Failed to allocate the worker-local table; stop the parallel inspection.
+    Atomic::store(&_success, false);
+    return;
+  }
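+  // Walk this worker's portion of the heap, recording instances in the worker-local table.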
+  RecordInstanceClosure ric(&cit, _filter);
+  _poi->object_iterate(&ric, worker_id);
+  missed_count = ric.missed_count();
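+  // Merge the worker-local table into the shared table under a mutex; the
+  // merge fails if the shared table cannot allocate an entry for a new klass.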
+  {
+    MutexLocker x(&_mutex);
+    merge_success = _shared_cit->merge(&cit);
+  }
+  if (merge_success) {
+    Atomic::add(&_missed_count, missed_count);
+  } else {
+    Atomic::store(&_success, false);
+  }
+}
+
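+// Populates the shared KlassInfoTable by walking the heap.
+// Tries a parallel walk first and falls back to a serial walk if parallel
+// iteration is unavailable or fails; returns the number of instances that
+// could not be recorded.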
+size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, uint parallel_thread_num) {
+
+  // Try parallel first.
+  if (parallel_thread_num > 1) {
    ResourceMark rm;
+    ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(parallel_thread_num);
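+    // poi may be NULL if this heap provides no parallel object iterator.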
+    if (poi != NULL) {
+      ParHeapInspectTask task(poi, cit, filter);
+      Universe::heap()->run_task(&task);
+      delete poi;
+      if (task.success()) {
+        return task.missed_count();
+      }
+    }
+  }
+
+  ResourceMark rm;
+  // If parallel iteration is unavailable or has failed, run serially.
  RecordInstanceClosure ric(cit, filter);
  Universe::heap()->object_iterate(&ric);
  return ric.missed_count();
}
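+// Prints a class histogram to the given stream; the heap walk may use up to
+// parallel_thread_num worker threads.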
-void HeapInspection::heap_inspection(outputStream* st) {
+void HeapInspection::heap_inspection(outputStream* st, uint parallel_thread_num) {
  ResourceMark rm;
  KlassInfoTable cit(false);
  if (!cit.allocation_failed()) {
    // populate table with object allocation info
-    size_t missed_count = populate_table(&cit);
+    uint missed_count = populate_table(&cit, NULL, parallel_thread_num);
    if (missed_count != 0) {
-      log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
+      log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " UINTX_FORMAT
                               " total instances in data below",
-                              missed_count);
+                              (uintx)missed_count);
    }
    // Sort and print klass instance info
    KlassInfoHisto histo(&cit);
    HistoClosure hc(&histo);