src/share/vm/memory/heapInspection.cpp

rev 4462 : 8012086: The object count event should only send events for instances occupying more than 0.5% of the heap

--- old/src/share/vm/memory/heapInspection.cpp

  96 }
  97 
  98 void KlassInfoBucket::iterate(KlassInfoClosure* cic) {
  99   KlassInfoEntry* elt = _list;
 100   while (elt != NULL) {
 101     cic->do_cinfo(elt);
 102     elt = elt->next();
 103   }
 104 }
 105 
 106 void KlassInfoBucket::empty() {
 107   KlassInfoEntry* elt = _list;
 108   _list = NULL;
 109   while (elt != NULL) {
 110     KlassInfoEntry* next = elt->next();
 111     delete elt;
 112     elt = next;
 113   }
 114 }
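
KlassInfoBucket::lookup is called by KlassInfoTable::lookup below but sits outside these hunks. A minimal sketch, assuming the same chained-list layout seen in iterate() and empty() above and an is_equal() test on the entry (both the allocation idiom and is_equal are assumptions here, not code from this patch):

    // Hypothetical sketch: walk the chain for a matching klass; on a miss,
    // try to allocate a fresh entry at the head of the chain. Returns NULL
    // only if that C-heap allocation fails, which is exactly the case the
    // "could not allocate space" comments below guard against.
    KlassInfoEntry* KlassInfoBucket::lookup(const klassOop k) {
      for (KlassInfoEntry* elt = _list; elt != NULL; elt = elt->next()) {
        if (elt->is_equal(k)) {
          return elt;
        }
      }
      KlassInfoEntry* elt = new (std::nothrow) KlassInfoEntry(k, _list);
      if (elt != NULL) {
        _list = elt;   // head insertion; _list stays intact on failure
      }
      return elt;
    }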
 115 
 116 KlassInfoTable::KlassInfoTable(HeapWord* ref) {
 117   _size = 0;
 118   _ref = ref;
 119   _buckets = (KlassInfoBucket *) os::malloc(sizeof(KlassInfoBucket) * _num_buckets, mtInternal);
 120   if (_buckets != NULL) {
 121     _size = _num_buckets;
 122     for (int index = 0; index < _size; index++) {
 123       _buckets[index].initialize();
 124     }
 125   }
 126 }
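
When os::malloc fails the constructor leaves _size at 0 and _buckets NULL; heap_inspection below probes this through allocation_failed(), whose body is not in these hunks. A plausible one-line sketch, assuming it just tests the saved malloc result:

    // Hypothetical: true iff the constructor could not get C-heap for the
    // bucket array, i.e. the table is unusable and _size was left at 0.
    bool KlassInfoTable::allocation_failed() {
      return _buckets == NULL;
    }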
 127 
 128 KlassInfoTable::~KlassInfoTable() {
 129   if (_buckets != NULL) {
 130     for (int index = 0; index < _size; index++) {
 131       _buckets[index].empty();
 132     }
 133     FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets, mtInternal);
 134     _size = 0;
 135   }
 136 }
 137 
 138 uint KlassInfoTable::hash(klassOop p) {


 143 KlassInfoEntry* KlassInfoTable::lookup(const klassOop k) {
 144   uint         idx = hash(k) % _size;
 145   assert(_buckets != NULL, "Allocation failure should have been caught");
 146   KlassInfoEntry*  e   = _buckets[idx].lookup(k);
 147   // Lookup may fail if this is a new klass for which we
 148   // could not allocate space for a new entry.
 149   assert(e == NULL || k == e->klass(), "must be equal");
 150   return e;
 151 }
 152 
 153 // Return false if the entry could not be recorded on account
 154 // of running out of space required to create a new entry.
 155 bool KlassInfoTable::record_instance(const oop obj) {
 156   klassOop      k = obj->klass();
 157   KlassInfoEntry* elt = lookup(k);
 158   // elt may be NULL if it's a new klass for which we
 159   // could not allocate space for a new entry in the hashtable.
 160   if (elt != NULL) {
 161     elt->set_count(elt->count() + 1);
 162     elt->set_words(elt->words() + obj->size());
 163     return true;
 164   } else {
 165     return false;
 166   }
 167 }
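
RecordInstanceClosure, handed to object_iterate() in instance_inspection below, is defined outside these hunks. A sketch consistent with how it is used there — the BoolObjectClosure* filter parameter and the missed_count() accessor are both visible below; the field names are assumptions:

    // Hypothetical sketch of the heap-walking closure: records each object
    // that passes the (optional) filter and counts the ones the table could
    // not record because it ran out of C-heap.
    class RecordInstanceClosure : public ObjectClosure {
     private:
      KlassInfoTable*    _cit;
      size_t             _missed_count;
      BoolObjectClosure* _filter;
     public:
      RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
        _cit(cit), _missed_count(0), _filter(filter) {}

      void do_object(oop obj) {
        if (_filter == NULL || _filter->do_object_b(obj)) {
          if (!_cit->record_instance(obj)) {
            _missed_count++;
          }
        }
      }

      size_t missed_count() { return _missed_count; }
    };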
 168 
 169 void KlassInfoTable::iterate(KlassInfoClosure* cic) {
 170   assert(_size == 0 || _buckets != NULL, "Allocation failure should have been caught");
 171   for (int index = 0; index < _size; index++) {
 172     _buckets[index].iterate(cic);
 173   }
 174 }
 175 
 176 int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
 177   return (*e1)->compare(*e1,*e2);
 178 }
 179 
 180 KlassInfoHisto::KlassInfoHisto(const char* title) :
 181   _title(title) {
 182   _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(_histo_initial_size, true);
 183 }
 184 
 185 KlassInfoHisto::~KlassInfoHisto() {
 186   delete _elements;
 187 }
 188 
 189 void KlassInfoHisto::add(KlassInfoEntry* cie) {
 190   elements()->append(cie);
 191 }
 192 
 193 void KlassInfoHisto::sort() {
 194   elements()->sort(KlassInfoHisto::sort_helper);
 195 }
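
sort_helper adapts GrowableArray's comparator signature to KlassInfoEntry::compare, which is not part of these hunks. Given that the histogram printed by heap_inspection lists the biggest classes first, a sketch of a size-descending comparator might look like this (the _instance_words field name is an assumption):

    // Hypothetical: order entries by total instance size, largest first,
    // to produce the descending #bytes column of the histogram below.
    int KlassInfoEntry::compare(KlassInfoEntry* e1, KlassInfoEntry* e2) {
      if (e1->_instance_words > e2->_instance_words) {
        return -1;
      } else if (e1->_instance_words < e2->_instance_words) {
        return 1;
      }
      return 0;
    }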


 265 bool HeapInspection::is_shared_heap() {
 266   CollectedHeap* heap = Universe::heap();
 267   return heap->kind() == CollectedHeap::G1CollectedHeap ||
 268          heap->kind() == CollectedHeap::GenCollectedHeap;
 269 }
 270 
 271 void HeapInspection::prologue() {
 272   if (is_shared_heap()) {
 273     SharedHeap* sh = SharedHeap::heap();
 274     sh->gc_prologue(false /* !full */); // get any necessary locks, etc.
 275   }
 276 }
 277 
 278 void HeapInspection::epilogue() {
 279   if (is_shared_heap()) {
 280     SharedHeap* sh = SharedHeap::heap();
 281     sh->gc_epilogue(false /* !full */); // release all acquired locks, etc.
 282   }
 283 }
 284 
 285 size_t HeapInspection::instance_inspection(KlassInfoTable* cit,
 286                                            KlassInfoClosure* cl,
 287                                            bool need_prologue,
 288                                            BoolObjectClosure* filter) {
 289   ResourceMark rm;
 290 
 291   if (need_prologue) {
 292     prologue();
 293   }
 294 
 295   RecordInstanceClosure ric(cit, filter);
 296   Universe::heap()->object_iterate(&ric);
 297   cit->iterate(cl);
 298 
 299   // need to run epilogue if we run prologue
 300   if (need_prologue) {
 301     epilogue();
 302   }
 303 
 304   return ric.missed_count();
 305 }
 306 
 307 void HeapInspection::heap_inspection(outputStream* st, bool need_prologue) {
 308   ResourceMark rm;
 309 
 310   KlassInfoTable cit(start_of_perm_gen());
 311   if (!cit.allocation_failed()) {
 312     KlassInfoHisto histo("\n"
 313                      " num     #instances         #bytes  class name\n"
 314                      "----------------------------------------------");
 315     HistoClosure hc(&histo);
 316 
 317     size_t missed_count = instance_inspection(&cit, &hc, need_prologue);
 318     if (missed_count != 0) {
 319       st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
 320                    " total instances in data below",
 321                    missed_count);
 322     }
 323     histo.sort();
 324     histo.print_on(st);
 325   } else {
 326     st->print_cr("WARNING: Ran out of C-heap; histogram not generated");
 327   }
 328   st->flush();
 329 }
 330 
 331 class FindInstanceClosure : public ObjectClosure {
 332  private:
 333   klassOop _klass;
 334   GrowableArray<oop>* _result;
 335 
 336  public:
 337   FindInstanceClosure(klassOop k, GrowableArray<oop>* result) : _klass(k), _result(result) {};
 338 
 339   void do_object(oop obj) {
 340     if (obj->is_a(_klass)) {
 341       _result->append(obj);
 342     }

+++ new/src/share/vm/memory/heapInspection.cpp

  96 }
  97 
  98 void KlassInfoBucket::iterate(KlassInfoClosure* cic) {
  99   KlassInfoEntry* elt = _list;
 100   while (elt != NULL) {
 101     cic->do_cinfo(elt);
 102     elt = elt->next();
 103   }
 104 }
 105 
 106 void KlassInfoBucket::empty() {
 107   KlassInfoEntry* elt = _list;
 108   _list = NULL;
 109   while (elt != NULL) {
 110     KlassInfoEntry* next = elt->next();
 111     delete elt;
 112     elt = next;
 113   }
 114 }
 115 
 116 KlassInfoTable::KlassInfoTable(HeapWord* ref) :
 117   _size(0), _ref(ref), _size_of_instances_in_words(0) {
 118   _buckets = (KlassInfoBucket *) os::malloc(sizeof(KlassInfoBucket) * _num_buckets, mtInternal);
 119   if (_buckets != NULL) {
 120     _size = _num_buckets;
 121     for (int index = 0; index < _size; index++) {
 122       _buckets[index].initialize();
 123     }
 124   }
 125 }
 126 
 127 KlassInfoTable::~KlassInfoTable() {
 128   if (_buckets != NULL) {
 129     for (int index = 0; index < _size; index++) {
 130       _buckets[index].empty();
 131     }
 132     FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets, mtInternal);
 133     _size = 0;
 134   }
 135 }
 136 
 137 uint KlassInfoTable::hash(klassOop p) {


 142 KlassInfoEntry* KlassInfoTable::lookup(const klassOop k) {
 143   uint         idx = hash(k) % _size;
 144   assert(_buckets != NULL, "Allocation failure should have been caught");
 145   KlassInfoEntry*  e   = _buckets[idx].lookup(k);
 146   // Lookup may fail if this is a new klass for which we
 147   // could not allocate space for a new entry.
 148   assert(e == NULL || k == e->klass(), "must be equal");
 149   return e;
 150 }
 151 
 152 // Return false if the entry could not be recorded on account
 153 // of running out of space required to create a new entry.
 154 bool KlassInfoTable::record_instance(const oop obj) {
 155   klassOop      k = obj->klass();
 156   KlassInfoEntry* elt = lookup(k);
 157   // elt may be NULL if it's a new klass for which we
 158   // could not allocate space for a new entry in the hashtable.
 159   if (elt != NULL) {
 160     elt->set_count(elt->count() + 1);
 161     elt->set_words(elt->words() + obj->size());
 162     _size_of_instances_in_words += obj->size();
 163     return true;
 164   } else {
 165     return false;
 166   }
 167 }
 168 
 169 void KlassInfoTable::iterate(KlassInfoClosure* cic) {
 170   assert(_size == 0 || _buckets != NULL, "Allocation failure should have been caught");
 171   for (int index = 0; index < _size; index++) {
 172     _buckets[index].iterate(cic);
 173   }
 174 }
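
iterate() fans a KlassInfoClosure across every bucket; HistoClosure in heap_inspection below is one consumer. Purely as illustration (not part of this change), another consumer could tally what the table recorded:

    // Hypothetical consumer of KlassInfoTable::iterate: counts distinct
    // classes and total live instances seen by the table.
    class CountingClosure : public KlassInfoClosure {
     public:
      int  entries;
      long instances;
      CountingClosure() : entries(0), instances(0) {}
      void do_cinfo(KlassInfoEntry* cie) {
        entries++;
        instances += cie->count();
      }
    };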
 175 
 176 size_t KlassInfoTable::size_of_instances_in_words() const {
 177   return _size_of_instances_in_words;
 178 }
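
This accessor exists for the change named in the rev note at the top of the page: the object count event should only be sent for classes whose instances occupy more than 0.5% of the heap. A sketch of how event-sending code might apply that cutoff — ObjectCountFilter and send_event are illustrative names, not code from this patch:

    // Hypothetical filter: given the total recorded by the table, keep only
    // entries whose instances exceed 0.5% of that total.
    class ObjectCountFilter : public KlassInfoClosure {
     private:
      size_t _cutoff_in_words;   // 0.5% of size_of_instances_in_words()
     public:
      ObjectCountFilter(size_t total_size_in_words) :
        _cutoff_in_words((size_t)(total_size_in_words * 0.005)) {}

      void do_cinfo(KlassInfoEntry* entry) {
        if ((size_t)entry->words() > _cutoff_in_words) {
          // send_event(entry);  // event plumbing lives outside this file
        }
      }
    };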
 179 
 180 int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
 181   return (*e1)->compare(*e1,*e2);
 182 }
 183 
 184 KlassInfoHisto::KlassInfoHisto(const char* title) :
 185   _title(title) {
 186   _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(_histo_initial_size, true);
 187 }
 188 
 189 KlassInfoHisto::~KlassInfoHisto() {
 190   delete _elements;
 191 }
 192 
 193 void KlassInfoHisto::add(KlassInfoEntry* cie) {
 194   elements()->append(cie);
 195 }
 196 
 197 void KlassInfoHisto::sort() {
 198   elements()->sort(KlassInfoHisto::sort_helper);
 199 }


 269 bool HeapInspection::is_shared_heap() {
 270   CollectedHeap* heap = Universe::heap();
 271   return heap->kind() == CollectedHeap::G1CollectedHeap ||
 272          heap->kind() == CollectedHeap::GenCollectedHeap;
 273 }
 274 
 275 void HeapInspection::prologue() {
 276   if (is_shared_heap()) {
 277     SharedHeap* sh = SharedHeap::heap();
 278     sh->gc_prologue(false /* !full */); // get any necessary locks, etc.
 279   }
 280 }
 281 
 282 void HeapInspection::epilogue() {
 283   if (is_shared_heap()) {
 284     SharedHeap* sh = SharedHeap::heap();
 285     sh->gc_epilogue(false /* !full */); // release all acquired locks, etc.
 286   }
 287 }
 288 
 289 size_t HeapInspection::populate_table(KlassInfoTable* cit,
 290                                       bool need_prologue,
 291                                       BoolObjectClosure *filter) {
 292   ResourceMark rm;
 293 
 294   if (need_prologue) {
 295     prologue();
 296   }
 297 
 298   RecordInstanceClosure ric(cit, filter);
 299   Universe::heap()->object_iterate(&ric);
 300 
 301   // need to run epilogue if we run prologue
 302   if (need_prologue) {
 303     epilogue();
 304   }
 305 
 306   return ric.missed_count();
 307 }
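
Unlike the old instance_inspection, populate_table no longer takes a KlassInfoClosure and no longer iterates: filling the table and consuming it are now separate steps, so one populated table can feed both the printed histogram (heap_inspection below) and a size-filtered object count event. A sketch of a second consumer enabled by that split, reusing the hypothetical ObjectCountFilter above and assuming these HeapInspection members are reachable from the caller:

    // Hypothetical second consumer: populate once, then walk the table with
    // a filtering closure instead of the histogram printer.
    void report_object_count() {
      KlassInfoTable cit(HeapInspection::start_of_perm_gen());
      if (!cit.allocation_failed()) {
        HeapInspection::populate_table(&cit, false /* need_prologue */);
        ObjectCountFilter filter(cit.size_of_instances_in_words());
        cit.iterate(&filter);
      }
    }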
 308 
 309 void HeapInspection::heap_inspection(outputStream* st, bool need_prologue) {
 310   ResourceMark rm;
 311 
 312   KlassInfoTable cit(start_of_perm_gen());
 313   if (!cit.allocation_failed()) {
 314     size_t missed_count = populate_table(&cit, need_prologue);
 315     if (missed_count != 0) {
 316       st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
 317                    " total instances in data below",
 318                    missed_count);
 319     }
 320 
 321     KlassInfoHisto histo("\n"
 322                      " num     #instances         #bytes  class name\n"
 323                      "----------------------------------------------");
 324     HistoClosure hc(&histo);
 325 
 326     cit.iterate(&hc);
 327 
 328     histo.sort();
 329     histo.print_on(st);
 330   } else {
 331     st->print_cr("WARNING: Ran out of C-heap; histogram not generated");
 332   }
 333   st->flush();
 334 }
 335 
 336 class FindInstanceClosure : public ObjectClosure {
 337  private:
 338   klassOop _klass;
 339   GrowableArray<oop>* _result;
 340 
 341  public:
 342   FindInstanceClosure(klassOop k, GrowableArray<oop>* result) : _klass(k), _result(result) {};
 343 
 344   void do_object(oop obj) {
 345     if (obj->is_a(_klass)) {
 346       _result->append(obj);
 347     }