src/share/vm/memory/heapInspection.cpp

rev 4303 : 8010294: Refactor HeapInspection to make it more reusable

--- old/src/share/vm/memory/heapInspection.cpp


  70     if (_klass == Universe::constantPoolKlassObj())      name = "<constantPoolKlass>";      else
  71     if (_klass == Universe::constantPoolCacheKlassObj()) name = "<constantPoolCacheKlass>"; else
  72     if (_klass == Universe::compiledICHolderKlassObj())  name = "<compiledICHolderKlass>";  else
  73       name = "<no name>";
  74   }
  75   // simplify the formatting (ILP32 vs LP64) - always cast the numbers to 64-bit
  76   st->print_cr(INT64_FORMAT_W(13) "  " UINT64_FORMAT_W(13) "  %s",
  77                (jlong)  _instance_count,
  78                (julong) _instance_words * HeapWordSize,
  79                name);
  80 }
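
The casts to jlong/julong exist so a single format string works on both ILP32 and LP64; printing size_t- or HeapWord-derived values directly would need per-platform format macros. A standalone sketch of the same trick (jlong/julong reproduced here as plain typedefs; values are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    typedef int64_t  jlong;   /* stand-ins for HotSpot's typedefs */
    typedef uint64_t julong;

    int main(void) {
      size_t count = 42, words = 1024, word_size = 8;
      /* Widen to 64-bit first; then one format string fits both models. */
      printf("%13lld  %13llu  %s\n",
             (long long)(jlong)count,
             (unsigned long long)(julong)(words * word_size),
             "java.lang.String");
      return 0;
    }
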
  81 
  82 KlassInfoEntry* KlassInfoBucket::lookup(const klassOop k) {
  83   KlassInfoEntry* elt = _list;
  84   while (elt != NULL) {
  85     if (elt->is_equal(k)) {
  86       return elt;
  87     }
  88     elt = elt->next();
  89   }
  90   elt = new KlassInfoEntry(k, list());
  91   // We may be out of space to allocate the new entry.
  92   if (elt != NULL) {
  93     set_list(elt);
  94   }
  95   return elt;
  96 }
  97 
  98 void KlassInfoBucket::iterate(KlassInfoClosure* cic) {
  99   KlassInfoEntry* elt = _list;
 100   while (elt != NULL) {
 101     cic->do_cinfo(elt);
 102     elt = elt->next();
 103   }
 104 }
 105 
 106 void KlassInfoBucket::empty() {
 107   KlassInfoEntry* elt = _list;
 108   _list = NULL;
 109   while (elt != NULL) {
 110     KlassInfoEntry* next = elt->next();
 111     delete elt;
 112     elt = next;
 113   }
 114 }
 115 
 116 KlassInfoTable::KlassInfoTable(int size, HeapWord* ref) {
 117   _size = 0;
 118   _ref = ref;
 119   _buckets = NEW_C_HEAP_ARRAY(KlassInfoBucket, size, mtInternal);
 120   if (_buckets != NULL) {
 121     _size = size;
 122     for (int index = 0; index < _size; index++) {
 123       _buckets[index].initialize();
 124     }
 125   }
 126 }
 127 
 128 KlassInfoTable::~KlassInfoTable() {
 129   if (_buckets != NULL) {
 130     for (int index = 0; index < _size; index++) {
 131       _buckets[index].empty();
 132     }
 133     FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets, mtInternal);
 134     _size = 0;
 135   }
 136 }
 137 
 138 uint KlassInfoTable::hash(klassOop p) {
 139   assert(Universe::heap()->is_in_permanent((HeapWord*)p), "all klasses in permgen");
 140   return (uint)(((uintptr_t)p - (uintptr_t)_ref) >> 2);
 141 }
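
The hash exploits the fact that all klassOops sit in one contiguous permgen region: subtracting the region start (_ref) and shifting off the low alignment bits yields a compact, well-distributed index. A minimal sketch of the computation; the bucket selection in the trailing comment is an assumption, since the table-level lookup (lines 142-159) is elided from this hunk:

    #include <stdint.h>

    // `base` plays the role of _ref (start of the permgen used region);
    // >> 2 drops low bits that are always zero due to object alignment.
    static unsigned int klass_hash(const void* p, const void* base) {
      return (unsigned int)(((uintptr_t)p - (uintptr_t)base) >> 2);
    }

    // Presumed use when picking a bucket (not shown in this hunk):
    //   int idx = (int)(klass_hash(k, _ref) % (unsigned int)_size);
    //   KlassInfoEntry* elt = _buckets[idx].lookup(k);
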


 160   if (elt != NULL) {
 161     elt->set_count(elt->count() + 1);
 162     elt->set_words(elt->words() + obj->size());
 163     return true;
 164   } else {
 165     return false;
 166   }
 167 }
 168 
 169 void KlassInfoTable::iterate(KlassInfoClosure* cic) {
 170   assert(_size == 0 || _buckets != NULL, "Allocation failure should have been caught");
 171   for (int index = 0; index < _size; index++) {
 172     _buckets[index].iterate(cic);
 173   }
 174 }
 175 
 176 int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
 177   return (*e1)->compare(*e1,*e2);
 178 }
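
GrowableArray::sort wants a C-style comparator over pointers-to-elements, so sort_helper just unwraps the extra indirection and defers to the entry's own compare. The shape, with a stand-in type (ordering by descending size is an assumption about what compare does; the real criterion lives in KlassInfoEntry):

    struct Item {                       // stand-in for KlassInfoEntry
      unsigned long _words;
      static int compare(Item* a, Item* b) {
        // descending by size: bigger entries sort first (assumed)
        if (a->_words > b->_words) return -1;
        if (a->_words < b->_words) return  1;
        return 0;
      }
    };

    static int sort_helper(Item** e1, Item** e2) {
      return Item::compare(*e1, *e2); // unwrap and delegate
    }
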
 179 
 180 KlassInfoHisto::KlassInfoHisto(const char* title, int estimatedCount) :
 181   _title(title) {
 182   _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(estimatedCount,true);
 183 }
 184 
 185 KlassInfoHisto::~KlassInfoHisto() {
 186   delete _elements;
 187 }
 188 
 189 void KlassInfoHisto::add(KlassInfoEntry* cie) {
 190   elements()->append(cie);
 191 }
 192 
 193 void KlassInfoHisto::sort() {
 194   elements()->sort(KlassInfoHisto::sort_helper);
 195 }
 196 
 197 void KlassInfoHisto::print_elements(outputStream* st) const {
 198   // simplify the formatting (ILP32 vs LP64) - store the sum in 64-bit
 199   jlong total = 0;
 200   julong totalw = 0;
 201   for(int i=0; i < elements()->length(); i++) {
 202     st->print("%4d: ", i+1);


 211 void KlassInfoHisto::print_on(outputStream* st) const {
 212   st->print_cr("%s",title());
 213   print_elements(st);
 214 }
 215 
 216 class HistoClosure : public KlassInfoClosure {
 217  private:
 218   KlassInfoHisto* _cih;
 219  public:
 220   HistoClosure(KlassInfoHisto* cih) : _cih(cih) {}
 221 
 222   void do_cinfo(KlassInfoEntry* cie) {
 223     _cih->add(cie);
 224   }
 225 };
 226 
 227 class RecordInstanceClosure : public ObjectClosure {
 228  private:
 229   KlassInfoTable* _cit;
 230   size_t _missed_count;
 231  public:
 232   RecordInstanceClosure(KlassInfoTable* cit) :
 233     _cit(cit), _missed_count(0) {}
 234 
 235   void do_object(oop obj) {
 236     if (!_cit->record_instance(obj)) {
 237       _missed_count++;
 238     }
 239   }
 240 
 241   size_t missed_count() { return _missed_count; }
 242 };
 243 
 244 void HeapInspection::heap_inspection(outputStream* st, bool need_prologue) {
 245   ResourceMark rm;
 246   HeapWord* ref;
 247 
 248   CollectedHeap* heap = Universe::heap();
 249   bool is_shared_heap = false;
 250   switch (heap->kind()) {
 251     case CollectedHeap::G1CollectedHeap:
 252     case CollectedHeap::GenCollectedHeap: {
 253       is_shared_heap = true;
 254       SharedHeap* sh = (SharedHeap*)heap;
 255       if (need_prologue) {
 256         sh->gc_prologue(false /* !full */); // get any necessary locks, etc.
 257       }
 258       ref = sh->perm_gen()->used_region().start();
 259       break;
 260     }
 261 #ifndef SERIALGC
 262     case CollectedHeap::ParallelScavengeHeap: {
 263       ParallelScavengeHeap* psh = (ParallelScavengeHeap*)heap;
 264       ref = psh->perm_gen()->object_space()->used_region().start();
 265       break;
 266     }
 267 #endif // SERIALGC
 268     default:
 269       ShouldNotReachHere(); // Unexpected heap kind for this op
 270   }
 271   // Collect klass instance info
 272   KlassInfoTable cit(KlassInfoTable::cit_size, ref);
 273   if (!cit.allocation_failed()) {
 274     // Iterate over objects in the heap
 275     RecordInstanceClosure ric(&cit);
 276     // If this operation encounters a bad object when using CMS,
 277     // consider using safe_object_iterate() which avoids perm gen
 278     // objects that may contain bad references.
 279     Universe::heap()->object_iterate(&ric);
 280 
 281     // Report if certain classes are not counted because of
 282     // running out of C-heap for the histogram.
 283     size_t missed_count = ric.missed_count();
 284     if (missed_count != 0) {
 285       st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
 286                    " total instances in data below",
 287                    missed_count);
 288     }
 289     // Sort and print klass instance info
 290     KlassInfoHisto histo("\n"
 291                      " num     #instances         #bytes  class name\n"
 292                      "----------------------------------------------",
 293                      KlassInfoHisto::histo_initial_size);
 294     HistoClosure hc(&histo);
 295     cit.iterate(&hc);
 296     histo.sort();
 297     histo.print_on(st);
 298   } else {
 299     st->print_cr("WARNING: Ran out of C-heap; histogram not generated");
 300   }
 301   st->flush();
 302 
 303   if (need_prologue && is_shared_heap) {
 304     SharedHeap* sh = (SharedHeap*)heap;
 305     sh->gc_epilogue(false /* !full */); // release all acquired locks, etc.
 306   }
 307 }
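
Put together, the header string passed to KlassInfoHisto and the per-entry format (INT64_FORMAT_W(13), UINT64_FORMAT_W(13)) yield the familiar histogram layout; the rows below use illustrative values only:

     num     #instances         #bytes  class name
    ----------------------------------------------
       1:         12345        1975200  [C
       2:          4321         414816  java.lang.String
       3:           100          12800  <constantPoolKlass>
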
 308 
 309 class FindInstanceClosure : public ObjectClosure {
 310  private:
 311   klassOop _klass;
 312   GrowableArray<oop>* _result;
 313 
 314  public:
 315   FindInstanceClosure(klassOop k, GrowableArray<oop>* result) : _klass(k), _result(result) {};
 316 
 317   void do_object(oop obj) {
 318     if (obj->is_a(_klass)) {
 319       _result->append(obj);
 320     }
 321   }
 322 };
 323 
 324 void HeapInspection::find_instances_at_safepoint(klassOop k, GrowableArray<oop>* result) {
 325   assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
 326   assert(Heap_lock->is_locked(), "should have the Heap_lock");

+++ new/src/share/vm/memory/heapInspection.cpp

  70     if (_klass == Universe::constantPoolKlassObj())      name = "<constantPoolKlass>";      else
  71     if (_klass == Universe::constantPoolCacheKlassObj()) name = "<constantPoolCacheKlass>"; else
  72     if (_klass == Universe::compiledICHolderKlassObj())  name = "<compiledICHolderKlass>";  else
  73       name = "<no name>";
  74   }
  75   // simplify the formatting (ILP32 vs LP64) - always cast the numbers to 64-bit
  76   st->print_cr(INT64_FORMAT_W(13) "  " UINT64_FORMAT_W(13) "  %s",
  77                (jlong)  _instance_count,
  78                (julong) _instance_words * HeapWordSize,
  79                name);
  80 }
  81 
  82 KlassInfoEntry* KlassInfoBucket::lookup(const klassOop k) {
  83   KlassInfoEntry* elt = _list;
  84   while (elt != NULL) {
  85     if (elt->is_equal(k)) {
  86       return elt;
  87     }
  88     elt = elt->next();
  89   }
  90   elt = new (std::nothrow) KlassInfoEntry(k, list());
  91   // We may be out of space to allocate the new entry.
  92   if (elt != NULL) {
  93     set_list(elt);
  94   }
  95   return elt;
  96 }
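
The switch to placement new with std::nothrow is what gives the NULL check teeth: as I read HotSpot's CHeapObj allocation path, a failed plain new exits the VM rather than returning NULL, so the old form's check (left side, line 92) was likely dead. A generic sketch of the two-overload pattern, with a hypothetical class:

    #include <new>       // std::nothrow_t
    #include <cstdlib>   // malloc, free, abort
    #include <cstddef>   // size_t

    struct CHeapLike {   // hypothetical stand-in for a CHeapObj subclass
      // default: die on exhaustion (stand-in for vm_exit_out_of_memory)
      void* operator new(size_t size) {
        void* p = ::malloc(size);
        if (p == NULL) ::abort();
        return p;
      }
      // nothrow: report exhaustion to the caller instead
      void* operator new(size_t size, const std::nothrow_t&) throw() {
        return ::malloc(size);
      }
      void operator delete(void* p) { ::free(p); }
    };
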
  97 
  98 void KlassInfoBucket::iterate(KlassInfoClosure* cic) {
  99   KlassInfoEntry* elt = _list;
 100   while (elt != NULL) {
 101     cic->do_cinfo(elt);
 102     elt = elt->next();
 103   }
 104 }
 105 
 106 void KlassInfoBucket::empty() {
 107   KlassInfoEntry* elt = _list;
 108   _list = NULL;
 109   while (elt != NULL) {
 110     KlassInfoEntry* next = elt->next();
 111     delete elt;
 112     elt = next;
 113   }
 114 }
 115 
 116 KlassInfoTable::KlassInfoTable(HeapWord* ref) {
 117   _size = 0;
 118   _ref = ref;
 119   _buckets = (KlassInfoBucket *) os::malloc(sizeof(KlassInfoBucket) * _num_buckets, mtInternal);
 120   if (_buckets != NULL) {
 121     _size = _num_buckets;
 122     for (int index = 0; index < _size; index++) {
 123       _buckets[index].initialize();
 124     }
 125   }
 126 }
 127 
 128 KlassInfoTable::~KlassInfoTable() {
 129   if (_buckets != NULL) {
 130     for (int index = 0; index < _size; index++) {
 131       _buckets[index].empty();
 132     }
 133     FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets, mtInternal);
 134     _size = 0;
 135   }
 136 }
 137 
 138 uint KlassInfoTable::hash(klassOop p) {
 139   assert(Universe::heap()->is_in_permanent((HeapWord*)p), "all klasses in permgen");
 140   return (uint)(((uintptr_t)p - (uintptr_t)_ref) >> 2);
 141 }


 160   if (elt != NULL) {
 161     elt->set_count(elt->count() + 1);
 162     elt->set_words(elt->words() + obj->size());
 163     return true;
 164   } else {
 165     return false;
 166   }
 167 }
 168 
 169 void KlassInfoTable::iterate(KlassInfoClosure* cic) {
 170   assert(_size == 0 || _buckets != NULL, "Allocation failure should have been caught");
 171   for (int index = 0; index < _size; index++) {
 172     _buckets[index].iterate(cic);
 173   }
 174 }
 175 
 176 int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
 177   return (*e1)->compare(*e1,*e2);
 178 }
 179 
 180 KlassInfoHisto::KlassInfoHisto(const char* title) :
 181   _title(title) {
 182   _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(_histo_initial_size,true);
 183 }
 184 
 185 KlassInfoHisto::~KlassInfoHisto() {
 186   delete _elements;
 187 }
 188 
 189 void KlassInfoHisto::add(KlassInfoEntry* cie) {
 190   elements()->append(cie);
 191 }
 192 
 193 void KlassInfoHisto::sort() {
 194   elements()->sort(KlassInfoHisto::sort_helper);
 195 }
 196 
 197 void KlassInfoHisto::print_elements(outputStream* st) const {
 198   // simplify the formatting (ILP32 vs LP64) - store the sum in 64-bit
 199   jlong total = 0;
 200   julong totalw = 0;
 201   for(int i=0; i < elements()->length(); i++) {
 202     st->print("%4d: ", i+1);


 211 void KlassInfoHisto::print_on(outputStream* st) const {
 212   st->print_cr("%s",title());
 213   print_elements(st);
 214 }
 215 
 216 class HistoClosure : public KlassInfoClosure {
 217  private:
 218   KlassInfoHisto* _cih;
 219  public:
 220   HistoClosure(KlassInfoHisto* cih) : _cih(cih) {}
 221 
 222   void do_cinfo(KlassInfoEntry* cie) {
 223     _cih->add(cie);
 224   }
 225 };
 226 
 227 class RecordInstanceClosure : public ObjectClosure {
 228  private:
 229   KlassInfoTable* _cit;
 230   size_t _missed_count;
 231   BoolObjectClosure* _filter;
 232  public:
 233   RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
 234     _cit(cit), _missed_count(0), _filter(filter) {}
 235 
 236   void do_object(oop obj) {
 237     if (should_visit(obj)) {
 238       if (!_cit->record_instance(obj)) {
 239         _missed_count++;
 240       }
 241     }
 242   }
 243 
 244   size_t missed_count() { return _missed_count; }
 245  private:
 246   bool should_visit(oop obj) {
 247     return _filter == NULL || _filter->do_object_b(obj);
 248   }
 249 };
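
The new _filter hook is what makes the closure reusable: a NULL filter preserves the old count-everything behavior, while a BoolObjectClosure narrows the census. A hedged sketch of one possible filter; the class name and threshold are hypothetical, only do_object_b is the real interface:

    // Hypothetical: count only objects of at least min_words heap words.
    class LargeObjectFilter : public BoolObjectClosure {
     private:
      size_t _min_words;
     public:
      LargeObjectFilter(size_t min_words) : _min_words(min_words) {}
      bool do_object_b(oop obj) { return (size_t)obj->size() >= _min_words; }
      // Required only if BoolObjectClosure still inherits ObjectClosure
      // in this revision (assumption):
      void do_object(oop obj) { ShouldNotReachHere(); }
    };
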
 250 
 251 HeapWord* HeapInspection::start_of_perm_gen() {
 252   if (is_shared_heap()) {
 253     SharedHeap* sh = (SharedHeap*)Universe::heap();
 254     return sh->perm_gen()->used_region().start();
 255   }
 256 #ifndef SERIALGC
 257   ParallelScavengeHeap* psh = (ParallelScavengeHeap*)Universe::heap();
 258   return psh->perm_gen()->object_space()->used_region().start();
 259 #endif // SERIALGC
 260   ShouldNotReachHere();
 261   return NULL;
 262 }
 263 
 264 bool HeapInspection::is_shared_heap() {
 265   CollectedHeap* heap = Universe::heap();
 266   return heap->kind() == CollectedHeap::G1CollectedHeap ||
 267          heap->kind() == CollectedHeap::GenCollectedHeap;
 268 }
 269 
 270 void HeapInspection::prologue() {
 271   if (is_shared_heap()) {
 272     SharedHeap* sh = (SharedHeap*)Universe::heap();
 273     sh->gc_prologue(false /* !full */); // get any necessary locks, etc.
 274   }
 275 }
 276 
 277 void HeapInspection::epilogue() {
 278   if (is_shared_heap()) {
 279     SharedHeap* sh = (SharedHeap*)Universe::heap();
 280     sh->gc_epilogue(false /* !full */); // release all acquired locks, etc.
 281   }
 282 }
 283 
 284 size_t HeapInspection::instance_inspection(KlassInfoTable* cit,
 285                                            KlassInfoClosure* cl,
 286                                            bool need_prologue,
 287                                            BoolObjectClosure* filter) {
 288   ResourceMark rm;
 289 
 290   if (need_prologue) {
 291     prologue();
 292   }
 293 
 294   RecordInstanceClosure ric(cit, filter);
 295   Universe::heap()->object_iterate(&ric);
 296   cit->iterate(cl);
 297 
 298   bool need_epilogue = need_prologue; // need to run epilogue if we run prologue
 299   if (need_epilogue) {
 300     epilogue();
 301   }
 302 
 303   return ric.missed_count();
 304 }
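
instance_inspection is now the reusable core; heap_inspection below is just one client that pairs it with a histogram. A sketch of another client that merely collects the entries (names other than the HeapInspection/KlassInfoTable API shown above are hypothetical, and the call is assumed to happen at a safepoint, as for heap_inspection):

    class CollectKlassInfo : public KlassInfoClosure {  // hypothetical
     private:
      GrowableArray<KlassInfoEntry*>* _out;
     public:
      CollectKlassInfo(GrowableArray<KlassInfoEntry*>* out) : _out(out) {}
      void do_cinfo(KlassInfoEntry* cie) { _out->append(cie); }
    };

    // Presumed call site:
    //   KlassInfoTable cit(HeapInspection::start_of_perm_gen());
    //   if (!cit.allocation_failed()) {
    //     CollectKlassInfo collect(&entries);
    //     size_t missed = HeapInspection::instance_inspection(
    //         &cit, &collect, true /* need_prologue */, NULL /* no filter */);
    //   }
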
 305 
 306 void HeapInspection::heap_inspection(outputStream* st, bool need_prologue) {
 307   ResourceMark rm;
 308 
 309   KlassInfoTable cit(start_of_perm_gen());
 310   if (!cit.allocation_failed()) {
 311     KlassInfoHisto histo("\n"
 312                      " num     #instances         #bytes  class name\n"
 313                      "----------------------------------------------");
 314     HistoClosure hc(&histo);
 315 
 316     size_t missed_count = instance_inspection(&cit, &hc, need_prologue);
 317     if (missed_count > 0) {
 318       st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
 319                    " total instances in data below",
 320                    missed_count);
 321     }
 322     histo.sort();
 323     histo.print_on(st);
 324   } else {
 325     st->print_cr("WARNING: Ran out of C-heap; histogram not generated");
 326   }
 327   st->flush();
 328 }
 329 
 330 class FindInstanceClosure : public ObjectClosure {
 331  private:
 332   klassOop _klass;
 333   GrowableArray<oop>* _result;
 334 
 335  public:
 336   FindInstanceClosure(klassOop k, GrowableArray<oop>* result) : _klass(k), _result(result) {};
 337 
 338   void do_object(oop obj) {
 339     if (obj->is_a(_klass)) {
 340       _result->append(obj);
 341     }
 342   }
 343 };
 344 
 345 void HeapInspection::find_instances_at_safepoint(klassOop k, GrowableArray<oop>* result) {
 346   assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
 347   assert(Heap_lock->is_locked(), "should have the Heap_lock");