src/hotspot/share/memory/heapInspection.cpp

--- old version ---

 233     _buckets[index].iterate(cic);
 234   }
 235 }
 236 
 237 size_t KlassInfoTable::size_of_instances_in_words() const {
 238   return _size_of_instances_in_words;
 239 }
 240 
 241 // Return false if the entry could not be recorded on account
 242 // of running out of space required to create a new entry.
 243 bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) {
 244   Klass*        k = cie->klass();
 245   KlassInfoEntry* elt = lookup(k);
 246   // elt may be NULL if it's a new klass for which we
 247   // could not allocate space for a new entry in the hashtable.
 248   if (elt != NULL) {
 249     elt->set_count(elt->count() + cie->count());
 250     elt->set_words(elt->words() + cie->words());
 251     _size_of_instances_in_words += cie->words();
 252     return true;
 253   } else {
 254     return false;
 255   }


 256 }
 257 
 258 class KlassInfoTableMergeClosure : public KlassInfoClosure {
 259 private:
 260   KlassInfoTable* _dest;
 261   bool _success;
 262 public:
 263   KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {}
 264   void do_cinfo(KlassInfoEntry* cie) {
 265     _success &= _dest->merge_entry(cie);
 266   }
 267   bool is_success() { return _success; }
 268 };
 269 
 270 // Merge all entries from the given table into this one.
 271 bool KlassInfoTable::merge(KlassInfoTable* table) {
 272   KlassInfoTableMergeClosure closure(this);
 273   table->iterate(&closure);
 274   return closure.is_success();
 275 }
 276 
 277 int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
 278   return (*e1)->compare(*e1,*e2);
 279 }
 280 
 281 KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit) :
 282   _cit(cit) {
 283   _elements = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<KlassInfoEntry*>(_histo_initial_size, mtServiceability);
 284 }
 285 
 286 KlassInfoHisto::~KlassInfoHisto() {
 287   delete _elements;
 288 }
 289 
 290 void KlassInfoHisto::add(KlassInfoEntry* cie) {
 291   elements()->append(cie);
 292 }
 293 
 294 void KlassInfoHisto::sort() {


 502 void KlassInfoHisto::print_histo_on(outputStream* st) {
 503   st->print_cr(" num     #instances         #bytes  class name (module)");
 504   st->print_cr("-------------------------------------------------------");
 505   print_elements(st);
 506 }
 507 
 508 class HistoClosure : public KlassInfoClosure {
 509  private:
 510   KlassInfoHisto* _cih;
 511  public:
 512   HistoClosure(KlassInfoHisto* cih) : _cih(cih) {}
 513 
 514   void do_cinfo(KlassInfoEntry* cie) {
 515     _cih->add(cie);
 516   }
 517 };
 518 
 519 class RecordInstanceClosure : public ObjectClosure {
 520  private:
 521   KlassInfoTable* _cit;
 522   size_t _missed_count;
 523   BoolObjectClosure* _filter;
 524  public:
 525   RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
 526     _cit(cit), _missed_count(0), _filter(filter) {}
 527 
 528   void do_object(oop obj) {
 529     if (should_visit(obj)) {
 530       if (!_cit->record_instance(obj)) {
 531         _missed_count++;
 532       }
 533     }
 534   }
 535 
 536   size_t missed_count() { return _missed_count; }
 537 
 538  private:
 539   bool should_visit(oop obj) {
 540     return _filter == NULL || _filter->do_object_b(obj);
 541   }
 542 };
 543 
 544 // Heap inspection task run by each worker.
 545 // When a native OOM happens while allocating the KlassInfoTable, set _success to false.
 546 void ParHeapInspectTask::work(uint worker_id) {
 547   size_t missed_count = 0;
 548   bool merge_success = true;
 549   if (!Atomic::load(&_success)) {
 550     // Another worker has already failed during parallel iteration.
 551     return;
 552   }
 553 
 554   KlassInfoTable cit(false);
 555   if (!cit.allocation_failed()) {
 556     RecordInstanceClosure ric(&cit, _filter);
 557     _poi->object_iterate(&ric, worker_id);
 558     missed_count = ric.missed_count();
 559   } else {
 560     // Failed to allocate memory; stop parallel mode.
 561     Atomic::store(&_success, false);
 562     return;
 563   }



 564   {
 565     MutexLocker x(&_mutex);
 566     merge_success = _shared_cit->merge(&cit);
 567   }
 568   if (!merge_success) {


 569     Atomic::store(&_success, false);
 570     return;
 571   }
 572   Atomic::add(&_shared_missed_count, missed_count);
 573 }
 574 
 575 size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, size_t parallel_thread_num) {
 576   ResourceMark rm;
 577 
 578   // Try parallel first.
 579   if (parallel_thread_num > 1) {

 580     ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(parallel_thread_num);
 581     if (poi != NULL) {
 582       ParHeapInspectTask task(poi, cit, filter);
 583       Universe::heap()->run_task(&task);
 584       delete poi;
 585       if (task.success()) {
 586         return task.missed_count();
 587       }
 588     }
 589   }
 590 

 591   // Fall back to serial iteration when parallel iteration is unavailable or has failed.
 592   RecordInstanceClosure ric(cit, filter);
 593   Universe::heap()->object_iterate(&ric);
 594   return ric.missed_count();
 595 }
 596 
 597 void HeapInspection::heap_inspection(outputStream* st, size_t parallel_thread_num) {
 598   ResourceMark rm;
 599 
 600   KlassInfoTable cit(false);
 601   if (!cit.allocation_failed()) {
 602     size_t missed_count = 0;
 603     // populate table with object allocation info
 604     missed_count = populate_table(&cit, NULL, parallel_thread_num);
 605     if (missed_count != 0) {
 606       log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
 607                                " total instances in data below",
 608                                missed_count);
 609     }
 610 
 611     // Sort and print klass instance info
 612     KlassInfoHisto histo(&cit);
 613     HistoClosure hc(&histo);
 614 
 615     cit.iterate(&hc);
 616 
 617     histo.sort();


--- new version ---

 233     _buckets[index].iterate(cic);
 234   }
 235 }
 236 
 237 size_t KlassInfoTable::size_of_instances_in_words() const {
 238   return _size_of_instances_in_words;
 239 }
 240 
 241 // Return false if the entry could not be recorded on account
 242 // of running out of space required to create a new entry.
 243 bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) {
 244   Klass*          k = cie->klass();
 245   KlassInfoEntry* elt = lookup(k);
 246   // elt may be NULL if it's a new klass for which we
 247   // could not allocate space for a new entry in the hashtable.
 248   if (elt != NULL) {
 249     elt->set_count(elt->count() + cie->count());
 250     elt->set_words(elt->words() + cie->words());
 251     _size_of_instances_in_words += cie->words();
 252     return true;


 253   }
 254 
 255   return false;
 256 }
 257 
 258 class KlassInfoTableMergeClosure : public KlassInfoClosure {
 259 private:
 260   KlassInfoTable* _dest;
 261   bool _success;
 262 public:
 263   KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {}
 264   void do_cinfo(KlassInfoEntry* cie) {
 265     _success &= _dest->merge_entry(cie);
 266   }
 267   bool success() { return _success; }
 268 };
 269 
 270 // Merge all entries from the given table into this one.
 271 bool KlassInfoTable::merge(KlassInfoTable* table) {
 272   KlassInfoTableMergeClosure closure(this);
 273   table->iterate(&closure);
 274   return closure.success();
 275 }
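
The merge path iterates the source table with KlassInfoTableMergeClosure and ANDs each merge_entry result into _success, so a single failed entry marks the whole merge as failed without stopping the iteration. Below is a minimal standalone sketch of that idiom; std::map stands in for KlassInfoTable, and unlike the real merge_entry this toy version never fails (the real one can fail on C-heap exhaustion).

    #include <cstdio>
    #include <map>

    using Table = std::map<int, long>;  // stand-in for KlassInfoTable

    static bool merge_entry(Table& dest, int klass, long count) {
      dest[klass] += count;  // the real code can fail to allocate an entry
      return true;
    }

    static bool merge(Table& dest, const Table& src) {
      bool success = true;
      for (const auto& e : src) {
        success &= merge_entry(dest, e.first, e.second);  // latch failures
      }
      return success;
    }

    int main() {
      Table dest{{1, 2}};
      Table src{{1, 3}, {2, 1}};
      bool ok = merge(dest, src);
      std::printf("merged ok: %d, klass 1 count: %ld\n", ok, dest[1]);
      return 0;
    }
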
 276 
 277 int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
 278   return (*e1)->compare(*e1,*e2);
 279 }
 280 
 281 KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit) :
 282   _cit(cit) {
 283   _elements = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<KlassInfoEntry*>(_histo_initial_size, mtServiceability);
 284 }
 285 
 286 KlassInfoHisto::~KlassInfoHisto() {
 287   delete _elements;
 288 }
 289 
 290 void KlassInfoHisto::add(KlassInfoEntry* cie) {
 291   elements()->append(cie);
 292 }
 293 
 294 void KlassInfoHisto::sort() {


 502 void KlassInfoHisto::print_histo_on(outputStream* st) {
 503   st->print_cr(" num     #instances         #bytes  class name (module)");
 504   st->print_cr("-------------------------------------------------------");
 505   print_elements(st);
 506 }
 507 
 508 class HistoClosure : public KlassInfoClosure {
 509  private:
 510   KlassInfoHisto* _cih;
 511  public:
 512   HistoClosure(KlassInfoHisto* cih) : _cih(cih) {}
 513 
 514   void do_cinfo(KlassInfoEntry* cie) {
 515     _cih->add(cie);
 516   }
 517 };
 518 
 519 class RecordInstanceClosure : public ObjectClosure {
 520  private:
 521   KlassInfoTable* _cit;
 522   uint _missed_count;
 523   BoolObjectClosure* _filter;
 524  public:
 525   RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
 526     _cit(cit), _missed_count(0), _filter(filter) {}
 527 
 528   void do_object(oop obj) {
 529     if (should_visit(obj)) {
 530       if (!_cit->record_instance(obj)) {
 531         _missed_count++;
 532       }
 533     }
 534   }
 535 
 536   uint missed_count() { return _missed_count; }
 537 
 538  private:
 539   bool should_visit(oop obj) {
 540     return _filter == NULL || _filter->do_object_b(obj);
 541   }
 542 };
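
RecordInstanceClosure gates every object through the optional filter and tallies the ones it fails to record (when a new hashtable entry cannot be allocated) rather than aborting the heap walk; the tally surfaces later as the undercount warning. A standalone sketch of that visit logic, with hypothetical stand-ins (Recorder, try_record, only_even) in place of the HotSpot types:

    #include <cstdio>
    #include <initializer_list>

    struct Recorder {
      bool (*filter)(int);  // optional predicate, like _filter
      unsigned missed;      // like _missed_count

      // Always succeeds here; the real record_instance() can fail
      // when C-heap runs out.
      bool try_record(int /*obj*/) { return true; }

      void do_object(int obj) {
        if (filter != nullptr && !filter(obj)) return;  // should_visit()
        if (!try_record(obj)) missed++;  // count, do not abort the walk
      }
    };

    static bool only_even(int obj) { return obj % 2 == 0; }

    int main() {
      Recorder r{only_even, 0};
      for (int obj : {1, 2, 3, 4}) r.do_object(obj);
      std::printf("missed=%u\n", r.missed);
      return 0;
    }
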
 543 
 544 // Heap inspection task run by each worker.
 545 // When a native OOM happens while allocating the KlassInfoTable, set _success to false.
 546 void ParHeapInspectTask::work(uint worker_id) {
 547   uint missed_count = 0;
 548   bool merge_success = true;
 549   if (!Atomic::load(&_success)) {
 550     // Another worker has already failed during parallel iteration.
 551     return;
 552   }
 553 
 554   KlassInfoTable cit(false);
 555   if (cit.allocation_failed()) {




 556     // Failed to allocate memory; stop parallel mode.
 557     Atomic::store(&_success, false);
 558     return;
 559   }
 560   RecordInstanceClosure ric(&cit, _filter);
 561   _poi->object_iterate(&ric, worker_id);
 562   missed_count = ric.missed_count();
 563   {
 564     MutexLocker x(&_mutex);
 565     merge_success = _shared_cit->merge(&cit);
 566   }
 567   if (merge_success) {
 568     Atomic::add(&_missed_count, missed_count);
 569   } else {
 570     Atomic::store(&_success, false);
 571     return;
 572   }

 573 }
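
The worker protocol above keeps the heap walk lock-free: each worker fills a private KlassInfoTable, takes the mutex only for the merge into the shared table, and uses the atomic _success flag both to publish its own failure and to bail out early when another worker has already failed. A standalone sketch of the same pattern, with std::atomic, std::lock_guard, and std::map standing in for HotSpot's Atomic, MutexLocker, and KlassInfoTable:

    #include <atomic>
    #include <cstddef>
    #include <cstdio>
    #include <map>
    #include <mutex>
    #include <thread>
    #include <vector>

    static std::map<int, std::size_t> shared_table;  // klass-id -> count
    static std::mutex                 shared_mutex;  // guards shared_table
    static std::atomic<bool>          success{true};

    static void worker(const std::vector<int>& chunk) {
      if (!success.load()) return;        // another worker already failed
      std::map<int, std::size_t> local;   // private table: lock-free walk
      for (int klass_id : chunk) local[klass_id]++;
      std::lock_guard<std::mutex> x(shared_mutex);  // like MutexLocker
      for (const auto& e : local) shared_table[e.first] += e.second;
    }

    int main() {
      std::thread t1(worker, std::vector<int>{1, 2, 1});
      std::thread t2(worker, std::vector<int>{3, 2, 1});
      t1.join();
      t2.join();
      for (const auto& e : shared_table)
        std::printf("klass %d: %zu instances\n", e.first, e.second);
      return success.load() ? 0 : 1;
    }
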
 574 
 575 size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, uint parallel_thread_num) {

 576 
 577   // Try parallel first.
 578   if (parallel_thread_num > 1) {
 579     ResourceMark rm;
 580     ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(parallel_thread_num);
 581     if (poi != NULL) {
 582       ParHeapInspectTask task(poi, cit, filter);
 583       Universe::heap()->run_task(&task);
 584       delete poi;
 585       if (task.success()) {
 586         return task.missed_count();
 587       }
 588     }
 589   }
 590 
 591   ResourceMark rm;
 592   // Fall back to serial iteration when parallel iteration is unavailable or has failed.
 593   RecordInstanceClosure ric(cit, filter);
 594   Universe::heap()->object_iterate(&ric);
 595   return ric.missed_count();
 596 }
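
populate_table only attempts the parallel path when more than one thread is requested and the heap actually provides a ParallelObjectIterator; if the iterator is missing or the task reports failure, control falls through to the serial walk, which cannot fail in the same way. A sketch of that fallback shape, with hypothetical run_parallel/run_serial stand-ins where std::nullopt models "no iterator, or a worker failed":

    #include <cstddef>
    #include <cstdio>
    #include <optional>

    // Stand-ins: the parallel pass may be unavailable or fail (nullopt);
    // the serial pass always completes and returns a missed count.
    static std::optional<std::size_t> run_parallel(unsigned threads) {
      if (threads < 2) return std::nullopt;  // pretend no parallel iterator
      return std::size_t{0};                 // 0 missed instances
    }
    static std::size_t run_serial() { return 0; }

    static std::size_t populate(unsigned threads) {
      if (threads > 1) {
        if (auto missed = run_parallel(threads)) {
          return *missed;  // parallel pass succeeded
        }
        // otherwise fall through, exactly as populate_table does
      }
      return run_serial();
    }

    int main() {
      std::printf("missed=%zu\n", populate(4));
      return 0;
    }
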
 597 
 598 void HeapInspection::heap_inspection(outputStream* st, uint parallel_thread_num) {
 599   ResourceMark rm;
 600 
 601   KlassInfoTable cit(false);
 602   if (!cit.allocation_failed()) {
 603     size_t missed_count = 0;
 604     // populate table with object allocation info
 605     missed_count = populate_table(&cit, NULL, parallel_thread_num);
 606     if (missed_count != 0) {
 607       log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
 608                                " total instances in data below",
 609                                missed_count);
 610     }
 611 
 612     // Sort and print klass instance info
 613     KlassInfoHisto histo(&cit);
 614     HistoClosure hc(&histo);
 615 
 616     cit.iterate(&hc);
 617 
 618     histo.sort();

