< prev index next >

src/hotspot/share/memory/heapInspection.cpp

Print this page




     // Prints the class-histogram column header, then delegates to
     // print_elements() for the (already sorted) per-class rows.
 501 void KlassInfoHisto::print_histo_on(outputStream* st) {
 502   st->print_cr(" num     #instances         #bytes  class name (module)");
 503   st->print_cr("-------------------------------------------------------");
 504   print_elements(st);
 505 }
 506 
     // Adapter closure: feeds every KlassInfoEntry visited during a
     // KlassInfoTable iteration into a KlassInfoHisto (driven from
     // heap_inspection() via cit.iterate()).
 507 class HistoClosure : public KlassInfoClosure {
 508  private:
 509   KlassInfoHisto* _cih;  // histogram being populated
 510  public:
 511   HistoClosure(KlassInfoHisto* cih) : _cih(cih) {}
 512 
 513   void do_cinfo(KlassInfoEntry* cie) {
 514     _cih->add(cie);
 515   }
 516 };
 517 
     // ObjectClosure that records each visited heap object in a
     // KlassInfoTable. An optional BoolObjectClosure restricts which objects
     // are recorded (NULL means record everything). Objects the table fails
     // to record (record_instance() returns false, i.e. it could not
     // allocate) are tallied in _missed_count instead.
 518 class RecordInstanceClosure : public ObjectClosure {
 519  private:
 520   KlassInfoTable* _cit;
 521   uint _missed_count;
 522   BoolObjectClosure* _filter;
 523  public:
 524   RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
 525     _cit(cit), _missed_count(0), _filter(filter) {}
 526 
 527   void do_object(oop obj) {
 528     if (should_visit(obj)) {
 529       if (!_cit->record_instance(obj)) {
     // Table could not record this object; count it as missed.
 530         _missed_count++;
 531       }
 532     }
 533   }
 534 
 535   uint missed_count() { return _missed_count; }
 536 
 537  private:
     // True iff obj passes the optional filter (no filter accepts all).
 538   bool should_visit(oop obj) {
 539     return _filter == NULL || _filter->do_object_b(obj);
 540   }
 541 };
 542 
 543 // Heap inspection for every worker.
 544 // When native OOM happens for KlassInfoTable, set _success to false.
 545 void ParHeapInspectTask::work(uint worker_id) {
 546   uint missed_count = 0;
 547   bool merge_success = true;
 548   if (!Atomic::load(&_success)) {
 549     // other worker has failed on parallel iteration.
 550     return;
 551   }
 552 
     // Per-worker private table; merged into _shared_cit under _mutex below.
 553   KlassInfoTable cit(false);
 554   if (cit.allocation_failed()) {
 555     // fail to allocate memory, stop parallel mode
 556     Atomic::store(&_success, false);
 557     return;
 558   }
 559   RecordInstanceClosure ric(&cit, _filter);
 560   _poi->object_iterate(&ric, worker_id);
 561   missed_count = ric.missed_count();
 562   {
     // Only the merge into the shared table needs the lock.
 563     MutexLocker x(&_mutex);
 564     merge_success = _shared_cit->merge(&cit);
 565   }
 566   if (merge_success) {
     // Accumulate this worker's missed instances into the shared counter.
 567     Atomic::add(&_missed_count, missed_count);
 568   } else {
     // Merge failed; signal the other workers to abandon parallel mode.
 569     Atomic::store(&_success, false);
 570   }
 571 }
 572 
     // Fills 'cit' with per-class instance info for live heap objects that
     // pass 'filter' (NULL means all objects). Tries a parallel heap walk
     // first when parallel_thread_num > 1, falling back to a serial walk.
     // Returns how many instances could not be recorded because the table
     // failed to allocate.
 573 uint HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, uint parallel_thread_num) {
 574 
 575   // Try parallel first.
 576   if (parallel_thread_num > 1) {
 577     ResourceMark rm;
 578     ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(parallel_thread_num);
 579     if (poi != NULL) {
 580       ParHeapInspectTask task(poi, cit, filter);
 581       Universe::heap()->run_task(&task);
 582       delete poi;
     // If a worker failed (native OOM or failed merge) fall through to serial.
 583       if (task.success()) {
 584         return task.missed_count();
 585       }
 586     }
 587   }
 588 
 589   ResourceMark rm;
 590   // If no parallel iteration available, run serially.
 591   RecordInstanceClosure ric(cit, filter);
 592   Universe::heap()->object_iterate(&ric);
 593   return ric.missed_count();
 594 }
 595 
     // Prints a class histogram of the Java heap to 'st', using up to
     // parallel_thread_num threads for the heap walk. If the table itself
     // cannot be allocated, prints an error instead of a histogram.
 596 void HeapInspection::heap_inspection(outputStream* st, uint parallel_thread_num) {
 597   ResourceMark rm;
 598 
 599   KlassInfoTable cit(false);
 600   if (!cit.allocation_failed()) {
 601     // populate table with object allocation info
 602     uint missed_count = populate_table(&cit, NULL, parallel_thread_num);
 603     if (missed_count != 0) {
 604       log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " UINTX_FORMAT
 605                                " total instances in data below",
 606                                (uintx)missed_count);
 607     }
 608 
 609     // Sort and print klass instance info
 610     KlassInfoHisto histo(&cit);
 611     HistoClosure hc(&histo);
 612 
 613     cit.iterate(&hc);
 614 
 615     histo.sort();
 616     histo.print_histo_on(st);
 617   } else {
 618     st->print_cr("ERROR: Ran out of C-heap; histogram not generated");
 619   }
 620   st->flush();
 621 }
 622 




 501 void KlassInfoHisto::print_histo_on(outputStream* st) {
 502   st->print_cr(" num     #instances         #bytes  class name (module)");
 503   st->print_cr("-------------------------------------------------------");
 504   print_elements(st);
 505 }
 506 
 507 class HistoClosure : public KlassInfoClosure {
 508  private:
 509   KlassInfoHisto* _cih;
 510  public:
 511   HistoClosure(KlassInfoHisto* cih) : _cih(cih) {}
 512 
 513   void do_cinfo(KlassInfoEntry* cie) {
 514     _cih->add(cie);
 515   }
 516 };
 517 
 518 class RecordInstanceClosure : public ObjectClosure {
 519  private:
 520   KlassInfoTable* _cit;
 521   uintx _missed_count;
 522   BoolObjectClosure* _filter;
 523  public:
 524   RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
 525     _cit(cit), _missed_count(0), _filter(filter) {}
 526 
 527   void do_object(oop obj) {
 528     if (should_visit(obj)) {
 529       if (!_cit->record_instance(obj)) {
 530         _missed_count++;
 531       }
 532     }
 533   }
 534 
 535   uintx missed_count() { return _missed_count; }
 536 
 537  private:
 538   bool should_visit(oop obj) {
 539     return _filter == NULL || _filter->do_object_b(obj);
 540   }
 541 };
 542 
 543 // Heap inspection for every worker.
 544 // When native OOM hanppens for KlassInfoTable, set _success to false.
 545 void ParHeapInspectTask::work(uint worker_id) {
 546   uintx missed_count = 0;
 547   bool merge_success = true;
 548   if (!Atomic::load(&_success)) {
 549     // other worker has failed on parallel iteration.
 550     return;
 551   }
 552 
 553   KlassInfoTable cit(false);
 554   if (cit.allocation_failed()) {
 555     // fail to allocate memory, stop parallel mode
 556     Atomic::store(&_success, false);
 557     return;
 558   }
 559   RecordInstanceClosure ric(&cit, _filter);
 560   _poi->object_iterate(&ric, worker_id);
 561   missed_count = ric.missed_count();
 562   {
 563     MutexLocker x(&_mutex);
 564     merge_success = _shared_cit->merge(&cit);
 565   }
 566   if (merge_success) {
 567     Atomic::add(&_missed_count, missed_count);
 568   } else {
 569     Atomic::store(&_success, false);
 570   }
 571 }
 572 
 573 uintx HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, uint parallel_thread_num) {
 574 
 575   // Try parallel first.
 576   if (parallel_thread_num > 1) {
 577     ResourceMark rm;
 578     ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(parallel_thread_num);
 579     if (poi != NULL) {
 580       ParHeapInspectTask task(poi, cit, filter);
 581       Universe::heap()->run_task(&task);
 582       delete poi;
 583       if (task.success()) {
 584         return task.missed_count();
 585       }
 586     }
 587   }
 588 
 589   ResourceMark rm;
 590   // If no parallel iteration available, run serially.
 591   RecordInstanceClosure ric(cit, filter);
 592   Universe::heap()->object_iterate(&ric);
 593   return ric.missed_count();
 594 }
 595 
 596 void HeapInspection::heap_inspection(outputStream* st, uint parallel_thread_num) {
 597   ResourceMark rm;
 598 
 599   KlassInfoTable cit(false);
 600   if (!cit.allocation_failed()) {
 601     // populate table with object allocation info
 602     uintx missed_count = populate_table(&cit, NULL, parallel_thread_num);
 603     if (missed_count != 0) {
 604       log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " UINTX_FORMAT
 605                                " total instances in data below",
 606                                (uintx)missed_count);
 607     }
 608 
 609     // Sort and print klass instance info
 610     KlassInfoHisto histo(&cit);
 611     HistoClosure hc(&histo);
 612 
 613     cit.iterate(&hc);
 614 
 615     histo.sort();
 616     histo.print_histo_on(st);
 617   } else {
 618     st->print_cr("ERROR: Ran out of C-heap; histogram not generated");
 619   }
 620   st->flush();
 621 }
 622 


< prev index next >