< prev index next >

src/hotspot/share/memory/heapInspection.cpp

Print this page
rev 58082 : 8214535: Parallel heap inspection for jmap histo (G1)
Summary: Add parallel heap inspection to speed up jmap -histo; this patch supports G1
Reviewed-by:
Contributed-by: lzang


 219     elt->set_count(elt->count() + 1);
 220     elt->set_words(elt->words() + obj->size());
 221     _size_of_instances_in_words += obj->size();
 222     return true;
 223   } else {
 224     return false;
 225   }
 226 }
 227 
 228 void KlassInfoTable::iterate(KlassInfoClosure* cic) {
 229   assert(_buckets != NULL, "Allocation failure should have been caught");
 230   for (int index = 0; index < _num_buckets; index++) {
 231     _buckets[index].iterate(cic);
 232   }
 233 }
 234 
 235 size_t KlassInfoTable::size_of_instances_in_words() const {
 236   return _size_of_instances_in_words;
 237 }
 238 




































 239 int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
 240   return (*e1)->compare(*e1,*e2);
 241 }
 242 
 243 KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit) :
 244   _cit(cit) {
 245   _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(_histo_initial_size, true);
 246 }
 247 
 248 KlassInfoHisto::~KlassInfoHisto() {
 249   delete _elements;
 250 }
 251 
 252 void KlassInfoHisto::add(KlassInfoEntry* cie) {
 253   elements()->append(cie);
 254 }
 255 
 256 void KlassInfoHisto::sort() {
 257   elements()->sort(KlassInfoHisto::sort_helper);
 258 }


 486  public:
 487   RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
 488     _cit(cit), _missed_count(0), _filter(filter) {}
 489 
 490   void do_object(oop obj) {
 491     if (should_visit(obj)) {
 492       if (!_cit->record_instance(obj)) {
 493         _missed_count++;
 494       }
 495     }
 496   }
 497 
 498   size_t missed_count() { return _missed_count; }
 499 
 500  private:
 501   bool should_visit(oop obj) {
 502     return _filter == NULL || _filter->do_object_b(obj);
 503   }
 504 };
 505 
 506 size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter) {





























 507   ResourceMark rm;














 508 

 509   RecordInstanceClosure ric(cit, filter);
 510   Universe::heap()->object_iterate(&ric);
 511   return ric.missed_count();


 512 }
 513 
 514 void HeapInspection::heap_inspection(outputStream* st) {
 515   ResourceMark rm;
 516 
 517   KlassInfoTable cit(false);
 518   if (!cit.allocation_failed()) {

 519     // populate table with object allocation info
 520     size_t missed_count = populate_table(&cit);
 521     if (missed_count != 0) {
 522       log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
 523                                " total instances in data below",
 524                                missed_count);
 525     }
 526 
 527     // Sort and print klass instance info
 528     KlassInfoHisto histo(&cit);
 529     HistoClosure hc(&histo);
 530 
 531     cit.iterate(&hc);
 532 
 533     histo.sort();
 534     histo.print_histo_on(st);
 535   } else {
 536     st->print_cr("ERROR: Ran out of C-heap; histogram not generated");
 537   }
 538   st->flush();
 539 }
 540 




 219     elt->set_count(elt->count() + 1);
 220     elt->set_words(elt->words() + obj->size());
 221     _size_of_instances_in_words += obj->size();
 222     return true;
 223   } else {
 224     return false;
 225   }
 226 }
 227 
 228 void KlassInfoTable::iterate(KlassInfoClosure* cic) {
 229   assert(_buckets != NULL, "Allocation failure should have been caught");
 230   for (int index = 0; index < _num_buckets; index++) {
 231     _buckets[index].iterate(cic);
 232   }
 233 }
 234 
 235 size_t KlassInfoTable::size_of_instances_in_words() const {
 236   return _size_of_instances_in_words;
 237 }
 238 
 239 // Return false if the entry could not be recorded on account
 240 // of running out of space required to create a new entry.
 241 bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) {
 242   Klass*        k = cie->klass();
 243   KlassInfoEntry* elt = lookup(k);
 244   // elt may be NULL if it's a new klass for which we
 245   // could not allocate space for a new entry in the hashtable.
 246   if (elt != NULL) {
 247     elt->set_count(elt->count() + cie->count());
 248     elt->set_words(elt->words() + cie->words());
 249     _size_of_instances_in_words += cie->words();
 250     return true;
 251   } else {
 252     return false;
 253   }
 254 }
 255 
 256 class KlassInfoTableMergeClosure : public KlassInfoClosure {
 257 private:
 258   KlassInfoTable* _dest;
 259   bool _success;
 260 public:
 261   KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {}
 262   void do_cinfo(KlassInfoEntry* cie) {
 263     _success &= _dest->merge_entry(cie);
 264   }
 265   bool is_success() { return _success; }
 266 };
 267 
 268 // merge from table
 269 bool KlassInfoTable::merge(KlassInfoTable* table) {
 270   KlassInfoTableMergeClosure closure(this);
 271   table->iterate(&closure);
 272   return closure.is_success();
 273 }
 274 
 275 int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
 276   return (*e1)->compare(*e1,*e2);
 277 }
 278 
 279 KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit) :
 280   _cit(cit) {
 281   _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(_histo_initial_size, true);
 282 }
 283 
 284 KlassInfoHisto::~KlassInfoHisto() {
 285   delete _elements;
 286 }
 287 
 288 void KlassInfoHisto::add(KlassInfoEntry* cie) {
 289   elements()->append(cie);
 290 }
 291 
 292 void KlassInfoHisto::sort() {
 293   elements()->sort(KlassInfoHisto::sort_helper);
 294 }


 522  public:
 523   RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
 524     _cit(cit), _missed_count(0), _filter(filter) {}
 525 
 526   void do_object(oop obj) {
 527     if (should_visit(obj)) {
 528       if (!_cit->record_instance(obj)) {
 529         _missed_count++;
 530       }
 531     }
 532   }
 533 
 534   size_t missed_count() { return _missed_count; }
 535 
 536  private:
 537   bool should_visit(oop obj) {
 538     return _filter == NULL || _filter->do_object_b(obj);
 539   }
 540 };
 541 
 542 void ParHeapInspectTask::work(uint worker_id) {
 543   size_t missed_count = 0;
 544   if (!_success) {
 545     // other worker has failed on parallel iteration.
 546     return;
 547   }
 548 
 549   KlassInfoTable cit(false);
 550   if (!cit.allocation_failed()) {
 551     RecordInstanceClosure ric(&cit, _filter);
 552     do_object_iterate_parallel(&ric, worker_id);
 553     // _heap->object_iterate_parallel(&ric, worker_id, _par_thread_num);
 554     missed_count = ric.missed_count();
 555   } else {
 556     // fail to allocate memory, stop parallel mode
 557     _success = false;
 558     return;
 559   }
 560   {
 561     MutexLocker x(&_mutex);
 562 
 563     if (!_shared_cit->merge(&cit)) {
 564       _success = false;
 565       return;
 566     }
 567     *_shared_missed_count += missed_count;
 568   }
 569 }
 570 
 571 size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, size_t parallel_thread_num) {
 572   ResourceMark rm;
 573   size_t missed_count = 0;
 574   bool do_serial = (parallel_thread_num == 1);
 575   // try parallel first.
 576   if (parallel_thread_num > 1) {
 577     bool succ = Universe::heap()->run_par_heap_inspect_task(cit, filter, &missed_count, parallel_thread_num);
 578     if (succ) {
 579      do_serial = false;
 580     } else {
 581       // heap does not support parallel iteration, or parallel task fail because of native memory oom.
 582       // use object_iterate.
 583       do_serial = true;
 584       missed_count = 0;
 585     }
 586   }
 587 
 588   if (do_serial) {
 589     RecordInstanceClosure ric(cit, filter);
 590     Universe::heap()->object_iterate(&ric);
 591     missed_count = ric.missed_count();
 592   }
 593   return missed_count;
 594 }
 595 
 596 void HeapInspection::heap_inspection(outputStream* st, size_t parallel_thread_num) {
 597   ResourceMark rm;
 598 
 599   KlassInfoTable cit(false);
 600   if (!cit.allocation_failed()) {
 601     size_t missed_count = 0;;
 602     // populate table with object allocation info
 603     missed_count = populate_table(&cit, NULL, parallel_thread_num);
 604     if (missed_count != 0) {
 605       log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
 606                                " total instances in data below",
 607                                missed_count);
 608     }
 609 
 610     // Sort and print klass instance info
 611     KlassInfoHisto histo(&cit);
 612     HistoClosure hc(&histo);
 613 
 614     cit.iterate(&hc);
 615 
 616     histo.sort();
 617     histo.print_histo_on(st);
 618   } else {
 619     st->print_cr("ERROR: Ran out of C-heap; histogram not generated");
 620   }
 621   st->flush();
 622 }
 623 


< prev index next >