/*
 * NOTE(review): This span is a two-column extraction artifact, not compilable
 * source. Everything before the "|" separator (embedded line 541) is the
 * pre-change version of HotSpot's heapInspection.cpp (serial jmap -histo
 * histogram); everything after it is the post-change version that adds
 * parallel heap inspection: KlassInfoTable::merge_entry / merge,
 * KlassInfoTableMergeClosure, ParHeapInspectTask::work, and a
 * populate_table overload taking parallel_thread_num. The decimal prefixes
 * (220, 221, ...) are the original file's line numbers fused into the text
 * by the extraction; the five physical lines here break mid-comment, so no
 * function is whole on any one line. Code bytes are left untouched below.
 *
 * Review observations on the post-change (right-hand) version:
 *  - Embedded line 601: "size_t missed_count = 0;;" has a stray second
 *    semicolon - harmless empty statement, but should be cleaned up.
 *  - Embedded (old) line 521 calls populate_table(&cit) with one argument
 *    while the visible definition takes (cit, filter) - presumably a default
 *    "filter = NULL" exists in the header; verify against heapInspection.hpp.
 *  - In ParHeapInspectTask::work, "_success" is read at entry and written
 *    on failure by multiple workers; only the merge step is under _mutex.
 *    Looks like a deliberate best-effort early-out, but confirm _success is
 *    declared with adequate atomicity/visibility for cross-thread use.
 *  - KlassInfoTableMergeClosure::do_cinfo uses "_success &=" so one failed
 *    merge_entry latches failure for the rest of the iteration, and
 *    KlassInfoTable::merge reports it via is_success().
 */
220 elt->set_count(elt->count() + 1); 221 elt->set_words(elt->words() + obj->size()); 222 _size_of_instances_in_words += obj->size(); 223 return true; 224 } else { 225 return false; 226 } 227 } 228 229 void KlassInfoTable::iterate(KlassInfoClosure* cic) { 230 assert(_buckets != NULL, "Allocation failure should have been caught"); 231 for (int index = 0; index < _num_buckets; index++) { 232 _buckets[index].iterate(cic); 233 } 234 } 235 236 size_t KlassInfoTable::size_of_instances_in_words() const { 237 return _size_of_instances_in_words; 238 } 239 240 int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) { 241 return (*e1)->compare(*e1,*e2); 242 } 243 244 KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit) : 245 _cit(cit) { 246 _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(_histo_initial_size, true); 247 } 248 249 KlassInfoHisto::~KlassInfoHisto() { 250 delete _elements; 251 } 252 253 void KlassInfoHisto::add(KlassInfoEntry* cie) { 254 elements()->append(cie); 255 } 256 257 void KlassInfoHisto::sort() { 258 elements()->sort(KlassInfoHisto::sort_helper); 259 } 487 public: 488 RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) : 489 _cit(cit), _missed_count(0), _filter(filter) {} 490 491 void do_object(oop obj) { 492 if (should_visit(obj)) { 493 if (!_cit->record_instance(obj)) { 494 _missed_count++; 495 } 496 } 497 } 498 499 size_t missed_count() { return _missed_count; } 500 501 private: 502 bool should_visit(oop obj) { 503 return _filter == NULL || _filter->do_object_b(obj); 504 } 505 }; 506 507 size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter) { 508 ResourceMark rm; 509 510 RecordInstanceClosure ric(cit, filter); 511 Universe::heap()->object_iterate(&ric); 512 return ric.missed_count(); 513 } 514 515 void HeapInspection::heap_inspection(outputStream* st) { 516 ResourceMark rm; 517 518 KlassInfoTable cit(false); 519 if (!cit.allocation_failed()) { 520 // 
populate table with object allocation info 521 size_t missed_count = populate_table(&cit); 522 if (missed_count != 0) { 523 log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT 524 " total instances in data below", 525 missed_count); 526 } 527 528 // Sort and print klass instance info 529 KlassInfoHisto histo(&cit); 530 HistoClosure hc(&histo); 531 532 cit.iterate(&hc); 533 534 histo.sort(); 535 histo.print_histo_on(st); 536 } else { 537 st->print_cr("ERROR: Ran out of C-heap; histogram not generated"); 538 } 539 st->flush(); 540 } 541 | 220 elt->set_count(elt->count() + 1); 221 elt->set_words(elt->words() + obj->size()); 222 _size_of_instances_in_words += obj->size(); 223 return true; 224 } else { 225 return false; 226 } 227 } 228 229 void KlassInfoTable::iterate(KlassInfoClosure* cic) { 230 assert(_buckets != NULL, "Allocation failure should have been caught"); 231 for (int index = 0; index < _num_buckets; index++) { 232 _buckets[index].iterate(cic); 233 } 234 } 235 236 size_t KlassInfoTable::size_of_instances_in_words() const { 237 return _size_of_instances_in_words; 238 } 239 240 // Return false if the entry could not be recorded on account 241 // of running out of space required to create a new entry. 242 bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) { 243 Klass* k = cie->klass(); 244 KlassInfoEntry* elt = lookup(k); 245 // elt may be NULL if it's a new klass for which we 246 // could not allocate space for a new entry in the hashtable. 
247 if (elt != NULL) { 248 elt->set_count(elt->count() + cie->count()); 249 elt->set_words(elt->words() + cie->words()); 250 _size_of_instances_in_words += cie->words(); 251 return true; 252 } else { 253 return false; 254 } 255 } 256 257 class KlassInfoTableMergeClosure : public KlassInfoClosure { 258 private: 259 KlassInfoTable* _dest; 260 bool _success; 261 public: 262 KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {} 263 void do_cinfo(KlassInfoEntry* cie) { 264 _success &= _dest->merge_entry(cie); 265 } 266 bool is_success() { return _success; } 267 }; 268 269 // merge from table 270 bool KlassInfoTable::merge(KlassInfoTable* table) { 271 KlassInfoTableMergeClosure closure(this); 272 table->iterate(&closure); 273 return closure.is_success(); 274 } 275 276 int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) { 277 return (*e1)->compare(*e1,*e2); 278 } 279 280 KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit) : 281 _cit(cit) { 282 _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(_histo_initial_size, true); 283 } 284 285 KlassInfoHisto::~KlassInfoHisto() { 286 delete _elements; 287 } 288 289 void KlassInfoHisto::add(KlassInfoEntry* cie) { 290 elements()->append(cie); 291 } 292 293 void KlassInfoHisto::sort() { 294 elements()->sort(KlassInfoHisto::sort_helper); 295 } 523 public: 524 RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) : 525 _cit(cit), _missed_count(0), _filter(filter) {} 526 527 void do_object(oop obj) { 528 if (should_visit(obj)) { 529 if (!_cit->record_instance(obj)) { 530 _missed_count++; 531 } 532 } 533 } 534 535 size_t missed_count() { return _missed_count; } 536 537 private: 538 bool should_visit(oop obj) { 539 return _filter == NULL || _filter->do_object_b(obj); 540 } 541 }; 542 543 void ParHeapInspectTask::work(uint worker_id) { 544 size_t missed_count = 0; 545 if (!_success) { 546 // other worker has failed on parallel iteration. 
547 return; 548 } 549 550 KlassInfoTable cit(false); 551 if (!cit.allocation_failed()) { 552 RecordInstanceClosure ric(&cit, _filter); 553 do_object_iterate_parallel(&ric, worker_id); 554 missed_count = ric.missed_count(); 555 } else { 556 // fail to allocate memory, stop parallel mode. 557 _success = false; 558 return; 559 } 560 { 561 MutexLocker x(&_mutex); 562 563 if (!_shared_cit->merge(&cit)) { 564 _success = false; 565 return; 566 } 567 *_shared_missed_count += missed_count; 568 } 569 } 570 571 size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, size_t parallel_thread_num) { 572 ResourceMark rm; 573 size_t missed_count = 0; 574 bool do_serial = (parallel_thread_num == 1); 575 // try parallel first. 576 if (parallel_thread_num > 1) { 577 bool succ = Universe::heap()->run_par_heap_inspect_task(cit, filter, &missed_count, parallel_thread_num); 578 if (succ) { 579 do_serial = false; 580 } else { 581 // heap does not support parallel iteration, or parallel task fail because of native memory oom. 582 // use object_iterate. 
583 do_serial = true; 584 missed_count = 0; 585 } 586 } 587 588 if (do_serial) { 589 RecordInstanceClosure ric(cit, filter); 590 Universe::heap()->object_iterate(&ric); 591 missed_count = ric.missed_count(); 592 } 593 return missed_count; 594 } 595 596 void HeapInspection::heap_inspection(outputStream* st, size_t parallel_thread_num) { 597 ResourceMark rm; 598 599 KlassInfoTable cit(false); 600 if (!cit.allocation_failed()) { 601 size_t missed_count = 0;; 602 // populate table with object allocation info 603 missed_count = populate_table(&cit, NULL, parallel_thread_num); 604 if (missed_count != 0) { 605 log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT 606 " total instances in data below", 607 missed_count); 608 } 609 610 // Sort and print klass instance info 611 KlassInfoHisto histo(&cit); 612 HistoClosure hc(&histo); 613 614 cit.iterate(&hc); 615 616 histo.sort(); 617 histo.print_histo_on(st); 618 } else { 619 st->print_cr("ERROR: Ran out of C-heap; histogram not generated"); 620 } 621 st->flush(); 622 } 623 |