< prev index next >

src/hotspot/share/memory/heapInspection.cpp

Print this page




 526     _cit(cit), _missed_count(0), _filter(filter) {}
 527 
 528   void do_object(oop obj) {
 529     if (should_visit(obj)) {
 530       if (!_cit->record_instance(obj)) {
 531         _missed_count++;
 532       }
 533     }
 534   }
 535 
 536   size_t missed_count() { return _missed_count; }
 537 
 538  private:
 539   bool should_visit(oop obj) {
 540     return _filter == NULL || _filter->do_object_b(obj);
 541   }
 542 };
 543 
// Heap inspection for every worker.
// When a native OOM happens for the KlassInfoTable, set _success to false.
// TODO(?) it seems atomically setting/getting _success is unnecessary because
// it is set to true in the constructor and can only be set to false here.
// The only risk seems to be that a worker may continue inspecting the heap
// after another worker has set _success to false, but this is OK because the
// current worker doesn't change _success if everything is OK for its own
// inspection work, and _success will end up false, so serial heap inspection
// can then be tried.
// Per-worker entry point for parallel heap inspection.
// Each worker builds a thread-local KlassInfoTable over the objects it is
// handed, then merges that table into the shared one under _mutex.
void ParHeapInspectTask::work(uint worker_id) {
  size_t missed_count = 0;

  if (!Atomic::load(&_success)) {
    // other worker has failed on parallel iteration.
    return;
  }

  // Thread-local table; 'false' presumably selects non-resource-area
  // backing -- TODO confirm against KlassInfoTable's constructor.
  KlassInfoTable cit(false);
  if (!cit.allocation_failed()) {
    RecordInstanceClosure ric(&cit, _filter);
    // Iterate this worker's share of the heap objects.
    _poi->object_iterate(&ric, worker_id);
    // _heap->object_iterate_parallel(&ric, worker_id, _par_thread_num);
    missed_count = ric.missed_count();
  } else {
    // fail to allocate memory, stop parallel mode
    Atomic::store(&_success, false);
    return;
  }
  {
    // Merge the local table and missed count into the shared state;
    // _mutex serializes concurrent workers.
    MutexLocker x(&_mutex);

    if (!_shared_cit->merge(&cit)) {
      // Merge failed; signal failure so the caller can retry serially.
      Atomic::store(&_success, false);
      return;
    }
    _shared_missed_count += missed_count;
  }
}
 580 
 581 size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, size_t parallel_thread_num) {
 582   ResourceMark rm;
 583 
 584   // Try parallel first.
 585   if (parallel_thread_num > 1) {
 586     ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(parallel_thread_num);
 587     if (poi != NULL) {
 588       ParHeapInspectTask task(poi, cit, filter);
 589       Universe::heap()->run_task(&task);
 590       delete poi;
 591       if (task.success()) {
 592         return task.missed_count();
 593       }
 594     }
 595   }
 596 
 597   // If no parallel iteration available, run serially.
 598   RecordInstanceClosure ric(cit, filter);




 526     _cit(cit), _missed_count(0), _filter(filter) {}
 527 
 528   void do_object(oop obj) {
 529     if (should_visit(obj)) {
 530       if (!_cit->record_instance(obj)) {
 531         _missed_count++;
 532       }
 533     }
 534   }
 535 
 536   size_t missed_count() { return _missed_count; }
 537 
 538  private:
 539   bool should_visit(oop obj) {
 540     return _filter == NULL || _filter->do_object_b(obj);
 541   }
 542 };
 543 
// Heap inspection for every worker.
// When a native OOM happens for the KlassInfoTable, set _success to false.






// Per-worker entry point for parallel heap inspection.
// Each worker builds a thread-local KlassInfoTable, merges it into the
// shared table under _mutex, and adds its missed count atomically.
void ParHeapInspectTask::work(uint worker_id) {
  size_t missed_count = 0;
  bool merge_success = true;
  if (!Atomic::load(&_success)) {
    // other worker has failed on parallel iteration.
    return;
  }

  // Thread-local table; 'false' presumably selects non-resource-area
  // backing -- TODO confirm against KlassInfoTable's constructor.
  KlassInfoTable cit(false);
  if (!cit.allocation_failed()) {
    RecordInstanceClosure ric(&cit, _filter);
    // Iterate this worker's share of the heap objects.
    _poi->object_iterate(&ric, worker_id);
    missed_count = ric.missed_count();
  } else {
    // fail to allocate memory, stop parallel mode
    Atomic::store(&_success, false);
    return;
  }
  {
    // Only the merge itself needs the lock; keep the critical section small.
    MutexLocker x(&_mutex);
    merge_success = _shared_cit->merge(&cit);
  }
  if (!merge_success) {
    // Merge failed; signal failure so the caller can retry serially.
    Atomic::store(&_success, false);
    return;
  }
  // Updated outside the lock, so the addition must be atomic.
  Atomic::add(&_shared_missed_count, missed_count);
}
 574 
 575 size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, size_t parallel_thread_num) {
 576   ResourceMark rm;
 577 
 578   // Try parallel first.
 579   if (parallel_thread_num > 1) {
 580     ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(parallel_thread_num);
 581     if (poi != NULL) {
 582       ParHeapInspectTask task(poi, cit, filter);
 583       Universe::heap()->run_task(&task);
 584       delete poi;
 585       if (task.success()) {
 586         return task.missed_count();
 587       }
 588     }
 589   }
 590 
 591   // If no parallel iteration available, run serially.
 592   RecordInstanceClosure ric(cit, filter);


< prev index next >