< prev index next >
src/hotspot/share/memory/heapInspection.cpp
Print this page
*** 541,583 ****
}
};
// Heap inspection for every worker.
// When native OOM happens for KlassInfoTable, set _success to false.
- // TODO(?) it seems atomically set/get _success is unnecessary because it
- // is set to true at constructor and can only be set to false here.
- // the only risk seems a worker may continue inspecting the heap when another
- // worker sets _success to false, but this is OK because the current worker
- // doesn't change _success if everything is OK for its inspection work, and
- // the _success will be false finally and serial heap inspection can be tried.
void ParHeapInspectTask::work(uint worker_id) {
size_t missed_count = 0;
if (!Atomic::load(&_success)) {
// other worker has failed on parallel iteration.
return;
}
KlassInfoTable cit(false);
if (!cit.allocation_failed()) {
RecordInstanceClosure ric(&cit, _filter);
_poi->object_iterate(&ric, worker_id);
- // _heap->object_iterate_parallel(&ric, worker_id, _par_thread_num);
missed_count = ric.missed_count();
} else {
// fail to allocate memory, stop parallel mode
Atomic::store(&_success, false);
return;
}
{
MutexLocker x(&_mutex);
!
! if (!_shared_cit->merge(&cit)) {
Atomic::store(&_success, false);
return;
}
! _shared_missed_count += missed_count;
! }
}
size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, size_t parallel_thread_num) {
ResourceMark rm;
--- 541,577 ----
}
};
// Heap inspection for every worker.
// When native OOM happens for KlassInfoTable, set _success to false.
void ParHeapInspectTask::work(uint worker_id) {
size_t missed_count = 0;
+ bool merge_success = true;
if (!Atomic::load(&_success)) {
// other worker has failed on parallel iteration.
return;
}
KlassInfoTable cit(false);
if (!cit.allocation_failed()) {
RecordInstanceClosure ric(&cit, _filter);
_poi->object_iterate(&ric, worker_id);
missed_count = ric.missed_count();
} else {
// fail to allocate memory, stop parallel mode
Atomic::store(&_success, false);
return;
}
{
MutexLocker x(&_mutex);
! merge_success = _shared_cit->merge(&cit);
! }
! if (!merge_success) {
Atomic::store(&_success, false);
return;
}
! Atomic::add(&_shared_missed_count, missed_count);
}
size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, size_t parallel_thread_num) {
ResourceMark rm;
< prev index next >