src/hotspot/share/gc/g1/g1CollectedHeap.cpp
rev 53920 : [mq]: 8218880-g1-crashes-periodic-gc-gclocker
*** 2111,2129 ****
  // waiting in VM_G1CollectForAllocation::doit_epilogue().
  FullGCCount_lock->notify_all();
}

void G1CollectedHeap::collect(GCCause::Cause cause) {
  assert_heap_not_locked();

  uint gc_count_before;
  uint old_marking_count_before;
  uint full_gc_count_before;
-   bool retry_gc;
-
-   do {
-     retry_gc = false;

    {
      MutexLocker ml(Heap_lock);

      // Read the GC count while holding the Heap_lock
--- 2111,2135 ----
  // waiting in VM_G1CollectForAllocation::doit_epilogue().
  FullGCCount_lock->notify_all();
}

void G1CollectedHeap::collect(GCCause::Cause cause) {
+   attempt_collect(cause, true);
+ }
+
+ bool G1CollectedHeap::attempt_collect(GCCause::Cause cause, bool retry_on_vmop_failure) {
  assert_heap_not_locked();

+   bool vmop_succeeded;
+   bool should_retry_vmop;
+
+   do {
+     should_retry_vmop = false;
+
    uint gc_count_before;
    uint old_marking_count_before;
    uint full_gc_count_before;

    {
      MutexLocker ml(Heap_lock);

      // Read the GC count while holding the Heap_lock
*** 2140,2164 ****
                                   gc_count_before,
                                   cause,
                                   true,  /* should_initiate_conc_mark */
                                   g1_policy()->max_pause_time_ms());
      VMThread::execute(&op);
!       if (!op.pause_succeeded()) {
        if (old_marking_count_before == _old_marking_cycles_started) {
!           retry_gc = op.should_retry_gc();
        } else {
          // A Full GC happened while we were trying to schedule the
!           // initial-mark GC. No point in starting a new cycle given
          // that the whole heap was collected anyway.
        }

!         if (retry_gc) {
!           if (GCLocker::is_active_and_needs_gc()) {
            GCLocker::stall_until_clear();
          }
        }
-       }
    } else {
      if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
          DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {

        // Schedule a standard evacuation pause. We're setting word_size
--- 2146,2169 ----
                                   gc_count_before,
                                   cause,
                                   true,  /* should_initiate_conc_mark */
                                   g1_policy()->max_pause_time_ms());
      VMThread::execute(&op);
!       vmop_succeeded = op.pause_succeeded();
!       if (!vmop_succeeded && retry_on_vmop_failure) {
        if (old_marking_count_before == _old_marking_cycles_started) {
!           should_retry_vmop = op.should_retry_gc();
        } else {
          // A Full GC happened while we were trying to schedule the
!           // concurrent cycle. No point in starting a new cycle given
          // that the whole heap was collected anyway.
        }

!         if (should_retry_vmop && GCLocker::is_active_and_needs_gc()) {
          GCLocker::stall_until_clear();
        }
      }
    } else {
      if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
          DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {

        // Schedule a standard evacuation pause. We're setting word_size
*** 2167,2183 ****
                                     gc_count_before,
                                     cause,
                                     false, /* should_initiate_conc_mark */
                                     g1_policy()->max_pause_time_ms());
        VMThread::execute(&op);
      } else {
        // Schedule a Full GC.
        VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
        VMThread::execute(&op);
      }
    }
!   } while (retry_gc);
}

bool G1CollectedHeap::is_in(const void* p) const {
  if (_hrm->reserved().contains(p)) {
    // Given that we know that p is in the reserved space,
--- 2172,2191 ----
                                     gc_count_before,
                                     cause,
                                     false, /* should_initiate_conc_mark */
                                     g1_policy()->max_pause_time_ms());
        VMThread::execute(&op);
+         vmop_succeeded = op.pause_succeeded();
      } else {
        // Schedule a Full GC.
        VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
        VMThread::execute(&op);
+         vmop_succeeded = op.pause_succeeded();
      }
    }
!   } while (should_retry_vmop);
!   return vmop_succeeded;
}

bool G1CollectedHeap::is_in(const void* p) const {
  if (_hrm->reserved().contains(p)) {
    // Given that we know that p is in the reserved space,
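
For context, a minimal sketch of how the new boolean result could be consumed by a periodic GC trigger that must not stall on the GCLocker. The helper function below is hypothetical and not part of this webrev; attempt_collect() and GCCause::_g1_periodic_collection come from the patched sources, the rest is illustrative.

// Hypothetical caller (illustration only). Passing retry_on_vmop_failure ==
// false makes attempt_collect() return false instead of looping when the
// pause could not run (e.g. because the GCLocker was active), so a periodic
// trigger can simply skip this interval and try again on the next one.
static void try_periodic_gc(G1CollectedHeap* g1h) {
  bool succeeded = g1h->attempt_collect(GCCause::_g1_periodic_collection,
                                        false /* retry_on_vmop_failure */);
  if (!succeeded) {
    log_debug(gc, periodic)("GC request denied, skipping this interval.");
  }
}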