
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 53920 : imported patch 8218880-g1-crashes-periodic-gc-gclocker
rev 53921 : [mq]: 8218880-shade-review

*** 2114,2131 ****
  void G1CollectedHeap::collect(GCCause::Cause cause) {
    attempt_collect(cause, true);
  }
  
! bool G1CollectedHeap::attempt_collect(GCCause::Cause cause, bool retry_on_vmop_failure) {
    assert_heap_not_locked();
  
!   bool vmop_succeeded;
!   bool should_retry_vmop;
  
    do {
!     should_retry_vmop = false;
  
      uint gc_count_before;
      uint old_marking_count_before;
      uint full_gc_count_before;
--- 2114,2131 ----
  void G1CollectedHeap::collect(GCCause::Cause cause) {
    attempt_collect(cause, true);
  }
  
! bool G1CollectedHeap::attempt_collect(GCCause::Cause cause, bool retry_on_gc_failure) {
    assert_heap_not_locked();
  
!   bool gc_succeeded;
!   bool should_retry_gc;
  
    do {
!     should_retry_gc = false;
  
      uint gc_count_before;
      uint old_marking_count_before;
      uint full_gc_count_before;
*** 2146,2166 ****
                                     gc_count_before,
                                     cause,
                                     true,  /* should_initiate_conc_mark */
                                     g1_policy()->max_pause_time_ms());
        VMThread::execute(&op);
!       vmop_succeeded = op.pause_succeeded();
!       if (!vmop_succeeded && retry_on_vmop_failure) {
          if (old_marking_count_before == _old_marking_cycles_started) {
!           should_retry_vmop = op.should_retry_gc();
          } else {
            // A Full GC happened while we were trying to schedule the
            // concurrent cycle. No point in starting a new cycle given
            // that the whole heap was collected anyway.
          }
  
!         if (should_retry_vmop && GCLocker::is_active_and_needs_gc()) {
            GCLocker::stall_until_clear();
          }
        }
      } else {
        if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
--- 2146,2166 ----
                                     gc_count_before,
                                     cause,
                                     true,  /* should_initiate_conc_mark */
                                     g1_policy()->max_pause_time_ms());
        VMThread::execute(&op);
!       gc_succeeded = op.gc_succeeded();
!       if (!gc_succeeded && retry_on_gc_failure) {
          if (old_marking_count_before == _old_marking_cycles_started) {
!           should_retry_gc = op.should_retry_gc();
          } else {
            // A Full GC happened while we were trying to schedule the
            // concurrent cycle. No point in starting a new cycle given
            // that the whole heap was collected anyway.
          }
  
!         if (should_retry_gc && GCLocker::is_active_and_needs_gc()) {
            GCLocker::stall_until_clear();
          }
        }
      } else {
        if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
*** 2172,2191 ****
                                       gc_count_before,
                                       cause,
                                       false, /* should_initiate_conc_mark */
                                       g1_policy()->max_pause_time_ms());
          VMThread::execute(&op);
!         vmop_succeeded = op.pause_succeeded();
        } else {
          // Schedule a Full GC.
          VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
          VMThread::execute(&op);
!         vmop_succeeded = op.pause_succeeded();
        }
      }
!   } while (should_retry_vmop);
!   return vmop_succeeded;
  }
  
  bool G1CollectedHeap::is_in(const void* p) const {
    if (_hrm->reserved().contains(p)) {
      // Given that we know that p is in the reserved space,
--- 2172,2191 ----
                                       gc_count_before,
                                       cause,
                                       false, /* should_initiate_conc_mark */
                                       g1_policy()->max_pause_time_ms());
          VMThread::execute(&op);
!         gc_succeeded = op.gc_succeeded();
        } else {
          // Schedule a Full GC.
          VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
          VMThread::execute(&op);
!         gc_succeeded = op.gc_succeeded();
        }
      }
!   } while (should_retry_gc);
!   return gc_succeeded;
  }
  
  bool G1CollectedHeap::is_in(const void* p) const {
    if (_hrm->reserved().contains(p)) {
      // Given that we know that p is in the reserved space,
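The three hunks above cover the whole of attempt_collect(): the rename from vmop_succeeded/should_retry_vmop to gc_succeeded/should_retry_gc makes the loop read in terms of GC outcomes rather than VM-operation mechanics. As a rough standalone model of the retried control flow (the names below are simplified stand-ins for illustration, not the HotSpot API):

#include <cstdio>

// Illustrative stand-in for the VM operation; it "fails" while a
// pretend GCLocker is held and asks the caller to retry.
struct FakeCollectOp {
  int stalls_left;                                               // attempts blocked by the fake GCLocker
  void execute()               { if (stalls_left > 0) stalls_left--; }  // VMThread::execute(&op) in the real code
  bool gc_succeeded() const    { return stalls_left == 0; }
  bool should_retry_gc() const { return stalls_left > 0; }
};

// Models the shape of G1CollectedHeap::attempt_collect(): retry the
// operation while it failed, the caller asked for retries, and the
// operation itself says a retry is worthwhile.
bool attempt_collect_model(FakeCollectOp& op, bool retry_on_gc_failure) {
  bool gc_succeeded;
  bool should_retry_gc;
  do {
    should_retry_gc = false;
    op.execute();
    gc_succeeded = op.gc_succeeded();
    if (!gc_succeeded && retry_on_gc_failure) {
      should_retry_gc = op.should_retry_gc();
      if (should_retry_gc) {
        // Real code: GCLocker::stall_until_clear() before retrying.
        std::printf("GCLocker still held, stalling and retrying\n");
      }
    }
  } while (should_retry_gc);
  return gc_succeeded;
}

int main() {
  FakeCollectOp op = { 2 };
  std::printf("collect %s\n", attempt_collect_model(op, true) ? "succeeded" : "failed");
  return 0;
}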
*** 2592,2602 ****
                                 false, /* should_initiate_conc_mark */
                                 g1_policy()->max_pause_time_ms());
    VMThread::execute(&op);
  
    HeapWord* result = op.result();
!   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
    assert(result == NULL || ret_succeeded,
           "the result should be NULL if the VM did not succeed");
    *succeeded = ret_succeeded;
  
    assert_heap_not_locked();
--- 2592,2602 ----
                                 false, /* should_initiate_conc_mark */
                                 g1_policy()->max_pause_time_ms());
    VMThread::execute(&op);
  
    HeapWord* result = op.result();
!   bool ret_succeeded = op.prologue_succeeded() && op.gc_succeeded();
    assert(result == NULL || ret_succeeded,
           "the result should be NULL if the VM did not succeed");
    *succeeded = ret_succeeded;
  
    assert_heap_not_locked();
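This last hunk, in the allocation pause path, only renames the accessor; the invariant the assert documents is unchanged: a non-NULL allocation result implies both the prologue and the GC itself succeeded. A minimal sketch of that caller-side contract, using hypothetical stand-in types rather than the real HeapWord and VM operation classes:

#include <cassert>
#include <cstddef>

// Hypothetical stand-in for the outcome of a collection pause: the
// allocation result plus whether prologue_succeeded() && gc_succeeded().
struct PauseOutcome {
  void* result;
  bool  succeeded;
};

// Mirrors the assert in the hunk above: the result must be NULL
// whenever the VM operation did not fully succeed.
void check_pause_contract(const PauseOutcome& out) {
  assert(out.result == NULL || out.succeeded);
}

int main() {
  int word;
  check_pause_contract({ NULL,  false });  // failed pause, no result: OK
  check_pause_contract({ &word, true  });  // successful pause with a result: OK
  return 0;
}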