2096 _old_marking_cycles_started, _old_marking_cycles_completed);
2097
2098 _old_marking_cycles_completed += 1;
2099
2100 // We need to clear the "in_progress" flag in the CM thread before
2101 // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2102 // is set) so that if a waiter requests another System.gc() it doesn't
2103 // incorrectly see that a marking cycle is still in progress.
2104 if (concurrent) {
2105 _cm_thread->set_idle();
2106 }
2107
2108 // This notify_all() will ensure that a thread that called
2109 // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2110 // and it's waiting for a full GC to finish will be woken up. It is
2111 // waiting in VM_G1CollectForAllocation::doit_epilogue().
2112 FullGCCount_lock->notify_all();
2113 }
2114
2115 void G1CollectedHeap::collect(GCCause::Cause cause) {
2116 attempt_collect(cause, true);
2117 }
2118
2119 bool G1CollectedHeap::attempt_collect(GCCause::Cause cause, bool retry_on_gc_failure) {
2120 assert_heap_not_locked();
2121
2122 bool gc_succeeded;
2123 bool should_retry_gc;
2124
2125 do {
2126 should_retry_gc = false;
2127
2128 uint gc_count_before;
2129 uint old_marking_count_before;
2130 uint full_gc_count_before;
2131
2132 {
2133 MutexLocker ml(Heap_lock);
2134
2135 // Read the GC count while holding the Heap_lock
2136 gc_count_before = total_collections();
2137 full_gc_count_before = total_full_collections();
2138 old_marking_count_before = _old_marking_cycles_started;
2139 }
|
2096 _old_marking_cycles_started, _old_marking_cycles_completed);
2097
2098 _old_marking_cycles_completed += 1;
2099
2100 // We need to clear the "in_progress" flag in the CM thread before
2101 // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2102 // is set) so that if a waiter requests another System.gc() it doesn't
2103 // incorrectly see that a marking cycle is still in progress.
2104 if (concurrent) {
2105 _cm_thread->set_idle();
2106 }
2107
2108 // This notify_all() will ensure that a thread that called
2109 // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2110 // and it's waiting for a full GC to finish will be woken up. It is
2111 // waiting in VM_G1CollectForAllocation::doit_epilogue().
2112 FullGCCount_lock->notify_all();
2113 }
2114
2115 void G1CollectedHeap::collect(GCCause::Cause cause) {
2116 try_collect(cause, true);
2117 }
2118
2119 bool G1CollectedHeap::try_collect(GCCause::Cause cause, bool retry_on_gc_failure) {
2120 assert_heap_not_locked();
2121
2122 bool gc_succeeded;
2123 bool should_retry_gc;
2124
2125 do {
2126 should_retry_gc = false;
2127
2128 uint gc_count_before;
2129 uint old_marking_count_before;
2130 uint full_gc_count_before;
2131
2132 {
2133 MutexLocker ml(Heap_lock);
2134
2135 // Read the GC count while holding the Heap_lock
2136 gc_count_before = total_collections();
2137 full_gc_count_before = total_full_collections();
2138 old_marking_count_before = _old_marking_cycles_started;
2139 }
|