         _old_marking_cycles_started, _old_marking_cycles_completed);

  _old_marking_cycles_completed += 1;

  // We need to clear the "in_progress" flag in the CM thread before
  // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
  // is set) so that if a waiter requests another System.gc() it doesn't
  // incorrectly see that a marking cycle is still in progress.
  if (concurrent) {
    _cm_thread->set_idle();
  }

  // This notify_all() will ensure that a thread that called
  // System.gc() (with ExplicitGCInvokesConcurrent set or not) and
  // is waiting for a full GC to finish will be woken up. It is
  // waiting in VM_G1CollectForAllocation::doit_epilogue().
  FullGCCount_lock->notify_all();
}
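
// Illustrative sketch (assumed shape, not verbatim from the VM-operation
// code): the waiter mentioned above re-checks the completed-cycle count in
// a wait loop under FullGCCount_lock, roughly
//
//   MonitorLocker ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
//   while (old_marking_cycles_completed() <= completed_before) {
//     ml.wait();
//   }
//
// which is why the counter is incremented and the CM thread marked idle
// before notify_all() is called here.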

void G1CollectedHeap::collect(GCCause::Cause cause) {
  attempt_collect(cause, true);
}

bool G1CollectedHeap::attempt_collect(GCCause::Cause cause, bool retry_on_vmop_failure) {
  assert_heap_not_locked();

  bool vmop_succeeded;
  bool should_retry_vmop;

  do {
    should_retry_vmop = false;

    uint gc_count_before;
    uint old_marking_count_before;
    uint full_gc_count_before;

    {
      MutexLocker ml(Heap_lock);

      // Read the GC counts while holding the Heap_lock
      gc_count_before = total_collections();
      full_gc_count_before = total_full_collections();
      old_marking_count_before = _old_marking_cycles_started;
    }
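
    // These "before" counts matter because Heap_lock is released before the
    // VM operation runs: at the safepoint the operation compares them with
    // the current counts and skips the pause if another thread has already
    // performed an equivalent collection in the meantime (see
    // VM_GC_Operation::skip_operation()).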

    if (should_do_concurrent_full_gc(cause)) {
      // Schedule an initial-mark evacuation pause that will start a
      // concurrent cycle. We're setting word_size to 0 which means that
      // we are not requesting a post-GC allocation.
      VM_G1CollectForAllocation op(0,     /* word_size */
                                   gc_count_before,
                                   cause,
                                   true,  /* should_initiate_conc_mark */
                                   g1_policy()->max_pause_time_ms());
      VMThread::execute(&op);
      vmop_succeeded = op.pause_succeeded();
      if (!vmop_succeeded && retry_on_vmop_failure) {
        if (old_marking_count_before == _old_marking_cycles_started) {
          should_retry_vmop = op.should_retry_gc();
        } else {
          // A Full GC happened while we were trying to schedule the
          // concurrent cycle. No point in starting a new cycle given
          // that the whole heap was collected anyway.
        }

        if (should_retry_vmop && GCLocker::is_active_and_needs_gc()) {
          GCLocker::stall_until_clear();
        }
      }
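      // If should_retry_vmop is set at this point, the pause was refused
      // rather than run (typically because the GCLocker was active); after
      // the stall above we loop around and re-read the counts, since other
      // threads may have collected in the meantime.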
    } else {
      if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
          DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {

        // Schedule a standard evacuation pause. We're setting word_size
        // to 0 which means that we are not requesting a post-GC allocation.
        VM_G1CollectForAllocation op(0,     /* word_size */
                                     gc_count_before,
                                     cause,
                                     false, /* should_initiate_conc_mark */
                                     g1_policy()->max_pause_time_ms());
        VMThread::execute(&op);
        vmop_succeeded = op.pause_succeeded();
      } else {
        // Schedule a Full GC.
        VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
        VMThread::execute(&op);
        vmop_succeeded = op.pause_succeeded();
      }
    }
  } while (should_retry_vmop);
  return vmop_succeeded;
}
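
// A hedged usage sketch (hypothetical caller, not code from this file): the
// boolean result lets a caller opt out of the internal retry loop and handle
// a refused VM operation itself, for example:
//
//   G1CollectedHeap* g1h = G1CollectedHeap::heap();
//   if (!g1h->attempt_collect(GCCause::_java_lang_system_gc,
//                             false /* retry_on_vmop_failure */)) {
//     // The pause did not run (e.g. the GCLocker was held); the caller
//     // decides whether to retry, wait, or give up.
//   }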

bool G1CollectedHeap::is_in(const void* p) const {
  if (_hrm->reserved().contains(p)) {
    // Given that we know that p is in the reserved space,
    // heap_region_containing() should successfully
    // return the containing region.
    HeapRegion* hr = heap_region_containing(p);
    return hr->is_in(p);
  } else {
    return false;
  }
}
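
// Note: the _hrm->reserved().contains(p) pre-check above is what makes
// is_in() safe for arbitrary pointers; heap_region_containing() assumes
// (and in debug builds asserts) that its argument lies inside the
// reserved heap range.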

#ifdef ASSERT
bool G1CollectedHeap::is_in_exact(const void* p) const {
  bool contains = reserved_region().contains(p);
  bool available = _hrm->is_available(addr_to_region((HeapWord*)p));
  if (contains && available) {
    return true;
  } else {
    return false;
  }
}
#endif
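
// Illustrative note: is_in_exact() is stricter than is_in() in that it also
// requires the containing region to be committed (_hrm->is_available()),
// which is why it is confined to debug-only assertion code.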