1984   size_t result() { return _used; }
1985 };
1986
1987 size_t G1CollectedHeap::recalculate_used() const {
1988   SumUsedClosure blk;
1989   heap_region_iterate(&blk);
1990   return blk.result();
1991 }
1992
1993 bool G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) {
1994   switch (cause) {
1995     case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
1996     case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
1997     case GCCause::_wb_conc_mark:        return true;
1998     default:                            return false;
1999   }
2000 }
2001
2002 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2003   switch (cause) {
2004     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
2005     case GCCause::_g1_humongous_allocation: return true;
2006     case GCCause::_g1_periodic_collection:  return G1PeriodicGCInvokesConcurrent;
2007     default:                                return is_user_requested_concurrent_full_gc(cause);
2008   }
2009 }
2010
2011 bool G1CollectedHeap::should_upgrade_to_full_gc(GCCause::Cause cause) {
2012   if (policy()->force_upgrade_to_full()) {
2013     return true;
2014   } else if (should_do_concurrent_full_gc(_gc_cause)) {
2015     return false;
2016   } else if (has_regions_left_for_allocation()) {
2017     return false;
2018   } else {
2019     return true;
2020   }
2021 }
2022
2023 #ifndef PRODUCT
2024 void G1CollectedHeap::allocate_dummy_regions() {

     [... lines 2025-2263 not shown ...]

2264   assert_heap_not_locked();
2265
2266   // Lock to get consistent set of values.
2267   uint gc_count_before;
2268   uint full_gc_count_before;
2269   uint old_marking_started_before;
2270   {
2271     MutexLocker ml(Heap_lock);
2272     gc_count_before = total_collections();
2273     full_gc_count_before = total_full_collections();
2274     old_marking_started_before = _old_marking_cycles_started;
2275   }
2276
2277   if (should_do_concurrent_full_gc(cause)) {
2278     return try_collect_concurrently(cause,
2279                                     gc_count_before,
2280                                     old_marking_started_before);
2281   } else if (GCLocker::should_discard(cause, gc_count_before)) {
2282     // Indicate failure to be consistent with VMOp failure due to
2283     // another collection slipping in after our gc_count but before
2284     // our request is processed.  _gc_locker collections upgraded by
2285     // GCLockerInvokesConcurrent are handled above and never discarded.
2286     return false;
2287   } else if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
2288              DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2289
2290     // Schedule a standard evacuation pause. We're setting word_size
2291     // to 0 which means that we are not requesting a post-GC allocation.
2292     VM_G1CollectForAllocation op(0, /* word_size */
2293                                  gc_count_before,
2294                                  cause,
2295                                  policy()->max_pause_time_ms());
2296     VMThread::execute(&op);
2297     return op.gc_succeeded();
2298   } else {
2299     // Schedule a Full GC.
2300     VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2301     VMThread::execute(&op);
2302     return op.gc_succeeded();
2303   }
2304 }
2305
2305
1984   size_t result() { return _used; }
1985 };
1986
1987 size_t G1CollectedHeap::recalculate_used() const {
1988   SumUsedClosure blk;
1989   heap_region_iterate(&blk);
1990   return blk.result();
1991 }
1992
1993 bool G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) {
1994   switch (cause) {
1995     case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
1996     case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
1997     case GCCause::_wb_conc_mark:        return true;
1998     default:                            return false;
1999   }
2000 }
2001
2002 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2003   switch (cause) {
2004     case GCCause::_g1_humongous_allocation: return true;
2005     case GCCause::_g1_periodic_collection:  return G1PeriodicGCInvokesConcurrent;
2006     default:                                return is_user_requested_concurrent_full_gc(cause);
2007   }
2008 }
2009
2010 bool G1CollectedHeap::should_upgrade_to_full_gc(GCCause::Cause cause) {
2011   if (policy()->force_upgrade_to_full()) {
2012     return true;
2013   } else if (should_do_concurrent_full_gc(_gc_cause)) {
2014     return false;
2015   } else if (has_regions_left_for_allocation()) {
2016     return false;
2017   } else {
2018     return true;
2019   }
2020 }
2021
2022 #ifndef PRODUCT
2023 void G1CollectedHeap::allocate_dummy_regions() {

     [... lines 2024-2262 not shown ...]

2263   assert_heap_not_locked();
2264
2265   // Lock to get consistent set of values.
2266   uint gc_count_before;
2267   uint full_gc_count_before;
2268   uint old_marking_started_before;
2269   {
2270     MutexLocker ml(Heap_lock);
2271     gc_count_before = total_collections();
2272     full_gc_count_before = total_full_collections();
2273     old_marking_started_before = _old_marking_cycles_started;
2274   }
2275
2276   if (should_do_concurrent_full_gc(cause)) {
2277     return try_collect_concurrently(cause,
2278                                     gc_count_before,
2279                                     old_marking_started_before);
2280   } else if (GCLocker::should_discard(cause, gc_count_before)) {
2281     // Indicate failure to be consistent with VMOp failure due to
2282     // another collection slipping in after our gc_count but before
2283     // our request is processed.
2284     return false;
2285   } else if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
2286              DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {

2288     // Schedule a standard evacuation pause. We're setting word_size
2289     // to 0 which means that we are not requesting a post-GC allocation.
2290     VM_G1CollectForAllocation op(0, /* word_size */
2291                                  gc_count_before,
2292                                  cause,
2293                                  policy()->max_pause_time_ms());
2294     VMThread::execute(&op);
2295     return op.gc_succeeded();
2296   } else {
2297     // Schedule a Full GC.
2298     VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2299     VMThread::execute(&op);
2300     return op.gc_succeeded();
2301   }
2302 }
2303
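The two listings differ only in how a GCCause::_gc_locker request is routed: in the first, should_do_concurrent_full_gc() can turn it into a concurrent cycle when GCLockerInvokesConcurrent is set, while in the second it falls through to the discard / evacuation-pause paths. As a reading aid, the following is a minimal, self-contained sketch of the dispatch order implemented by the request-handling body shown above (concurrent cycle, then discard, then young evacuation pause, otherwise Full GC). The Cause, Flags and Outcome types, the decide_request() helper, and the old_listing parameter are simplified illustrative stand-ins introduced here; they are not HotSpot types or APIs, and only a subset of causes and flags is modeled.

  #include <cstdio>

  // Illustrative stand-ins only; not HotSpot types.
  enum class Cause { JavaLangSystemGc, GcLocker, HumongousAllocation,
                     PeriodicCollection, WhiteboxYoungGc, Other };

  struct Flags {
    bool explicit_gc_invokes_concurrent = false;  // ~ ExplicitGCInvokesConcurrent
    bool gc_locker_invokes_concurrent   = false;  // ~ GCLockerInvokesConcurrent (first listing only)
    bool periodic_gc_invokes_concurrent = true;   // ~ G1PeriodicGCInvokesConcurrent
  };

  enum class Outcome { ConcurrentCycle, Discarded, EvacuationPause, FullGc };

  // Mirrors the order of the if/else chain above: concurrent cycle first,
  // then a possible discard, then a young (evacuation) pause for
  // GC-locker/whitebox causes, otherwise a Full GC.
  static Outcome decide_request(Cause cause, const Flags& f,
                                bool discarded_by_gc_locker, bool old_listing) {
    bool concurrent =
        cause == Cause::HumongousAllocation ||
        (cause == Cause::PeriodicCollection && f.periodic_gc_invokes_concurrent) ||
        (cause == Cause::JavaLangSystemGc   && f.explicit_gc_invokes_concurrent) ||
        (old_listing && cause == Cause::GcLocker && f.gc_locker_invokes_concurrent);
    if (concurrent)             return Outcome::ConcurrentCycle;
    if (discarded_by_gc_locker) return Outcome::Discarded;
    if (cause == Cause::GcLocker || cause == Cause::WhiteboxYoungGc)
                                return Outcome::EvacuationPause;
    return Outcome::FullGc;
  }

  int main() {
    Flags f;
    f.gc_locker_invokes_concurrent = true;
    // Under the first listing's rules a GC-locker request can become a
    // concurrent cycle; under the second it stays a young evacuation pause.
    std::printf("first listing: %d, second listing: %d\n",
                static_cast<int>(decide_request(Cause::GcLocker, f, false, true)),
                static_cast<int>(decide_request(Cause::GcLocker, f, false, false)));
    return 0;
  }

Printing the outcome as an int is only for brevity; the point of the sketch is the ordering of the checks, which matches the order of the branches in both listings.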