1973
1974 bool G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) {
1975 switch (cause) {
1976 case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
1977 case GCCause::_dcmd_gc_run: return ExplicitGCInvokesConcurrent;
1978 case GCCause::_wb_conc_mark: return true;
1979 default : return false;
1980 }
1981 }
1982
1983 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
1984 switch (cause) {
1985 case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
1986 case GCCause::_g1_humongous_allocation: return true;
1987 case GCCause::_g1_periodic_collection: return G1PeriodicGCInvokesConcurrent;
1988 default: return is_user_requested_concurrent_full_gc(cause);
1989 }
1990 }
1991
// Decide whether a collection attempt should be escalated to a full GC:
// forced by policy, or when no regions remain for allocation and a
// concurrent cycle is not the appropriate response.
1992 bool G1CollectedHeap::should_upgrade_to_full_gc(GCCause::Cause cause) {
1993 if(policy()->force_upgrade_to_full()) {
1994 return true;
// NOTE(review): this consults the heap's current _gc_cause, not the
// 'cause' parameter (which is otherwise unused) — presumably
// intentional, but worth confirming against callers.
1995 } else if (should_do_concurrent_full_gc(_gc_cause)) {
1996 return false;
// No need to upgrade while regions are still available for allocation.
1997 } else if (has_regions_left_for_allocation()) {
1998 return false;
1999 } else {
2000 return true;
2001 }
2002 }
2003
2004 #ifndef PRODUCT
2005 void G1CollectedHeap::allocate_dummy_regions() {
2006 // Let's fill up most of the region
2007 size_t word_size = HeapRegion::GrainWords - 1024;
2008 // And as a result the region we'll allocate will be humongous.
2009 guarantee(is_humongous(word_size), "sanity");
2010
2011 // _filler_array_max_size is set to humongous object threshold
2012 // but temporarily change it to use CollectedHeap::fill_with_object().
2013 SizeTFlagSetting fs(_filler_array_max_size, word_size);
2069
2070 _old_marking_cycles_completed += 1;
2071
2072 // We need to clear the "in_progress" flag in the CM thread before
2073 // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2074 // is set) so that if a waiter requests another System.gc() it doesn't
2075 // incorrectly see that a marking cycle is still in progress.
2076 if (concurrent) {
2077 _cm_thread->set_idle();
2078 }
2079
2080 // Notify threads waiting in System.gc() (with ExplicitGCInvokesConcurrent)
2081 // for a full GC to finish that their wait is over.
2082 ml.notify_all();
2083 }
2084
// Public entry point for requesting a collection: forwards the request
// (with its cause) to try_collect(); the outcome of the attempt is not
// propagated to the caller here.
2085 void G1CollectedHeap::collect(GCCause::Cause cause) {
2086 try_collect(cause);
2087 }
2088
// Compare two GC counters that may wrap around UINT_MAX: returns true
// when x precedes y in modular (mod 2^32) order, i.e. the modular
// distance x - y exceeds half the value range.
static bool gc_counter_less_than(uint x, uint y) {
  const uint diff = x - y;  // well-defined modular difference for unsigned
  return diff > (UINT_MAX / 2);
}
2092
2094 // LOG_COLLECT_CONCURRENTLY(cause, msg, args...)
2095 // Macro so msg printing is format-checked.
// Emits a Debug-level "gc" log line prefixed with the GC cause and the
// requesting thread's name, then the printf-style message. The
// is_enabled() check comes first so the ResourceMark/LogStream setup is
// skipped entirely when Debug logging for "gc" is off. Wrapped in
// do/while (0) so the expansion behaves as a single statement.
2096 #define LOG_COLLECT_CONCURRENTLY(cause, ...) \
2097 do { \
2098 LogTarget(Debug, gc) LOG_COLLECT_CONCURRENTLY_lt; \
2099 if (LOG_COLLECT_CONCURRENTLY_lt.is_enabled()) { \
2100 ResourceMark rm; /* For thread name. */ \
2101 LogStream LOG_COLLECT_CONCURRENTLY_s(&LOG_COLLECT_CONCURRENTLY_lt); \
2102 LOG_COLLECT_CONCURRENTLY_s.print("Try Collect Concurrently (%s) for %s: ", \
2103 GCCause::to_string(cause), \
2104 Thread::current()->name()); \
2105 LOG_COLLECT_CONCURRENTLY_s.print(__VA_ARGS__); \
2106 } \
2107 } while (0)
2107
// Shorthand for logging the final outcome of a try-collect attempt.
2108 #define LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, result) \
2109 LOG_COLLECT_CONCURRENTLY(cause, "complete %s", BOOL_TO_STR(result))
2110
2111 bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
2112 uint gc_counter,
2113 uint old_marking_started_before) {
2114 assert_heap_not_locked();
2115 assert(should_do_concurrent_full_gc(cause),
2116 "Non-concurrent cause %s", GCCause::to_string(cause));
2117
2118 for (uint i = 1; true; ++i) {
2119 // Try to schedule an initial-mark evacuation pause that will
2120 // start a concurrent cycle.
2121 LOG_COLLECT_CONCURRENTLY(cause, "attempt %u", i);
2122 VM_G1TryInitiateConcMark op(gc_counter,
2123 cause,
|
1973
1974 bool G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) {
1975 switch (cause) {
1976 case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
1977 case GCCause::_dcmd_gc_run: return ExplicitGCInvokesConcurrent;
1978 case GCCause::_wb_conc_mark: return true;
1979 default : return false;
1980 }
1981 }
1982
1983 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
1984 switch (cause) {
1985 case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
1986 case GCCause::_g1_humongous_allocation: return true;
1987 case GCCause::_g1_periodic_collection: return G1PeriodicGCInvokesConcurrent;
1988 default: return is_user_requested_concurrent_full_gc(cause);
1989 }
1990 }
1991
1992 bool G1CollectedHeap::should_upgrade_to_full_gc(GCCause::Cause cause) {
1993 if (policy()->force_upgrade_to_full()) {
1994 return true;
1995 } else if (should_do_concurrent_full_gc(_gc_cause)) {
1996 return false;
1997 } else if (has_regions_left_for_allocation()) {
1998 return false;
1999 } else {
2000 return true;
2001 }
2002 }
2003
2004 #ifndef PRODUCT
2005 void G1CollectedHeap::allocate_dummy_regions() {
2006 // Let's fill up most of the region
2007 size_t word_size = HeapRegion::GrainWords - 1024;
2008 // And as a result the region we'll allocate will be humongous.
2009 guarantee(is_humongous(word_size), "sanity");
2010
2011 // _filler_array_max_size is set to humongous object threshold
2012 // but temporarily change it to use CollectedHeap::fill_with_object().
2013 SizeTFlagSetting fs(_filler_array_max_size, word_size);
2069
2070 _old_marking_cycles_completed += 1;
2071
2072 // We need to clear the "in_progress" flag in the CM thread before
2073 // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2074 // is set) so that if a waiter requests another System.gc() it doesn't
2075 // incorrectly see that a marking cycle is still in progress.
2076 if (concurrent) {
2077 _cm_thread->set_idle();
2078 }
2079
2080 // Notify threads waiting in System.gc() (with ExplicitGCInvokesConcurrent)
2081 // for a full GC to finish that their wait is over.
2082 ml.notify_all();
2083 }
2084
// Public entry point for requesting a collection: forwards the request
// (with its cause) to try_collect(); the outcome of the attempt is not
// propagated to the caller here.
2085 void G1CollectedHeap::collect(GCCause::Cause cause) {
2086 try_collect(cause);
2087 }
2088
// Return true if (x < y) with allowance for wraparound: x precedes y in
// modular (mod 2^32) order exactly when the modular distance x - y is
// greater than half the unsigned range.
static bool gc_counter_less_than(uint x, uint y) {
  const uint modular_distance = x - y;  // unsigned subtraction wraps safely
  return modular_distance > (UINT_MAX / 2);
}
2093
2094 // LOG_COLLECT_CONCURRENTLY(cause, msg, args...)
2095 // Macro so msg printing is format-checked.
// Emits a Trace-level "gc" log line prefixed with the requesting
// thread's name and the GC cause, then the printf-style message. The
// is_enabled() check comes first so the ResourceMark/LogStream setup is
// skipped entirely when Trace logging for "gc" is off. Wrapped in
// do/while (0) so the expansion behaves as a single statement.
2096 #define LOG_COLLECT_CONCURRENTLY(cause, ...) \
2097 do { \
2098 LogTarget(Trace, gc) LOG_COLLECT_CONCURRENTLY_lt; \
2099 if (LOG_COLLECT_CONCURRENTLY_lt.is_enabled()) { \
2100 ResourceMark rm; /* For thread name. */ \
2101 LogStream LOG_COLLECT_CONCURRENTLY_s(&LOG_COLLECT_CONCURRENTLY_lt); \
2102 LOG_COLLECT_CONCURRENTLY_s.print("%s: Try Collect Concurrently (%s): ", \
2103 Thread::current()->name(), \
2104 GCCause::to_string(cause)); \
2105 LOG_COLLECT_CONCURRENTLY_s.print(__VA_ARGS__); \
2106 } \
2107 } while (0)
2108
// Shorthand for logging the final outcome of a try-collect attempt.
2109 #define LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, result) \
2110 LOG_COLLECT_CONCURRENTLY(cause, "complete %s", BOOL_TO_STR(result))
2111
2112 bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
2113 uint gc_counter,
2114 uint old_marking_started_before) {
2115 assert_heap_not_locked();
2116 assert(should_do_concurrent_full_gc(cause),
2117 "Non-concurrent cause %s", GCCause::to_string(cause));
2118
2119 for (uint i = 1; true; ++i) {
2120 // Try to schedule an initial-mark evacuation pause that will
2121 // start a concurrent cycle.
2122 LOG_COLLECT_CONCURRENTLY(cause, "attempt %u", i);
2123 VM_G1TryInitiateConcMark op(gc_counter,
2124 cause,
|