      ResourceMark rm; /* For thread name. */                           \
      LogStream LOG_COLLECT_CONCURRENTLY_s(&LOG_COLLECT_CONCURRENTLY_lt); \
      LOG_COLLECT_CONCURRENTLY_s.print("%s: Try Collect Concurrently (%s): ", \
                                       Thread::current()->name(),       \
                                       GCCause::to_string(cause));      \
      LOG_COLLECT_CONCURRENTLY_s.print(__VA_ARGS__);                    \
    }                                                                   \
  } while (0)

#define LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, result) \
  LOG_COLLECT_CONCURRENTLY(cause, "complete %s", BOOL_TO_STR(result))

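// Usage sketch (illustrative, not part of the sources): assuming the
// relevant log tag is enabled, a call such as
//
//   LOG_COLLECT_CONCURRENTLY(cause, "attempt %u", i);
//
// emits a line of the form
//
//   <thread name>: Try Collect Concurrently (<GC cause string>): attempt 1
//
// and LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, result) appends
// "complete true" or "complete false" in the same format. The do/while (0)
// wrapper lets the multi-statement macro behave as a single statement, so
// it composes safely with unbraced if/else.
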
bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
                                               uint gc_counter,
                                               uint old_marking_started_before) {
  assert_heap_not_locked();
  assert(should_do_concurrent_full_gc(cause),
         "Non-concurrent cause %s", GCCause::to_string(cause));

  for (uint i = 1; true; ++i) {
    // Try to schedule a concurrent start evacuation pause that will
    // start a concurrent cycle.
    LOG_COLLECT_CONCURRENTLY(cause, "attempt %u", i);
    VM_G1TryInitiateConcMark op(gc_counter,
                                cause,
                                policy()->max_pause_time_ms());
    VMThread::execute(&op);

    // Request is trivially finished.
    if (cause == GCCause::_g1_periodic_collection) {
      LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, op.gc_succeeded());
      return op.gc_succeeded();
    }

    // If the VMOp skipped initiating the concurrent marking cycle because
    // we're terminating, then we're done.
    if (op.terminating()) {
      LOG_COLLECT_CONCURRENTLY(cause, "skipped: terminating");
      return false;
    }

    // ... [intervening code elided in this excerpt] ...

      // to start a concurrent cycle.
      if (old_marking_started_before != old_marking_started_after) {
        LOG_COLLECT_CONCURRENTLY(cause, "ignoring STW full GC");
        old_marking_started_before = old_marking_started_after;
      }
    } else if (!GCCause::is_user_requested_gc(cause)) {
      // For an "automatic" (not user-requested) collection, we just need to
      // ensure that progress is made.
      //
      // Request is finished if any of
      // (1) the VMOp successfully performed a GC,
      // (2) a concurrent cycle was already in progress,
      // (3) whitebox is controlling concurrent cycles,
      // (4) a new cycle was started (by this thread or some other), or
      // (5) a Full GC was performed.
      // Cases (4) and (5) are detected together by a change to
      // _old_marking_cycles_started.
      //
      // Note that (1) does not imply (4). If we're still in the mixed
      // phase of an earlier concurrent collection, the request to make the
      // collection a concurrent start won't be honored. If we don't check for
      // both conditions we'll spin doing back-to-back collections.
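      //
      // Sketch of the detection (simplified; assumes, per the comment
      // above, that old_marking_started_after was re-sampled from
      // _old_marking_cycles_started after the VMOp ran):
      //
      //   uint before = old_marking_started_before;   // from our caller
      //   uint after  = old_marking_started_after;    // post-VMOp sample
      //   bool progressed = (before != after);        // cases (4) and (5)
      //   bool finished   = op.gc_succeeded() || ... || progressed;
      //
      // Checking only the counter would spin during the mixed phase:
      // gc_succeeded() can be true there while no new cycle starts, so
      // every retry would run another back-to-back collection.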
      if (op.gc_succeeded() ||
          op.cycle_already_in_progress() ||
          op.whitebox_attached() ||
          (old_marking_started_before != old_marking_started_after)) {
        LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
        return true;
      }
    } else { // User-requested GC.
      // For a user-requested collection, we want to ensure that a complete
      // full collection has been performed before returning, but without
      // waiting for more than needed.

      // For user-requested GCs (unlike the non-user-requested cases above),
      // a successful VMOp implies a new cycle was started. That's good,
      // because it's not clear what we should do otherwise. Trying again
      // just does back-to-back GCs. We can't wait for someone else to start
      // a cycle, and returning would fail to meet the goal of ensuring a
      // full collection was performed.
      assert(!op.gc_succeeded() ||
             (old_marking_started_before != old_marking_started_after),
      // ... [assert message and intervening code elided in this excerpt] ...

  _gc_timer_stw->register_gc_start();
  _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());

  // Wait until any in-progress concurrent root region scanning has finished:
  // objects in those regions must be completely scanned before this pause
  // can move them.
  wait_for_root_region_scanning();

  print_heap_before_gc();
  print_heap_regions();
  trace_heap_before_gc(_gc_tracer_stw);

  _verifier->verify_region_sets_optional();
  _verifier->verify_dirty_young_regions();

  // We should not be doing a concurrent start pause unless the concurrent
  // mark thread is running.
  if (!_cm_thread->should_terminate()) {
    // This call will decide whether this pause is a concurrent start
    // pause. If it is, in_concurrent_start_gc() will return true
    // for the duration of this pause.
    policy()->decide_on_conc_mark_initiation();
  }

  // We do not allow concurrent start to be piggy-backed on a mixed GC.
  assert(!collector_state()->in_concurrent_start_gc() ||
         collector_state()->in_young_only_phase(), "sanity");
  // We also do not allow mixed GCs during marking.
  assert(!collector_state()->mark_or_rebuild_in_progress() ||
         collector_state()->in_young_only_phase(), "sanity");
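
  // Taken together, the two asserts above pin down the pause schedule: a
  // concurrent start pause only happens in the young-only phase, and while
  // marking or remembered-set rebuild is in progress only young-only pauses
  // are scheduled. (Summary derived from the asserts; not an exhaustive
  // state table.)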

  // Record whether this pause is a concurrent start. When the current
  // thread has completed its logging output and it's safe to signal
  // the CM thread, the flag's value in the policy has been reset.
  bool should_start_conc_mark = collector_state()->in_concurrent_start_gc();
  if (should_start_conc_mark) {
    _cm->gc_tracer_cm()->set_gc_cause(gc_cause());
  }

  // Inner scope for scope-based logging, timers, and stats collection
  {
    G1EvacuationInfo evacuation_info;

    _gc_tracer_stw->report_yc_type(collector_state()->yc_type());

    GCTraceCPUTime tcpu;