9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/concurrentMarkThread.inline.hpp"
27 #include "gc/g1/g1CollectedHeap.inline.hpp"
28 #include "gc/g1/g1CollectorPolicy.hpp"
29 #include "gc/g1/g1Log.hpp"
30 #include "gc/g1/g1MMUTracker.hpp"
31 #include "gc/g1/suspendibleThreadSet.hpp"
32 #include "gc/g1/vm_operations_g1.hpp"
33 #include "gc/shared/gcId.hpp"
34 #include "gc/shared/gcTrace.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "runtime/vmThread.hpp"
37
38 // ======= Concurrent Mark Thread ========
39
40 // The CM thread is created when the G1 garbage collector is used
41
// The single SurrogateLockerThread shared by the class; NULL until it is
// created (presumably on first use — confirm against makeSurrogateLockerThread).
SurrogateLockerThread*
     ConcurrentMarkThread::_slt = NULL;
44
// Construct the G1 concurrent mark thread for the given ConcurrentMark
// instance and start it immediately. The thread begins in the Idle state
// with zeroed virtual-time accumulators.
ConcurrentMarkThread::ConcurrentMarkThread(ConcurrentMark* cm) :
  ConcurrentGCThread(),
  _cm(cm),                 // the marking state machine this thread drives
  _state(Idle),            // no marking cycle in progress yet
  _vtime_accum(0.0),       // vtime accumulated over whole marking cycles
  _vtime_mark_accum(0.0) { // vtime accumulated over the mark phase only

  set_name("G1 Main Marker");
  create_and_start();
}
61 CMCheckpointRootsFinalClosure(ConcurrentMark* cm) :
62 _cm(cm) {}
63
64 void do_void(){
65 _cm->checkpointRootsFinal(false); // !clear_all_soft_refs
66 }
67 };
68
69 class CMCleanUp: public VoidClosure {
70 ConcurrentMark* _cm;
71 public:
72
73 CMCleanUp(ConcurrentMark* cm) :
74 _cm(cm) {}
75
76 void do_void(){
77 _cm->cleanup();
78 }
79 };
80
81 // We want to avoid that the logging from the concurrent thread is mixed
82 // with the logging from a STW GC. So, if necessary join the STS to ensure
83 // that the logging is done either before or after the STW logging.
84 void ConcurrentMarkThread::cm_log(bool doit, bool join_sts, const char* fmt, ...) {
85 if (doit) {
86 SuspendibleThreadSetJoiner sts_joiner(join_sts);
87 va_list args;
88 va_start(args, fmt);
89 gclog_or_tty->gclog_stamp();
90 gclog_or_tty->vprint_cr(fmt, args);
91 va_end(args);
92 }
93 }
94
95 // Marking pauses can be scheduled flexibly, so we might delay marking to meet MMU.
96 void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) {
97 if (g1_policy->adaptive_young_list_length()) {
98 double now = os::elapsedTime();
99 double prediction_ms = remark ? g1_policy->predict_remark_time_ms()
100 : g1_policy->predict_cleanup_time_ms();
101 G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
102 jlong sleep_time_ms = mmu_tracker->when_ms(now, prediction_ms);
103 os::sleep(this, sleep_time_ms, false);
104 }
105 }
// Thread entry point: perform per-thread initialization, wait for the
// universe to be initialized, run the marking service until asked to stop,
// then terminate the thread.
void ConcurrentMarkThread::run() {
  initialize_in_thread();
  wait_for_universe_init();

  run_service();

  terminate();
}
114
126 }
127
128 assert(GCId::current() != GCId::undefined(), "GC id should have been set up by the initial mark GC.");
129 {
130 ResourceMark rm;
131 HandleMark hm;
132 double cycle_start = os::elapsedVTime();
133
134 // We have to ensure that we finish scanning the root regions
135 // before the next GC takes place. To ensure this we have to
136 // make sure that we do not join the STS until the root regions
137 // have been scanned. If we did then it's possible that a
138 // subsequent GC could block us from joining the STS and proceed
139 // without the root regions have been scanned which would be a
140 // correctness issue.
141
142 if (!cm()->has_aborted()) {
143 _cm->scanRootRegions();
144 }
145
146 double mark_start_sec = os::elapsedTime();
147 cm_log(G1Log::fine(), true, "[GC concurrent-mark-start]");
148
149 int iter = 0;
150 do {
151 iter++;
152 if (!cm()->has_aborted()) {
153 _cm->markFromRoots();
154 }
155
156 double mark_end_time = os::elapsedVTime();
157 double mark_end_sec = os::elapsedTime();
158 _vtime_mark_accum += (mark_end_time - cycle_start);
159 if (!cm()->has_aborted()) {
160 delay_to_keep_mmu(g1_policy, true /* remark */);
161
162 cm_log(G1Log::fine(), true, "[GC concurrent-mark-end, %1.7lf secs]", mark_end_sec - mark_start_sec);
163
164 CMCheckpointRootsFinalClosure final_cl(_cm);
165 VM_CGC_Operation op(&final_cl, "GC remark", true /* needs_pll */);
166 VMThread::execute(&op);
167 }
168 if (cm()->restart_for_overflow()) {
169 cm_log(G1TraceMarkStackOverflow, true, "Restarting conc marking because of MS overflow in remark (restart #%d).", iter);
170 cm_log(G1Log::fine(), true, "[GC concurrent-mark-restart-for-overflow]");
171 }
172 } while (cm()->restart_for_overflow());
173
174 double end_time = os::elapsedVTime();
175 // Update the total virtual time before doing this, since it will try
176 // to measure it to get the vtime for this marking. We purposely
177 // neglect the presumably-short "completeCleanup" phase here.
178 _vtime_accum = (end_time - _vtime_start);
179
180 if (!cm()->has_aborted()) {
181 delay_to_keep_mmu(g1_policy, false /* cleanup */);
182
183 CMCleanUp cl_cl(_cm);
184 VM_CGC_Operation op(&cl_cl, "GC cleanup", false /* needs_pll */);
185 VMThread::execute(&op);
186 } else {
187 // We don't want to update the marking status if a GC pause
188 // is already underway.
189 SuspendibleThreadSetJoiner sts_join;
190 g1h->collector_state()->set_mark_in_progress(false);
191 }
192
193 // Check if cleanup set the free_regions_coming flag. If it
194 // hasn't, we can just skip the next step.
195 if (g1h->free_regions_coming()) {
196 // The following will finish freeing up any regions that we
197 // found to be empty during cleanup. We'll do this part
198 // without joining the suspendible set. If an evacuation pause
199 // takes place, then we would carry on freeing regions in
200 // case they are needed by the pause. If a Full GC takes
201 // place, it would wait for us to process the regions
202 // reclaimed by cleanup.
203
204 double cleanup_start_sec = os::elapsedTime();
205 cm_log(G1Log::fine(), false, "[GC concurrent-cleanup-start]");
206
207 // Now do the concurrent cleanup operation.
208 _cm->completeCleanup();
209
210 // Notify anyone who's waiting that there are no more free
211 // regions coming. We have to do this before we join the STS
212 // (in fact, we should not attempt to join the STS in the
213 // interval between finishing the cleanup pause and clearing
214 // the free_regions_coming flag) otherwise we might deadlock:
215 // a GC worker could be blocked waiting for the notification
216 // whereas this thread will be blocked for the pause to finish
217 // while it's trying to join the STS, which is conditional on
218 // the GC workers finishing.
219 g1h->reset_free_regions_coming();
220
221 double cleanup_end_sec = os::elapsedTime();
222 cm_log(G1Log::fine(), true, "[GC concurrent-cleanup-end, %1.7lf secs]", cleanup_end_sec - cleanup_start_sec);
223 }
224 guarantee(cm()->cleanup_list_is_empty(),
225 "at this point there should be no regions on the cleanup list");
226
227 // There is a tricky race before recording that the concurrent
228 // cleanup has completed and a potential Full GC starting around
229 // the same time. We want to make sure that the Full GC calls
230 // abort() on concurrent mark after
231 // record_concurrent_mark_cleanup_completed(), since abort() is
232 // the method that will reset the concurrent mark state. If we
233 // end up calling record_concurrent_mark_cleanup_completed()
234 // after abort() then we might incorrectly undo some of the work
235 // abort() did. Checking the has_aborted() flag after joining
236 // the STS allows the correct ordering of the two methods. There
237 // are two scenarios:
238 //
239 // a) If we reach here before the Full GC, the fact that we have
240 // joined the STS means that the Full GC cannot start until we
241 // leave the STS, so record_concurrent_mark_cleanup_completed()
242 // will complete before abort() is called.
243 //
244 // b) If we reach here during the Full GC, we'll be held up from
245 // joining the STS until the Full GC is done, which means that
246 // abort() will have completed and has_aborted() will return
247 // true to prevent us from calling
248 // record_concurrent_mark_cleanup_completed() (and, in fact, it's
249 // not needed any more as the concurrent mark state has been
250 // already reset).
251 {
252 SuspendibleThreadSetJoiner sts_join;
253 if (!cm()->has_aborted()) {
254 g1_policy->record_concurrent_mark_cleanup_completed();
255 } else {
256 cm_log(G1Log::fine(), false, "[GC concurrent-mark-abort]");
257 }
258 }
259
260 // We now want to allow clearing of the marking bitmap to be
261 // suspended by a collection pause.
262 // We may have aborted just before the remark. Do not bother clearing the
263 // bitmap then, as it has been done during mark abort.
264 if (!cm()->has_aborted()) {
265 _cm->clearNextBitmap();
266 } else {
267 assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
268 }
269 }
270
271 // Update the number of full collections that have been
272 // completed. This will also notify the FullGCCount_lock in case a
273 // Java thread is waiting for a full GC to happen (e.g., it
274 // called System.gc() with +ExplicitGCInvokesConcurrent).
275 {
276 SuspendibleThreadSetJoiner sts_join;
|
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/concurrentMarkThread.inline.hpp"
27 #include "gc/g1/g1CollectedHeap.inline.hpp"
28 #include "gc/g1/g1CollectorPolicy.hpp"
29 #include "gc/g1/g1MMUTracker.hpp"
30 #include "gc/g1/suspendibleThreadSet.hpp"
31 #include "gc/g1/vm_operations_g1.hpp"
32 #include "gc/shared/gcId.hpp"
33 #include "gc/shared/gcTrace.hpp"
34 #include "logging/log.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "runtime/vmThread.hpp"
37
38 // ======= Concurrent Mark Thread ========
39
40 // The CM thread is created when the G1 garbage collector is used
41
// The single SurrogateLockerThread shared by the class; NULL until it is
// created (presumably on first use — confirm against makeSurrogateLockerThread).
SurrogateLockerThread*
     ConcurrentMarkThread::_slt = NULL;
44
// Construct the G1 concurrent mark thread for the given ConcurrentMark
// instance and start it immediately. The thread begins in the Idle state
// with zeroed virtual-time accumulators.
ConcurrentMarkThread::ConcurrentMarkThread(ConcurrentMark* cm) :
  ConcurrentGCThread(),
  _cm(cm),                 // the marking state machine this thread drives
  _state(Idle),            // no marking cycle in progress yet
  _vtime_accum(0.0),       // vtime accumulated over whole marking cycles
  _vtime_mark_accum(0.0) { // vtime accumulated over the mark phase only

  set_name("G1 Main Marker");
  create_and_start();
}
61 CMCheckpointRootsFinalClosure(ConcurrentMark* cm) :
62 _cm(cm) {}
63
64 void do_void(){
65 _cm->checkpointRootsFinal(false); // !clear_all_soft_refs
66 }
67 };
68
69 class CMCleanUp: public VoidClosure {
70 ConcurrentMark* _cm;
71 public:
72
73 CMCleanUp(ConcurrentMark* cm) :
74 _cm(cm) {}
75
76 void do_void(){
77 _cm->cleanup();
78 }
79 };
80
81 // Marking pauses can be scheduled flexibly, so we might delay marking to meet MMU.
82 void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) {
83 if (g1_policy->adaptive_young_list_length()) {
84 double now = os::elapsedTime();
85 double prediction_ms = remark ? g1_policy->predict_remark_time_ms()
86 : g1_policy->predict_cleanup_time_ms();
87 G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
88 jlong sleep_time_ms = mmu_tracker->when_ms(now, prediction_ms);
89 os::sleep(this, sleep_time_ms, false);
90 }
91 }
// Thread entry point: perform per-thread initialization, wait for the
// universe to be initialized, run the marking service until asked to stop,
// then terminate the thread.
void ConcurrentMarkThread::run() {
  initialize_in_thread();
  wait_for_universe_init();

  run_service();

  terminate();
}
100
112 }
113
114 assert(GCId::current() != GCId::undefined(), "GC id should have been set up by the initial mark GC.");
115 {
116 ResourceMark rm;
117 HandleMark hm;
118 double cycle_start = os::elapsedVTime();
119
120 // We have to ensure that we finish scanning the root regions
121 // before the next GC takes place. To ensure this we have to
122 // make sure that we do not join the STS until the root regions
123 // have been scanned. If we did then it's possible that a
124 // subsequent GC could block us from joining the STS and proceed
125 // without the root regions have been scanned which would be a
126 // correctness issue.
127
128 if (!cm()->has_aborted()) {
129 _cm->scanRootRegions();
130 }
131
132 jlong mark_start = os::elapsed_counter();
133 log_info(gc)("Concurrent Mark (%.3fs)", TimeHelper::counter_to_seconds(mark_start));
134
135 int iter = 0;
136 do {
137 iter++;
138 if (!cm()->has_aborted()) {
139 _cm->markFromRoots();
140 }
141
142 double mark_end_time = os::elapsedVTime();
143 jlong mark_end = os::elapsed_counter();
144 _vtime_mark_accum += (mark_end_time - cycle_start);
145 if (!cm()->has_aborted()) {
146 delay_to_keep_mmu(g1_policy, true /* remark */);
147 log_info(gc)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
148 TimeHelper::counter_to_seconds(mark_start),
149 TimeHelper::counter_to_seconds(mark_end),
150 TimeHelper::counter_to_millis(mark_end - mark_start));
151
152 CMCheckpointRootsFinalClosure final_cl(_cm);
153 VM_CGC_Operation op(&final_cl, "Pause Remark", true /* needs_pll */);
154 VMThread::execute(&op);
155 }
156 if (cm()->restart_for_overflow()) {
157 log_debug(gc)("Restarting conc marking because of MS overflow in remark (restart #%d).", iter);
158 log_info(gc)("Concurrent Mark restart for overflow");
159 }
160 } while (cm()->restart_for_overflow());
161
162 double end_time = os::elapsedVTime();
163 // Update the total virtual time before doing this, since it will try
164 // to measure it to get the vtime for this marking. We purposely
165 // neglect the presumably-short "completeCleanup" phase here.
166 _vtime_accum = (end_time - _vtime_start);
167
168 if (!cm()->has_aborted()) {
169 delay_to_keep_mmu(g1_policy, false /* cleanup */);
170
171 CMCleanUp cl_cl(_cm);
172 VM_CGC_Operation op(&cl_cl, "Pause Cleanup", false /* needs_pll */);
173 VMThread::execute(&op);
174 } else {
175 // We don't want to update the marking status if a GC pause
176 // is already underway.
177 SuspendibleThreadSetJoiner sts_join;
178 g1h->collector_state()->set_mark_in_progress(false);
179 }
180
181 // Check if cleanup set the free_regions_coming flag. If it
182 // hasn't, we can just skip the next step.
183 if (g1h->free_regions_coming()) {
184 // The following will finish freeing up any regions that we
185 // found to be empty during cleanup. We'll do this part
186 // without joining the suspendible set. If an evacuation pause
187 // takes place, then we would carry on freeing regions in
188 // case they are needed by the pause. If a Full GC takes
189 // place, it would wait for us to process the regions
190 // reclaimed by cleanup.
191
192 jlong cleanup_start = os::elapsed_counter();
193 log_info(gc)("Concurrent Cleanup (%.3fs)", TimeHelper::counter_to_seconds(cleanup_start));
194
195 // Now do the concurrent cleanup operation.
196 _cm->completeCleanup();
197
198 // Notify anyone who's waiting that there are no more free
199 // regions coming. We have to do this before we join the STS
200 // (in fact, we should not attempt to join the STS in the
201 // interval between finishing the cleanup pause and clearing
202 // the free_regions_coming flag) otherwise we might deadlock:
203 // a GC worker could be blocked waiting for the notification
204 // whereas this thread will be blocked for the pause to finish
205 // while it's trying to join the STS, which is conditional on
206 // the GC workers finishing.
207 g1h->reset_free_regions_coming();
208
209 jlong cleanup_end = os::elapsed_counter();
210 log_info(gc)("Concurrent Cleanup (%.3fs, %.3fs) %.3fms",
211 TimeHelper::counter_to_seconds(cleanup_start),
212 TimeHelper::counter_to_seconds(cleanup_end),
213 TimeHelper::counter_to_millis(cleanup_end - cleanup_start));
214 }
215 guarantee(cm()->cleanup_list_is_empty(),
216 "at this point there should be no regions on the cleanup list");
217
218 // There is a tricky race before recording that the concurrent
219 // cleanup has completed and a potential Full GC starting around
220 // the same time. We want to make sure that the Full GC calls
221 // abort() on concurrent mark after
222 // record_concurrent_mark_cleanup_completed(), since abort() is
223 // the method that will reset the concurrent mark state. If we
224 // end up calling record_concurrent_mark_cleanup_completed()
225 // after abort() then we might incorrectly undo some of the work
226 // abort() did. Checking the has_aborted() flag after joining
227 // the STS allows the correct ordering of the two methods. There
228 // are two scenarios:
229 //
230 // a) If we reach here before the Full GC, the fact that we have
231 // joined the STS means that the Full GC cannot start until we
232 // leave the STS, so record_concurrent_mark_cleanup_completed()
233 // will complete before abort() is called.
234 //
235 // b) If we reach here during the Full GC, we'll be held up from
236 // joining the STS until the Full GC is done, which means that
237 // abort() will have completed and has_aborted() will return
238 // true to prevent us from calling
239 // record_concurrent_mark_cleanup_completed() (and, in fact, it's
240 // not needed any more as the concurrent mark state has been
241 // already reset).
242 {
243 SuspendibleThreadSetJoiner sts_join;
244 if (!cm()->has_aborted()) {
245 g1_policy->record_concurrent_mark_cleanup_completed();
246 } else {
247 log_info(gc)("Concurrent Mark abort");
248 }
249 }
250
251 // We now want to allow clearing of the marking bitmap to be
252 // suspended by a collection pause.
253 // We may have aborted just before the remark. Do not bother clearing the
254 // bitmap then, as it has been done during mark abort.
255 if (!cm()->has_aborted()) {
256 _cm->clearNextBitmap();
257 } else {
258 assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
259 }
260 }
261
262 // Update the number of full collections that have been
263 // completed. This will also notify the FullGCCount_lock in case a
264 // Java thread is waiting for a full GC to happen (e.g., it
265 // called System.gc() with +ExplicitGCInvokesConcurrent).
266 {
267 SuspendibleThreadSetJoiner sts_join;
|