 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1Log.hpp"
#include "gc/g1/g1MMUTracker.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/g1/vm_operations_g1.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTrace.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/vmThread.hpp"

// ======= Concurrent Mark Thread ========

// The CM thread is created when the G1 garbage collector is used.

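// Context note (not in the original file): the SurrogateLockerThread
// performs Java-level synchronization on behalf of the VM thread; G1
// uses it to take the reference pending list lock for VM operations
// created with needs_pll (see VM_CGC_Operation below).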
SurrogateLockerThread* ConcurrentMarkThread::_slt = NULL;

ConcurrentMarkThread::ConcurrentMarkThread(ConcurrentMark* cm) :
  ConcurrentGCThread(),
  _cm(cm),
  _state(Idle),
  _vtime_accum(0.0),
  _vtime_mark_accum(0.0) {

  set_name("G1 Main Marker");
  create_and_start();
}

class CMCheckpointRootsFinalClosure: public VoidClosure {
  ConcurrentMark* _cm;
public:

  CMCheckpointRootsFinalClosure(ConcurrentMark* cm) :
    _cm(cm) {}

  void do_void() {
    _cm->checkpointRootsFinal(false); // !clear_all_soft_refs
  }
};

class CMCleanUp: public VoidClosure {
  ConcurrentMark* _cm;
public:

  CMCleanUp(ConcurrentMark* cm) :
    _cm(cm) {}

  void do_void() {
    _cm->cleanup();
  }
};

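// Both closures above are executed by the VM thread at a safepoint,
// wrapped in a VM_CGC_Operation (see run_service() below):
// checkpointRootsFinal() implements the remark pause and cleanup()
// the cleanup pause.
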
// We want to avoid mixing the logging from the concurrent thread with
// the logging from an STW GC, so, if necessary, we join the STS to
// ensure that the logging happens either strictly before or strictly
// after the STW logging.
void ConcurrentMarkThread::cm_log(bool doit, bool join_sts, const char* fmt, ...) {
  if (doit) {
    SuspendibleThreadSetJoiner sts_joiner(join_sts);
    va_list args;
    va_start(args, fmt);
    gclog_or_tty->gclog_stamp();
    gclog_or_tty->vprint_cr(fmt, args);
    va_end(args);
  }
}

// Marking pauses can be scheduled flexibly, so we might delay the
// marking pauses to meet the MMU goal.
void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) {
  if (g1_policy->adaptive_young_list_length()) {
    double now = os::elapsedTime();
    double prediction_ms = remark ? g1_policy->predict_remark_time_ms()
                                  : g1_policy->predict_cleanup_time_ms();
    G1MMUTracker* mmu_tracker = g1_policy->mmu_tracker();
    jlong sleep_time_ms = mmu_tracker->when_ms(now, prediction_ms);
    os::sleep(this, sleep_time_ms, false);
  }
}
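
// A worked example of the MMU logic above (the flag values are
// illustrative, not taken from this file): with -XX:MaxGCPauseMillis=200
// and -XX:GCPauseIntervalMillis=1000, the MMU tracker allows at most
// 200ms of pause time in any 1000ms window. If a remark pause is
// predicted to take 50ms and the current window has little headroom
// left, when_ms() returns how many milliseconds this thread should
// sleep so that starting the pause would not violate that goal.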

void ConcurrentMarkThread::run() {
  initialize_in_thread();
  wait_for_universe_init();

  run_service();

  terminate();
}

void ConcurrentMarkThread::run_service() {
  _vtime_start = os::elapsedVTime();

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1_policy = g1h->g1_policy();

  while (!_should_terminate) {
    // wait until started is set.
    sleepBeforeNextCycle();
    if (_should_terminate) {
      break;
    }

    assert(GCId::current() != GCId::undefined(), "GC id should have been set up by the initial mark GC.");
    {
      ResourceMark rm;
      HandleMark hm;
      double cycle_start = os::elapsedVTime();

      // We have to ensure that we finish scanning the root regions
      // before the next GC takes place. To ensure this we have to
      // make sure that we do not join the STS until the root regions
      // have been scanned. If we did, then it's possible that a
      // subsequent GC could block us from joining the STS and proceed
      // without the root regions having been scanned, which would be a
      // correctness issue.

      if (!cm()->has_aborted()) {
        _cm->scanRootRegions();
      }

      double mark_start_sec = os::elapsedTime();
      cm_log(G1Log::fine(), true, "[GC concurrent-mark-start]");

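      // Marking may have to be redone: if the global mark stack
      // overflows during the remark pause, remark requests a restart
      // via restart_for_overflow() and the markFromRoots()/remark
      // sequence below is repeated.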
      int iter = 0;
      do {
        iter++;
        if (!cm()->has_aborted()) {
          _cm->markFromRoots();
        }

        double mark_end_time = os::elapsedVTime();
        double mark_end_sec = os::elapsedTime();
        _vtime_mark_accum += (mark_end_time - cycle_start);
        if (!cm()->has_aborted()) {
          delay_to_keep_mmu(g1_policy, true /* remark */);

          cm_log(G1Log::fine(), true, "[GC concurrent-mark-end, %1.7lf secs]", mark_end_sec - mark_start_sec);

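          // The remark pause is scheduled as a VM operation. A note on
          // needs_pll (an inference from the flag name and G1's use of
          // the SurrogateLockerThread, not stated in this file): remark
          // processes java.lang.ref references, so the operation takes
          // the reference pending list lock before the safepoint.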
          CMCheckpointRootsFinalClosure final_cl(_cm);
          VM_CGC_Operation op(&final_cl, "GC remark", true /* needs_pll */);
          VMThread::execute(&op);
        }
        if (cm()->restart_for_overflow()) {
          cm_log(G1TraceMarkStackOverflow, true, "Restarting conc marking because of MS overflow in remark (restart #%d).", iter);
          cm_log(G1Log::fine(), true, "[GC concurrent-mark-restart-for-overflow]");
        }
      } while (cm()->restart_for_overflow());

      double end_time = os::elapsedVTime();
      // Update the total virtual time before doing this, since it will try
      // to measure it to get the vtime for this marking. We purposely
      // neglect the presumably-short "completeCleanup" phase here.
      _vtime_accum = (end_time - _vtime_start);

      if (!cm()->has_aborted()) {
        delay_to_keep_mmu(g1_policy, false /* cleanup */);

        CMCleanUp cl_cl(_cm);
        VM_CGC_Operation op(&cl_cl, "GC cleanup", false /* needs_pll */);
        VMThread::execute(&op);
      } else {
        // We don't want to update the marking status if a GC pause
        // is already underway.
        SuspendibleThreadSetJoiner sts_join;
        g1h->collector_state()->set_mark_in_progress(false);
      }

      // Check if cleanup set the free_regions_coming flag. If it
      // hasn't, we can just skip the next step.
      if (g1h->free_regions_coming()) {
        // The following will finish freeing up any regions that we
        // found to be empty during cleanup. We'll do this part
        // without joining the suspendible set. If an evacuation pause
        // takes place, we can carry on freeing regions, in case they
        // are needed by the pause. If a Full GC takes place, it would
        // wait for us to finish processing the regions reclaimed by
        // cleanup.

        double cleanup_start_sec = os::elapsedTime();
        cm_log(G1Log::fine(), false, "[GC concurrent-cleanup-start]");

        // Now do the concurrent cleanup operation.
        _cm->completeCleanup();

        // Notify anyone who's waiting that there are no more free
        // regions coming. We have to do this before we join the STS
        // (in fact, we should not attempt to join the STS in the
        // interval between finishing the cleanup pause and clearing
        // the free_regions_coming flag), otherwise we might deadlock:
        // a GC worker could be blocked waiting for the notification
        // while this thread is blocked waiting for the pause to finish
        // as it tries to join the STS, which is conditional on the GC
        // workers finishing.
        g1h->reset_free_regions_coming();

        double cleanup_end_sec = os::elapsedTime();
        cm_log(G1Log::fine(), true, "[GC concurrent-cleanup-end, %1.7lf secs]", cleanup_end_sec - cleanup_start_sec);
      }
      guarantee(cm()->cleanup_list_is_empty(),
                "at this point there should be no regions on the cleanup list");

      // There is a tricky race between recording that the concurrent
      // cleanup has completed and a potential Full GC starting around
      // the same time. We want to make sure that the Full GC calls
      // abort() on concurrent mark after
      // record_concurrent_mark_cleanup_completed(), since abort() is
      // the method that will reset the concurrent mark state. If we
      // end up calling record_concurrent_mark_cleanup_completed()
      // after abort() then we might incorrectly undo some of the work
      // abort() did. Checking the has_aborted() flag after joining
      // the STS allows the correct ordering of the two methods. There
      // are two scenarios:
      //
      // a) If we reach here before the Full GC, the fact that we have
      // joined the STS means that the Full GC cannot start until we
      // leave the STS, so record_concurrent_mark_cleanup_completed()
      // will complete before abort() is called.
      //
      // b) If we reach here during the Full GC, we'll be held up from
      // joining the STS until the Full GC is done, which means that
      // abort() will have completed and has_aborted() will return
      // true to prevent us from calling
      // record_concurrent_mark_cleanup_completed() (and, in fact, it's
      // not needed any more as the concurrent mark state has been
      // already reset).
      {
        SuspendibleThreadSetJoiner sts_join;
        if (!cm()->has_aborted()) {
          g1_policy->record_concurrent_mark_cleanup_completed();
        } else {
          cm_log(G1Log::fine(), false, "[GC concurrent-mark-abort]");
        }
      }

      // We now want to allow clearing of the marking bitmap to be
      // suspended by a collection pause.
      // We may have aborted just before the remark. Do not bother clearing the
      // bitmap then, as it has been done during mark abort.
      if (!cm()->has_aborted()) {
        _cm->clearNextBitmap();
      } else {
        assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
      }
    }

    // Update the number of full collections that have been
    // completed. This will also notify the FullGCCount_lock in case a
    // Java thread is waiting for a full GC to happen (e.g., it
    // called System.gc() with +ExplicitGCInvokesConcurrent).
    {
      SuspendibleThreadSetJoiner sts_join;