77 _sltMonitor = SLT_lock;
78 }
79
// Main service loop of the CMS background thread: wait for the surrogate
// locker thread (SLT) to be installed, then repeatedly sleep until a
// collection is warranted and run one background CMS cycle, until asked
// to terminate.
void ConcurrentMarkSweepThread::run_service() {
  assert(this == cmst(), "just checking");

  // Optionally pin this thread to a CPU; failure to bind is non-fatal.
  if (BindCMSThreadToCPU && !os::bind_to_processor(CPUForCMSThread)) {
    warning("Couldn't bind CMS thread to processor " UINTX_FORMAT, CPUForCMSThread);
  }

  {
    // Take CGC_lock (second arg 'true' — presumably the no-safepoint-check
    // flag; confirm against Mutex's locker API) and advertise interest in
    // the CMS token while waiting for SLT installation.
    MutexLockerEx x(CGC_lock, true);
    set_CMS_flag(CMS_cms_wants_token);
    assert(is_init_completed() && Universe::is_fully_initialized(), "ConcurrentGCThread::run() should have waited for this.");

    // Wait until the surrogate locker thread that will do
    // pending list locking on our behalf has been created.
    // We cannot start the SLT thread ourselves since we need
    // to be a JavaThread to do so.
    CMSLoopCountWarn loopY("CMS::run", "waiting for SLT installation", 2);
    while (_slt == NULL && !_should_terminate) {
      CGC_lock->wait(true, 200);  // poll with a 200ms timed wait
      loopY.tick();               // emits a warning if we loop too long
    }
    clear_CMS_flag(CMS_cms_wants_token);
  }

  while (!_should_terminate) {
    sleepBeforeNextCycle();
    if (_should_terminate) break;
    GCIdMark gc_id_mark;
    // A concurrently requested full gc overrides the default concurrent-mark cause.
    GCCause::Cause cause = _collector->_full_gc_requested ?
      _collector->_full_gc_cause : GCCause::_cms_concurrent_mark;
    _collector->collect_in_background(cause);
  }

  // Check that the state of any protocol for synchronization
  // between background (CMS) and foreground collector is "clean"
  // (i.e. will not potentially block the foreground collector,
  // requiring action by us).
  verify_ok_to_terminate();
}
119
120 #ifndef PRODUCT
121 void ConcurrentMarkSweepThread::verify_ok_to_terminate() const {
122 assert(!(CGC_lock->owned_by_self() || cms_thread_has_cms_token() ||
123 cms_thread_wants_cms_token()),
124 "Must renounce all worldly possessions and desires for nirvana");
125 _collector->verify_ok_to_terminate();
126 }
132 ConcurrentMarkSweepThread* th = new ConcurrentMarkSweepThread(collector);
133 assert(_cmst == th, "Where did the just-created CMS thread go?");
134 return th;
135 }
136
137 void ConcurrentMarkSweepThread::stop_all() {
138 assert(_cmst != NULL, "stop_all should be called after initialization");
139 _cmst->stop();
140 }
141
142 void ConcurrentMarkSweepThread::stop_service() {
143 // Now post a notify on CGC_lock so as to nudge
144 // CMS thread(s) that might be slumbering in
145 // sleepBeforeNextCycle.
146 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
147 CGC_lock->notify_all();
148 }
149
150 void ConcurrentMarkSweepThread::threads_do(ThreadClosure* tc) {
151 assert(tc != NULL, "Null ThreadClosure");
152 if (cmst() != NULL) {
153 tc->do_thread(cmst());
154 }
155 assert(Universe::is_fully_initialized(),
156 "Called too early, make sure heap is fully initialized");
157 if (_collector != NULL) {
158 AbstractWorkGang* gang = _collector->conc_workers();
159 if (gang != NULL) {
160 gang->threads_do(tc);
161 }
162 }
163 }
164
165 void ConcurrentMarkSweepThread::print_all_on(outputStream* st) {
166 if (cmst() != NULL) {
167 cmst()->print_on(st);
168 st->cr();
169 }
170 if (_collector != NULL) {
171 AbstractWorkGang* gang = _collector->conc_workers();
172 if (gang != NULL) {
173 gang->print_worker_threads_on(st);
174 }
175 }
176 }
177
// Hand-off protocol for the CMS token between the VM thread and the CMS
// thread, mediated by CGC_lock and the CMS_* flag bits.
// NOTE(review): the middle of this function (the VM-thread wait body) is
// truncated in this chunk — the visible braces do not balance; do not
// restructure without consulting the full file.
void ConcurrentMarkSweepThread::synchronize(bool is_cms_thread) {
  assert(UseConcMarkSweepGC, "just checking");

  MutexLockerEx x(CGC_lock,
                  Mutex::_no_safepoint_check_flag);
  if (!is_cms_thread) {
    // VM-thread side: spin/wait while the CMS thread still holds the token.
    assert(Thread::current()->is_VM_thread(), "Not a VM thread");
    CMSSynchronousYieldRequest yr;
    while (CMS_flag_is_set(CMS_cms_has_token)) {
    // [truncated in this view]
    assert(!CMS_flag_is_set(CMS_vm_has_token | CMS_vm_wants_token),
           "Should have been cleared");
  } else {
    // CMS-thread side: give up the token and wake a waiting VM thread.
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "Not a CMS thread");
    assert(CMS_flag_is_set(CMS_cms_has_token), "just checking");
    clear_CMS_flag(CMS_cms_has_token);
    if (CMS_flag_is_set(CMS_vm_wants_token)) {
      // wake-up a waiting VM thread
      CGC_lock->notify();
    }
    assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
           "Should have been cleared");
  }
}
237
238 // Wait until any cms_lock event
239 void ConcurrentMarkSweepThread::wait_on_cms_lock(long t_millis) {
240 MutexLockerEx x(CGC_lock,
241 Mutex::_no_safepoint_check_flag);
242 if (_should_terminate || _collector->_full_gc_requested) {
243 return;
244 }
245 set_CMS_flag(CMS_cms_wants_token); // to provoke notifies
246 CGC_lock->wait(Mutex::_no_safepoint_check_flag, t_millis);
247 clear_CMS_flag(CMS_cms_wants_token);
248 assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
249 "Should not be set");
250 }
251
// Wait until the next synchronous GC, a concurrent full gc request,
// or a timeout, whichever is earlier. A scavenge is detected by a change
// in the heap's total-collections counter, sampled under the Heap_lock.
void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
  // Wait time in millis or 0 value representing infinite wait for a scavenge
  assert(t_millis >= 0, "Wait time for scavenge should be 0 or positive");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  double start_time_secs = os::elapsedTime();
  // Deadline in seconds; only meaningful when t_millis != 0.
  double end_time_secs = start_time_secs + (t_millis / ((double) MILLIUNITS));

  // Total collections count before waiting loop
  unsigned int before_count;
  {
    MutexLockerEx hl(Heap_lock, Mutex::_no_safepoint_check_flag);
    before_count = gch->total_collections();
  }

  unsigned int loop_count = 0;

  while(!_should_terminate) {
    double now_time = os::elapsedTime();
    long wait_time_millis;

    if(t_millis != 0) {
      // New wait limit: remaining time until the deadline.
      wait_time_millis = (long) ((end_time_secs - now_time) * MILLIUNITS);
      if(wait_time_millis <= 0) {
        // Wait time is over
        break;
      }
    } else {
      // No wait limit, wait if necessary forever (0 == untimed wait)
      wait_time_millis = 0;
    }

    // Wait until the next event or the remaining timeout
    {
      MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);

      // Re-check under the lock: bail out early on termination or an
      // explicitly requested full gc.
      if (_should_terminate || _collector->_full_gc_requested) {
        return;
      }
      set_CMS_flag(CMS_cms_wants_token); // to provoke notifies
      assert(t_millis == 0 || wait_time_millis > 0, "Sanity");
      CGC_lock->wait(Mutex::_no_safepoint_check_flag, wait_time_millis);
      clear_CMS_flag(CMS_cms_wants_token);
      assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
             "Should not be set");
    }

    // Extra wait time check before entering the heap lock to get the collection count
    if(t_millis != 0 && os::elapsedTime() >= end_time_secs) {
      // Wait time is over
      break;
    }

    // Total collections count after the event
    unsigned int after_count;
    {
      MutexLockerEx hl(Heap_lock, Mutex::_no_safepoint_check_flag);
      after_count = gch->total_collections();
    }

    if(before_count != after_count) {
      // There was a collection - success
      break;
    }

    // Too many loops warning: fires only when the unsigned counter wraps
    // around to 0, i.e. after ~2^32 iterations.
    if(++loop_count == 0) {
      warning("wait_on_cms_lock_for_scavenge() has looped %u times", loop_count - 1);
    }
  }
}
326
327 void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
328 while (!_should_terminate) {
329 if(CMSWaitDuration >= 0) {
330 // Wait until the next synchronous GC, a concurrent full gc
331 // request or a timeout, whichever is earlier.
332 wait_on_cms_lock_for_scavenge(CMSWaitDuration);
333 } else {
334 // Wait until any cms_lock event or check interval not to call shouldConcurrentCollect permanently
335 wait_on_cms_lock(CMSCheckInterval);
336 }
337 // Check if we should start a CMS collection cycle
338 if (_collector->shouldConcurrentCollect()) {
339 return;
340 }
341 // .. collection criterion not yet met, let's go back
342 // and wait some more
343 }
344 }
345
346 // Note: this method, although exported by the ConcurrentMarkSweepThread,
347 // which is a non-JavaThread, can only be called by a JavaThread.
348 // Currently this is done at vm creation time (post-vm-init) by the
|
77 _sltMonitor = SLT_lock;
78 }
79
// Main service loop of the CMS background thread: wait for the surrogate
// locker thread (SLT) to be installed, then repeatedly sleep until a
// collection is warranted and run one background CMS cycle, until asked
// to terminate.
void ConcurrentMarkSweepThread::run_service() {
  assert(this == cmst(), "just checking");

  // Optionally pin this thread to a CPU; failure to bind is non-fatal.
  if (BindCMSThreadToCPU && !os::bind_to_processor(CPUForCMSThread)) {
    warning("Couldn't bind CMS thread to processor " UINTX_FORMAT, CPUForCMSThread);
  }

  {
    // Take CGC_lock (second arg 'true' — presumably the no-safepoint-check
    // flag; confirm against Mutex's locker API) and advertise interest in
    // the CMS token while waiting for SLT installation.
    MutexLockerEx x(CGC_lock, true);
    set_CMS_flag(CMS_cms_wants_token);
    assert(is_init_completed() && Universe::is_fully_initialized(), "ConcurrentGCThread::run() should have waited for this.");

    // Wait until the surrogate locker thread that will do
    // pending list locking on our behalf has been created.
    // We cannot start the SLT thread ourselves since we need
    // to be a JavaThread to do so.
    CMSLoopCountWarn loopY("CMS::run", "waiting for SLT installation", 2);
    while (_slt == NULL && !should_terminate()) {
      CGC_lock->wait(true, 200);  // poll with a 200ms timed wait
      loopY.tick();               // emits a warning if we loop too long
    }
    clear_CMS_flag(CMS_cms_wants_token);
  }

  while (!should_terminate()) {
    sleepBeforeNextCycle();
    if (should_terminate()) break;
    GCIdMark gc_id_mark;
    // A concurrently requested full gc overrides the default concurrent-mark cause.
    GCCause::Cause cause = _collector->_full_gc_requested ?
      _collector->_full_gc_cause : GCCause::_cms_concurrent_mark;
    _collector->collect_in_background(cause);
  }

  // Check that the state of any protocol for synchronization
  // between background (CMS) and foreground collector is "clean"
  // (i.e. will not potentially block the foreground collector,
  // requiring action by us).
  verify_ok_to_terminate();
}
119
120 #ifndef PRODUCT
121 void ConcurrentMarkSweepThread::verify_ok_to_terminate() const {
122 assert(!(CGC_lock->owned_by_self() || cms_thread_has_cms_token() ||
123 cms_thread_wants_cms_token()),
124 "Must renounce all worldly possessions and desires for nirvana");
125 _collector->verify_ok_to_terminate();
126 }
132 ConcurrentMarkSweepThread* th = new ConcurrentMarkSweepThread(collector);
133 assert(_cmst == th, "Where did the just-created CMS thread go?");
134 return th;
135 }
136
137 void ConcurrentMarkSweepThread::stop_all() {
138 assert(_cmst != NULL, "stop_all should be called after initialization");
139 _cmst->stop();
140 }
141
142 void ConcurrentMarkSweepThread::stop_service() {
143 // Now post a notify on CGC_lock so as to nudge
144 // CMS thread(s) that might be slumbering in
145 // sleepBeforeNextCycle.
146 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
147 CGC_lock->notify_all();
148 }
149
150 void ConcurrentMarkSweepThread::threads_do(ThreadClosure* tc) {
151 assert(tc != NULL, "Null ThreadClosure");
152 if (cmst() != NULL && !cmst()->has_terminated()) {
153 tc->do_thread(cmst());
154 }
155 assert(Universe::is_fully_initialized(),
156 "Called too early, make sure heap is fully initialized");
157 if (_collector != NULL) {
158 AbstractWorkGang* gang = _collector->conc_workers();
159 if (gang != NULL) {
160 gang->threads_do(tc);
161 }
162 }
163 }
164
165 void ConcurrentMarkSweepThread::print_all_on(outputStream* st) {
166 if (cmst() != NULL && !cmst()->has_terminated()) {
167 cmst()->print_on(st);
168 st->cr();
169 }
170 if (_collector != NULL) {
171 AbstractWorkGang* gang = _collector->conc_workers();
172 if (gang != NULL) {
173 gang->print_worker_threads_on(st);
174 }
175 }
176 }
177
// Hand-off protocol for the CMS token between the VM thread and the CMS
// thread, mediated by CGC_lock and the CMS_* flag bits.
// NOTE(review): the middle of this function (the VM-thread wait body) is
// truncated in this chunk — the visible braces do not balance; do not
// restructure without consulting the full file.
void ConcurrentMarkSweepThread::synchronize(bool is_cms_thread) {
  assert(UseConcMarkSweepGC, "just checking");

  MutexLockerEx x(CGC_lock,
                  Mutex::_no_safepoint_check_flag);
  if (!is_cms_thread) {
    // VM-thread side: spin/wait while the CMS thread still holds the token.
    assert(Thread::current()->is_VM_thread(), "Not a VM thread");
    CMSSynchronousYieldRequest yr;
    while (CMS_flag_is_set(CMS_cms_has_token)) {
    // [truncated in this view]
    assert(!CMS_flag_is_set(CMS_vm_has_token | CMS_vm_wants_token),
           "Should have been cleared");
  } else {
    // CMS-thread side: give up the token and wake a waiting VM thread.
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "Not a CMS thread");
    assert(CMS_flag_is_set(CMS_cms_has_token), "just checking");
    clear_CMS_flag(CMS_cms_has_token);
    if (CMS_flag_is_set(CMS_vm_wants_token)) {
      // wake-up a waiting VM thread
      CGC_lock->notify();
    }
    assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
           "Should have been cleared");
  }
}
237
238 // Wait until any cms_lock event
239 void ConcurrentMarkSweepThread::wait_on_cms_lock(long t_millis) {
240 MutexLockerEx x(CGC_lock,
241 Mutex::_no_safepoint_check_flag);
242 if (should_terminate() || _collector->_full_gc_requested) {
243 return;
244 }
245 set_CMS_flag(CMS_cms_wants_token); // to provoke notifies
246 CGC_lock->wait(Mutex::_no_safepoint_check_flag, t_millis);
247 clear_CMS_flag(CMS_cms_wants_token);
248 assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
249 "Should not be set");
250 }
251
// Wait until the next synchronous GC, a concurrent full gc request,
// or a timeout, whichever is earlier. A scavenge is detected by a change
// in the heap's total-collections counter, sampled under the Heap_lock.
void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
  // Wait time in millis or 0 value representing infinite wait for a scavenge
  assert(t_millis >= 0, "Wait time for scavenge should be 0 or positive");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  double start_time_secs = os::elapsedTime();
  // Deadline in seconds; only meaningful when t_millis != 0.
  double end_time_secs = start_time_secs + (t_millis / ((double) MILLIUNITS));

  // Total collections count before waiting loop
  unsigned int before_count;
  {
    MutexLockerEx hl(Heap_lock, Mutex::_no_safepoint_check_flag);
    before_count = gch->total_collections();
  }

  unsigned int loop_count = 0;

  while(!should_terminate()) {
    double now_time = os::elapsedTime();
    long wait_time_millis;

    if(t_millis != 0) {
      // New wait limit: remaining time until the deadline.
      wait_time_millis = (long) ((end_time_secs - now_time) * MILLIUNITS);
      if(wait_time_millis <= 0) {
        // Wait time is over
        break;
      }
    } else {
      // No wait limit, wait if necessary forever (0 == untimed wait)
      wait_time_millis = 0;
    }

    // Wait until the next event or the remaining timeout
    {
      MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);

      // Re-check under the lock: bail out early on termination or an
      // explicitly requested full gc.
      if (should_terminate() || _collector->_full_gc_requested) {
        return;
      }
      set_CMS_flag(CMS_cms_wants_token); // to provoke notifies
      assert(t_millis == 0 || wait_time_millis > 0, "Sanity");
      CGC_lock->wait(Mutex::_no_safepoint_check_flag, wait_time_millis);
      clear_CMS_flag(CMS_cms_wants_token);
      assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
             "Should not be set");
    }

    // Extra wait time check before entering the heap lock to get the collection count
    if(t_millis != 0 && os::elapsedTime() >= end_time_secs) {
      // Wait time is over
      break;
    }

    // Total collections count after the event
    unsigned int after_count;
    {
      MutexLockerEx hl(Heap_lock, Mutex::_no_safepoint_check_flag);
      after_count = gch->total_collections();
    }

    if(before_count != after_count) {
      // There was a collection - success
      break;
    }

    // Too many loops warning: fires only when the unsigned counter wraps
    // around to 0, i.e. after ~2^32 iterations.
    if(++loop_count == 0) {
      warning("wait_on_cms_lock_for_scavenge() has looped %u times", loop_count - 1);
    }
  }
}
326
327 void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
328 while (!should_terminate()) {
329 if(CMSWaitDuration >= 0) {
330 // Wait until the next synchronous GC, a concurrent full gc
331 // request or a timeout, whichever is earlier.
332 wait_on_cms_lock_for_scavenge(CMSWaitDuration);
333 } else {
334 // Wait until any cms_lock event or check interval not to call shouldConcurrentCollect permanently
335 wait_on_cms_lock(CMSCheckInterval);
336 }
337 // Check if we should start a CMS collection cycle
338 if (_collector->shouldConcurrentCollect()) {
339 return;
340 }
341 // .. collection criterion not yet met, let's go back
342 // and wait some more
343 }
344 }
345
346 // Note: this method, although exported by the ConcurrentMarkSweepThread,
347 // which is a non-JavaThread, can only be called by a JavaThread.
348 // Currently this is done at vm creation time (post-vm-init) by the
|