247 if (PrintSafepointStatistics && iterations == 0) {
248 begin_statistics(nof_threads, still_running);
249 }
250
251 if (still_running > 0) {
252 // Check if it is taking too long
253 if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
254 print_safepoint_timeout(_spinning_timeout);
255 }
256
257 // Spin to avoid context switching.
258 // There's a tension between allowing the mutators to run (and rendezvous)
259 // vs spinning. As the VM thread spins, wasting cycles, it consumes CPU that
260 // a mutator might otherwise use profitably to reach a safepoint. Excessive
261 // spinning by the VM thread on a saturated system can increase rendezvous latency.
262 // Blocking or yielding incur their own penalties in the form of context switching
263 // and the resultant loss of cache ($) residency.
264 //
265 // Further complicating matters is that yield() does not work as naively expected
266 // on many platforms -- yield() does not guarantee that any other ready threads
267 // will run. As such we revert to yield_all() after some number of iterations.
268 // Yield_all() is implemented as a short unconditional sleep on some platforms.
269 // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping
270 // can actually increase the time it takes the VM thread to detect that a system-wide
271 // stop-the-world safepoint has been reached. In a pathological scenario such as that
272 // described in CR6415670 the VMthread may sleep just before the mutator(s) become safe.
273 // In that case the mutators will be stalled waiting for the safepoint to complete and the
274 // VMthread will be sleeping, waiting for the mutators to rendezvous. The VMthread
275 // will eventually wake up and detect that all mutators are safe, at which point
276 // we'll again make progress.
277 //
278 // Beware too that the VMThread typically runs at elevated priority.
279 // Its default priority is higher than the default mutator priority.
280 // Obviously, this complicates spinning.
281 //
282 // Note too that on Windows XP SwitchThreadTo() has quite different behavior than Sleep(0).
283 // Sleep(0) will _not_ yield to lower priority threads, while SwitchThreadTo() will.
284 //
285 // See the comments in synchronizer.cpp for additional remarks on spinning.
286 //
287 // In the future we might:
288 // 1. Modify the safepoint scheme to avoid potentially unbounded spinning.
305 // Alternately, instead of counting iterations of the outer loop
306 // we could count the # of threads visited in the inner loop, above.
307 // 9. On Windows consider using the return value from SwitchThreadTo()
308 // to drive subsequent spin/SwitchThreadTo()/Sleep(N) decisions.
309
310 if (UseCompilerSafepoints && int(iterations) == DeferPollingPageLoopCount) {
311 guarantee (PageArmed == 0, "invariant") ;
312 PageArmed = 1 ;
313 os::make_polling_page_unreadable();
314 }
315
316 // Instead of (ncpus > 1) consider either (still_running < (ncpus + EPSILON)) or
317 // ((still_running + _waiting_to_block - TryingToBlock) < ncpus)
318 ++steps ;
319 if (ncpus > 1 && steps < SafepointSpinBeforeYield) {
320 SpinPause() ; // MP-Polite spin
321 } else
322 if (steps < DeferThrSuspendLoopCount) {
323 os::NakedYield() ;
324 } else {
325 os::yield_all() ;
326 // Alternately, the VM thread could transiently depress its scheduling priority or
327 // transiently increase the priority of the tardy mutator(s).
328 }
329
330 iterations ++ ;
331 }
332 assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long");
333 }
334 assert(still_running == 0, "sanity check");
335
336 if (PrintSafepointStatistics) {
337 update_statistics_on_spin_end();
338 }
339
340 // wait until all threads are stopped
341 while (_waiting_to_block > 0) {
342 if (TraceSafepoint) tty->print_cr("Waiting for %d thread(s) to block", _waiting_to_block);
343 if (!SafepointTimeout || timeout_error_printed) {
344 Safepoint_lock->wait(true); // true means no safepoint checks
345 } else {
346 // Compute remaining time
347 jlong remaining_time = safepoint_limit_time - os::javaTimeNanos();
|
247 if (PrintSafepointStatistics && iterations == 0) {
248 begin_statistics(nof_threads, still_running);
249 }
250
251 if (still_running > 0) {
252 // Check if it is taking too long
253 if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
254 print_safepoint_timeout(_spinning_timeout);
255 }
256
257 // Spin to avoid context switching.
258 // There's a tension between allowing the mutators to run (and rendezvous)
259 // vs spinning. As the VM thread spins, wasting cycles, it consumes CPU that
260 // a mutator might otherwise use profitably to reach a safepoint. Excessive
261 // spinning by the VM thread on a saturated system can increase rendezvous latency.
262 // Blocking or yielding incur their own penalties in the form of context switching
263 // and the resultant loss of cache ($) residency.
264 //
265 // Further complicating matters is that yield() does not work as naively expected
266 // on many platforms -- yield() does not guarantee that any other ready threads
267 // will run. As such we revert to naked_short_sleep() after some number of iterations.
268 // naked_short_sleep() is implemented as a short unconditional sleep.
269 // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping
270 // can actually increase the time it takes the VM thread to detect that a system-wide
271 // stop-the-world safepoint has been reached. In a pathological scenario such as that
272 // described in CR6415670 the VMthread may sleep just before the mutator(s) become safe.
273 // In that case the mutators will be stalled waiting for the safepoint to complete and the
274 // VMthread will be sleeping, waiting for the mutators to rendezvous. The VMthread
275 // will eventually wake up and detect that all mutators are safe, at which point
276 // we'll again make progress.
277 //
278 // Beware too that the VMThread typically runs at elevated priority.
279 // Its default priority is higher than the default mutator priority.
280 // Obviously, this complicates spinning.
281 //
282 // Note too that on Windows XP SwitchThreadTo() has quite different behavior than Sleep(0).
283 // Sleep(0) will _not_ yield to lower priority threads, while SwitchThreadTo() will.
284 //
285 // See the comments in synchronizer.cpp for additional remarks on spinning.
286 //
287 // In the future we might:
288 // 1. Modify the safepoint scheme to avoid potentially unbounded spinning.
305 // Alternately, instead of counting iterations of the outer loop
306 // we could count the # of threads visited in the inner loop, above.
307 // 9. On Windows consider using the return value from SwitchThreadTo()
308 // to drive subsequent spin/SwitchThreadTo()/Sleep(N) decisions.
309
310 if (UseCompilerSafepoints && int(iterations) == DeferPollingPageLoopCount) {
311 guarantee (PageArmed == 0, "invariant") ;
312 PageArmed = 1 ;
313 os::make_polling_page_unreadable();
314 }
315
316 // Instead of (ncpus > 1) consider either (still_running < (ncpus + EPSILON)) or
317 // ((still_running + _waiting_to_block - TryingToBlock) < ncpus)
318 ++steps ;
319 if (ncpus > 1 && steps < SafepointSpinBeforeYield) {
320 SpinPause() ; // MP-Polite spin
321 } else
322 if (steps < DeferThrSuspendLoopCount) {
323 os::NakedYield() ;
324 } else {
325 os::naked_short_sleep(1);
326 }
327
328 iterations ++ ;
329 }
330 assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long");
331 }
332 assert(still_running == 0, "sanity check");
333
334 if (PrintSafepointStatistics) {
335 update_statistics_on_spin_end();
336 }
337
338 // wait until all threads are stopped
339 while (_waiting_to_block > 0) {
340 if (TraceSafepoint) tty->print_cr("Waiting for %d thread(s) to block", _waiting_to_block);
341 if (!SafepointTimeout || timeout_error_printed) {
342 Safepoint_lock->wait(true); // true means no safepoint checks
343 } else {
344 // Compute remaining time
345 jlong remaining_time = safepoint_limit_time - os::javaTimeNanos();
|
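The comments in the loop above describe a three-stage back-off: bounded spinning on multiprocessors, then bare yields, and finally short sleeps once the mutators are clearly tardy. Below is a minimal standalone sketch of that policy, not HotSpot code: the helpers spin_pause(), naked_yield() and short_sleep_ms() are assumed stand-ins for SpinPause(), os::NakedYield() and os::naked_short_sleep(), and the spin_limit / yield_limit parameters play the roles of SafepointSpinBeforeYield and DeferThrSuspendLoopCount with caller-chosen, illustrative values.

#include <thread>
#include <chrono>

// Stand-ins for the platform primitives used in the loop above (assumptions).
static void spin_pause()           { /* e.g. a PAUSE/YIELD instruction */ }
static void naked_yield()          { std::this_thread::yield(); }
static void short_sleep_ms(int ms) {
  std::this_thread::sleep_for(std::chrono::milliseconds(ms));
}

// One back-off decision per iteration of the outer "still_running > 0" loop.
// spin_limit and yield_limit correspond to SafepointSpinBeforeYield and
// DeferThrSuspendLoopCount; the thresholds themselves are illustrative.
static void back_off(int steps, int ncpus, int spin_limit, int yield_limit) {
  if (ncpus > 1 && steps < spin_limit) {
    spin_pause();        // MP-polite spin: cheapest, no context switch
  } else if (steps < yield_limit) {
    naked_yield();       // give other ready threads (the mutators) a chance to run
  } else {
    short_sleep_ms(1);   // last resort; the OS may round this up (see comments above)
  }
}

The point of the ordering is that each stage is more expensive for the VM thread but friendlier to the mutators it is waiting on, so the loop escalates only when the cheaper stages have failed to make progress.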