
src/os/bsd/vm/os_bsd.cpp



   1 /*
   2  * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


4025   } else {
4026     jio_fprintf(stderr,
4027                 "Could not open pause file '%s', continuing immediately.\n", filename);
4028   }
4029 }
4030 
4031 
4032 // Refer to the comments in os_solaris.cpp park-unpark. The next two
4033 // comment paragraphs are worth repeating here:
4034 //
4035 // Assumption:
4036 //    Only one parker can exist on an event, which is why we allocate
4037 //    them per-thread. Multiple unparkers can coexist.
4038 //
4039 // _Event serves as a restricted-range semaphore.
4040 //   -1 : thread is blocked, i.e. there is a waiter
4041 //    0 : neutral: thread is running or ready,
4042 //        could have been signaled after a wait started
4043 //    1 : signaled - thread is running or ready
4044 //
4045 // Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
4046 // hang indefinitely.  For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
4047 // For specifics regarding the bug see GLIBC BUGID 261237 :
4048 //    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
4049 // Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
4050 // will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
4051 // is used.  (The simple C test-case provided in the GLIBC bug report manifests the
4052 // hang).  The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
4053 // and monitorenter when we're using 1-0 locking.  All those operations may result in
4054 // calls to pthread_cond_timedwait().  Using LD_ASSUME_KERNEL to use an older version
4055 // of libpthread avoids the problem, but isn't practical.
4056 //
4057 // Possible remedies:
4058 //
4059 // 1.   Establish a minimum relative wait time.  50 to 100 msecs seems to work.
4060 //      This is palliative and probabilistic, however.  If the thread is preempted
4061 //      between the call to compute_abstime() and pthread_cond_timedwait(), more
4062 //      than the minimum period may have passed, and the abstime may be stale (in the
4063 //      past) resulting in a hang.   Using this technique reduces the odds of a hang
4064 //      but the JVM is still vulnerable, particularly on heavily loaded systems.
4065 //
4066 // 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
4067 //      of the usual flag-condvar-mutex idiom.  The write side of the pipe is set
4068 //      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
4069 //      reduces to poll()+read().  This works well, but consumes 2 FDs per extant
4070 //      thread.
4071 //
4072 // 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
4073 //      that manages timeouts.  We'd emulate pthread_cond_timedwait() by enqueuing
4074 //      a timeout request to the chron thread and then blocking via pthread_cond_wait().
4075 //      This also works well.  In fact it avoids kernel-level scalability impediments
4076 //      on certain platforms that don't handle lots of active pthread_cond_timedwait()
4077 //      timers in a graceful fashion.
4078 //
4079 // 4.   When the abstime value is in the past it appears that control returns
4080 //      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
4081 //      Subsequent timedwait/wait calls may hang indefinitely.  Given that, we
4082 //      can avoid the problem by reinitializing the condvar -- by cond_destroy()
4083 //      followed by cond_init() -- after all calls to pthread_cond_timedwait().
4084 //      It may be possible to avoid reinitialization by checking the return
4085 //      value from pthread_cond_timedwait().  In addition to reinitializing the
4086 //      condvar we must establish the invariant that cond_signal() is only called
4087 //      within critical sections protected by the adjunct mutex.  This prevents
4088 //      cond_signal() from "seeing" a condvar that's in the midst of being
4089 //      reinitialized or that is corrupt.  Sadly, this invariant obviates the
4090 //      desirable signal-after-unlock optimization that avoids futile context switching.
4091 //
4092 //      I'm also concerned that some versions of NPTL might allocate an auxiliary
4093 //      structure when a condvar is used or initialized.  cond_destroy()  would
4094 //      release the helper structure.  Our reinitialize-after-timedwait fix
4095 //      put excessive stress on malloc/free and locks protecting the c-heap.
4096 //
4097 // We currently use (4).  See the WorkAroundNPTLTimedWaitHang flag.
4098 // It may be possible to refine (4) by checking the kernel and NPTL versions
4099 // and only enabling the work-around for vulnerable environments.
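To make remedy (4) concrete, here is a minimal standalone sketch of the pattern (hedged: the names m, c, flag and both helper functions are invented for illustration; the real code below additionally guards the reinitialization with the WorkAroundNPTLTimedWaitHang flag):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;
static int flag = 0;   // protected by m

// Waiter: if pthread_cond_timedwait() reports an error, tear the condvar
// down and rebuild it so a possibly-corrupt condvar is never reused.
static void wait_with_reinit(const struct timespec* abstime) {
  pthread_mutex_lock(&m);
  while (!flag) {
    int status = pthread_cond_timedwait(&c, &m, abstime);
    if (status != 0) {
      pthread_cond_destroy(&c);
      pthread_cond_init(&c, NULL);
    }
    if (status == ETIMEDOUT) break;   // timed out; caller re-evaluates
  }
  pthread_mutex_unlock(&m);
}

// Signaller: per the invariant above, cond_signal() is issued only inside
// the critical section, so it never sees a condvar mid-reinitialization.
static void signal_with_mutex_held() {
  pthread_mutex_lock(&m);
  flag = 1;
  pthread_cond_signal(&c);
  pthread_mutex_unlock(&m);
}

As the last paragraph above notes, the destroy/init pair on every failed timedwait is what puts extra pressure on malloc/free if the implementation allocates a helper structure per condvar.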
4100 
4101 // utility to compute the abstime argument to timedwait:
4102 // millis is the relative timeout time
4103 // abstime will be the absolute timeout time
4104 // TODO: replace compute_abstime() with unpackTime()
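For example (just the arithmetic): millis = 2500 yields seconds = 2 and a remainder of 500 ms, so tv_sec advances by 2 and now.tv_usec gains 500 * 1000 = 500000 microseconds; any sum of 1000000 or more is then folded back into tv_sec by the usec overflow branch below.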
4105 
4106 static struct timespec* compute_abstime(struct timespec* abstime,
4107                                         jlong millis) {
4108   if (millis < 0)  millis = 0;
4109   struct timeval now;
4110   int status = gettimeofday(&now, NULL);
4111   assert(status == 0, "gettimeofday");
4112   jlong seconds = millis / 1000;
4113   millis %= 1000;
4114   if (seconds > 50000000) { // see man cond_timedwait(3T)
4115     seconds = 50000000;
4116   }
4117   abstime->tv_sec = now.tv_sec  + seconds;
4118   long       usec = now.tv_usec + millis * 1000;
4119   if (usec >= 1000000) {


4191   int status = pthread_mutex_lock(_mutex);
4192   assert_status(status == 0, status, "mutex_lock");
4193   guarantee(_nParked == 0, "invariant");
4194   ++_nParked;
4195 
4196   // Object.wait(timo) will return because of
4197   // (a) notification
4198   // (b) timeout
4199   // (c) thread.interrupt
4200   //
4201   // Thread.interrupt and object.notify{All} both call Event::set.
4202   // That is, we treat thread.interrupt as a special case of notification.
4203   // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
4204   // We assume all ETIME returns are valid.
4205   //
4206   // TODO: properly differentiate simultaneous notify+interrupt.
4207   // In that case, we should propagate the notify to another waiter.
4208 
4209   while (_Event < 0) {
4210     status = pthread_cond_timedwait(_cond, _mutex, &abst);
4211     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4212       pthread_cond_destroy(_cond);
4213       pthread_cond_init(_cond, NULL);
4214     }
4215     assert_status(status == 0 || status == EINTR ||
4216                   status == ETIMEDOUT,
4217                   status, "cond_timedwait");
4218     if (!FilterSpuriousWakeups) break;                 // previous semantics
4219     if (status == ETIMEDOUT) break;
4220     // We consume and ignore EINTR and spurious wakeups.
4221   }
4222   --_nParked;
4223   if (_Event >= 0) {
4224     ret = OS_OK;
4225   }
4226   _Event = 0;
4227   status = pthread_mutex_unlock(_mutex);
4228   assert_status(status == 0, status, "mutex_unlock");
4229   assert(_nParked == 0, "invariant");
4230   // Paranoia to ensure our locked and lock-free paths interact
4231   // correctly with each other.
4232   OrderAccess::fence();
4233   return ret;
4234 }


4238   //    0 => 1 : just return
4239   //    1 => 1 : just return
4240   //   -1 => either 0 or 1; must signal target thread
4241   //         That is, we can safely transition _Event from -1 to either
4242   //         0 or 1.
4243   // See also: "Semaphores in Plan 9" by Mullender & Cox
4244   //
4245   // Note: Forcing a transition from "-1" to "1" on an unpark() means
4246   // that it will take two back-to-back park() calls for the owning
4247   // thread to block. This has the benefit of forcing a spurious return
4248   // from the first park() call after an unpark() call which will help
4249   // shake out uses of park() and unpark() without condition variables.
4250 
4251   if (Atomic::xchg(1, &_Event) >= 0) return;
4252 
4253   // Wait for the thread associated with the event to vacate
4254   int status = pthread_mutex_lock(_mutex);
4255   assert_status(status == 0, status, "mutex_lock");
4256   int AnyWaiters = _nParked;
4257   assert(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
4258   if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
4259     AnyWaiters = 0;
4260     pthread_cond_signal(_cond);
4261   }
4262   status = pthread_mutex_unlock(_mutex);
4263   assert_status(status == 0, status, "mutex_unlock");
4264   if (AnyWaiters != 0) {
4265     // Note that we signal() *after* dropping the lock for "immortal" Events.
4266     // This is safe and avoids a common class of  futile wakeups.  In rare
4267     // circumstances this can cause a thread to return prematurely from
4268     // cond_{timed}wait() but the spurious wakeup is benign and the victim
4269     // will simply re-test the condition and re-park itself.
4270     // This provides particular benefit if the underlying platform does not
4271     // provide wait morphing.
4272     status = pthread_cond_signal(_cond);
4273     assert_status(status == 0, status, "cond_signal");
4274   }
4275 }
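As a worked illustration of the -1/0/1 protocol and the re-test discipline described above, here is a hedged toy model built on standard C++ primitives (ToyParkEvent and its members are invented names; this is not the HotSpot implementation, which uses raw pthreads and Atomic::xchg as shown in the hunks above):

#include <atomic>
#include <condition_variable>
#include <mutex>

class ToyParkEvent {
  std::atomic<int> _event{0};   // -1: waiter blocked, 0: neutral, 1: signaled
  std::mutex _mtx;
  std::condition_variable _cv;
 public:
  void park() {
    // Atomically decrement: 1 -> 0 consumes a pending unpark and returns
    // at once; 0 -> -1 advertises the single waiter before blocking.
    int v = _event.load();
    while (!_event.compare_exchange_weak(v, v - 1)) { /* v is reloaded */ }
    if (v == 1) return;
    std::unique_lock<std::mutex> lock(_mtx);
    // Spurious wakeups are tolerated: always re-test the state.
    while (_event.load() < 0) _cv.wait(lock);
    _event.store(0);
  }
  void unpark() {
    // 0 => 1 and 1 => 1 just record the signal; only -1 => 1 has to wake
    // the waiter (cf. the transition table above).
    if (_event.exchange(1) >= 0) return;
    std::lock_guard<std::mutex> guard(_mtx);
    _cv.notify_one();
  }
};

Note that the toy signals while holding the mutex, which is correct but forgoes the signal-after-unlock optimization discussed in the comment above.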
4276 
4277 
4278 // JSR166
4279 // -------------------------------------------------------
4280 
4281 // The solaris and bsd implementations of park/unpark are fairly


4374 
4375 
4376   // Enter safepoint region
4377   // Beware of deadlocks such as 6317397.
4378   // The per-thread Parker:: mutex is a classic leaf-lock.
4379   // In particular a thread must never block on the Threads_lock while
4380   // holding the Parker:: mutex.  If safepoints are pending, both the
4381   // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
4382   ThreadBlockInVM tbivm(jt);
4383 
4384   // Don't wait if we cannot get the lock, since interference arises from
4385   // unblocking.  Also, check for interrupt before trying to wait.
4386   if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
4387     return;
4388   }
4389 
4390   int status;
4391   if (_counter > 0)  { // no wait needed
4392     _counter = 0;
4393     status = pthread_mutex_unlock(_mutex);
4394     assert(status == 0, "invariant");
4395     // Paranoia to ensure our locked and lock-free paths interact
4396     // correctly with each other and Java-level accesses.
4397     OrderAccess::fence();
4398     return;
4399   }
4400 
4401 #ifdef ASSERT
4402   // Don't catch signals while blocked; let the running threads have the signals.
4403   // (This allows a debugger to break into the running thread.)
4404   sigset_t oldsigs;
4405   sigset_t* allowdebug_blocked = os::Bsd::allowdebug_blocked_signals();
4406   pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
4407 #endif
4408 
4409   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4410   jt->set_suspend_equivalent();
4411   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
4412 
4413   if (time == 0) {
4414     status = pthread_cond_wait(_cond, _mutex);
4415   } else {
4416     status = pthread_cond_timedwait(_cond, _mutex, &absTime);
4417     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4418       pthread_cond_destroy(_cond);
4419       pthread_cond_init(_cond, NULL);
4420     }
4421   }
4422   assert_status(status == 0 || status == EINTR ||
4423                 status == ETIMEDOUT,
4424                 status, "cond_timedwait");
4425 
4426 #ifdef ASSERT
4427   pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
4428 #endif
4429 
4430   _counter = 0;
4431   status = pthread_mutex_unlock(_mutex);
4432   assert_status(status == 0, status, "invariant");
4433   // Paranoia to ensure our locked and lock-free paths interact
4434   // correctly with each other and Java-level accesses.
4435   OrderAccess::fence();
4436 
4437   // If externally suspended while waiting, re-suspend
4438   if (jt->handle_special_suspend_equivalent_condition()) {
4439     jt->java_suspend_self();
4440   }
4441 }
4442 
4443 void Parker::unpark() {
4444   int status = pthread_mutex_lock(_mutex);
4445   assert(status == 0, "invariant");
4446   const int s = _counter;
4447   _counter = 1;
4448   if (s < 1) {
4449     if (WorkAroundNPTLTimedWaitHang) {
4450       status = pthread_cond_signal(_cond);
4451       assert(status == 0, "invariant");
4452       status = pthread_mutex_unlock(_mutex);
4453       assert(status == 0, "invariant");
4454     } else {
4455       status = pthread_mutex_unlock(_mutex);
4456       assert(status == 0, "invariant");

4457       status = pthread_cond_signal(_cond);
4458       assert(status == 0, "invariant");
4459     }
4460   } else {
4461     pthread_mutex_unlock(_mutex);
4462     assert(status == 0, "invariant");
4463   }
4464 }
4465 
4466 
4467 // Darwin has no "environ" in a dynamic library.
4468 #ifdef __APPLE__
4469   #include <crt_externs.h>
4470   #define environ (*_NSGetEnviron())
4471 #else
4472 extern char** environ;
4473 #endif
4474 
4475 // Run the specified command in a separate process. Return its exit value,
4476 // or -1 on failure (e.g. can't fork a new process).
4477 // Unlike system(), this function can be called from a signal handler. It
4478 // doesn't block SIGINT et al.
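A hedged standalone sketch of the shape just described (fork, exec of "sh -c <cmd>", waitpid for the exit value); run_cmd_sketch and its envp parameter are invented names, and the real os::fork_and_exec, which begins below, differs in detail:

#include <errno.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int run_cmd_sketch(const char* cmd, char* const envp[]) {
  const char* argv[4] = { "sh", "-c", cmd, NULL };
  pid_t pid = fork();
  if (pid < 0) {
    return -1;                      // could not fork a new process
  } else if (pid == 0) {
    // Child: become "/bin/sh -c <cmd>"; pass the environment through
    // (on Darwin it would come from _NSGetEnviron(), see above).
    execve("/bin/sh", (char* const*)argv, envp);
    _exit(127);                     // exec failed
  } else {
    // Parent: collect the child's exit value, retrying across EINTR.
    int status;
    while (waitpid(pid, &status, 0) < 0) {
      if (errno != EINTR) return -1;
    }
    return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
  }
}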
4479 int os::fork_and_exec(char* cmd) {
4480   const char * argv[4] = {"sh", "-c", cmd, NULL};
4481 
4482   // fork() in BsdThreads/NPTL is not async-safe. It needs to run


   1 /*
   2  * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


4025   } else {
4026     jio_fprintf(stderr,
4027                 "Could not open pause file '%s', continuing immediately.\n", filename);
4028   }
4029 }
4030 
4031 
4032 // Refer to the comments in os_solaris.cpp park-unpark. The next two
4033 // comment paragraphs are worth repeating here:
4034 //
4035 // Assumption:
4036 //    Only one parker can exist on an event, which is why we allocate
4037 //    them per-thread. Multiple unparkers can coexist.
4038 //
4039 // _Event serves as a restricted-range semaphore.
4040 //   -1 : thread is blocked, i.e. there is a waiter
4041 //    0 : neutral: thread is running or ready,
4042 //        could have been signaled after a wait started
4043 //    1 : signaled - thread is running or ready
4044 //

4045 
4046 // utility to compute the abstime argument to timedwait:
4047 // millis is the relative timeout time
4048 // abstime will be the absolute timeout time
4049 // TODO: replace compute_abstime() with unpackTime()
4050 
4051 static struct timespec* compute_abstime(struct timespec* abstime,
4052                                         jlong millis) {
4053   if (millis < 0)  millis = 0;
4054   struct timeval now;
4055   int status = gettimeofday(&now, NULL);
4056   assert(status == 0, "gettimeofday");
4057   jlong seconds = millis / 1000;
4058   millis %= 1000;
4059   if (seconds > 50000000) { // see man cond_timedwait(3T)
4060     seconds = 50000000;
4061   }
4062   abstime->tv_sec = now.tv_sec  + seconds;
4063   long       usec = now.tv_usec + millis * 1000;
4064   if (usec >= 1000000) {


4136   int status = pthread_mutex_lock(_mutex);
4137   assert_status(status == 0, status, "mutex_lock");
4138   guarantee(_nParked == 0, "invariant");
4139   ++_nParked;
4140 
4141   // Object.wait(timo) will return because of
4142   // (a) notification
4143   // (b) timeout
4144   // (c) thread.interrupt
4145   //
4146   // Thread.interrupt and object.notify{All} both call Event::set.
4147   // That is, we treat thread.interrupt as a special case of notification.
4148   // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
4149   // We assume all ETIME returns are valid.
4150   //
4151   // TODO: properly differentiate simultaneous notify+interrupt.
4152   // In that case, we should propagate the notify to another waiter.
4153 
4154   while (_Event < 0) {
4155     status = pthread_cond_timedwait(_cond, _mutex, &abst);




4156     assert_status(status == 0 || status == EINTR ||
4157                   status == ETIMEDOUT,
4158                   status, "cond_timedwait");
4159     if (!FilterSpuriousWakeups) break;                 // previous semantics
4160     if (status == ETIMEDOUT) break;
4161     // We consume and ignore EINTR and spurious wakeups.
4162   }
4163   --_nParked;
4164   if (_Event >= 0) {
4165     ret = OS_OK;
4166   }
4167   _Event = 0;
4168   status = pthread_mutex_unlock(_mutex);
4169   assert_status(status == 0, status, "mutex_unlock");
4170   assert(_nParked == 0, "invariant");
4171   // Paranoia to ensure our locked and lock-free paths interact
4172   // correctly with each other.
4173   OrderAccess::fence();
4174   return ret;
4175 }


4179   //    0 => 1 : just return
4180   //    1 => 1 : just return
4181   //   -1 => either 0 or 1; must signal target thread
4182   //         That is, we can safely transition _Event from -1 to either
4183   //         0 or 1.
4184   // See also: "Semaphores in Plan 9" by Mullender & Cox
4185   //
4186   // Note: Forcing a transition from "-1" to "1" on an unpark() means
4187   // that it will take two back-to-back park() calls for the owning
4188   // thread to block. This has the benefit of forcing a spurious return
4189   // from the first park() call after an unpark() call which will help
4190   // shake out uses of park() and unpark() without condition variables.
4191 
4192   if (Atomic::xchg(1, &_Event) >= 0) return;
4193 
4194   // Wait for the thread associated with the event to vacate
4195   int status = pthread_mutex_lock(_mutex);
4196   assert_status(status == 0, status, "mutex_lock");
4197   int AnyWaiters = _nParked;
4198   assert(AnyWaiters == 0 || AnyWaiters == 1, "invariant");




4199   status = pthread_mutex_unlock(_mutex);
4200   assert_status(status == 0, status, "mutex_unlock");
4201   if (AnyWaiters != 0) {
4202     // Note that we signal() *after* dropping the lock for "immortal" Events.
4203     // This is safe and avoids a common class of  futile wakeups.  In rare
4204     // circumstances this can cause a thread to return prematurely from
4205     // cond_{timed}wait() but the spurious wakeup is benign and the victim
4206     // will simply re-test the condition and re-park itself.
4207     // This provides particular benefit if the underlying platform does not
4208     // provide wait morphing.
4209     status = pthread_cond_signal(_cond);
4210     assert_status(status == 0, status, "cond_signal");
4211   }
4212 }
4213 
4214 
4215 // JSR166
4216 // -------------------------------------------------------
4217 
4218 // The solaris and bsd implementations of park/unpark are fairly


4311 
4312 
4313   // Enter safepoint region
4314   // Beware of deadlocks such as 6317397.
4315   // The per-thread Parker:: mutex is a classic leaf-lock.
4316   // In particular a thread must never block on the Threads_lock while
4317   // holding the Parker:: mutex.  If safepoints are pending, both the
4318   // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
4319   ThreadBlockInVM tbivm(jt);
4320 
4321   // Don't wait if we cannot get the lock, since interference arises from
4322   // unblocking.  Also, check for interrupt before trying to wait.
4323   if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
4324     return;
4325   }
4326 
4327   int status;
4328   if (_counter > 0)  { // no wait needed
4329     _counter = 0;
4330     status = pthread_mutex_unlock(_mutex);
4331     assert_status(status == 0, status, "invariant");
4332     // Paranoia to ensure our locked and lock-free paths interact
4333     // correctly with each other and Java-level accesses.
4334     OrderAccess::fence();
4335     return;
4336   }
4337 
4338 #ifdef ASSERT
4339   // Don't catch signals while blocked; let the running threads have the signals.
4340   // (This allows a debugger to break into the running thread.)
4341   sigset_t oldsigs;
4342   sigset_t* allowdebug_blocked = os::Bsd::allowdebug_blocked_signals();
4343   pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
4344 #endif
4345 
4346   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4347   jt->set_suspend_equivalent();
4348   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
4349 
4350   if (time == 0) {
4351     status = pthread_cond_wait(_cond, _mutex);
4352   } else {
4353     status = pthread_cond_timedwait(_cond, _mutex, &absTime);




4354   }
4355   assert_status(status == 0 || status == EINTR ||
4356                 status == ETIMEDOUT,
4357                 status, "cond_timedwait");
4358 
4359 #ifdef ASSERT
4360   pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
4361 #endif
4362 
4363   _counter = 0;
4364   status = pthread_mutex_unlock(_mutex);
4365   assert_status(status == 0, status, "invariant");
4366   // Paranoia to ensure our locked and lock-free paths interact
4367   // correctly with each other and Java-level accesses.
4368   OrderAccess::fence();
4369 
4370   // If externally suspended while waiting, re-suspend
4371   if (jt->handle_special_suspend_equivalent_condition()) {
4372     jt->java_suspend_self();
4373   }
4374 }
4375 
4376 void Parker::unpark() {
4377   int status = pthread_mutex_lock(_mutex);
4378   assert_status(status == 0, status, "invariant");
4379   const int s = _counter;
4380   _counter = 1;







4381   status = pthread_mutex_unlock(_mutex);
4382   assert_status(status == 0, status, "invariant");
4383   if (s < 1) {
4384     status = pthread_cond_signal(_cond);
4385     assert_status(status == 0, status, "invariant");




4386   }
4387 }
4388 
4389 
4390 // Darwin has no "environ" in a dynamic library.
4391 #ifdef __APPLE__
4392   #include <crt_externs.h>
4393   #define environ (*_NSGetEnviron())
4394 #else
4395 extern char** environ;
4396 #endif
4397 
4398 // Run the specified command in a separate process. Return its exit value,
4399 // or -1 on failure (e.g. can't fork a new process).
4400 // Unlike system(), this function can be called from a signal handler. It
4401 // doesn't block SIGINT et al.
4402 int os::fork_and_exec(char* cmd) {
4403   const char * argv[4] = {"sh", "-c", cmd, NULL};
4404 
4405   // fork() in BsdThreads/NPTL is not async-safe. It needs to run

