src/os/bsd/vm/os_bsd.cpp

Excerpt (code-review diff view): the old and then the revised versions of
the PlatformEvent park/unpark and Parker park/unpark implementations.




4074   for (;;) {
4075       v = _Event ;
4076       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
4077   }
4078   guarantee (v >= 0, "invariant") ;
4079   if (v == 0) {
4080      // Do this the hard way by blocking ...
4081      int status = pthread_mutex_lock(_mutex);
4082      assert_status(status == 0, status, "mutex_lock");
4083      guarantee (_nParked == 0, "invariant") ;
4084      ++ _nParked ;
4085      while (_Event < 0) {
4086         status = pthread_cond_wait(_cond, _mutex);
4087         // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
4088         // Treat this the same as if the wait was interrupted
4089         if (status == ETIMEDOUT) { status = EINTR; }
4090         assert_status(status == 0 || status == EINTR, status, "cond_wait");
4091      }
4092      -- _nParked ;
4093 
4094     // In theory we could move the ST of 0 into _Event past the unlock(),
4095     // but then we'd need a MEMBAR after the ST.
4096     _Event = 0 ;
4097      status = pthread_mutex_unlock(_mutex);
4098      assert_status(status == 0, status, "mutex_unlock");



4099   }
4100   guarantee (_Event >= 0, "invariant") ;
4101 }
4102 
4103 int os::PlatformEvent::park(jlong millis) {
4104   guarantee (_nParked == 0, "invariant") ;
4105 
4106   int v ;
4107   for (;;) {
4108       v = _Event ;
4109       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
4110   }
4111   guarantee (v >= 0, "invariant") ;
4112   if (v != 0) return OS_OK ;
4113 
4114   // We do this the hard way, by blocking the thread.
4115   // Consider enforcing a minimum timeout value.
4116   struct timespec abst;
4117   compute_abstime(&abst, millis);
4118 


4141     status = os::Bsd::safe_cond_timedwait(_cond, _mutex, &abst);
4142     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4143       pthread_cond_destroy (_cond);
4144       pthread_cond_init (_cond, NULL) ;
4145     }
4146     assert_status(status == 0 || status == EINTR ||
4147                   status == ETIMEDOUT,
4148                   status, "cond_timedwait");
4149     if (!FilterSpuriousWakeups) break ;                 // previous semantics
4150     if (status == ETIMEDOUT) break ;
4151     // We consume and ignore EINTR and spurious wakeups.
4152   }
4153   --_nParked ;
4154   if (_Event >= 0) {
4155      ret = OS_OK;
4156   }
4157   _Event = 0 ;
4158   status = pthread_mutex_unlock(_mutex);
4159   assert_status(status == 0, status, "mutex_unlock");
4160   assert (_nParked == 0, "invariant") ;



4161   return ret;
4162 }
4163 
// os::PlatformEvent::unpark -- make the event available.
// Bumps _Event by one via CAS (fencing and re-reading when it is already
// positive); if the previous value was negative -- i.e. a parker is
// blocked, or about to block, in park() -- takes _mutex and signals
// _cond to wake it.
// NOTE(review): from this excerpt _Event appears confined to {-1, 0, 1}
// (the park() paths guarantee v >= 0 before decrementing) -- confirm
// against the full file.
4164 void os::PlatformEvent::unpark() {
4165   int v, AnyWaiters ;
4166   for (;;) {
4167       v = _Event ;
4168       if (v > 0) {
4169          // The LD of _Event could have reordered or be satisfied
4170          // by a read-aside from this processor's write buffer.
4171          // To avoid problems execute a barrier and then
4172          // ratify the value.
4173          OrderAccess::fence() ;
4174          if (_Event == v) return ;
4175          continue ;
4176       }
4177       if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
4178   }
4179   if (v < 0) {


4180      // Wait for the thread associated with the event to vacate
4181      int status = pthread_mutex_lock(_mutex);
4182      assert_status(status == 0, status, "mutex_lock");
4183      AnyWaiters = _nParked ;
4184      assert (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
      // With the NPTL workaround enabled, signal while still holding the
      // mutex (and clear AnyWaiters so we don't signal a second time below).
4185      if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
4186         AnyWaiters = 0 ;
4187         pthread_cond_signal (_cond);
4188      }
4189      status = pthread_mutex_unlock(_mutex);
4190      assert_status(status == 0, status, "mutex_unlock");
      // Default path: signal after dropping the lock -- see the note below.
4191      if (AnyWaiters != 0) {
4192         status = pthread_cond_signal(_cond);
4193         assert_status(status == 0, status, "cond_signal");
4194      }
4195   }
4196 
4197   // Note that we signal() _after dropping the lock for "immortal" Events.
4198   // This is safe and avoids a common class of  futile wakeups.  In rare
4199   // circumstances this can cause a thread to return prematurely from
4200   // cond_{timed}wait() but the spurious wakeup is benign and the victim will
4201   // simply re-test the condition and re-park itself.
4202 }
4203 
4204 
4205 // JSR166
4206 // -------------------------------------------------------
4207 
4208 /*
4209  * The solaris and bsd implementations of park/unpark are fairly
4210  * conservative for now, but can be improved. They currently use a
4211  * mutex/condvar pair, plus a count.
4212  * Park decrements count if > 0, else does a condvar wait.  Unpark
4213  * sets count to 1 and signals condvar.  Only one thread ever waits
4214  * on the condvar. Contention seen when trying to park implies that someone
4215  * is unparking you, so don't wait. And spurious returns are fine, so there


4260     if (secs >= MAX_SECS) {
4261       absTime->tv_sec = max_secs;
4262       absTime->tv_nsec = 0;
4263     }
4264     else {
4265       absTime->tv_sec = now.tv_sec + secs;
4266       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
4267       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4268         absTime->tv_nsec -= NANOSECS_PER_SEC;
4269         ++absTime->tv_sec; // note: this must be <= max_secs
4270       }
4271     }
4272   }
4273   assert(absTime->tv_sec >= 0, "tv_sec < 0");
4274   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4275   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4276   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4277 }
4278 
// Parker::park -- JSR-166 park support for the current JavaThread.
// Fast path: consume an available permit (_counter > 0) and return.
// Slow path: enter a safepoint-safe region and block on the per-Parker
// condvar until unpark(), an interrupt, or (time > 0) the deadline.
// NOTE(review): the interpretation of 'time' (relative vs. absolute,
// and its units) is encoded by unpackTime(), which is not visible in
// this excerpt -- confirm there.
4279 void Parker::park(bool isAbsolute, jlong time) {



4280   // Optional fast-path check:
4281   // Return immediately if a permit is available.
       // NOTE(review): _counter is tested and cleared here without an
       // atomic read-modify-write; the slow path re-checks under _mutex,
       // but the ordering assumptions deserve scrutiny.
4282   if (_counter > 0) {
4283       _counter = 0 ;
4284       OrderAccess::fence();
4285       return ;
4286   }
4287 
4288   Thread* thread = Thread::current();
4289   assert(thread->is_Java_thread(), "Must be JavaThread");
4290   JavaThread *jt = (JavaThread *)thread;
4291 
4292   // Optional optimization -- avoid state transitions if there's an interrupt pending.
4293   // Check interrupt before trying to wait
4294   if (Thread::is_interrupted(thread, false)) {
4295     return;
4296   }
4297 
4298   // Next, demultiplex/decode time arguments
4299   struct timespec absTime;
4300   if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
4301     return;
4302   }
4303   if (time > 0) {
4304     unpackTime(&absTime, isAbsolute, time);
4305   }
4306 
4307 
4308   // Enter safepoint region
4309   // Beware of deadlocks such as 6317397.
4310   // The per-thread Parker:: mutex is a classic leaf-lock.
4311   // In particular a thread must never block on the Threads_lock while
4312   // holding the Parker:: mutex.  If safepoints are pending both the
4313   // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
4314   ThreadBlockInVM tbivm(jt);
4315 
4316   // Don't wait if cannot get lock since interference arises from
4317   // unblocking.  Also, check interrupt before trying to wait.
4318   if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
4319     return;
4320   }
4321 
4322   int status ;
  // An unpark() may have run between the fast-path check above and
  // acquiring _mutex; re-check the permit under the lock.
4323   if (_counter > 0)  { // no wait needed
4324     _counter = 0;
4325     status = pthread_mutex_unlock(_mutex);
4326     assert (status == 0, "invariant") ;


4327     OrderAccess::fence();
4328     return;
4329   }
4330 
4331 #ifdef ASSERT
4332   // Don't catch signals while blocked; let the running threads have the signals.
4333   // (This allows a debugger to break into the running thread.)
4334   sigset_t oldsigs;
4335   sigset_t* allowdebug_blocked = os::Bsd::allowdebug_blocked_signals();
4336   pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
4337 #endif
4338 
4339   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4340   jt->set_suspend_equivalent();
4341   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
4342 
4343   if (time == 0) {
4344     status = pthread_cond_wait (_cond, _mutex) ;
4345   } else {
4346     status = os::Bsd::safe_cond_timedwait (_cond, _mutex, &absTime) ;
    // NOTE(review): the NPTL workaround recreates the condvar after a
    // failed timed wait; safe only if no other thread can be waiting
    // on _cond (per the comment block below, only one thread ever
    // waits on it) -- confirm.
4347     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4348       pthread_cond_destroy (_cond) ;
4349       pthread_cond_init    (_cond, NULL);
4350     }
4351   }
4352   assert_status(status == 0 || status == EINTR ||
4353                 status == ETIMEDOUT,
4354                 status, "cond_timedwait");
4355 
4356 #ifdef ASSERT
4357   pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
4358 #endif
4359 
  // Consume any permit published by unpark() while we were waiting.
4360   _counter = 0 ;
4361   status = pthread_mutex_unlock(_mutex) ;
4362   assert_status(status == 0, status, "invariant") ;




4363   // If externally suspended while waiting, re-suspend
4364   if (jt->handle_special_suspend_equivalent_condition()) {
4365     jt->java_suspend_self();
4366   }
4367 
4368   OrderAccess::fence();
4369 }
4370 
4371 void Parker::unpark() {
4372   int s, status ;
4373   status = pthread_mutex_lock(_mutex);
4374   assert (status == 0, "invariant") ;
4375   s = _counter;
4376   _counter = 1;
4377   if (s < 1) {
4378      if (WorkAroundNPTLTimedWaitHang) {
4379         status = pthread_cond_signal (_cond) ;
4380         assert (status == 0, "invariant") ;
4381         status = pthread_mutex_unlock(_mutex);
4382         assert (status == 0, "invariant") ;
4383      } else {
4384         status = pthread_mutex_unlock(_mutex);
4385         assert (status == 0, "invariant") ;
4386         status = pthread_cond_signal (_cond) ;
4387         assert (status == 0, "invariant") ;
4388      }




4074   for (;;) {
4075       v = _Event ;
4076       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
4077   }
4078   guarantee (v >= 0, "invariant") ;
4079   if (v == 0) {
4080      // Do this the hard way by blocking ...
4081      int status = pthread_mutex_lock(_mutex);
4082      assert_status(status == 0, status, "mutex_lock");
4083      guarantee (_nParked == 0, "invariant") ;
4084      ++ _nParked ;
4085      while (_Event < 0) {
4086         status = pthread_cond_wait(_cond, _mutex);
4087         // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
4088         // Treat this the same as if the wait was interrupted
4089         if (status == ETIMEDOUT) { status = EINTR; }
4090         assert_status(status == 0 || status == EINTR, status, "cond_wait");
4091      }
4092      -- _nParked ;
4093 


4094     _Event = 0 ;
4095      status = pthread_mutex_unlock(_mutex);
4096      assert_status(status == 0, status, "mutex_unlock");
4097     // Paranoia to ensure our locked and lock-free paths interact
4098     // correctly with each other.
4099     OrderAccess::fence();
4100   }
4101   guarantee (_Event >= 0, "invariant") ;
4102 }
4103 
4104 int os::PlatformEvent::park(jlong millis) {
4105   guarantee (_nParked == 0, "invariant") ;
4106 
4107   int v ;
4108   for (;;) {
4109       v = _Event ;
4110       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
4111   }
4112   guarantee (v >= 0, "invariant") ;
4113   if (v != 0) return OS_OK ;
4114 
4115   // We do this the hard way, by blocking the thread.
4116   // Consider enforcing a minimum timeout value.
4117   struct timespec abst;
4118   compute_abstime(&abst, millis);
4119 


4142     status = os::Bsd::safe_cond_timedwait(_cond, _mutex, &abst);
4143     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4144       pthread_cond_destroy (_cond);
4145       pthread_cond_init (_cond, NULL) ;
4146     }
4147     assert_status(status == 0 || status == EINTR ||
4148                   status == ETIMEDOUT,
4149                   status, "cond_timedwait");
4150     if (!FilterSpuriousWakeups) break ;                 // previous semantics
4151     if (status == ETIMEDOUT) break ;
4152     // We consume and ignore EINTR and spurious wakeups.
4153   }
4154   --_nParked ;
4155   if (_Event >= 0) {
4156      ret = OS_OK;
4157   }
4158   _Event = 0 ;
4159   status = pthread_mutex_unlock(_mutex);
4160   assert_status(status == 0, status, "mutex_unlock");
4161   assert (_nParked == 0, "invariant") ;
4162   // Paranoia to ensure our locked and lock-free paths interact
4163   // correctly with each other.
4164   OrderAccess::fence();
4165   return ret;
4166 }
4167 
// os::PlatformEvent::unpark -- make the event available (revised form).
// Publishes _Event = 1 with a single full-barrier xchg; only when the
// previous value was negative (a parker is blocked, or about to block,
// in park()) do we take _mutex and signal _cond to wake it.
4168 void os::PlatformEvent::unpark() {
4169   // Transitions for _Event:
4170   //    0 :=> 1
4171   //    1 :=> 1
4172   //   -1 :=> either 0 or 1; must signal target thread
4173   //          That is, we can safely transition _Event from -1 to either
4174   //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
4175   //          unpark() calls.
4176   // See also: "Semaphores in Plan 9" by Mullender & Cox
4177   //
4178   // Note: Forcing a transition from "-1" to "1" on an unpark() means
4179   // that it will take two back-to-back park() calls for the owning
4180   // thread to block. This has the benefit of forcing a spurious return
4181   // from the first park() call after an unpark() call which will help
4182   // shake out uses of park() and unpark() without condition variables.
4183 
4184   if (Atomic::xchg(1, &_Event) >= 0) return;

4185 
4186   // Wait for the thread associated with the event to vacate
4187   int status = pthread_mutex_lock(_mutex);
4188   assert_status(status == 0, status, "mutex_lock");
4189   int AnyWaiters = _nParked;
4190   assert(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
  // With the NPTL workaround enabled, signal while still holding the
  // mutex (and clear AnyWaiters so we don't signal a second time below).
4191   if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
4192     AnyWaiters = 0;
4193     pthread_cond_signal(_cond);
4194   }
4195   status = pthread_mutex_unlock(_mutex);
4196   assert_status(status == 0, status, "mutex_unlock");
  // Default path: signal after dropping the lock -- see the note below.
4197   if (AnyWaiters != 0) {
4198     status = pthread_cond_signal(_cond);
4199     assert_status(status == 0, status, "cond_signal");
4200   }

4201 
4202   // Note that we signal() _after dropping the lock for "immortal" Events.
4203   // This is safe and avoids a common class of  futile wakeups.  In rare
4204   // circumstances this can cause a thread to return prematurely from
4205   // cond_{timed}wait() but the spurious wakeup is benign and the victim will
4206   // simply re-test the condition and re-park itself.
4207 }
4208 
4209 
4210 // JSR166
4211 // -------------------------------------------------------
4212 
4213 /*
4214  * The solaris and bsd implementations of park/unpark are fairly
4215  * conservative for now, but can be improved. They currently use a
4216  * mutex/condvar pair, plus a count.
4217  * Park decrements count if > 0, else does a condvar wait.  Unpark
4218  * sets count to 1 and signals condvar.  Only one thread ever waits
4219  * on the condvar. Contention seen when trying to park implies that someone
4220  * is unparking you, so don't wait. And spurious returns are fine, so there


4265     if (secs >= MAX_SECS) {
4266       absTime->tv_sec = max_secs;
4267       absTime->tv_nsec = 0;
4268     }
4269     else {
4270       absTime->tv_sec = now.tv_sec + secs;
4271       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
4272       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4273         absTime->tv_nsec -= NANOSECS_PER_SEC;
4274         ++absTime->tv_sec; // note: this must be <= max_secs
4275       }
4276     }
4277   }
4278   assert(absTime->tv_sec >= 0, "tv_sec < 0");
4279   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4280   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4281   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4282 }
4283 
// Parker::park -- JSR-166 park support for the current JavaThread
// (revised form: lock-free permit consumption via Atomic::xchg).
// Fast path: atomically consume an available permit and return.
// Slow path: enter a safepoint-safe region and block on the per-Parker
// condvar until unpark(), an interrupt, or (time > 0) the deadline.
// NOTE(review): the interpretation of 'time' (relative vs. absolute,
// and its units) is encoded by unpackTime(), which is not visible in
// this excerpt -- confirm there.
4284 void Parker::park(bool isAbsolute, jlong time) {
4285   // Ideally we'd do something useful while spinning, such
4286   // as calling unpackTime().
4287 
4288   // Optional fast-path check:
4289   // Return immediately if a permit is available.
4290   // We depend on Atomic::xchg() having full barrier semantics
4291   // since we are doing a lock-free update to _counter.
4292   if (Atomic::xchg(0, &_counter) > 0) return;


4293 
4294   Thread* thread = Thread::current();
4295   assert(thread->is_Java_thread(), "Must be JavaThread");
4296   JavaThread *jt = (JavaThread *)thread;
4297 
4298   // Optional optimization -- avoid state transitions if there's an interrupt pending.
4299   // Check interrupt before trying to wait
4300   if (Thread::is_interrupted(thread, false)) {
4301     return;
4302   }
4303 
4304   // Next, demultiplex/decode time arguments
4305   struct timespec absTime;
4306   if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
4307     return;
4308   }
4309   if (time > 0) {
4310     unpackTime(&absTime, isAbsolute, time);
4311   }
4312 
4313 
4314   // Enter safepoint region
4315   // Beware of deadlocks such as 6317397.
4316   // The per-thread Parker:: mutex is a classic leaf-lock.
4317   // In particular a thread must never block on the Threads_lock while
4318   // holding the Parker:: mutex.  If safepoints are pending both the
4319   // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
4320   ThreadBlockInVM tbivm(jt);
4321 
4322   // Don't wait if cannot get lock since interference arises from
4323   // unblocking.  Also, check interrupt before trying to wait.
4324   if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
4325     return;
4326   }
4327 
4328   int status ;
  // An unpark() may have published a permit between the xchg above and
  // acquiring _mutex; re-check under the lock.
4329   if (_counter > 0)  { // no wait needed
4330     _counter = 0;
4331     status = pthread_mutex_unlock(_mutex);
4332     assert (status == 0, "invariant") ;
4333     // Paranoia to ensure our locked and lock-free paths interact
4334     // correctly with each other and Java-level accesses.
4335     OrderAccess::fence();
4336     return;
4337   }
4338 
4339 #ifdef ASSERT
4340   // Don't catch signals while blocked; let the running threads have the signals.
4341   // (This allows a debugger to break into the running thread.)
4342   sigset_t oldsigs;
4343   sigset_t* allowdebug_blocked = os::Bsd::allowdebug_blocked_signals();
4344   pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
4345 #endif
4346 
4347   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4348   jt->set_suspend_equivalent();
4349   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
4350 
4351   if (time == 0) {
4352     status = pthread_cond_wait (_cond, _mutex) ;
4353   } else {
4354     status = os::Bsd::safe_cond_timedwait (_cond, _mutex, &absTime) ;
    // NOTE(review): the NPTL workaround recreates the condvar after a
    // failed timed wait; safe only if no other thread can be waiting
    // on _cond (per the comment block above this function, only one
    // thread ever waits on it) -- confirm.
4355     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4356       pthread_cond_destroy (_cond) ;
4357       pthread_cond_init    (_cond, NULL);
4358     }
4359   }
4360   assert_status(status == 0 || status == EINTR ||
4361                 status == ETIMEDOUT,
4362                 status, "cond_timedwait");
4363 
4364 #ifdef ASSERT
4365   pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
4366 #endif
4367 
  // Consume any permit published by unpark() while we were waiting.
4368   _counter = 0 ;
4369   status = pthread_mutex_unlock(_mutex) ;
4370   assert_status(status == 0, status, "invariant") ;
4371   // Paranoia to ensure our locked and lock-free paths interact
4372   // correctly with each other and Java-level accesses.
4373   OrderAccess::fence();
4374 
4375   // If externally suspended while waiting, re-suspend
4376   if (jt->handle_special_suspend_equivalent_condition()) {
4377     jt->java_suspend_self();
4378   }


4379 }
4380 
4381 void Parker::unpark() {
4382   int s, status ;
4383   status = pthread_mutex_lock(_mutex);
4384   assert (status == 0, "invariant") ;
4385   s = _counter;
4386   _counter = 1;
4387   if (s < 1) {
4388      if (WorkAroundNPTLTimedWaitHang) {
4389         status = pthread_cond_signal (_cond) ;
4390         assert (status == 0, "invariant") ;
4391         status = pthread_mutex_unlock(_mutex);
4392         assert (status == 0, "invariant") ;
4393      } else {
4394         status = pthread_mutex_unlock(_mutex);
4395         assert (status == 0, "invariant") ;
4396         status = pthread_cond_signal (_cond) ;
4397         assert (status == 0, "invariant") ;
4398      }