src/os/linux/vm/os_linux.cpp

4984   for (;;) {
4985       v = _Event ;
4986       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
4987   }
4988   guarantee (v >= 0, "invariant") ;
4989   if (v == 0) {
4990      // Do this the hard way by blocking ...
4991      int status = pthread_mutex_lock(_mutex);
4992      assert_status(status == 0, status, "mutex_lock");
4993      guarantee (_nParked == 0, "invariant") ;
4994      ++ _nParked ;
4995      while (_Event < 0) {
4996         status = pthread_cond_wait(_cond, _mutex);
4997         // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
4998         // Treat this the same as if the wait was interrupted
4999         if (status == ETIME) { status = EINTR; }
5000         assert_status(status == 0 || status == EINTR, status, "cond_wait");
5001      }
5002      -- _nParked ;
5003 
5004     // In theory we could move the ST of 0 into _Event past the unlock(),
5005     // but then we'd need a MEMBAR after the ST.
5006     _Event = 0 ;
5007      status = pthread_mutex_unlock(_mutex);
5008      assert_status(status == 0, status, "mutex_unlock");



5009   }
5010   guarantee (_Event >= 0, "invariant") ;
5011 }
5012 
5013 int os::PlatformEvent::park(jlong millis) {
5014   guarantee (_nParked == 0, "invariant") ;
5015 
5016   int v ;
5017   for (;;) {
5018       v = _Event ;
5019       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5020   }
5021   guarantee (v >= 0, "invariant") ;
5022   if (v != 0) return OS_OK ;
5023 
5024   // We do this the hard way, by blocking the thread.
5025   // Consider enforcing a minimum timeout value.
5026   struct timespec abst;
5027   compute_abstime(&abst, millis);
5028 


5051     status = os::Linux::safe_cond_timedwait(_cond, _mutex, &abst);
5052     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
5053       pthread_cond_destroy (_cond);
5054       pthread_cond_init (_cond, NULL) ;
5055     }
5056     assert_status(status == 0 || status == EINTR ||
5057                   status == ETIME || status == ETIMEDOUT,
5058                   status, "cond_timedwait");
5059     if (!FilterSpuriousWakeups) break ;                 // previous semantics
5060     if (status == ETIME || status == ETIMEDOUT) break ;
5061     // We consume and ignore EINTR and spurious wakeups.
5062   }
5063   --_nParked ;
5064   if (_Event >= 0) {
5065      ret = OS_OK;
5066   }
5067   _Event = 0 ;
5068   status = pthread_mutex_unlock(_mutex);
5069   assert_status(status == 0, status, "mutex_unlock");
5070   assert (_nParked == 0, "invariant") ;



5071   return ret;
5072 }
5073 
5074 void os::PlatformEvent::unpark() {
5075   int v, AnyWaiters ;
5076   for (;;) {
5077       v = _Event ;
5078       if (v > 0) {
5079          // The LD of _Event could have been reordered or be satisfied
5080          // by a read-aside from this processor's write buffer.
5081          // To avoid problems execute a barrier and then
5082          // ratify the value.
5083          OrderAccess::fence() ;
5084          if (_Event == v) return ;
5085          continue ;
5086       }
5087       if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
5088   }
5089   if (v < 0) {


5090      // Wait for the thread associated with the event to vacate
5091      int status = pthread_mutex_lock(_mutex);
5092      assert_status(status == 0, status, "mutex_lock");
5093      AnyWaiters = _nParked ;
5094      assert (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
5095      if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
5096         AnyWaiters = 0 ;
5097         pthread_cond_signal (_cond);
5098      }
5099      status = pthread_mutex_unlock(_mutex);
5100      assert_status(status == 0, status, "mutex_unlock");
5101      if (AnyWaiters != 0) {
5102         status = pthread_cond_signal(_cond);
5103         assert_status(status == 0, status, "cond_signal");
5104      }
5105   }
5106 
5107   // Note that we signal() _after dropping the lock for "immortal" Events.
5108   // This is safe and avoids a common class of  futile wakeups.  In rare
5109   // circumstances this can cause a thread to return prematurely from
5110   // cond_{timed}wait() but the spurious wakeup is benign and the victim will
5111   // simply re-test the condition and re-park itself.
5112 }
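
The note above about signalling after the unlock relies on the waiter re-testing its predicate in a loop, which is what makes the early or spurious wakeup benign. The following standalone sketch of that generic pattern is illustration only (the names are invented and it is not HotSpot code); it is safe precisely because the condvar here, like the "immortal" Events above, is never destroyed while threads may still touch it.

#include <pthread.h>

// Generic predicate + mutex/condvar pair, analogous to _Event/_mutex/_cond.
static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  demo_cond  = PTHREAD_COND_INITIALIZER;
static int             demo_ready = 0;

static void demo_wait() {
  pthread_mutex_lock(&demo_mutex);
  while (!demo_ready) {                        // re-test: spurious wakeups just loop
    pthread_cond_wait(&demo_cond, &demo_mutex);
  }
  demo_ready = 0;
  pthread_mutex_unlock(&demo_mutex);
}

static void demo_post() {
  pthread_mutex_lock(&demo_mutex);
  demo_ready = 1;
  pthread_mutex_unlock(&demo_mutex);
  // Signal after dropping the lock: the woken waiter can usually acquire the
  // mutex immediately instead of stalling behind the signalling thread.
  pthread_cond_signal(&demo_cond);
}
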
5113 
5114 
5115 // JSR166
5116 // -------------------------------------------------------
5117 
5118 /*
5119  * The Solaris and Linux implementations of park/unpark are fairly
5120  * conservative for now, but can be improved. They currently use a
5121  * mutex/condvar pair, plus a count.
5122  * Park decrements count if > 0, else does a condvar wait.  Unpark
5123  * sets count to 1 and signals condvar.  Only one thread ever waits
5124  * on the condvar. Contention seen when trying to park implies that someone
5125  * is unparking you, so don't wait. And spurious returns are fine, so there
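
As a reading aid, here is a minimal standalone sketch of the protocol this comment describes: a permit count protected by a mutex/condvar pair, with at most one waiter. It is an illustration with invented names, not the actual Parker class, and it omits the timed wait, interrupt checks and safepoint handling shown below; the real park() also simply returns on spurious wakeups instead of looping, since callers of LockSupport.park() must tolerate that.

#include <pthread.h>

class DemoParker {                     // hypothetical, for illustration only
  pthread_mutex_t _mu;
  pthread_cond_t  _cv;
  int             _counter;            // 0 = no permit, 1 = permit available
 public:
  DemoParker() : _counter(0) {
    pthread_mutex_init(&_mu, NULL);
    pthread_cond_init(&_cv, NULL);
  }
  void park() {
    pthread_mutex_lock(&_mu);
    while (_counter == 0) {            // no permit: block on the condvar
      pthread_cond_wait(&_cv, &_mu);
    }
    _counter = 0;                      // consume the permit
    pthread_mutex_unlock(&_mu);
  }
  void unpark() {
    pthread_mutex_lock(&_mu);
    _counter = 1;                      // permits never accumulate past one
    pthread_mutex_unlock(&_mu);
    pthread_cond_signal(&_cv);         // at most one thread ever waits
  }
};
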


5170     if (secs >= MAX_SECS) {
5171       absTime->tv_sec = max_secs;
5172       absTime->tv_nsec = 0;
5173     }
5174     else {
5175       absTime->tv_sec = now.tv_sec + secs;
5176       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
5177       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
5178         absTime->tv_nsec -= NANOSECS_PER_SEC;
5179         ++absTime->tv_sec; // note: this must be <= max_secs
5180       }
5181     }
5182   }
5183   assert(absTime->tv_sec >= 0, "tv_sec < 0");
5184   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
5185   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
5186   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
5187 }
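
A worked example may make the nanosecond carry above easier to follow. The helper below is a hypothetical, self-contained version of the relative-deadline arithmetic (the name and structure are invented for illustration; the real unpackTime() also clamps to max_secs and handles the isAbsolute case).

#include <sys/time.h>
#include <time.h>

// Turn "nanos from now" into an absolute timespec deadline.
// Example: now.tv_usec == 700000 (0.7 s) and nanos == 1500000000 (1.5 s)
// gives tv_nsec = 500000000 + 700000000 = 1200000000, which overflows one
// second and must be carried into tv_sec.
static void demo_deadline(struct timespec* abstime, long long nanos) {
  struct timeval now;
  gettimeofday(&now, NULL);
  long long secs   = nanos / 1000000000LL;
  abstime->tv_sec  = now.tv_sec + secs;
  abstime->tv_nsec = (long)(nanos % 1000000000LL) + now.tv_usec * 1000L;
  if (abstime->tv_nsec >= 1000000000L) {   // carry the overflowed second
    abstime->tv_nsec -= 1000000000L;
    ++abstime->tv_sec;
  }
}
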
5188 
5189 void Parker::park(bool isAbsolute, jlong time) {



5190   // Optional fast-path check:
5191   // Return immediately if a permit is available.
5192   if (_counter > 0) {
5193       _counter = 0 ;
5194       OrderAccess::fence();
5195       return ;
5196   }
5197 
5198   Thread* thread = Thread::current();
5199   assert(thread->is_Java_thread(), "Must be JavaThread");
5200   JavaThread *jt = (JavaThread *)thread;
5201 
5202   // Optional optimization -- avoid state transitions if there's an interrupt pending.
5203   // Check interrupt before trying to wait
5204   if (Thread::is_interrupted(thread, false)) {
5205     return;
5206   }
5207 
5208   // Next, demultiplex/decode time arguments
5209   timespec absTime;
5210   if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
5211     return;
5212   }
5213   if (time > 0) {
5214     unpackTime(&absTime, isAbsolute, time);
5215   }
5216 
5217 
5218   // Enter safepoint region
5219   // Beware of deadlocks such as 6317397.
5220   // The per-thread Parker:: mutex is a classic leaf-lock.
5221   // In particular a thread must never block on the Threads_lock while
5222   // holding the Parker:: mutex.  If safepoints are pending, both the
5223   // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
5224   ThreadBlockInVM tbivm(jt);
5225 
5226   // Don't wait if we cannot get the lock, since interference arises from
5227   // unblocking.  Also, check the interrupt state before trying to wait.
5228   if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
5229     return;
5230   }
5231 
5232   int status ;
5233   if (_counter > 0)  { // no wait needed
5234     _counter = 0;
5235     status = pthread_mutex_unlock(_mutex);
5236     assert (status == 0, "invariant") ;


5237     OrderAccess::fence();
5238     return;
5239   }
5240 
5241 #ifdef ASSERT
5242   // Don't catch signals while blocked; let the running threads have the signals.
5243   // (This allows a debugger to break into the running thread.)
5244   sigset_t oldsigs;
5245   sigset_t* allowdebug_blocked = os::Linux::allowdebug_blocked_signals();
5246   pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
5247 #endif
5248 
5249   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5250   jt->set_suspend_equivalent();
5251   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
5252 
5253   if (time == 0) {
5254     status = pthread_cond_wait (_cond, _mutex) ;
5255   } else {
5256     status = os::Linux::safe_cond_timedwait (_cond, _mutex, &absTime) ;
5257     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
5258       pthread_cond_destroy (_cond) ;
5259       pthread_cond_init    (_cond, NULL);
5260     }
5261   }
5262   assert_status(status == 0 || status == EINTR ||
5263                 status == ETIME || status == ETIMEDOUT,
5264                 status, "cond_timedwait");
5265 
5266 #ifdef ASSERT
5267   pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
5268 #endif
5269 
5270   _counter = 0 ;
5271   status = pthread_mutex_unlock(_mutex) ;
5272   assert_status(status == 0, status, "invariant") ;




5273   // If externally suspended while waiting, re-suspend
5274   if (jt->handle_special_suspend_equivalent_condition()) {
5275     jt->java_suspend_self();
5276   }
5277 
5278   OrderAccess::fence();
5279 }
5280 
5281 void Parker::unpark() {
5282   int s, status ;
5283   status = pthread_mutex_lock(_mutex);
5284   assert (status == 0, "invariant") ;
5285   s = _counter;
5286   _counter = 1;
5287   if (s < 1) {
5288      if (WorkAroundNPTLTimedWaitHang) {
5289         status = pthread_cond_signal (_cond) ;
5290         assert (status == 0, "invariant") ;
5291         status = pthread_mutex_unlock(_mutex);
5292         assert (status == 0, "invariant") ;
5293      } else {
5294         status = pthread_mutex_unlock(_mutex);
5295         assert (status == 0, "invariant") ;
5296         status = pthread_cond_signal (_cond) ;
5297         assert (status == 0, "invariant") ;
5298      }




4984   for (;;) {
4985       v = _Event ;
4986       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
4987   }
4988   guarantee (v >= 0, "invariant") ;
4989   if (v == 0) {
4990      // Do this the hard way by blocking ...
4991      int status = pthread_mutex_lock(_mutex);
4992      assert_status(status == 0, status, "mutex_lock");
4993      guarantee (_nParked == 0, "invariant") ;
4994      ++ _nParked ;
4995      while (_Event < 0) {
4996         status = pthread_cond_wait(_cond, _mutex);
4997         // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
4998         // Treat this the same as if the wait was interrupted
4999         if (status == ETIME) { status = EINTR; }
5000         assert_status(status == 0 || status == EINTR, status, "cond_wait");
5001      }
5002      -- _nParked ;
5003 


5004     _Event = 0 ;
5005      status = pthread_mutex_unlock(_mutex);
5006      assert_status(status == 0, status, "mutex_unlock");
5007     // Paranoia to ensure our locked and lock-free paths interact
5008     // correctly with each other.
5009     OrderAccess::fence();
5010   }
5011   guarantee (_Event >= 0, "invariant") ;
5012 }
5013 
5014 int os::PlatformEvent::park(jlong millis) {
5015   guarantee (_nParked == 0, "invariant") ;
5016 
5017   int v ;
5018   for (;;) {
5019       v = _Event ;
5020       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5021   }
5022   guarantee (v >= 0, "invariant") ;
5023   if (v != 0) return OS_OK ;
5024 
5025   // We do this the hard way, by blocking the thread.
5026   // Consider enforcing a minimum timeout value.
5027   struct timespec abst;
5028   compute_abstime(&abst, millis);
5029 


5052     status = os::Linux::safe_cond_timedwait(_cond, _mutex, &abst);
5053     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
5054       pthread_cond_destroy (_cond);
5055       pthread_cond_init (_cond, NULL) ;
5056     }
5057     assert_status(status == 0 || status == EINTR ||
5058                   status == ETIME || status == ETIMEDOUT,
5059                   status, "cond_timedwait");
5060     if (!FilterSpuriousWakeups) break ;                 // previous semantics
5061     if (status == ETIME || status == ETIMEDOUT) break ;
5062     // We consume and ignore EINTR and spurious wakeups.
5063   }
5064   --_nParked ;
5065   if (_Event >= 0) {
5066      ret = OS_OK;
5067   }
5068   _Event = 0 ;
5069   status = pthread_mutex_unlock(_mutex);
5070   assert_status(status == 0, status, "mutex_unlock");
5071   assert (_nParked == 0, "invariant") ;
5072   // Paranoia to ensure our locked and lock-free paths interact
5073   // correctly with each other.
5074   OrderAccess::fence();
5075   return ret;
5076 }
5077 
5078 void os::PlatformEvent::unpark() {
5079   // Transitions for _Event:
5080   //    0 :=> 1
5081   //    1 :=> 1
5082   //   -1 :=> either 0 or 1; must signal target thread
5083   //          That is, we can safely transition _Event from -1 to either
5084   //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
5085   //          unpark() calls.
5086   // See also: "Semaphores in Plan 9" by Mullender & Cox
5087   //
5088   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5089   // that it will take two back-to-back park() calls for the owning
5090   // thread to block. This has the benefit of forcing a spurious return
5091   // from the first park() call after an unpark() call which will help
5092   // shake out uses of park() and unpark() without condition variables.
5093 
5094   if (Atomic::xchg(1, &_Event) >= 0) return;
5095 
5096   // Wait for the thread associated with the event to vacate
5097   int status = pthread_mutex_lock(_mutex);
5098   assert_status(status == 0, status, "mutex_lock");
5099   int AnyWaiters = _nParked;
5100   assert(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
5101   if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
5102     AnyWaiters = 0;
5103     pthread_cond_signal(_cond);
5104   }
5105   status = pthread_mutex_unlock(_mutex);
5106   assert_status(status == 0, status, "mutex_unlock");
5107   if (AnyWaiters != 0) {
5108     status = pthread_cond_signal(_cond);
5109     assert_status(status == 0, status, "cond_signal");
5110   }

5111 
5112   // Note that we signal() _after dropping the lock for "immortal" Events.
5113   // This is safe and avoids a common class of  futile wakeups.  In rare
5114   // circumstances this can cause a thread to return prematurely from
5115   // cond_{timed}wait() but the spurious wakeup is benign and the victim will
5116   // simply re-test the condition and re-park itself.
5117 }
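
The transition table in the comment above can be exercised with a tiny single-threaded model. This is purely illustrative, with invented names: the real code performs the transitions with Atomic::xchg()/cmpxchg() and uses the mutex/condvar to actually block and wake the owning thread.

#include <cassert>

static int ev = 0;                     // 1 = permit, 0 = neutral, -1 = parked

static int  model_park()   { int v = ev; ev = (v > 0) ? 0 : v - 1; return v; }
static void model_unpark() { ev = 1; } // -1, 0 and 1 all become 1

int main() {
  model_unpark();                      //  0 :=> 1
  model_unpark();                      //  1 :=> 1  (permits never exceed one)
  assert(model_park() == 1);           //  consumes the permit, returns at once
  assert(model_park() == 0);           //  0 :=> -1; the real park() blocks here
  assert(ev == -1);
  model_unpark();                      // -1 :=> 1; the real unpark() also signals
  ev = 0;                              // the woken park() stores 0 and returns
  return 0;
}
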
5118 
5119 
5120 // JSR166
5121 // -------------------------------------------------------
5122 
5123 /*
5124  * The Solaris and Linux implementations of park/unpark are fairly
5125  * conservative for now, but can be improved. They currently use a
5126  * mutex/condvar pair, plus a count.
5127  * Park decrements count if > 0, else does a condvar wait.  Unpark
5128  * sets count to 1 and signals condvar.  Only one thread ever waits
5129  * on the condvar. Contention seen when trying to park implies that someone
5130  * is unparking you, so don't wait. And spurious returns are fine, so there


5175     if (secs >= MAX_SECS) {
5176       absTime->tv_sec = max_secs;
5177       absTime->tv_nsec = 0;
5178     }
5179     else {
5180       absTime->tv_sec = now.tv_sec + secs;
5181       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
5182       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
5183         absTime->tv_nsec -= NANOSECS_PER_SEC;
5184         ++absTime->tv_sec; // note: this must be <= max_secs
5185       }
5186     }
5187   }
5188   assert(absTime->tv_sec >= 0, "tv_sec < 0");
5189   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
5190   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
5191   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
5192 }
5193 
5194 void Parker::park(bool isAbsolute, jlong time) {
5195   // Ideally we'd do something useful while spinning, such
5196   // as calling unpackTime().
5197 
5198   // Optional fast-path check:
5199   // Return immediately if a permit is available.
5200   // We depend on Atomic::xchg() having full barrier semantics
5201   // since we are doing a lock-free update to _counter.
5202   if (Atomic::xchg(0, &_counter) > 0) return;


5203 
5204   Thread* thread = Thread::current();
5205   assert(thread->is_Java_thread(), "Must be JavaThread");
5206   JavaThread *jt = (JavaThread *)thread;
5207 
5208   // Optional optimization -- avoid state transitions if there's an interrupt pending.
5209   // Check interrupt before trying to wait
5210   if (Thread::is_interrupted(thread, false)) {
5211     return;
5212   }
5213 
5214   // Next, demultiplex/decode time arguments
5215   timespec absTime;
5216   if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
5217     return;
5218   }
5219   if (time > 0) {
5220     unpackTime(&absTime, isAbsolute, time);
5221   }
5222 
5223 
5224   // Enter safepoint region
5225   // Beware of deadlocks such as 6317397.
5226   // The per-thread Parker:: mutex is a classic leaf-lock.
5227   // In particular a thread must never block on the Threads_lock while
5228   // holding the Parker:: mutex.  If safepoints are pending, both the
5229   // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
5230   ThreadBlockInVM tbivm(jt);
5231 
5232   // Don't wait if we cannot get the lock, since interference arises from
5233   // unblocking.  Also, check the interrupt state before trying to wait.
5234   if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
5235     return;
5236   }
5237 
5238   int status ;
5239   if (_counter > 0)  { // no wait needed
5240     _counter = 0;
5241     status = pthread_mutex_unlock(_mutex);
5242     assert (status == 0, "invariant") ;
5243     // Paranoia to ensure our locked and lock-free paths interact
5244   // correctly with each other and Java-level accesses.
5245     OrderAccess::fence();
5246     return;
5247   }
5248 
5249 #ifdef ASSERT
5250   // Don't catch signals while blocked; let the running threads have the signals.
5251   // (This allows a debugger to break into the running thread.)
5252   sigset_t oldsigs;
5253   sigset_t* allowdebug_blocked = os::Linux::allowdebug_blocked_signals();
5254   pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
5255 #endif
5256 
5257   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5258   jt->set_suspend_equivalent();
5259   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
5260 
5261   if (time == 0) {
5262     status = pthread_cond_wait (_cond, _mutex) ;
5263   } else {
5264     status = os::Linux::safe_cond_timedwait (_cond, _mutex, &absTime) ;
5265     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
5266       pthread_cond_destroy (_cond) ;
5267       pthread_cond_init    (_cond, NULL);
5268     }
5269   }
5270   assert_status(status == 0 || status == EINTR ||
5271                 status == ETIME || status == ETIMEDOUT,
5272                 status, "cond_timedwait");
5273 
5274 #ifdef ASSERT
5275   pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
5276 #endif
5277 
5278   _counter = 0 ;
5279   status = pthread_mutex_unlock(_mutex) ;
5280   assert_status(status == 0, status, "invariant") ;
5281   // Paranoia to ensure our locked and lock-free paths interact
5282   // correctly with each other and Java-level accesses.
5283   OrderAccess::fence();
5284 
5285   // If externally suspended while waiting, re-suspend
5286   if (jt->handle_special_suspend_equivalent_condition()) {
5287     jt->java_suspend_self();
5288   }


5289 }
5290 
5291 void Parker::unpark() {
5292   int s, status ;
5293   status = pthread_mutex_lock(_mutex);
5294   assert (status == 0, "invariant") ;
5295   s = _counter;
5296   _counter = 1;
5297   if (s < 1) {
5298      if (WorkAroundNPTLTimedWaitHang) {
5299         status = pthread_cond_signal (_cond) ;
5300         assert (status == 0, "invariant") ;
5301         status = pthread_mutex_unlock(_mutex);
5302         assert (status == 0, "invariant") ;
5303      } else {
5304         status = pthread_mutex_unlock(_mutex);
5305         assert (status == 0, "invariant") ;
5306         status = pthread_cond_signal (_cond) ;
5307         assert (status == 0, "invariant") ;
5308      }