
src/hotspot/os/solaris/os_solaris.cpp





1007 intx os::current_thread_id() {
1008   return (intx)thr_self();
1009 }
1010 
1011 static pid_t _initial_pid = 0;
1012 
1013 int os::current_process_id() {
1014   return (int)(_initial_pid ? _initial_pid : getpid());
1015 }
1016 
1017 // gethrtime() should be monotonic according to the documentation,
1018 // but some virtualized platforms are known to break this guarantee.
1019 // getTimeNanos() must be guaranteed not to move backwards, so we
1020 // are forced to add a check here.
1021 inline hrtime_t getTimeNanos() {
1022   const hrtime_t now = gethrtime();
1023   const hrtime_t prev = max_hrtime;
1024   if (now <= prev) {
1025     return prev;   // same or retrograde time;
1026   }
1027   const hrtime_t obsv = Atomic::cmpxchg(now, &max_hrtime, prev);
1028   assert(obsv >= prev, "invariant");   // Monotonicity
1029   // If the CAS succeeded then we're done and return "now".
1030   // If the CAS failed and the observed value "obsv" is >= now then
1031   // we should return "obsv".  If the CAS failed and now > obsv > prev then
1032   // some other thread raced this thread and installed a new value, in which case
1033   // we could either (a) retry the entire operation, (b) retry trying to install now
1034   // or (c) just return obsv.  We use (c).   No loop is required although in some cases
1035   // we might discard a higher "now" value in deference to a slightly lower but freshly
1036   // installed obsv value.   That's entirely benign -- it admits no new orderings compared
1037   // to (a) or (b) -- and greatly reduces coherence traffic.
1038   // We might also condition (c) on the magnitude of the delta between obsv and now.
1039   // Avoiding excessive CAS operations to hot RW locations is critical.
1040   // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
1041   return (prev == obsv) ? now : obsv;
1042 }
1043 
1044 // Time since start-up in seconds to a fine granularity.
1045 // Used by VMSelfDestructTimer and the MemProfiler.
1046 double os::elapsedTime() {
1047   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;


1967   // Initialize signal semaphore
1968   sig_sem = new Semaphore();
1969 }
1970 
1971 void os::signal_notify(int sig) {
1972   if (sig_sem != NULL) {
1973     Atomic::inc(&pending_signals[sig]);
1974     sig_sem->signal();
1975   } else {
1976     // With ReduceSignalUsage, the signal thread is not created and
1977     // jdk_misc_signal_init() is never called, so sig_sem is NULL.
1978     assert(ReduceSignalUsage, "signal semaphore should be created");
1979   }
1980 }
1981 
1982 static int check_pending_signals() {
1983   int ret;
1984   while (true) {
1985     for (int i = 0; i < Sigexit + 1; i++) {
1986       jint n = pending_signals[i];
1987       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1988         return i;
1989       }
1990     }
1991     JavaThread *thread = JavaThread::current();
1992     ThreadBlockInVM tbivm(thread);
1993 
1994     bool threadIsSuspended;
1995     do {
1996       thread->set_suspend_equivalent();
1997       sig_sem->wait();
1998 
1999       // were we externally suspended while we were waiting?
2000       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2001       if (threadIsSuspended) {
2002         // The semaphore has been incremented, but while we were waiting
2003         // another thread suspended us. We don't want to continue running
2004         // while suspended because that would surprise the thread that
2005         // suspended us.
2006         sig_sem->signal();
2007 


4693     abstime->tv_sec += 1;
4694     usec -= 1000000;
4695   }
4696   abstime->tv_nsec = usec * 1000;
4697   return abstime;
4698 }
4699 
4700 void os::PlatformEvent::park() {           // AKA: down()
4701   // Transitions for _Event:
4702   //   -1 => -1 : illegal
4703   //    1 =>  0 : pass - return immediately
4704   //    0 => -1 : block; then set _Event to 0 before returning
4705 
4706   // Invariant: Only the thread associated with the Event/PlatformEvent
4707   // may call park().
4708   assert(_nParked == 0, "invariant");
4709 
4710   int v;
4711   for (;;) {
4712     v = _Event;
4713     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
4714   }
4715   guarantee(v >= 0, "invariant");
4716   if (v == 0) {
4717     // Do this the hard way by blocking ...
4718     // See http://monaco.sfbay/detail.jsf?cr=5094058.
4719     int status = os::Solaris::mutex_lock(_mutex);
4720     assert_status(status == 0, status, "mutex_lock");
4721     guarantee(_nParked == 0, "invariant");
4722     ++_nParked;
4723     while (_Event < 0) {
4724       // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
4725       // Treat this the same as if the wait was interrupted
4726       // With usr/lib/lwp going to kernel, always handle ETIME
4727       status = os::Solaris::cond_wait(_cond, _mutex);
4728       if (status == ETIME) status = EINTR;
4729       assert_status(status == 0 || status == EINTR, status, "cond_wait");
4730     }
4731     --_nParked;
4732     _Event = 0;
4733     status = os::Solaris::mutex_unlock(_mutex);
4734     assert_status(status == 0, status, "mutex_unlock");
4735     // Paranoia to ensure our locked and lock-free paths interact
4736     // correctly with each other.
4737     OrderAccess::fence();
4738   }
4739 }
4740 
4741 int os::PlatformEvent::park(jlong millis) {
4742   // Transitions for _Event:
4743   //   -1 => -1 : illegal
4744   //    1 =>  0 : pass - return immediately
4745   //    0 => -1 : block; then set _Event to 0 before returning
4746 
4747   guarantee(_nParked == 0, "invariant");
4748   int v;
4749   for (;;) {
4750     v = _Event;
4751     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
4752   }
4753   guarantee(v >= 0, "invariant");
4754   if (v != 0) return OS_OK;
4755 
4756   int ret = OS_TIMEOUT;
4757   timestruc_t abst;
4758   compute_abstime(&abst, millis);
4759 
4760   // See http://monaco.sfbay/detail.jsf?cr=5094058.
4761   int status = os::Solaris::mutex_lock(_mutex);
4762   assert_status(status == 0, status, "mutex_lock");
4763   guarantee(_nParked == 0, "invariant");
4764   ++_nParked;
4765   while (_Event < 0) {
4766     int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
4767     assert_status(status == 0 || status == EINTR ||
4768                   status == ETIME || status == ETIMEDOUT,
4769                   status, "cond_timedwait");
4770     if (!FilterSpuriousWakeups) break;                // previous semantics
4771     if (status == ETIME || status == ETIMEDOUT) break;


4780   // correctly with each other.
4781   OrderAccess::fence();
4782   return ret;
4783 }
4784 
4785 void os::PlatformEvent::unpark() {
4786   // Transitions for _Event:
4787   //    0 => 1 : just return
4788   //    1 => 1 : just return
4789   //   -1 => either 0 or 1; must signal target thread
4790   //         That is, we can safely transition _Event from -1 to either
4791   //         0 or 1.
4792   // See also: "Semaphores in Plan 9" by Mullender & Cox
4793   //
4794   // Note: Forcing a transition from "-1" to "1" on an unpark() means
4795   // that it will take two back-to-back park() calls for the owning
4796   // thread to block. This has the benefit of forcing a spurious return
4797   // from the first park() call after an unpark() call which will help
4798   // shake out uses of park() and unpark() without condition variables.
4799 
4800   if (Atomic::xchg(1, &_Event) >= 0) return;
4801 
4802   // If the thread associated with the event was parked, wake it.
4803   // Wait for the thread assoc with the PlatformEvent to vacate.
4804   int status = os::Solaris::mutex_lock(_mutex);
4805   assert_status(status == 0, status, "mutex_lock");
4806   int AnyWaiters = _nParked;
4807   status = os::Solaris::mutex_unlock(_mutex);
4808   assert_status(status == 0, status, "mutex_unlock");
4809   guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
4810   if (AnyWaiters != 0) {
4811     // Note that we signal() *after* dropping the lock for "immortal" Events.
4812     // This is safe and avoids a common class of  futile wakeups.  In rare
4813     // circumstances this can cause a thread to return prematurely from
4814     // cond_{timed}wait() but the spurious wakeup is benign and the victim
4815     // will simply re-test the condition and re-park itself.
4816     // This provides particular benefit if the underlying platform does not
4817     // provide wait morphing.
4818     status = os::Solaris::cond_signal(_cond);
4819     assert_status(status == 0, status, "cond_signal");
4820   }


4879       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4880         absTime->tv_nsec -= NANOSECS_PER_SEC;
4881         ++absTime->tv_sec; // note: this must be <= max_secs
4882       }
4883     }
4884   }
4885   assert(absTime->tv_sec >= 0, "tv_sec < 0");
4886   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4887   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4888   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4889 }
4890 
4891 void Parker::park(bool isAbsolute, jlong time) {
4892   // Ideally we'd do something useful while spinning, such
4893   // as calling unpackTime().
4894 
4895   // Optional fast-path check:
4896   // Return immediately if a permit is available.
4897   // We depend on Atomic::xchg() having full barrier semantics
4898   // since we are doing a lock-free update to _counter.
4899   if (Atomic::xchg(0, &_counter) > 0) return;
4900 
4901   // Optional fast-exit: Check interrupt before trying to wait
4902   Thread* thread = Thread::current();
4903   assert(thread->is_Java_thread(), "Must be JavaThread");
4904   JavaThread *jt = (JavaThread *)thread;
4905   if (jt->is_interrupted(false)) {
4906     return;
4907   }
4908 
4909   // First, demultiplex/decode time arguments
4910   timespec absTime;
4911   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
4912     return;
4913   }
4914   if (time > 0) {
4915     // Warning: this code might be exposed to the old Solaris time
4916     // round-down bugs.  Grep "roundingFix" for details.
4917     unpackTime(&absTime, isAbsolute, time);
4918   }
4919 




1007 intx os::current_thread_id() {
1008   return (intx)thr_self();
1009 }
1010 
1011 static pid_t _initial_pid = 0;
1012 
1013 int os::current_process_id() {
1014   return (int)(_initial_pid ? _initial_pid : getpid());
1015 }
1016 
1017 // gethrtime() should be monotonic according to the documentation,
1018 // but some virtualized platforms are known to break this guarantee.
1019 // getTimeNanos() must be guaranteed not to move backwards, so we
1020 // are forced to add a check here.
1021 inline hrtime_t getTimeNanos() {
1022   const hrtime_t now = gethrtime();
1023   const hrtime_t prev = max_hrtime;
1024   if (now <= prev) {
1025     return prev;   // same or retrograde time;
1026   }
1027   const hrtime_t obsv = Atomic::cmpxchg(&max_hrtime, prev, now);
1028   assert(obsv >= prev, "invariant");   // Monotonicity
1029   // If the CAS succeeded then we're done and return "now".
1030   // If the CAS failed and the observed value "obsv" is >= now then
1031   // we should return "obsv".  If the CAS failed and now > obsv > prev then
1032   // some other thread raced this thread and installed a new value, in which case
1033   // we could either (a) retry the entire operation, (b) retry trying to install now
1034   // or (c) just return obsv.  We use (c).   No loop is required although in some cases
1035   // we might discard a higher "now" value in deference to a slightly lower but freshly
1036   // installed obsv value.   That's entirely benign -- it admits no new orderings compared
1037   // to (a) or (b) -- and greatly reduces coherence traffic.
1038   // We might also condition (c) on the magnitude of the delta between obsv and now.
1039   // Avoiding excessive CAS operations to hot RW locations is critical.
1040   // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
1041   return (prev == obsv) ? now : obsv;
1042 }
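
The guard above performs exactly one CAS and, if it loses the race, returns whatever value the winning thread installed (option (c) in the comment). A minimal standalone sketch of the same pattern in plain C++11 follows; raw_clock() and g_max_seen are hypothetical stand-ins for gethrtime() and max_hrtime, not HotSpot code.

#include <atomic>
#include <chrono>
#include <cstdint>

static std::atomic<int64_t> g_max_seen{0};   // plays the role of max_hrtime

// Hypothetical raw source; assume it is usually, but not always, monotonic.
static int64_t raw_clock() {
  using namespace std::chrono;
  return duration_cast<nanoseconds>(
      steady_clock::now().time_since_epoch()).count();
}

// Never moves backwards, even if raw_clock() does.
int64_t monotonic_now() {
  const int64_t now  = raw_clock();
  const int64_t prev = g_max_seen.load();
  if (now <= prev) {
    return prev;                             // same or retrograde reading
  }
  int64_t expected = prev;
  // Single CAS, no retry loop: if another thread installed a newer value we
  // simply return that value, trading a slightly stale (but still monotonic)
  // result for less coherence traffic on the shared word.
  if (g_max_seen.compare_exchange_strong(expected, now)) {
    return now;                              // we advanced the high-water mark
  }
  return expected;                           // somebody else advanced it
}
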
1043 
1044 // Time since start-up in seconds to a fine granularity.
1045 // Used by VMSelfDestructTimer and the MemProfiler.
1046 double os::elapsedTime() {
1047   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;


1967   // Initialize signal semaphore
1968   sig_sem = new Semaphore();
1969 }
1970 
1971 void os::signal_notify(int sig) {
1972   if (sig_sem != NULL) {
1973     Atomic::inc(&pending_signals[sig]);
1974     sig_sem->signal();
1975   } else {
1976     // With ReduceSignalUsage, the signal thread is not created and
1977     // jdk_misc_signal_init() is never called, so sig_sem is NULL.
1978     assert(ReduceSignalUsage, "signal semaphore should be created");
1979   }
1980 }
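
signal_notify() above and check_pending_signals() below implement a simple hand-off: asynchronous signals bump a per-signal counter and post a semaphore, and a dispatcher thread later claims one occurrence at a time with a CAS decrement. A simplified self-contained sketch of that hand-off follows; NSIG_SLOTS, Semaphore, notify() and next_pending() are illustrative names only, and a production implementation such as HotSpot's relies on an async-signal-safe semaphore rather than the mutex/condvar toy used here.

#include <atomic>
#include <condition_variable>
#include <mutex>

constexpr int NSIG_SLOTS = 32;
static std::atomic<int> pending[NSIG_SLOTS];   // zero-initialized counters

class Semaphore {                  // tiny counting semaphore for the sketch
  std::mutex m; std::condition_variable cv; int count = 0;
 public:
  void signal() { std::lock_guard<std::mutex> l(m); ++count; cv.notify_one(); }
  void wait()   { std::unique_lock<std::mutex> l(m);
                  cv.wait(l, [&]{ return count > 0; }); --count; }
};
static Semaphore sem;

void notify(int sig) {             // producer side, as in os::signal_notify()
  pending[sig].fetch_add(1);
  sem.signal();
}

int next_pending() {               // consumer side, as in check_pending_signals()
  for (;;) {
    for (int i = 0; i < NSIG_SLOTS; i++) {
      int n = pending[i].load();
      // Claim exactly one occurrence; the CAS fails if a racing consumer or a
      // concurrent notify() changed the counter, in which case we just rescan.
      if (n > 0 && pending[i].compare_exchange_strong(n, n - 1)) {
        return i;
      }
    }
    sem.wait();                    // nothing pending: block until notified
  }
}
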
1981 
1982 static int check_pending_signals() {
1983   int ret;
1984   while (true) {
1985     for (int i = 0; i < Sigexit + 1; i++) {
1986       jint n = pending_signals[i];
1987       if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
1988         return i;
1989       }
1990     }
1991     JavaThread *thread = JavaThread::current();
1992     ThreadBlockInVM tbivm(thread);
1993 
1994     bool threadIsSuspended;
1995     do {
1996       thread->set_suspend_equivalent();
1997       sig_sem->wait();
1998 
1999       // were we externally suspended while we were waiting?
2000       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2001       if (threadIsSuspended) {
2002         // The semaphore has been incremented, but while we were waiting
2003         // another thread suspended us. We don't want to continue running
2004         // while suspended because that would surprise the thread that
2005         // suspended us.
2006         sig_sem->signal();
2007 


4693     abstime->tv_sec += 1;
4694     usec -= 1000000;
4695   }
4696   abstime->tv_nsec = usec * 1000;
4697   return abstime;
4698 }
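
The hunk above is the tail of the helper that turns a relative timeout in milliseconds into the absolute deadline handed to cond_timedwait(). A self-contained sketch of the whole conversion, assuming a gettimeofday()-based wall-clock deadline (to_abstime() is an illustrative name, not the HotSpot helper):

#include <sys/time.h>
#include <time.h>

static timespec* to_abstime(timespec* abstime, long millis) {
  if (millis < 0) millis = 0;
  struct timeval now;
  gettimeofday(&now, nullptr);                 // wall-clock-based deadline
  long secs = millis / 1000;
  long usec = (millis % 1000) * 1000 + now.tv_usec;
  abstime->tv_sec = now.tv_sec + secs;
  if (usec >= 1000000) {                       // carry microseconds into seconds
    abstime->tv_sec += 1;
    usec -= 1000000;
  }
  abstime->tv_nsec = usec * 1000;              // store the remainder as nanoseconds
  return abstime;
}
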
4699 
4700 void os::PlatformEvent::park() {           // AKA: down()
4701   // Transitions for _Event:
4702   //   -1 => -1 : illegal
4703   //    1 =>  0 : pass - return immediately
4704   //    0 => -1 : block; then set _Event to 0 before returning
4705 
4706   // Invariant: Only the thread associated with the Event/PlatformEvent
4707   // may call park().
4708   assert(_nParked == 0, "invariant");
4709 
4710   int v;
4711   for (;;) {
4712     v = _Event;
4713     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
4714   }
4715   guarantee(v >= 0, "invariant");
4716   if (v == 0) {
4717     // Do this the hard way by blocking ...
4718     // See http://monaco.sfbay/detail.jsf?cr=5094058.
4719     int status = os::Solaris::mutex_lock(_mutex);
4720     assert_status(status == 0, status, "mutex_lock");
4721     guarantee(_nParked == 0, "invariant");
4722     ++_nParked;
4723     while (_Event < 0) {
4724       // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
4725       // Treat this the same as if the wait was interrupted
4726       // With usr/lib/lwp going to kernel, always handle ETIME
4727       status = os::Solaris::cond_wait(_cond, _mutex);
4728       if (status == ETIME) status = EINTR;
4729       assert_status(status == 0 || status == EINTR, status, "cond_wait");
4730     }
4731     --_nParked;
4732     _Event = 0;
4733     status = os::Solaris::mutex_unlock(_mutex);
4734     assert_status(status == 0, status, "mutex_unlock");
4735     // Paranoia to ensure our locked and lock-free paths interact
4736     // correctly with each other.
4737     OrderAccess::fence();
4738   }
4739 }
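
The tri-state protocol documented above (1 = permit available, 0 = neutral, -1 = a thread is parked) can be modeled with portable C++11 primitives. EventSketch below is an illustration of that protocol, not HotSpot code; it also includes the matching unpark(), whose transitions are documented later in this file, with the empty lock/unlock in unpark() playing the same serialization role as sampling _nParked under _mutex.

#include <atomic>
#include <condition_variable>
#include <mutex>

class EventSketch {
  std::atomic<int> _event{0};        // 1, 0 or -1, as in os::PlatformEvent
  std::mutex _mutex;
  std::condition_variable _cond;

 public:
  void park() {                      // only the owning thread may call this
    int v = _event.load();
    while (!_event.compare_exchange_weak(v, v - 1)) { /* v was reloaded */ }
    if (v > 0) return;               // 1 -> 0: consumed a pending permit
    // 0 -> -1: block until unpark() raises the state back to >= 0.
    std::unique_lock<std::mutex> lock(_mutex);
    _cond.wait(lock, [&] { return _event.load() >= 0; });
    _event.store(0);                 // reset to neutral before returning
  }

  void unpark() {
    if (_event.exchange(1) >= 0) return;       // 0/1 -> 1: nobody is parked
    // State was -1: a thread is parked (or about to park); wake it.  Taking
    // and dropping the mutex first serializes with the parker's predicate
    // check so the notification cannot be lost.
    { std::lock_guard<std::mutex> lock(_mutex); }
    _cond.notify_one();              // signalled after releasing the lock
  }
};
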
4740 
4741 int os::PlatformEvent::park(jlong millis) {
4742   // Transitions for _Event:
4743   //   -1 => -1 : illegal
4744   //    1 =>  0 : pass - return immediately
4745   //    0 => -1 : block; then set _Event to 0 before returning
4746 
4747   guarantee(_nParked == 0, "invariant");
4748   int v;
4749   for (;;) {
4750     v = _Event;
4751     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
4752   }
4753   guarantee(v >= 0, "invariant");
4754   if (v != 0) return OS_OK;
4755 
4756   int ret = OS_TIMEOUT;
4757   timestruc_t abst;
4758   compute_abstime(&abst, millis);
4759 
4760   // See http://monaco.sfbay/detail.jsf?cr=5094058.
4761   int status = os::Solaris::mutex_lock(_mutex);
4762   assert_status(status == 0, status, "mutex_lock");
4763   guarantee(_nParked == 0, "invariant");
4764   ++_nParked;
4765   while (_Event < 0) {
4766     int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
4767     assert_status(status == 0 || status == EINTR ||
4768                   status == ETIME || status == ETIMEDOUT,
4769                   status, "cond_timedwait");
4770     if (!FilterSpuriousWakeups) break;                // previous semantics
4771     if (status == ETIME || status == ETIMEDOUT) break;


4780   // correctly with each other.
4781   OrderAccess::fence();
4782   return ret;
4783 }
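
The timed variant above follows the same protocol but waits on an absolute deadline and distinguishes OS_OK from OS_TIMEOUT, with the predicate re-check standing in for FilterSpuriousWakeups. A hypothetical sketch of that shape (again an illustration, not the HotSpot implementation):

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <mutex>

enum { OS_OK = 0, OS_TIMEOUT = -1 };     // illustrative return codes

class TimedEventSketch {
  std::atomic<int> _event{0};
  std::mutex _mutex;
  std::condition_variable _cond;

 public:
  int park(long millis) {
    int v = _event.load();
    while (!_event.compare_exchange_weak(v, v - 1)) { }
    if (v > 0) return OS_OK;             // consumed a pending permit
    auto deadline = std::chrono::steady_clock::now() +
                    std::chrono::milliseconds(millis);
    std::unique_lock<std::mutex> lock(_mutex);
    // The predicate form re-checks the state after every wakeup, so spurious
    // wakeups are filtered and we stop only on a permit or on the deadline.
    bool signalled = _cond.wait_until(lock, deadline,
                                      [&] { return _event.load() >= 0; });
    _event.store(0);                     // reset to neutral either way
    return signalled ? OS_OK : OS_TIMEOUT;
  }

  void unpark() {
    if (_event.exchange(1) >= 0) return;
    { std::lock_guard<std::mutex> lock(_mutex); }
    _cond.notify_one();
  }
};
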
4784 
4785 void os::PlatformEvent::unpark() {
4786   // Transitions for _Event:
4787   //    0 => 1 : just return
4788   //    1 => 1 : just return
4789   //   -1 => either 0 or 1; must signal target thread
4790   //         That is, we can safely transition _Event from -1 to either
4791   //         0 or 1.
4792   // See also: "Semaphores in Plan 9" by Mullender & Cox
4793   //
4794   // Note: Forcing a transition from "-1" to "1" on an unpark() means
4795   // that it will take two back-to-back park() calls for the owning
4796   // thread to block. This has the benefit of forcing a spurious return
4797   // from the first park() call after an unpark() call which will help
4798   // shake out uses of park() and unpark() without condition variables.
4799 
4800   if (Atomic::xchg(&_Event, 1) >= 0) return;
4801 
4802   // If the thread associated with the event was parked, wake it.
4803   // Wait for the thread assoc with the PlatformEvent to vacate.
4804   int status = os::Solaris::mutex_lock(_mutex);
4805   assert_status(status == 0, status, "mutex_lock");
4806   int AnyWaiters = _nParked;
4807   status = os::Solaris::mutex_unlock(_mutex);
4808   assert_status(status == 0, status, "mutex_unlock");
4809   guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
4810   if (AnyWaiters != 0) {
4811     // Note that we signal() *after* dropping the lock for "immortal" Events.
4812     // This is safe and avoids a common class of  futile wakeups.  In rare
4813     // circumstances this can cause a thread to return prematurely from
4814     // cond_{timed}wait() but the spurious wakeup is benign and the victim
4815     // will simply re-test the condition and re-park itself.
4816     // This provides particular benefit if the underlying platform does not
4817     // provide wait morphing.
4818     status = os::Solaris::cond_signal(_cond);
4819     assert_status(status == 0, status, "cond_signal");
4820   }


4879       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4880         absTime->tv_nsec -= NANOSECS_PER_SEC;
4881         ++absTime->tv_sec; // note: this must be <= max_secs
4882       }
4883     }
4884   }
4885   assert(absTime->tv_sec >= 0, "tv_sec < 0");
4886   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4887   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4888   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4889 }
4890 
4891 void Parker::park(bool isAbsolute, jlong time) {
4892   // Ideally we'd do something useful while spinning, such
4893   // as calling unpackTime().
4894 
4895   // Optional fast-path check:
4896   // Return immediately if a permit is available.
4897   // We depend on Atomic::xchg() having full barrier semantics
4898   // since we are doing a lock-free update to _counter.
4899   if (Atomic::xchg(&_counter, 0) > 0) return;
4900 
4901   // Optional fast-exit: Check interrupt before trying to wait
4902   Thread* thread = Thread::current();
4903   assert(thread->is_Java_thread(), "Must be JavaThread");
4904   JavaThread *jt = (JavaThread *)thread;
4905   if (jt->is_interrupted(false)) {
4906     return;
4907   }
4908 
4909   // First, demultiplex/decode time arguments
4910   timespec absTime;
4911   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
4912     return;
4913   }
4914   if (time > 0) {
4915     // Warning: this code might be exposed to the old Solaris time
4916     // round-down bugs.  Grep "roundingFix" for details.
4917     unpackTime(&absTime, isAbsolute, time);
4918   }
4919 
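
Parker is the permit-based primitive behind java.util.concurrent's LockSupport: the permit saturates at one, and the xchg fast path above consumes it without touching the mutex. A portable sketch of that permit protocol follows; ParkerSketch is illustrative and omits the interrupt check and timed paths shown in the hunk above.

#include <atomic>
#include <condition_variable>
#include <mutex>

class ParkerSketch {
  std::atomic<int> _counter{0};          // 0 = no permit, 1 = permit available
  std::mutex _mutex;
  std::condition_variable _cond;

 public:
  void park() {
    // Fast path: the exchange is a full barrier and consumes a pending permit.
    if (_counter.exchange(0) > 0) return;
    std::unique_lock<std::mutex> lock(_mutex);
    _cond.wait(lock, [&] { return _counter.load() > 0; });
    _counter.store(0);                   // consume the permit before returning
  }

  void unpark() {
    { std::lock_guard<std::mutex> lock(_mutex); _counter.store(1); }
    _cond.notify_one();                  // permit saturates at one
  }
};
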

