< prev index next >

src/hotspot/os/posix/os_posix.cpp

Print this page




1883   assert_status(status == 0, status, "mutex_init");
1884   _event   = 0;
1885   _nParked = 0;
1886 }
1887 
1888 void os::PlatformEvent::park() {       // AKA "down()"
1889   // Transitions for _event:
1890   //   -1 => -1 : illegal
1891   //    1 =>  0 : pass - return immediately
1892   //    0 => -1 : block; then set _event to 0 before returning
1893 
1894   // Invariant: Only the thread associated with the PlatformEvent
1895   // may call park().
1896   assert(_nParked == 0, "invariant");
1897 
1898   int v;
1899 
1900   // atomically decrement _event
1901   for (;;) {
1902     v = _event;
1903     // NOTE(review): legacy Atomic::cmpxchg argument order — appears to be
1904     // (exchange_value, dest, compare_value); confirm against atomic.hpp.
1903     if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
1904   }
1905   guarantee(v >= 0, "invariant");
1906 
1907   // v is the pre-decrement value: v == 1 means a permit was available
1908   // (1 => 0 transition) so we return immediately; v == 0 means we have
1909   // just published _event == -1 and must block until unpark().
1907   if (v == 0) { // Do this the hard way by blocking ...
1908     int status = pthread_mutex_lock(_mutex);
1909     assert_status(status == 0, status, "mutex_lock");
1910     guarantee(_nParked == 0, "invariant");
1911     ++_nParked;
1912     while (_event < 0) {
1913       // OS-level "spurious wakeups" are ignored
1914       status = pthread_cond_wait(_cond, _mutex);
1915       assert_status(status == 0, status, "cond_wait");
1916     }
1917     --_nParked;
1918 
1919     // Reset to "no permit" before returning, per the 0 => -1 transition
1920     // contract documented above.
1919     _event = 0;
1920     status = pthread_mutex_unlock(_mutex);
1921     assert_status(status == 0, status, "mutex_unlock");
1922     // Paranoia to ensure our locked and lock-free paths interact
1923     // correctly with each other.
1924     OrderAccess::fence();
1925   }
1926   guarantee(_event >= 0, "invariant");
1927 }
1928 
1929 int os::PlatformEvent::park(jlong millis) {
1930   // Transitions for _event:
1931   //   -1 => -1 : illegal
1932   //    1 =>  0 : pass - return immediately
1933   //    0 => -1 : block; then set _event to 0 before returning
1934 
1935   // Invariant: Only the thread associated with the Event/PlatformEvent
1936   // may call park().
1937   assert(_nParked == 0, "invariant");
1938 
1939   int v;
1940   // atomically decrement _event
1941   for (;;) {
1942     v = _event;
1943     if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
1944   }
1945   guarantee(v >= 0, "invariant");
1946 
1947   if (v == 0) { // Do this the hard way by blocking ...
1948     struct timespec abst;
1949     to_abstime(&abst, millis_to_nanos(millis), false, false);
1950 
1951     int ret = OS_TIMEOUT;
1952     int status = pthread_mutex_lock(_mutex);
1953     assert_status(status == 0, status, "mutex_lock");
1954     guarantee(_nParked == 0, "invariant");
1955     ++_nParked;
1956 
1957     while (_event < 0) {
1958       status = pthread_cond_timedwait(_cond, _mutex, &abst);
1959       assert_status(status == 0 || status == ETIMEDOUT,
1960                     status, "cond_timedwait");
1961       // OS-level "spurious wakeups" are ignored unless the archaic
1962       // FilterSpuriousWakeups is set false. That flag should be obsoleted.
1963       if (!FilterSpuriousWakeups) break;


1981 }
1982 
1983 void os::PlatformEvent::unpark() {
1984   // Transitions for _event:
1985   //    0 => 1 : just return
1986   //    1 => 1 : just return
1987   //   -1 => either 0 or 1; must signal target thread
1988   //         That is, we can safely transition _event from -1 to either
1989   //         0 or 1.
1990   // See also: "Semaphores in Plan 9" by Mullender & Cox
1991   //
1992   // Note: Forcing a transition from "-1" to "1" on an unpark() means
1993   // that it will take two back-to-back park() calls for the owning
1994   // thread to block. This has the benefit of forcing a spurious return
1995   // from the first park() call after an unpark() call which will help
1996   // shake out uses of park() and unpark() without checking state conditions
1997   // properly. This spurious return doesn't manifest itself in any user code
1998   // but only in the correctly written condition checking loops of ObjectMonitor,
1999   // Mutex/Monitor, Thread::muxAcquire and JavaThread::sleep
2000 
2001   if (Atomic::xchg(1, &_event) >= 0) return;
2002 
2003   int status = pthread_mutex_lock(_mutex);
2004   assert_status(status == 0, status, "mutex_lock");
2005   int anyWaiters = _nParked;
2006   assert(anyWaiters == 0 || anyWaiters == 1, "invariant");
2007   status = pthread_mutex_unlock(_mutex);
2008   assert_status(status == 0, status, "mutex_unlock");
2009 
2010   // Note that we signal() *after* dropping the lock for "immortal" Events.
2011   // This is safe and avoids a common class of futile wakeups.  In rare
2012   // circumstances this can cause a thread to return prematurely from
2013   // cond_{timed}wait() but the spurious wakeup is benign and the victim
2014   // will simply re-test the condition and re-park itself.
2015   // This provides particular benefit if the underlying platform does not
2016   // provide wait morphing.
2017 
2018   if (anyWaiters != 0) {
2019     status = pthread_cond_signal(_cond);
2020     assert_status(status == 0, status, "cond_signal");
2021   }


2029   assert_status(status == 0, status, "cond_init rel");
2030   status = pthread_cond_init(&_cond[ABS_INDEX], NULL);
2031   assert_status(status == 0, status, "cond_init abs");
2032   status = pthread_mutex_init(_mutex, _mutexAttr);
2033   assert_status(status == 0, status, "mutex_init");
2034   _cur_index = -1; // mark as unused
2035 }
2036 
2037 // Parker::park decrements count if > 0, else does a condvar wait.  Unpark
2038 // sets count to 1 and signals condvar.  Only one thread ever waits
2039 // on the condvar. Contention seen when trying to park implies that someone
2040 // is unparking you, so don't wait. And spurious returns are fine, so there
2041 // is no need to track notifications.
2042 
2043 void Parker::park(bool isAbsolute, jlong time) {
2044 
2045   // Optional fast-path check:
2046   // Return immediately if a permit is available.
2047   // We depend on Atomic::xchg() having full barrier semantics
2048   // since we are doing a lock-free update to _counter.
2049   if (Atomic::xchg(0, &_counter) > 0) return;
2050 
2051   Thread* thread = Thread::current();
2052   assert(thread->is_Java_thread(), "Must be JavaThread");
2053   JavaThread *jt = (JavaThread *)thread;
2054 
2055   // Optional optimization -- avoid state transitions if there's
2056   // an interrupt pending.
2057   if (jt->is_interrupted(false)) {
2058     return;
2059   }
2060 
2061   // Next, demultiplex/decode time arguments
2062   struct timespec absTime;
2063   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
2064     return;
2065   }
2066   if (time > 0) {
2067     to_abstime(&absTime, time, isAbsolute, false);
2068   }
2069 




1883   assert_status(status == 0, status, "mutex_init");
1884   _event   = 0;
1885   _nParked = 0;
1886 }
1887 
1888 void os::PlatformEvent::park() {       // AKA "down()"
1889   // Transitions for _event:
1890   //   -1 => -1 : illegal
1891   //    1 =>  0 : pass - return immediately
1892   //    0 => -1 : block; then set _event to 0 before returning
1893 
1894   // Invariant: Only the thread associated with the PlatformEvent
1895   // may call park().
1896   assert(_nParked == 0, "invariant");
1897 
1898   int v;
1899 
1900   // atomically decrement _event
1901   for (;;) {
1902     v = _event;
1903     // NOTE(review): updated Atomic::cmpxchg argument order — appears to be
1904     // (dest, compare_value, exchange_value); confirm against atomic.hpp.
1903     if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
1904   }
1905   guarantee(v >= 0, "invariant");
1906 
1907   // v is the pre-decrement value: v == 1 means a permit was available
1908   // (1 => 0 transition) so we return immediately; v == 0 means we have
1909   // just published _event == -1 and must block until unpark().
1907   if (v == 0) { // Do this the hard way by blocking ...
1908     int status = pthread_mutex_lock(_mutex);
1909     assert_status(status == 0, status, "mutex_lock");
1910     guarantee(_nParked == 0, "invariant");
1911     ++_nParked;
1912     while (_event < 0) {
1913       // OS-level "spurious wakeups" are ignored
1914       status = pthread_cond_wait(_cond, _mutex);
1915       assert_status(status == 0, status, "cond_wait");
1916     }
1917     --_nParked;
1918 
1919     // Reset to "no permit" before returning, per the 0 => -1 transition
1920     // contract documented above.
1919     _event = 0;
1920     status = pthread_mutex_unlock(_mutex);
1921     assert_status(status == 0, status, "mutex_unlock");
1922     // Paranoia to ensure our locked and lock-free paths interact
1923     // correctly with each other.
1924     OrderAccess::fence();
1925   }
1926   guarantee(_event >= 0, "invariant");
1927 }
1928 
1929 int os::PlatformEvent::park(jlong millis) {
1930   // Transitions for _event:
1931   //   -1 => -1 : illegal
1932   //    1 =>  0 : pass - return immediately
1933   //    0 => -1 : block; then set _event to 0 before returning
1934 
1935   // Invariant: Only the thread associated with the Event/PlatformEvent
1936   // may call park().
1937   assert(_nParked == 0, "invariant");
1938 
1939   int v;
1940   // atomically decrement _event
1941   for (;;) {
1942     v = _event;
1943     if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
1944   }
1945   guarantee(v >= 0, "invariant");
1946 
1947   if (v == 0) { // Do this the hard way by blocking ...
1948     struct timespec abst;
1949     to_abstime(&abst, millis_to_nanos(millis), false, false);
1950 
1951     int ret = OS_TIMEOUT;
1952     int status = pthread_mutex_lock(_mutex);
1953     assert_status(status == 0, status, "mutex_lock");
1954     guarantee(_nParked == 0, "invariant");
1955     ++_nParked;
1956 
1957     while (_event < 0) {
1958       status = pthread_cond_timedwait(_cond, _mutex, &abst);
1959       assert_status(status == 0 || status == ETIMEDOUT,
1960                     status, "cond_timedwait");
1961       // OS-level "spurious wakeups" are ignored unless the archaic
1962       // FilterSpuriousWakeups is set false. That flag should be obsoleted.
1963       if (!FilterSpuriousWakeups) break;


1981 }
1982 
1983 void os::PlatformEvent::unpark() {
1984   // Transitions for _event:
1985   //    0 => 1 : just return
1986   //    1 => 1 : just return
1987   //   -1 => either 0 or 1; must signal target thread
1988   //         That is, we can safely transition _event from -1 to either
1989   //         0 or 1.
1990   // See also: "Semaphores in Plan 9" by Mullender & Cox
1991   //
1992   // Note: Forcing a transition from "-1" to "1" on an unpark() means
1993   // that it will take two back-to-back park() calls for the owning
1994   // thread to block. This has the benefit of forcing a spurious return
1995   // from the first park() call after an unpark() call which will help
1996   // shake out uses of park() and unpark() without checking state conditions
1997   // properly. This spurious return doesn't manifest itself in any user code
1998   // but only in the correctly written condition checking loops of ObjectMonitor,
1999   // Mutex/Monitor, Thread::muxAcquire and JavaThread::sleep
2000 
2001   if (Atomic::xchg(&_event, 1) >= 0) return;
2002 
2003   int status = pthread_mutex_lock(_mutex);
2004   assert_status(status == 0, status, "mutex_lock");
2005   int anyWaiters = _nParked;
2006   assert(anyWaiters == 0 || anyWaiters == 1, "invariant");
2007   status = pthread_mutex_unlock(_mutex);
2008   assert_status(status == 0, status, "mutex_unlock");
2009 
2010   // Note that we signal() *after* dropping the lock for "immortal" Events.
2011   // This is safe and avoids a common class of futile wakeups.  In rare
2012   // circumstances this can cause a thread to return prematurely from
2013   // cond_{timed}wait() but the spurious wakeup is benign and the victim
2014   // will simply re-test the condition and re-park itself.
2015   // This provides particular benefit if the underlying platform does not
2016   // provide wait morphing.
2017 
2018   if (anyWaiters != 0) {
2019     status = pthread_cond_signal(_cond);
2020     assert_status(status == 0, status, "cond_signal");
2021   }


2029   assert_status(status == 0, status, "cond_init rel");
2030   status = pthread_cond_init(&_cond[ABS_INDEX], NULL);
2031   assert_status(status == 0, status, "cond_init abs");
2032   status = pthread_mutex_init(_mutex, _mutexAttr);
2033   assert_status(status == 0, status, "mutex_init");
2034   _cur_index = -1; // mark as unused
2035 }
2036 
2037 // Parker::park decrements count if > 0, else does a condvar wait.  Unpark
2038 // sets count to 1 and signals condvar.  Only one thread ever waits
2039 // on the condvar. Contention seen when trying to park implies that someone
2040 // is unparking you, so don't wait. And spurious returns are fine, so there
2041 // is no need to track notifications.
2042 
2043 void Parker::park(bool isAbsolute, jlong time) {
2044 
2045   // Optional fast-path check:
2046   // Return immediately if a permit is available.
2047   // We depend on Atomic::xchg() having full barrier semantics
2048   // since we are doing a lock-free update to _counter.
2049   if (Atomic::xchg(&_counter, 0) > 0) return;
2050 
2051   Thread* thread = Thread::current();
2052   assert(thread->is_Java_thread(), "Must be JavaThread");
2053   JavaThread *jt = (JavaThread *)thread;
2054 
2055   // Optional optimization -- avoid state transitions if there's
2056   // an interrupt pending.
2057   if (jt->is_interrupted(false)) {
2058     return;
2059   }
2060 
2061   // Next, demultiplex/decode time arguments
2062   struct timespec absTime;
2063   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
2064     return;
2065   }
2066   if (time > 0) {
2067     to_abstime(&absTime, time, isAbsolute, false);
2068   }
2069 


< prev index next >