
src/hotspot/os/posix/os_posix.cpp


Old version of the hunk:

1883   assert_status(status == 0, status, "mutex_init");
1884   _event   = 0;
1885   _nParked = 0;
1886 }
1887 
1888 void os::PlatformEvent::park() {       // AKA "down()"
1889   // Transitions for _event:
1890   //   -1 => -1 : illegal
1891   //    1 =>  0 : pass - return immediately
1892   //    0 => -1 : block; then set _event to 0 before returning
1893 
1894   // Invariant: Only the thread associated with the PlatformEvent
1895   // may call park().
1896   assert(_nParked == 0, "invariant");
1897 
1898   int v;
1899 
1900   // atomically decrement _event
1901   for (;;) {
1902     v = _event;
1903     if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
1904   }
1905   guarantee(v >= 0, "invariant");
1906 
1907   if (v == 0) { // Do this the hard way by blocking ...
1908     int status = pthread_mutex_lock(_mutex);
1909     assert_status(status == 0, status, "mutex_lock");
1910     guarantee(_nParked == 0, "invariant");
1911     ++_nParked;
1912     while (_event < 0) {
1913       // OS-level "spurious wakeups" are ignored
1914       status = pthread_cond_wait(_cond, _mutex);
1915       assert_status(status == 0, status, "cond_wait");
1916     }
1917     --_nParked;
1918 
1919     _event = 0;
1920     status = pthread_mutex_unlock(_mutex);
1921     assert_status(status == 0, status, "mutex_unlock");
1922     // Paranoia to ensure our locked and lock-free paths interact
1923     // correctly with each other.
1924     OrderAccess::fence();
1925   }
1926   guarantee(_event >= 0, "invariant");
1927 }
1928 
1929 int os::PlatformEvent::park(jlong millis) {
1930   // Transitions for _event:
1931   //   -1 => -1 : illegal
1932   //    1 =>  0 : pass - return immediately
1933   //    0 => -1 : block; then set _event to 0 before returning
1934 
1935   // Invariant: Only the thread associated with the Event/PlatformEvent
1936   // may call park().
1937   assert(_nParked == 0, "invariant");
1938 
1939   int v;
1940   // atomically decrement _event
1941   for (;;) {
1942     v = _event;
1943     if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
1944   }
1945   guarantee(v >= 0, "invariant");
1946 
1947   if (v == 0) { // Do this the hard way by blocking ...
1948     struct timespec abst;
1949     to_abstime(&abst, millis_to_nanos(millis), false, false);
1950 
1951     int ret = OS_TIMEOUT;
1952     int status = pthread_mutex_lock(_mutex);
1953     assert_status(status == 0, status, "mutex_lock");
1954     guarantee(_nParked == 0, "invariant");
1955     ++_nParked;
1956 
1957     while (_event < 0) {
1958       status = pthread_cond_timedwait(_cond, _mutex, &abst);
1959       assert_status(status == 0 || status == ETIMEDOUT,
1960                     status, "cond_timedwait");
1961       // OS-level "spurious wakeups" are ignored unless the archaic
1962       // FilterSpuriousWakeups is set false. That flag should be obsoleted.
1963       if (!FilterSpuriousWakeups) break;


New version of the same hunk; the only change is the argument order of the Atomic::cmpxchg calls at lines 1903 and 1943 (the destination pointer now comes first):

1883   assert_status(status == 0, status, "mutex_init");
1884   _event   = 0;
1885   _nParked = 0;
1886 }
1887 
1888 void os::PlatformEvent::park() {       // AKA "down()"
1889   // Transitions for _event:
1890   //   -1 => -1 : illegal
1891   //    1 =>  0 : pass - return immediately
1892   //    0 => -1 : block; then set _event to 0 before returning
1893 
1894   // Invariant: Only the thread associated with the PlatformEvent
1895   // may call park().
1896   assert(_nParked == 0, "invariant");
1897 
1898   int v;
1899 
1900   // atomically decrement _event
1901   for (;;) {
1902     v = _event;
1903     if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
1904   }
1905   guarantee(v >= 0, "invariant");
1906 
1907   if (v == 0) { // Do this the hard way by blocking ...
1908     int status = pthread_mutex_lock(_mutex);
1909     assert_status(status == 0, status, "mutex_lock");
1910     guarantee(_nParked == 0, "invariant");
1911     ++_nParked;
1912     while (_event < 0) {
1913       // OS-level "spurious wakeups" are ignored
1914       status = pthread_cond_wait(_cond, _mutex);
1915       assert_status(status == 0, status, "cond_wait");
1916     }
1917     --_nParked;
1918 
1919     _event = 0;
1920     status = pthread_mutex_unlock(_mutex);
1921     assert_status(status == 0, status, "mutex_unlock");
1922     // Paranoia to ensure our locked and lock-free paths interact
1923     // correctly with each other.
1924     OrderAccess::fence();
1925   }
1926   guarantee(_event >= 0, "invariant");
1927 }
1928 
1929 int os::PlatformEvent::park(jlong millis) {
1930   // Transitions for _event:
1931   //   -1 => -1 : illegal
1932   //    1 =>  0 : pass - return immediately
1933   //    0 => -1 : block; then set _event to 0 before returning
1934 
1935   // Invariant: Only the thread associated with the Event/PlatformEvent
1936   // may call park().
1937   assert(_nParked == 0, "invariant");
1938 
1939   int v;
1940   // atomically decrement _event
1941   for (;;) {
1942     v = _event;
1943     if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
1944   }
1945   guarantee(v >= 0, "invariant");
1946 
1947   if (v == 0) { // Do this the hard way by blocking ...
1948     struct timespec abst;
1949     to_abstime(&abst, millis_to_nanos(millis), false, false);
1950 
1951     int ret = OS_TIMEOUT;
1952     int status = pthread_mutex_lock(_mutex);
1953     assert_status(status == 0, status, "mutex_lock");
1954     guarantee(_nParked == 0, "invariant");
1955     ++_nParked;
1956 
1957     while (_event < 0) {
1958       status = pthread_cond_timedwait(_cond, _mutex, &abst);
1959       assert_status(status == 0 || status == ETIMEDOUT,
1960                     status, "cond_timedwait");
1961       // OS-level "spurious wakeups" are ignored unless the archaic
1962       // FilterSpuriousWakeups is set false. That flag should be obsoleted.
1963       if (!FilterSpuriousWakeups) break;
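
The retry loop at lines 1901-1904 atomically decrements _event while remembering the value it observed; that observed value decides between the fast path and the blocking path. As a rough standalone analogy (plain std::atomic, not HotSpot's Atomic class; the helper name is made up), the same "decrement and return the old value" loop looks like this:

  #include <atomic>

  // Analogy only: retry a compare-and-swap until the decrement sticks,
  // and return the value observed just before the decrement.
  static int decrement_returning_old(std::atomic<int>& event) {
    int v = event.load();
    // On failure, compare_exchange_weak refreshes v with the value
    // currently stored, so the loop retries against that fresh value.
    while (!event.compare_exchange_weak(v, v - 1)) {
      // retry
    }
    return v;
  }

In park(), an observed value of 1 means a permit was available and the call returns immediately (the 1 => 0 transition in the comment); an observed value of 0 means the thread has to block (0 => -1).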


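For readers unfamiliar with the protocol those comments describe, here is a minimal self-contained sketch of the same idea. It is NOT the HotSpot class (the name SimpleEvent is made up, and the asserts, timed variant, FilterSpuriousWakeups handling and explicit fence are omitted); it only shows the shape of the 1/0/-1 permit protocol:

  #include <pthread.h>
  #include <atomic>

  class SimpleEvent {
    std::atomic<int> _event;   // 1: permit available, 0: none, -1: owner blocked
    pthread_mutex_t  _mutex;
    pthread_cond_t   _cond;

   public:
    SimpleEvent() : _event(0) {
      pthread_mutex_init(&_mutex, nullptr);
      pthread_cond_init(&_cond, nullptr);
    }

    // Only the owning thread may call park().
    void park() {
      int v = _event.fetch_sub(1);      // atomic decrement, keeps the old value
      if (v == 0) {                     // 0 => -1 : no permit, so block
        pthread_mutex_lock(&_mutex);
        while (_event.load() < 0) {     // tolerate spurious wakeups
          pthread_cond_wait(&_cond, &_mutex);
        }
        _event.store(0);                // consume the permit before returning
        pthread_mutex_unlock(&_mutex);
      }
      // v == 1 : a permit was already available, 1 => 0, return immediately
    }

    // Any thread may call unpark().
    void unpark() {
      pthread_mutex_lock(&_mutex);
      int v = _event.exchange(1);       // publish a permit
      pthread_mutex_unlock(&_mutex);
      if (v < 0) {                      // the owner is blocked in park()
        pthread_cond_signal(&_cond);    // signalling after unlock is fine here
      }
    }
  };

With this shape, unpark() followed by park() returns immediately (1 => 0), while park() with no pending permit blocks until a later unpark() publishes one (0 => -1, then reset to 0 on wakeup), matching the transition table at the top of both park() variants.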
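
The timed variant blocks in pthread_cond_timedwait, which takes an absolute deadline and returns ETIMEDOUT once that deadline has passed; that is why the wait loop accepts status == ETIMEDOUT in addition to 0, and why ret starts out as OS_TIMEOUT. A simplified stand-in for the deadline computation (not HotSpot's to_abstime, which takes two extra flags whose meaning is not shown in this hunk; the helper name is made up) could look like this for a condition variable on the default CLOCK_REALTIME clock:

  #include <time.h>

  // Compute an absolute CLOCK_REALTIME deadline "millis" milliseconds
  // from now, in the form pthread_cond_timedwait expects.
  static void deadline_after_millis(struct timespec* abst, long millis) {
    clock_gettime(CLOCK_REALTIME, abst);
    abst->tv_sec  += millis / 1000;
    abst->tv_nsec += (millis % 1000) * 1000000L;
    if (abst->tv_nsec >= 1000000000L) {   // carry overflow into the seconds field
      abst->tv_sec  += 1;
      abst->tv_nsec -= 1000000000L;
    }
  }

A wait loop built on such a deadline then distinguishes three outcomes: woken with the predicate satisfied (success), woken spuriously (loop again), or ETIMEDOUT (give up and report the timeout).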