< prev index next >

src/os/posix/vm/os_posix.cpp

Print this page




  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "utilities/globalDefinitions.hpp"
  26 #include "prims/jvm.h"
  27 #include "semaphore_posix.hpp"
  28 #include "runtime/frame.inline.hpp"
  29 #include "runtime/interfaceSupport.hpp"
  30 #include "runtime/os.hpp"
  31 #include "utilities/macros.hpp"
  32 #include "utilities/vmError.hpp"
  33 
  34 #include <signal.h>
  35 #include <unistd.h>
  36 #include <sys/resource.h>
  37 #include <sys/utsname.h>
  38 #include <pthread.h>
  39 #include <semaphore.h>
  40 #include <signal.h>




  41 
  42 // Todo: provide a os::get_max_process_id() or similar. Number of processes
  43 // may have been configured, can be read more accurately from proc fs etc.
  44 #ifndef MAX_PID
  45 #define MAX_PID INT_MAX
  46 #endif
  47 #define IS_VALID_PID(p) (p > 0 && p < MAX_PID)
  48 
  49 // Check core dump limit and report possible place where core can be found
  50 void os::check_dump_limit(char* buffer, size_t bufferSize) {
  51   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
  52     jio_snprintf(buffer, bufferSize, "CreateCoredumpOnCrash is disabled from command line");
  53     VMError::record_coredump_status(buffer, false);
  54     return;
  55   }
  56 
  57   int n;
  58   struct rlimit rlim;
  59   bool success;
  60 


1377   return ret == 0;
1378 }
1379 
// Wait on the semaphore until it is signalled or the absolute deadline
// 'ts' passes. Returns true if the semaphore was acquired, false on
// timeout (or on an unexpected error in product builds, after asserting
// in debug builds).
bool PosixSemaphore::timedwait(struct timespec ts) {
  while (true) {
    int result = sem_timedwait(&_semaphore, &ts);
    if (result == 0) {
      return true;
    } else if (errno == EINTR) {
      // Interrupted by a signal - retry with the same absolute deadline.
      continue;
    } else if (errno == ETIMEDOUT) {
      return false;
    } else {
      assert_with_errno(false, "timedwait failed");
      return false;
    }
  }
}
1395 
1396 #endif // __APPLE__























































































































































































































































































































































































































































































































































































  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "utilities/globalDefinitions.hpp"
  26 #include "prims/jvm.h"
  27 #include "semaphore_posix.hpp"
  28 #include "runtime/frame.inline.hpp"
  29 #include "runtime/interfaceSupport.hpp"
  30 #include "runtime/os.hpp"
  31 #include "utilities/macros.hpp"
  32 #include "utilities/vmError.hpp"
  33 
  34 #include <dlfcn.h>



  35 #include <pthread.h>
  36 #include <semaphore.h>
  37 #include <signal.h>
  38 #include <sys/resource.h>
  39 #include <sys/utsname.h>
  40 #include <time.h>
  41 #include <unistd.h>
  42 
  43 // Todo: provide a os::get_max_process_id() or similar. Number of processes
  44 // may have been configured, can be read more accurately from proc fs etc.
  45 #ifndef MAX_PID
  46 #define MAX_PID INT_MAX
  47 #endif
  48 #define IS_VALID_PID(p) (p > 0 && p < MAX_PID)
  49 
  50 // Check core dump limit and report possible place where core can be found
  51 void os::check_dump_limit(char* buffer, size_t bufferSize) {
  52   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
  53     jio_snprintf(buffer, bufferSize, "CreateCoredumpOnCrash is disabled from command line");
  54     VMError::record_coredump_status(buffer, false);
  55     return;
  56   }
  57 
  58   int n;
  59   struct rlimit rlim;
  60   bool success;
  61 


1378   return ret == 0;
1379 }
1380 
1381 bool PosixSemaphore::timedwait(struct timespec ts) {
1382   while (true) {
1383     int result = sem_timedwait(&_semaphore, &ts);
1384     if (result == 0) {
1385       return true;
1386     } else if (errno == EINTR) {
1387       continue;
1388     } else if (errno == ETIMEDOUT) {
1389       return false;
1390     } else {
1391       assert_with_errno(false, "timedwait failed");
1392       return false;
1393     }
1394   }
1395 }
1396 
1397 #endif // __APPLE__
1398 
1399 
1400 // Shared pthread_mutex/cond based PlatformEvent implementation.
1401 // Not currently usable by Solaris
1402 
1403 #ifndef SOLARIS
1404 
// Shared condattr object for use with relative timed-waits. Will be associated
// with CLOCK_MONOTONIC if available to avoid issues with time-of-day changes,
// but otherwise whatever default is used by the platform - generally the
// time-of-day clock
static pthread_condattr_t _condAttr[1];

// Shared mutexattr to explicitly set the type to PTHREAD_MUTEX_NORMAL as not
// all systems (e.g. FreeBSD) map the default to "normal".
static pthread_mutexattr_t _mutexAttr[1];

// common basic initialization that is always supported
// Initializes the two shared attribute objects above. Failure of any of
// these calls leaves the VM unable to create its synchronization
// primitives, hence fatal().
static void pthread_init_common(void) {
  int status;
  if ((status = pthread_condattr_init(_condAttr)) != 0) {
    fatal("pthread_condattr_init: %s", os::strerror(status));
  }
  if ((status = pthread_mutexattr_init(_mutexAttr)) != 0) {
    fatal("pthread_mutexattr_init: %s", os::strerror(status));
  }
  // Explicitly request "normal" (non-recursive, non-error-checking)
  // mutexes; not every platform maps the default type to "normal".
  if ((status = pthread_mutexattr_settype(_mutexAttr, PTHREAD_MUTEX_NORMAL)) != 0) {
    fatal("pthread_mutexattr_settype: %s", os::strerror(status));
  }
}
1428 
1429 // Not all POSIX types and API's are available on all notionally "posix"
1430 // platforms. If we have build-time support then we will check for actual
1431 // runtime support via dlopen/dlsym lookup. This allows for running on an
1432 // older OS version compared to the build platform. But if there is no
1433 // build time support then there can not be any runtime support as we do not
1434 // know what the runtime types would be (for example clockid_t might be an
1435 // int or int64_t.
1436 //
1437 #ifdef SUPPORTS_CLOCK_MONOTONIC
1438 
1439 // This means we have clockid_t, clock_gettime et al and CLOCK_MONOTONIC
1440 
// Function pointers resolved at runtime via dlsym; left NULL when the
// running OS does not provide the corresponding API.
static int (*_clock_gettime)(clockid_t, struct timespec *);
static int (*_pthread_condattr_setclock)(pthread_condattr_t *, clockid_t);

// True once _condAttr has been successfully associated with
// CLOCK_MONOTONIC, making relative timed-waits immune to time-of-day
// clock adjustments.
static bool _use_clock_monotonic_condattr;

// determine what POSIX API's are present and do appropriate
// configuration
void os::Posix::init(void) {

  // NOTE: no logging available when this is called. Put logging
  // statements in init_2().

  // copied from os::Linux::clock_init(). The duplication is temporary.

  // 1. Check for CLOCK_MONOTONIC support

  void* handle = NULL;

  // For linux we need librt, for other OS we can find
  // this function in regular libc
#ifdef NEEDS_LIBRT
  // we do dlopen's in this particular order due to bug in linux
  // dynamical loader (see 6348968) leading to crash on exit
  handle = dlopen("librt.so.1", RTLD_LAZY);
  if (handle == NULL) {
    handle = dlopen("librt.so", RTLD_LAZY);
  }
#endif

  if (handle == NULL) {
    // Fall back to searching the already-loaded images.
    handle = RTLD_DEFAULT;
  }

  _clock_gettime = NULL;

  int (*clock_getres_func)(clockid_t, struct timespec*) =
    (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_getres");
  int (*clock_gettime_func)(clockid_t, struct timespec*) =
    (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_gettime");
  if (clock_getres_func != NULL && clock_gettime_func != NULL) {
    // we assume that if both clock_gettime and clock_getres support
    // CLOCK_MONOTONIC then the OS provides true high-res monotonic clock
    struct timespec res;
    struct timespec tp;
    if (clock_getres_func(CLOCK_MONOTONIC, &res) == 0 &&
        clock_gettime_func(CLOCK_MONOTONIC, &tp)  == 0) {
      // yes, monotonic clock is supported
      _clock_gettime = clock_gettime_func;
    } else {
#ifdef NEEDS_LIBRT
      // close librt if there is no monotonic clock
      if (handle != RTLD_DEFAULT) {
        dlclose(handle);
      }
#endif
    }
  }

  // 2. Check for pthread_condattr_setclock support

  _pthread_condattr_setclock = NULL;

  // libpthread is already loaded
  int (*condattr_setclock_func)(pthread_condattr_t*, clockid_t) =
    (int (*)(pthread_condattr_t*, clockid_t))dlsym(RTLD_DEFAULT,
                                                   "pthread_condattr_setclock");
  if (condattr_setclock_func != NULL) {
    _pthread_condattr_setclock = condattr_setclock_func;
  }

  // Now do general initialization

  pthread_init_common();

  int status;
  // Bind the shared condattr to CLOCK_MONOTONIC only if both the clock
  // itself and the pthread API for selecting it were found above.
  if (_pthread_condattr_setclock != NULL &&
      _clock_gettime != NULL) {
    _use_clock_monotonic_condattr = true;

    if ((status = _pthread_condattr_setclock(_condAttr, CLOCK_MONOTONIC)) != 0) {
      if (status == EINVAL) {
        // The clock exists but condvars cannot be bound to it - degrade
        // gracefully to the default (time-of-day) clock.
        _use_clock_monotonic_condattr = false;
        warning("Unable to use monotonic clock with relative timed-waits" \
                " - changes to the time-of-day clock may have adverse affects");
      } else {
        fatal("pthread_condattr_setclock: %s", os::strerror(status));
      }
    }
  }
  else {
    _use_clock_monotonic_condattr = false;
  }
}
1534 
1535 void os::Posix::init_2(void) {
1536   log_info(os)("Use of CLOCK_MONOTONIC is%s supported",
1537                (_clock_gettime != NULL ? "" : " not"));
1538   log_info(os)("Use of pthread_condattr_setclock is%s supported",
1539                (_pthread_condattr_setclock != NULL ? "" : " not"));
1540   log_info(os)("Relative timed-wait using pthread_cond_timedwait is associated with %s",
1541                _use_clock_monotonic_condattr ? "CLOCK_MONOTONIC" : "the default clock");
1542 }
1543 
1544 #else // !SUPPORTS_CLOCK_MONOTONIC
1545 
// Without build-time CLOCK_MONOTONIC support there is nothing to probe
// for at runtime; only the basic pthread attribute setup is done.
void os::Posix::init(void) {
  pthread_init_common();
}

// Report the (fixed) configuration once logging is available.
void os::Posix::init_2(void) {
  log_info(os)("Use of CLOCK_MONOTONIC is not supported");
  log_info(os)("Use of pthread_condattr_setclock is not supported");
  log_info(os)("Relative timed-wait using pthread_cond_timedwait is associated with the default clock");
}
1555 
1556 #endif // SUPPORTS_CLOCK_MONOTONIC
1557 
// Construct a PlatformEvent in the "neutral" state (_event == 0) with no
// parked thread. Uses the shared condattr (possibly CLOCK_MONOTONIC-based)
// and the explicitly "normal" mutexattr set up in os::Posix::init().
os::PlatformEvent::PlatformEvent() {
  int status = pthread_cond_init(_cond, _condAttr);
  assert_status(status == 0, status, "cond_init");
  status = pthread_mutex_init(_mutex, _mutexAttr);
  assert_status(status == 0, status, "mutex_init");
  _event   = 0;
  _nParked = 0;
}
1566 
// Utility to convert the given timeout to an absolute timespec
// (based on the appropriate clock) to use with pthread_cond_timedwait.
// The clock queried here must be the clock used to manage the
// timeout of the condition variable.
//
// The passed in timeout value is either a relative time in nanoseconds
// or an absolute time in milliseconds. A relative timeout will be
// associated with CLOCK_MONOTONIC if available; otherwise, or if absolute,
// the default time-of-day clock will be used.

// Given time is a 64-bit value and the time_t used in the timespec is
// sometimes a signed-32-bit value we have to watch for overflow if times
// way in the future are given. Further on Solaris versions
// prior to 10 there is a restriction (see cond_timedwait) that the specified
// number of seconds, in abstime, is less than current_time + 100,000,000.
// As it will be over 20 years before "now + 100000000" will overflow we can
// ignore overflow and just impose a hard-limit on seconds using the value
// of "now + 100,000,000". This places a limit on the timeout of about 3.17
// years from "now".
//
#define MAX_SECS 100000000

static void to_abstime(timespec* abstime, jlong timeout, bool isAbsolute) {

  // Negative timeouts are treated as "expire immediately".
  if (timeout < 0)
    timeout = 0;

  time_t max_secs = 0;

#ifdef SUPPORTS_CLOCK_MONOTONIC

  if (_use_clock_monotonic_condattr && !isAbsolute) {
    // relative timeout in nanoseconds
    jlong seconds = timeout / NANOUNITS;
    timeout %= NANOUNITS; // remaining nanos

    struct timespec now;
    int status = _clock_gettime(CLOCK_MONOTONIC, &now);
    assert_status(status == 0, status, "clock_gettime");

    max_secs = now.tv_sec + MAX_SECS;
    if (seconds >= MAX_SECS) {
      // More seconds than we can add, so pin to max_secs
      abstime->tv_sec = max_secs;
      abstime->tv_nsec = 0;
    }
    else {
      abstime->tv_sec = now.tv_sec  + seconds;
      long nsecs = now.tv_nsec + timeout;
      if (nsecs >= NANOUNITS) { // overflow
        abstime->tv_sec += 1;
        nsecs -= NANOUNITS;
      }
      abstime->tv_nsec = nsecs;
    }
  }
  else {

#else

  { // match the block scope

#endif // SUPPORTS_CLOCK_MONOTONIC

    // time-of-day clock is all we can reliably use
    struct timeval now;
    int status = gettimeofday(&now, NULL);
    assert(status == 0, "gettimeofday");
    max_secs = now.tv_sec + MAX_SECS;

    if (isAbsolute) {
      // absolute timeout in milliseconds
      jlong seconds = timeout / MILLIUNITS;
      timeout %= MILLIUNITS; // remaining millis

      if (seconds >= max_secs) {
        // Absolute seconds exceeds allowed max, so pin to max_secs
        abstime->tv_sec = max_secs;
        abstime->tv_nsec = 0;
      }
      else {
        abstime->tv_sec = seconds;
        abstime->tv_nsec = timeout * (NANOUNITS/MILLIUNITS);
      }
    }
    else {
      // relative timeout in nanoseconds
      jlong seconds = timeout / NANOUNITS;
      timeout %= NANOUNITS; // remaining nanos

      if (seconds >= MAX_SECS) {
        // More seconds than we can add, so pin to max_secs
        abstime->tv_sec = max_secs;
        abstime->tv_nsec = 0;
      }
      else {
        abstime->tv_sec = now.tv_sec + seconds;
        // tv_usec has microsecond resolution, so fold the remaining nanos
        // in as micros and express the final sub-second part in nanos.
        jlong micros_left = timeout / (NANOUNITS/MICROUNITS);
        long usec = now.tv_usec + micros_left;
        if (usec >= MICROUNITS) { // overflow
          abstime->tv_sec += 1;
          usec -= MICROUNITS;
        }
        abstime->tv_nsec = usec * (NANOUNITS/MICROUNITS);
      }
    }
  }

  assert(abstime->tv_sec >= 0, "tv_sec < 0");
  assert(abstime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(abstime->tv_nsec >= 0, "tv_nsec < 0");
  assert(abstime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");

}
1681 
// PlatformEvent
//
// Assumption:
//    Only one parker can exist on an event, which is why we allocate
//    them per-thread. Multiple unparkers can coexist.
//
// _event serves as a restricted-range semaphore.
//   -1 : thread is blocked, i.e. there is a waiter
//    0 : neutral: thread is running or ready,
//        could have been signaled after a wait started
//    1 : signaled - thread is running or ready
//
//    Having three states allows for some detection of bad usage - see
//    comments on unpark().

// Block the associated thread until unpark() is called; a pending permit
// (_event == 1) is consumed and the call returns immediately.
void os::PlatformEvent::park() {       // AKA "down()"
  // Transitions for _event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _event to 0 before returning

  // Invariant: Only the thread associated with the PlatformEvent
  // may call park().
  assert(_nParked == 0, "invariant");

  int v;

  // atomically decrement _event
  for (;;) {
    v = _event;
    if (Atomic::cmpxchg(v-1, &_event, v) == v) break;
  }
  guarantee(v >= 0, "invariant");

  if (v == 0) { // Do this the hard way by blocking ...
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee(_nParked == 0, "invariant");
    ++_nParked;
    while (_event < 0) {
      // OS-level "spurious wakeups" are ignored
      status = pthread_cond_wait(_cond, _mutex);
      assert_status(status == 0, status, "cond_wait");
    }
    --_nParked;

    _event = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other.
    OrderAccess::fence();
  }
  guarantee(_event >= 0, "invariant");
}
1737 
// Timed variant of park(): 'millis' is a relative timeout in milliseconds.
// Returns OS_OK if the event was signaled (or a permit was already
// pending) and OS_TIMEOUT if the wait timed out.
int os::PlatformEvent::park(jlong millis) {
  // Transitions for _event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _event to 0 before returning

  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  assert(_nParked == 0, "invariant");

  int v;
  // atomically decrement _event
  for (;;) {
    v = _event;
    if (Atomic::cmpxchg(v-1, &_event, v) == v) break;
  }
  guarantee(v >= 0, "invariant");

  if (v == 0) { // Do this the hard way by blocking ...
    // Convert the relative millis to an absolute deadline on the
    // appropriate clock (see to_abstime).
    struct timespec abst;
    to_abstime(&abst, millis * (NANOUNITS/MILLIUNITS), false);

    int ret = OS_TIMEOUT;
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee(_nParked == 0, "invariant");
    ++_nParked;

    while (_event < 0) {
      status = pthread_cond_timedwait(_cond, _mutex, &abst);
      assert_status(status == 0 || status == ETIMEDOUT,
                    status, "cond_timedwait");
      // OS-level "spurious wakeups" are ignored unless the archaic
      // FilterSpuriousWakeups is set false. That flag should be obsoleted.
      if (!FilterSpuriousWakeups) break;
      if (status == ETIMEDOUT) break;
    }
    --_nParked;

    // A signal may have raced with the timeout; _event reflects the truth.
    if (_event >= 0) {
      ret = OS_OK;
    }

    _event = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other.
    OrderAccess::fence();
    return ret;
  }
  return OS_OK;
}
1791 
// Make a permit available and wake the parked thread, if any. May be
// called by any thread, any number of times.
void os::PlatformEvent::unpark() {
  // Transitions for _event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without checking state conditions
  // properly. This spurious return doesn't manifest itself in any user code
  // but only in the correctly written condition checking loops of ObjectMonitor,
  // Mutex/Monitor, Thread::muxAcquire and os::sleep

  // Fast path: if there was no waiter (_event was 0 or 1) we are done.
  if (Atomic::xchg(1, &_event) >= 0) return;

  // There was a waiter (-1). Take the lock to get a consistent view of
  // _nParked before deciding whether to signal.
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  int anyWaiters = _nParked;
  assert(anyWaiters == 0 || anyWaiters == 1, "invariant");
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");

  // Note that we signal() *after* dropping the lock for "immortal" Events.
  // This is safe and avoids a common class of futile wakeups.  In rare
  // circumstances this can cause a thread to return prematurely from
  // cond_{timed}wait() but the spurious wakeup is benign and the victim
  // will simply re-test the condition and re-park itself.
  // This provides particular benefit if the underlying platform does not
  // provide wait morphing.

  if (anyWaiters != 0) {
    status = pthread_cond_signal(_cond);
    assert_status(status == 0, status, "cond_signal");
  }
}
1832 
// JSR166 support

// Construct a PlatformParker. Two condvars are kept: one bound to the
// shared (possibly CLOCK_MONOTONIC) condattr for relative waits, and one
// on the default time-of-day clock for absolute waits. _cur_index records
// which condvar (if any) the associated thread is currently waiting on.
 os::PlatformParker::PlatformParker() {
  int status;
  status = pthread_cond_init(&_cond[REL_INDEX], _condAttr);
  assert_status(status == 0, status, "cond_init rel");
  status = pthread_cond_init(&_cond[ABS_INDEX], NULL);
  assert_status(status == 0, status, "cond_init abs");
  status = pthread_mutex_init(_mutex, _mutexAttr);
  assert_status(status == 0, status, "mutex_init");
  _cur_index = -1; // mark as unused
}
1845 
1846 // Parker::park decrements count if > 0, else does a condvar wait.  Unpark
1847 // sets count to 1 and signals condvar.  Only one thread ever waits
1848 // on the condvar. Contention seen when trying to park implies that someone
1849 // is unparking you, so don't wait. And spurious returns are fine, so there
1850 // is no need to track notifications.
1851 
1852 void Parker::park(bool isAbsolute, jlong time) {
1853 
1854   // Optional fast-path check:
1855   // Return immediately if a permit is available.
1856   // We depend on Atomic::xchg() having full barrier semantics
1857   // since we are doing a lock-free update to _counter.
1858   if (Atomic::xchg(0, &_counter) > 0) return;
1859 
1860   Thread* thread = Thread::current();
1861   assert(thread->is_Java_thread(), "Must be JavaThread");
1862   JavaThread *jt = (JavaThread *)thread;
1863 
1864   // Optional optimization -- avoid state transitions if there's 
1865   // an interrupt pending.
1866   if (Thread::is_interrupted(thread, false)) {
1867     return;
1868   }
1869 
1870   // Next, demultiplex/decode time arguments
1871   struct timespec absTime;
1872   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
1873     return;
1874   }
1875   if (time > 0) {
1876     to_abstime(&absTime, time, isAbsolute);
1877   }
1878 
1879   // Enter safepoint region
1880   // Beware of deadlocks such as 6317397.
1881   // The per-thread Parker:: mutex is a classic leaf-lock.
1882   // In particular a thread must never block on the Threads_lock while
1883   // holding the Parker:: mutex.  If safepoints are pending both the
1884   // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
1885   ThreadBlockInVM tbivm(jt);
1886 
1887   // Don't wait if cannot get lock since interference arises from
1888   // unparking. Also re-check interrupt before trying wait.
1889   if (Thread::is_interrupted(thread, false) || 
1890       pthread_mutex_trylock(_mutex) != 0) {
1891     return;
1892   }
1893 
1894   int status;
1895   if (_counter > 0)  { // no wait needed
1896     _counter = 0;
1897     status = pthread_mutex_unlock(_mutex);
1898     assert_status(status == 0, status, "invariant");
1899     // Paranoia to ensure our locked and lock-free paths interact
1900     // correctly with each other and Java-level accesses.
1901     OrderAccess::fence();
1902     return;
1903   }
1904 
1905   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
1906   jt->set_suspend_equivalent();
1907   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1908 
1909   assert(_cur_index == -1, "invariant");
1910   if (time == 0) {
1911     _cur_index = REL_INDEX; // arbitrary choice when not timed
1912     status = pthread_cond_wait(&_cond[_cur_index], _mutex);
1913     assert_status(status == 0, status, "cond_timedwait");
1914   } 
1915   else {
1916     _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
1917     status = pthread_cond_timedwait(&_cond[_cur_index], _mutex, &absTime);
1918     assert_status(status == 0 || status == ETIMEDOUT,
1919                   status, "cond_timedwait");
1920   }
1921   _cur_index = -1;
1922 
1923   _counter = 0;
1924   status = pthread_mutex_unlock(_mutex);
1925   assert_status(status == 0, status, "invariant");
1926   // Paranoia to ensure our locked and lock-free paths interact
1927   // correctly with each other and Java-level accesses.
1928   OrderAccess::fence();
1929 
1930   // If externally suspended while waiting, re-suspend
1931   if (jt->handle_special_suspend_equivalent_condition()) {
1932     jt->java_suspend_self();
1933   }
1934 }
1935 
1936 void Parker::unpark() {
1937   int status = pthread_mutex_lock(_mutex);
1938   assert_status(status == 0, status, "invariant");
1939   const int s = _counter;
1940   _counter = 1;
1941   // must capture correct index before unlocking
1942   int index = _cur_index;
1943   status = pthread_mutex_unlock(_mutex);
1944   assert_status(status == 0, status, "invariant");
1945 
1946   // Note that we signal() *after* dropping the lock for "immortal" Events.
1947   // This is safe and avoids a common class of futile wakeups.  In rare
1948   // circumstances this can cause a thread to return prematurely from
1949   // cond_{timed}wait() but the spurious wakeup is benign and the victim
1950   // will simply re-test the condition and re-park itself.
1951   // This provides particular benefit if the underlying platform does not
1952   // provide wait morphing.
1953 
1954   if (s < 1 && index != -1) {
1955     // thread is definitely parked
1956     status = pthread_cond_signal(&_cond[index]);
1957     assert_status(status == 0, status, "invariant");
1958   }
1959 }
1960 
1961 
1962 #endif // !SOLARIS
< prev index next >