src/share/vm/runtime/objectMonitor.cpp

Print this page




1635   // check if the notification happened
1636   if (!WasNotified) {
1637     // no, it could be timeout or Thread.interrupt() or both
1638     // check for interrupt event, otherwise it is timeout
1639     if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1640       TEVENT(Wait - throw IEX from epilog);
1641       THROW(vmSymbols::java_lang_InterruptedException());
1642     }
1643   }
1644 
1645   // NOTE: A spurious wake up will be considered as a timeout.
1646   // Monitor notify has precedence over thread interrupt.
1647 }
1648 
1649 
1650 // Consider:
1651 // If the lock is cool (cxq == null && succ == null) and we're on an MP system
1652 // then instead of transferring a thread from the WaitSet to the EntryList
1653 // we might just dequeue a thread from the WaitSet and directly unpark() it.
1654 
// Wake (at most) one thread parked in wait() on this monitor.
// The caller must be the monitor owner: CHECK_OWNER() throws
// IllegalMonitorStateException into the calling thread otherwise.
// The dequeued waiter is moved from the WaitSet onto the EntryList or
// the cxq according to Knob_MoveNotifyee; policy 4 unparks it directly.
void ObjectMonitor::notify(TRAPS) {
  CHECK_OWNER();
  // Fast path: nobody is waiting, nothing to transfer.
  if (_WaitSet == NULL) {
    TEVENT(Empty-Notify);
    return;
  }
  DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);

  // Destination of the notified thread:
  //   0 = prepend to EntryList   1 = append to EntryList
  //   2 = prepend to cxq         3 = append to cxq
  //   anything else (4) = wake the waiter immediately via unpark()
  int Policy = Knob_MoveNotifyee;

  Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notify");
  ObjectWaiter * iterator = DequeueWaiter();
  if (iterator != NULL) {
    TEVENT(Notify1 - Transfer);
    guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
    guarantee(iterator->_notified == 0, "invariant");
    // For queue policies 0-3 the waiter becomes an entry-queue element now;
    // policy 4 leaves it TS_WAIT here and sets TS_RUN just before unpark().
    if (Policy != 4) {
      iterator->TState = ObjectWaiter::TS_ENTER;
    }
    iterator->_notified = 1;
    Thread * Self = THREAD;
    // Record who performed the notification (consumers of _notifier_tid are
    // not visible in this file).
    iterator->_notifier_tid = Self->osthread()->thread_id();

    ObjectWaiter * List = _EntryList;
    if (List != NULL) {
      assert(List->_prev == NULL, "invariant");
      assert(List->TState == ObjectWaiter::TS_ENTER, "invariant");
      assert(List != iterator, "invariant");
    }

    if (Policy == 0) {       // prepend to EntryList
      if (List == NULL) {
        iterator->_next = iterator->_prev = NULL;
        _EntryList = iterator;
      } else {
        List->_prev = iterator;
        iterator->_next = List;
        iterator->_prev = NULL;
        _EntryList = iterator;
      }
    } else if (Policy == 1) {      // append to EntryList
      if (List == NULL) {
        iterator->_next = iterator->_prev = NULL;
        _EntryList = iterator;
      } else {
        // CONSIDER:  finding the tail currently requires a linear-time walk of
        // the EntryList.  We can make tail access constant-time by converting to
        // a CDLL instead of using our current DLL.
        ObjectWaiter * Tail;
        for (Tail = List; Tail->_next != NULL; Tail = Tail->_next) /* empty */;
        assert(Tail != NULL && Tail->_next == NULL, "invariant");
        Tail->_next = iterator;
        iterator->_prev = Tail;
        iterator->_next = NULL;
      }
    } else if (Policy == 2) {      // prepend to cxq
      // prepend to cxq
      // When the EntryList is empty the waiter is installed there instead of
      // being pushed onto the cxq.
      if (List == NULL) {
        iterator->_next = iterator->_prev = NULL;
        _EntryList = iterator;
      } else {
        iterator->TState = ObjectWaiter::TS_CXQ;
        // cxq push: CAS the new front in, retrying until the CAS wins.
        for (;;) {
          ObjectWaiter * Front = _cxq;
          iterator->_next = Front;
          if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
            break;
          }
        }
      }
    } else if (Policy == 3) {      // append to cxq
      iterator->TState = ObjectWaiter::TS_CXQ;
      for (;;) {
        ObjectWaiter * Tail;
        Tail = _cxq;
        if (Tail == NULL) {
          // Empty cxq: try to install iterator as the sole element; if the
          // CAS loses to a concurrent push, retry the outer loop.
          iterator->_next = NULL;
          if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
            break;
          }
        } else {
          // Non-empty: walk to the tail and link on without a CAS.
          // NOTE(review): presumably safe because concurrent cxq pushes only
          // touch the head -- confirm against cxq usage elsewhere in the file.
          while (Tail->_next != NULL) Tail = Tail->_next;
          Tail->_next = iterator;
          iterator->_prev = Tail;
          iterator->_next = NULL;
          break;
        }
      }
    } else {
      // Policy 4: bypass the entry queues and wake the waiter right away.
      ParkEvent * ev = iterator->_event;
      iterator->TState = ObjectWaiter::TS_RUN;
      OrderAccess::fence();   // full fence between the TState store and unpark
      ev->unpark();
    }

    // Queue-based policies mark the start of the waiter's re-entry phase
    // (wait_reenter_begin's exact effect is defined elsewhere).
    if (Policy < 4) {
      iterator->wait_reenter_begin(this);
    }

    // _WaitSetLock protects the wait queue, not the EntryList.  We could
    // move the add-to-EntryList operation, above, outside the critical section
    // protected by _WaitSetLock.  In practice that's not useful.  With the
    // exception of  wait() timeouts and interrupts the monitor owner
    // is the only thread that grabs _WaitSetLock.  There's almost no contention
    // on _WaitSetLock so it's not profitable to reduce the length of the
    // critical section.
  }

  Thread::SpinRelease(&_WaitSetLock);

  // Bump the global notification counter only if a waiter was actually moved.
  if (iterator != NULL && ObjectMonitor::_sync_Notifications != NULL) {
    ObjectMonitor::_sync_Notifications->inc();
  }
}
1769 






1770 
// Transfer every thread on the WaitSet to the entry queues (or, for
// policy 4, unpark each one directly), one waiter at a time.
// The caller must be the monitor owner: CHECK_OWNER() throws
// IllegalMonitorStateException otherwise.
void ObjectMonitor::notifyAll(TRAPS) {
  CHECK_OWNER();
  ObjectWaiter* iterator;
  // Fast path: nobody is waiting.
  if (_WaitSet == NULL) {
    TEVENT(Empty-NotifyAll);
    return;
  }
  DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);

  // Same policy encoding as notify(): 0/1 = prepend/append to EntryList,
  // 2/3 = prepend/append to cxq, 4 = direct unpark.
  int Policy = Knob_MoveNotifyee;
  int Tally = 0;   // number of waiters transferred; used for the counter below
  Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notifyall");

  for (;;) {
    iterator = DequeueWaiter();
    if (iterator == NULL) break;   // WaitSet drained
    TEVENT(NotifyAll - Transfer1);
    ++Tally;

    // Disposition - what might we do with iterator ?
    // a.  add it directly to the EntryList - either tail or head.
    // b.  push it onto the front of the _cxq.
    // For now we use (a).

    guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
    guarantee(iterator->_notified == 0, "invariant");
    iterator->_notified = 1;
    Thread * Self = THREAD;
    iterator->_notifier_tid = Self->osthread()->thread_id();
    // Policy 4 keeps TS_WAIT here; it is set to TS_RUN just before unpark().
    if (Policy != 4) {
      iterator->TState = ObjectWaiter::TS_ENTER;
    }

    ObjectWaiter * List = _EntryList;
    if (List != NULL) {
      assert(List->_prev == NULL, "invariant");
      assert(List->TState == ObjectWaiter::TS_ENTER, "invariant");
      assert(List != iterator, "invariant");
    }

    if (Policy == 0) {       // prepend to EntryList
      if (List == NULL) {
        iterator->_next = iterator->_prev = NULL;
        _EntryList = iterator;
      } else {
        List->_prev = iterator;
        iterator->_next = List;
        iterator->_prev = NULL;
        _EntryList = iterator;
      }
    } else if (Policy == 1) {      // append to EntryList
      if (List == NULL) {
        iterator->_next = iterator->_prev = NULL;
        _EntryList = iterator;
      } else {
        // CONSIDER:  finding the tail currently requires a linear-time walk of
        // the EntryList.  We can make tail access constant-time by converting to
        // a CDLL instead of using our current DLL.
        ObjectWaiter * Tail;
        for (Tail = List; Tail->_next != NULL; Tail = Tail->_next) /* empty */;
        assert(Tail != NULL && Tail->_next == NULL, "invariant");
        Tail->_next = iterator;
        iterator->_prev = Tail;
        iterator->_next = NULL;
      }
    } else if (Policy == 2) {      // prepend to cxq
      // prepend to cxq
      // NOTE(review): unlike notify(), this version pushes onto the cxq even
      // when the EntryList is empty (no List == NULL fallback) -- intentional?
      iterator->TState = ObjectWaiter::TS_CXQ;
      // cxq push: CAS the new front in, retrying until the CAS wins.
      for (;;) {
        ObjectWaiter * Front = _cxq;
        iterator->_next = Front;
        if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
          break;
        }
      }
    } else if (Policy == 3) {      // append to cxq
      iterator->TState = ObjectWaiter::TS_CXQ;
      for (;;) {
        ObjectWaiter * Tail;
        Tail = _cxq;
        if (Tail == NULL) {
          // Empty cxq: try to install iterator as the sole element; retry the
          // outer loop if the CAS loses to a concurrent push.
          iterator->_next = NULL;
          if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
            break;
          }
        } else {
          // Non-empty: walk to the tail and link on without a CAS.
          while (Tail->_next != NULL) Tail = Tail->_next;
          Tail->_next = iterator;
          iterator->_prev = Tail;
          iterator->_next = NULL;
          break;
        }
      }
    } else {
      // Policy 4: bypass the entry queues and wake the waiter right away.
      ParkEvent * ev = iterator->_event;
      iterator->TState = ObjectWaiter::TS_RUN;
      OrderAccess::fence();   // full fence between the TState store and unpark
      ev->unpark();
    }

    // Queue-based policies mark the start of the waiter's re-entry phase.
    if (Policy < 4) {
      iterator->wait_reenter_begin(this);
    }

    // _WaitSetLock protects the wait queue, not the EntryList.  We could
    // move the add-to-EntryList operation, above, outside the critical section
    // protected by _WaitSetLock.  In practice that's not useful.  With the
    // exception of  wait() timeouts and interrupts the monitor owner
    // is the only thread that grabs _WaitSetLock.  There's almost no contention
    // on _WaitSetLock so it's not profitable to reduce the length of the
    // critical section.
  }

  Thread::SpinRelease(&_WaitSetLock);

  // One counter update for the whole batch, outside the critical section.
  if (Tally != 0 && ObjectMonitor::_sync_Notifications != NULL) {
    ObjectMonitor::_sync_Notifications->inc(Tally);
  }
}
1890 
1891 // -----------------------------------------------------------------------------
1892 // Adaptive Spinning Support
1893 //
1894 // Adaptive spin-then-block - rational spinning
1895 //
1896 // Note that we spin "globally" on _owner with a classic SMP-polite TATAS
1897 // algorithm.  On high order SMP systems it would be better to start with
1898 // a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
1899 // a contending thread could enqueue itself on the cxq and then spin locally
1900 // on a thread-specific variable such as its ParkEvent._Event flag.
1901 // That's left as an exercise for the reader.  Note that global spinning is
1902 // not problematic on Niagara, as the L2 cache serves the interconnect and
1903 // has both low latency and massive bandwidth.
1904 //
1905 // Broadly, we can fix the spin frequency -- that is, the % of contended lock
1906 // acquisition attempts where we opt to spin --  at 100% and vary the spin count
1907 // (duration) or we can fix the count at approximately the duration of




1635   // check if the notification happened
1636   if (!WasNotified) {
1637     // no, it could be timeout or Thread.interrupt() or both
1638     // check for interrupt event, otherwise it is timeout
1639     if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1640       TEVENT(Wait - throw IEX from epilog);
1641       THROW(vmSymbols::java_lang_InterruptedException());
1642     }
1643   }
1644 
1645   // NOTE: A spurious wake up will be considered as a timeout.
1646   // Monitor notify has precedence over thread interrupt.
1647 }
1648 
1649 
1650 // Consider:
1651 // If the lock is cool (cxq == null && succ == null) and we're on an MP system
1652 // then instead of transferring a thread from the WaitSet to the EntryList
1653 // we might just dequeue a thread from the WaitSet and directly unpark() it.
1654 
// Common helper shared by notify() and notifyAll(): dequeue a single waiter
// from the WaitSet and move it to the queue selected by Knob_MoveNotifyee
// (policies 0-3), or unpark it directly (policy 4).  Self is the notifying
// thread.  Always returns 0.
int ObjectMonitor::INotify(Thread * Self) {
  // Destination of the notified thread:
  //   0 = prepend to EntryList   1 = append to EntryList
  //   2 = prepend to cxq         3 = append to cxq
  //   anything else (4) = wake the waiter immediately via unpark()
  const int policy = Knob_MoveNotifyee;

  Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notify");
  ObjectWaiter * iterator = DequeueWaiter();
  // iterator may be NULL if a waiter left the WaitSet before we got here.
  if (iterator != NULL) {
    TEVENT(Notify1 - Transfer);
    guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
    guarantee(iterator->_notified == 0, "invariant");
    // Disposition - what might we do with iterator ?
    // a.  add it directly to the EntryList - either tail or head.
    // b.  push it onto the front of the _cxq.
    // For now we use (a).
    // Policy 4 keeps TS_WAIT here; it is set to TS_RUN just before unpark().
    if (policy != 4) {
      iterator->TState = ObjectWaiter::TS_ENTER;
    }
    iterator->_notified = 1;
    // Record who performed the notification (consumers of _notifier_tid are
    // not visible in this file).
    iterator->_notifier_tid = Self->osthread()->thread_id();

    ObjectWaiter * list = _EntryList;
    if (list != NULL) {
      assert(list->_prev == NULL, "invariant");
      assert(list->TState == ObjectWaiter::TS_ENTER, "invariant");
      assert(list != iterator, "invariant");
    }

    if (policy == 0) {       // prepend to EntryList
      if (list == NULL) {
        iterator->_next = iterator->_prev = NULL;
        _EntryList = iterator;
      } else {
        list->_prev = iterator;
        iterator->_next = list;
        iterator->_prev = NULL;
        _EntryList = iterator;
      }
    } else if (policy == 1) {      // append to EntryList
      if (list == NULL) {
        iterator->_next = iterator->_prev = NULL;
        _EntryList = iterator;
      } else {
        // CONSIDER:  finding the tail currently requires a linear-time walk of
        // the EntryList.  We can make tail access constant-time by converting to
        // a CDLL instead of using our current DLL.
        ObjectWaiter * tail;
        for (tail = list; tail->_next != NULL; tail = tail->_next) /* empty */;
        assert(tail != NULL && tail->_next == NULL, "invariant");
        tail->_next = iterator;
        iterator->_prev = tail;
        iterator->_next = NULL;
      }
    } else if (policy == 2) {      // prepend to cxq
      // prepend to cxq
      // When the EntryList is empty the waiter is installed there instead of
      // being pushed onto the cxq.
      if (list == NULL) {
        iterator->_next = iterator->_prev = NULL;
        _EntryList = iterator;
      } else {
        iterator->TState = ObjectWaiter::TS_CXQ;
        // cxq push: CAS the new front in, retrying until the CAS wins.
        for (;;) {
          ObjectWaiter * front = _cxq;
          iterator->_next = front;
          if (Atomic::cmpxchg_ptr(iterator, &_cxq, front) == front) {
            break;
          }
        }
      }
    } else if (policy == 3) {      // append to cxq
      iterator->TState = ObjectWaiter::TS_CXQ;
      for (;;) {
        ObjectWaiter * tail = _cxq;
        if (tail == NULL) {
          // Empty cxq: try to install iterator as the sole element; retry the
          // outer loop if the CAS loses to a concurrent push.
          iterator->_next = NULL;
          if (Atomic::cmpxchg_ptr(iterator, &_cxq, NULL) == NULL) {
            break;
          }
        } else {
          // Non-empty: walk to the tail and link on without a CAS.
          // NOTE(review): presumably safe because concurrent cxq pushes only
          // touch the head -- confirm against cxq usage elsewhere in the file.
          while (tail->_next != NULL) tail = tail->_next;
          tail->_next = iterator;
          iterator->_prev = tail;
          iterator->_next = NULL;
          break;
        }
      }
    } else {
      // Policy 4: bypass the entry queues and wake the waiter right away.
      ParkEvent * ev = iterator->_event;
      iterator->TState = ObjectWaiter::TS_RUN;
      OrderAccess::fence();   // full fence between the TState store and unpark
      ev->unpark();
    }

    // _WaitSetLock protects the wait queue, not the EntryList.  We could
    // move the add-to-EntryList operation, above, outside the critical section
    // protected by _WaitSetLock.  In practice that's not useful.  With the
    // exception of  wait() timeouts and interrupts the monitor owner
    // is the only thread that grabs _WaitSetLock.  There's almost no contention
    // on _WaitSetLock so it's not profitable to reduce the length of the
    // critical section.

    // Queue-based policies mark the start of the waiter's re-entry phase
    // (wait_reenter_begin's exact effect is defined elsewhere).
    if (policy < 4) {
      iterator->wait_reenter_begin(this);
    }
  }
  Thread::SpinRelease(&_WaitSetLock);

  return 0;
}
1761 
1762 // Consider: a non-uncommon synchronization bug is to use notify() when notifyAll()
1763 // is more appropriate, potentially resulting in lost wakeups and stranded threads.
1764 // A useful diagnostic option is to force all notify() operations to behave
1765 // as notifyAll().  We can also detect many such problems with MinimumWait.
1766 // When MinimumWait is set to a small non-zero timeout value and the program
1767 // does not hang whereas it did absent MinimumWait, that suggests a lost wakeup bug.
1768 
// Wake a single thread parked in wait() on this monitor.  Thin wrapper
// around INotify() that adds the owner check, the DTrace probe, and the
// global notification counter.
void ObjectMonitor::notify(TRAPS) {
  CHECK_OWNER();   // throws IllegalMonitorStateException if not the owner
  // Fast path: nobody is waiting, nothing to transfer, counter not bumped.
  if (_WaitSet == NULL) {
    TEVENT(Empty-Notify);
    return;
  }
  DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
  INotify(THREAD);
  // NOTE(review): the counter is driven by the _WaitSet check above, not by
  // whether INotify actually dequeued a waiter (it may find the set drained).
  if (ObjectMonitor::_sync_Notifications != NULL) {
    ObjectMonitor::_sync_Notifications->inc(1);
  }
}
1781 






1782 
1783 // The current implementation of notifyAll() transfers the waiters one-at-a-time
1784 // from the waitset to the EntryList. This could be done more efficiently with a
1785 // single bulk transfer but in practice it's not time-critical. Beware too,
1786 // that in prepend-mode we invert the order of the waiters. Let's say that the
1787 // waitset is "ABCD" and the EntryList is "XYZ". After a notifyAll() in prepend
1788 // mode the waitset will be empty and the EntryList will be "DCBAXYZ".





















































1789 
// Wake every thread parked in wait() on this monitor by repeatedly invoking
// INotify() until the WaitSet is empty.  Thin wrapper that adds the owner
// check, the DTrace probe, and a single batched counter update.
void ObjectMonitor::notifyAll(TRAPS) {
  CHECK_OWNER();   // throws IllegalMonitorStateException if not the owner
  // Fast path: nobody is waiting.
  if (_WaitSet == NULL) {
    TEVENT(Empty-NotifyAll);
    return;
  }

  DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
  int tally = 0;   // number of INotify() calls; used for the counter below
  // Waiters are transferred one at a time; see the comment above about the
  // ordering consequences of prepend-mode.
  while (_WaitSet != NULL) {
    tally++;
    INotify(THREAD);
  }

  // One counter update for the whole batch.
  if (tally != 0 && ObjectMonitor::_sync_Notifications != NULL) {
    ObjectMonitor::_sync_Notifications->inc(tally);
  }
}
1808 
1809 // -----------------------------------------------------------------------------
1810 // Adaptive Spinning Support
1811 //
1812 // Adaptive spin-then-block - rational spinning
1813 //
1814 // Note that we spin "globally" on _owner with a classic SMP-polite TATAS
1815 // algorithm.  On high order SMP systems it would be better to start with
1816 // a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
1817 // a contending thread could enqueue itself on the cxq and then spin locally
1818 // on a thread-specific variable such as its ParkEvent._Event flag.
1819 // That's left as an exercise for the reader.  Note that global spinning is
1820 // not problematic on Niagara, as the L2 cache serves the interconnect and
1821 // has both low latency and massive bandwidth.
1822 //
1823 // Broadly, we can fix the spin frequency -- that is, the % of contended lock
1824 // acquisition attempts where we opt to spin --  at 100% and vary the spin count
1825 // (duration) or we can fix the count at approximately the duration of