src/share/vm/runtime/objectMonitor.cpp

 429 
 430 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 431 // Callers must compensate as needed.
 432 
 433 int ObjectMonitor::TryLock(Thread * Self) {
 434   void * own = _owner;
 435   if (own != NULL) return 0;
 436   if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
 437     // Either guarantee _recursions == 0 or set _recursions = 0.
 438     assert(_recursions == 0, "invariant");
 439     assert(_owner == Self, "invariant");
 440     return 1;
 441   }
 442   // The lock had been free momentarily, but we lost the race to the lock.
 443   // Interference -- the CAS failed.
 444   // We can either return -1 or retry.
 445   // Retry doesn't make as much sense because the lock was just acquired.
 446   return -1;
 447 }
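
A note on the return convention above (a sketch of the calling side): 1 means the CAS succeeded and _owner == Self; 0 means the lock looked owned, so no CAS was attempted; -1 means the CAS was attempted and lost. Only a result greater than zero means the lock is held, and because the failing paths are not serializing, callers such as EnterI() below re-try the lock (or rely on the fencing CAS of their own cxq enqueue) before they park:

    // Hypothetical caller, mirroring the pattern used in EnterI()/ReenterI():
    if (TryLock(Self) > 0) {
      // acquired: _owner == Self, _recursions == 0
      return;
    }
    // 0 or -1: still contended -- enqueue on _cxq, re-try, and only then park.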
 448 
 449 void NOINLINE ObjectMonitor::EnterI(TRAPS) {
 450   Thread * const Self = THREAD;
 451   assert(Self->is_Java_thread(), "invariant");
 452   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 453 
 454   // Try the lock - TATAS
 455   if (TryLock (Self) > 0) {
 456     assert(_succ != Self, "invariant");
 457     assert(_owner == Self, "invariant");
 458     assert(_Responsible != Self, "invariant");
 459     return;
 460   }
 461 
 462   DeferredInitialize();
 463 
 464   // We try one round of spinning *before* enqueueing Self.
 465   //
 466   // If the _owner is ready but OFFPROC we could use a YieldTo()
 467   // operation to donate the remainder of this thread's quantum
 468   // to the owner.  This has subtle but beneficial affinity


 538 
 539   if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
 540     // Try to assume the role of responsible thread for the monitor.
 541     // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
 542     Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
 543   }
 544 
 545   // The lock might have been released while this thread was occupied queueing
 546   // itself onto _cxq.  To close the race and avoid "stranding" and
 547   // progress-liveness failure we must resample-retry _owner before parking.
 548   // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
 549   // In this case the ST-MEMBAR is accomplished with CAS().
 550   //
 551   // TODO: Defer all thread state transitions until park-time.
 552   // Since state transitions are heavy and inefficient we'd like
 553   // to defer the state transitions until absolutely necessary,
 554   // and in doing so avoid some transitions ...
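       // For illustration -- a sketch of the Dekker/Lamport duality noted
       // above.  Both sides order their accesses as ST; MEMBAR; LD:
       //
       //    contending thread (here)            exiting owner
       //    ------------------------            -------------
       //    CAS Self onto _cxq (ST + MEMBAR)    ST _owner = NULL
       //    LD _owner (resample via TryLock)    MEMBAR
       //                                        LD _cxq/_EntryList (successor?)
       //
       // With both orderings in place at least one side observes the other:
       // either this thread resamples _owner as NULL and re-tries the lock
       // instead of parking, or the exiting owner sees the new _cxq entry and
       // unparks a successor.  Skip the resample and the owner can release,
       // find the queues empty, wake nobody -- and this thread then parks with
       // no one left to wake it: the "stranding" described above.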
 555 
 556   TEVENT(Inflated enter - Contention);
 557   int nWakeups = 0;
 558   int RecheckInterval = 1;
 559 
 560   for (;;) {
 561 
 562     if (TryLock(Self) > 0) break;
 563     assert(_owner != Self, "invariant");
 564 
 565     if ((SyncFlags & 2) && _Responsible == NULL) {
 566       Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
 567     }
 568 
 569     // park self
 570     if (_Responsible == Self || (SyncFlags & 1)) {
 571       TEVENT(Inflated enter - park TIMED);
 572       Self->_ParkEvent->park((jlong) RecheckInterval);
 573       // Increase the RecheckInterval, but clamp the value.
 574       RecheckInterval *= 8;
 575       if (RecheckInterval > 1000) RecheckInterval = 1000;
 576     } else {
 577       TEVENT(Inflated enter - park UNTIMED);
 578       Self->_ParkEvent->park();
 579     }
 580 
 581     if (TryLock(Self) > 0) break;
 582 
 583     // The lock is still contested.
 584     // Keep a tally of the # of futile wakeups.
 585     // Note that the counter is not protected by a lock or updated by atomics.
 586     // That is by design - we trade "lossy" counters which are exposed to
 587     // races during updates for a lower probe effect.
 588     TEVENT(Inflated enter - Futile wakeup);
 589     if (ObjectMonitor::_sync_FutileWakeups != NULL) {
 590       ObjectMonitor::_sync_FutileWakeups->inc();
 591     }
 592     ++nWakeups;
 593 
 594     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 595     // We can defer clearing _succ until after the spin completes


 708     ObjectWaiter::TStates v = SelfNode->TState;
 709     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 710     assert(_owner != Self, "invariant");
 711 
 712     if (TryLock(Self) > 0) break;
 713     if (TrySpin(Self) > 0) break;
 714 
 715     TEVENT(Wait Reentry - parking);
 716 
 717     // State transition wrappers around park() ...
 718     // ReenterI() wisely defers state transitions until
 719     // it's clear we must park the thread.
 720     {
 721       OSThreadContendState osts(Self->osthread());
 722       ThreadBlockInVM tbivm(jt);
 723 
 724       // cleared by handle_special_suspend_equivalent_condition()
 725       // or java_suspend_self()
 726       jt->set_suspend_equivalent();
 727       if (SyncFlags & 1) {
 728         Self->_ParkEvent->park((jlong)1000);
 729       } else {
 730         Self->_ParkEvent->park();
 731       }
 732 
 733       // were we externally suspended while we were waiting?
 734       for (;;) {
 735         if (!ExitSuspendEquivalent(jt)) break;
 736         if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
 737         jt->java_suspend_self();
 738         jt->set_suspend_equivalent();
 739       }
 740     }
 741 
 742     // Try again, but just so we distinguish between futile wakeups and
 743     // successful wakeups.  The following test isn't algorithmically
 744     // necessary, but it helps us maintain sensible statistics.
 745     if (TryLock(Self) > 0) break;
 746 
 747     // The lock is still contested.
 748     // Keep a tally of the # of futile wakeups.


1635   // check if the notification happened
1636   if (!WasNotified) {
1637     // no, it could be timeout or Thread.interrupt() or both
1638     // check for interrupt event, otherwise it is timeout
1639     if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1640       TEVENT(Wait - throw IEX from epilog);
1641       THROW(vmSymbols::java_lang_InterruptedException());
1642     }
1643   }
1644 
 1645   // NOTE: A spurious wake up will be treated as a timeout.
1646   // Monitor notify has precedence over thread interrupt.
1647 }
1648 
1649 
1650 // Consider:
1651 // If the lock is cool (cxq == null && succ == null) and we're on an MP system
1652 // then instead of transferring a thread from the WaitSet to the EntryList
1653 // we might just dequeue a thread from the WaitSet and directly unpark() it.
1654 
1655 void ObjectMonitor::notify(TRAPS) {
1656   CHECK_OWNER();
1657   if (_WaitSet == NULL) {
1658     TEVENT(Empty-Notify);
1659     return;
1660   }
1661   DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
1662 
1663   int Policy = Knob_MoveNotifyee;
1664 
1665   Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notify");
1666   ObjectWaiter * iterator = DequeueWaiter();
1667   if (iterator != NULL) {
1668     TEVENT(Notify1 - Transfer);
1669     guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
1670     guarantee(iterator->_notified == 0, "invariant");
1671     if (Policy != 4) {
1672       iterator->TState = ObjectWaiter::TS_ENTER;
1673     }
1674     iterator->_notified = 1;
1675     Thread * Self = THREAD;
1676     iterator->_notifier_tid = Self->osthread()->thread_id();
1677 
1678     ObjectWaiter * List = _EntryList;
1679     if (List != NULL) {
1680       assert(List->_prev == NULL, "invariant");
1681       assert(List->TState == ObjectWaiter::TS_ENTER, "invariant");
1682       assert(List != iterator, "invariant");
1683     }
1684 
1685     if (Policy == 0) {       // prepend to EntryList
1686       if (List == NULL) {
1687         iterator->_next = iterator->_prev = NULL;
1688         _EntryList = iterator;
1689       } else {
1690         List->_prev = iterator;
1691         iterator->_next = List;
1692         iterator->_prev = NULL;
1693         _EntryList = iterator;
1694       }
1695     } else if (Policy == 1) {      // append to EntryList
1696       if (List == NULL) {
1697         iterator->_next = iterator->_prev = NULL;
1698         _EntryList = iterator;
1699       } else {
1700         // CONSIDER:  finding the tail currently requires a linear-time walk of
1701         // the EntryList.  We can make tail access constant-time by converting to
1702         // a CDLL instead of using our current DLL.
1703         ObjectWaiter * Tail;
1704         for (Tail = List; Tail->_next != NULL; Tail = Tail->_next) /* empty */;
1705         assert(Tail != NULL && Tail->_next == NULL, "invariant");
1706         Tail->_next = iterator;
1707         iterator->_prev = Tail;
1708         iterator->_next = NULL;
1709       }
1710     } else if (Policy == 2) {      // prepend to cxq
1711       // prepend to cxq
1712       if (List == NULL) {
1713         iterator->_next = iterator->_prev = NULL;
1714         _EntryList = iterator;
1715       } else {
1716         iterator->TState = ObjectWaiter::TS_CXQ;
1717         for (;;) {
1718           ObjectWaiter * Front = _cxq;
1719           iterator->_next = Front;
1720           if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
1721             break;
1722           }
1723         }
1724       }
1725     } else if (Policy == 3) {      // append to cxq
1726       iterator->TState = ObjectWaiter::TS_CXQ;
1727       for (;;) {
1728         ObjectWaiter * Tail;
1729         Tail = _cxq;
1730         if (Tail == NULL) {
1731           iterator->_next = NULL;
1732           if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
1733             break;
1734           }
1735         } else {
1736           while (Tail->_next != NULL) Tail = Tail->_next;
1737           Tail->_next = iterator;
1738           iterator->_prev = Tail;
1739           iterator->_next = NULL;
1740           break;
1741         }
1742       }
1743     } else {
1744       ParkEvent * ev = iterator->_event;
1745       iterator->TState = ObjectWaiter::TS_RUN;
1746       OrderAccess::fence();
1747       ev->unpark();
1748     }
1749 
1750     if (Policy < 4) {
1751       iterator->wait_reenter_begin(this);
1752     }
1753 
1754     // _WaitSetLock protects the wait queue, not the EntryList.  We could
1755     // move the add-to-EntryList operation, above, outside the critical section
1756     // protected by _WaitSetLock.  In practice that's not useful.  With the
1757     // exception of  wait() timeouts and interrupts the monitor owner
1758     // is the only thread that grabs _WaitSetLock.  There's almost no contention
1759     // on _WaitSetLock so it's not profitable to reduce the length of the
1760     // critical section.
1761   }
1762 
1763   Thread::SpinRelease(&_WaitSetLock);
1764 
1765   if (iterator != NULL && ObjectMonitor::_sync_Notifications != NULL) {
1766     ObjectMonitor::_sync_Notifications->inc();
1767   }
1768 }
1769 
1770 
1771 void ObjectMonitor::notifyAll(TRAPS) {
1772   CHECK_OWNER();
1773   ObjectWaiter* iterator;
1774   if (_WaitSet == NULL) {
1775     TEVENT(Empty-NotifyAll);
1776     return;
1777   }
1778   DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
1779 
1780   int Policy = Knob_MoveNotifyee;
1781   int Tally = 0;
1782   Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notifyall");
1783 
1784   for (;;) {
1785     iterator = DequeueWaiter();
1786     if (iterator == NULL) break;
1787     TEVENT(NotifyAll - Transfer1);
1788     ++Tally;
1789 
1790     // Disposition - what might we do with iterator ?
1791     // a.  add it directly to the EntryList - either tail or head.
1792     // b.  push it onto the front of the _cxq.
1793     // For now we use (a).
1794 
1795     guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
1796     guarantee(iterator->_notified == 0, "invariant");
1797     iterator->_notified = 1;
1798     Thread * Self = THREAD;
1799     iterator->_notifier_tid = Self->osthread()->thread_id();
1800     if (Policy != 4) {
1801       iterator->TState = ObjectWaiter::TS_ENTER;
1802     }
1803 
1804     ObjectWaiter * List = _EntryList;
1805     if (List != NULL) {
1806       assert(List->_prev == NULL, "invariant");
1807       assert(List->TState == ObjectWaiter::TS_ENTER, "invariant");
1808       assert(List != iterator, "invariant");
1809     }
1810 
1811     if (Policy == 0) {       // prepend to EntryList
1812       if (List == NULL) {
1813         iterator->_next = iterator->_prev = NULL;
1814         _EntryList = iterator;
1815       } else {
1816         List->_prev = iterator;
1817         iterator->_next = List;
1818         iterator->_prev = NULL;
1819         _EntryList = iterator;
1820       }
1821     } else if (Policy == 1) {      // append to EntryList
1822       if (List == NULL) {
1823         iterator->_next = iterator->_prev = NULL;
1824         _EntryList = iterator;
1825       } else {
1826         // CONSIDER:  finding the tail currently requires a linear-time walk of
1827         // the EntryList.  We can make tail access constant-time by converting to
1828         // a CDLL instead of using our current DLL.
1829         ObjectWaiter * Tail;
1830         for (Tail = List; Tail->_next != NULL; Tail = Tail->_next) /* empty */;
1831         assert(Tail != NULL && Tail->_next == NULL, "invariant");
1832         Tail->_next = iterator;
1833         iterator->_prev = Tail;
1834         iterator->_next = NULL;
1835       }
1836     } else if (Policy == 2) {      // prepend to cxq
1837       // prepend to cxq
1838       iterator->TState = ObjectWaiter::TS_CXQ;
1839       for (;;) {
1840         ObjectWaiter * Front = _cxq;
1841         iterator->_next = Front;
1842         if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
1843           break;
1844         }
1845       }
1846     } else if (Policy == 3) {      // append to cxq
1847       iterator->TState = ObjectWaiter::TS_CXQ;
1848       for (;;) {
1849         ObjectWaiter * Tail;
1850         Tail = _cxq;
1851         if (Tail == NULL) {
1852           iterator->_next = NULL;
1853           if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
1854             break;
1855           }
1856         } else {
1857           while (Tail->_next != NULL) Tail = Tail->_next;
1858           Tail->_next = iterator;
1859           iterator->_prev = Tail;
1860           iterator->_next = NULL;
1861           break;
1862         }
1863       }
1864     } else {
1865       ParkEvent * ev = iterator->_event;
1866       iterator->TState = ObjectWaiter::TS_RUN;
1867       OrderAccess::fence();
1868       ev->unpark();
1869     }
1870 
1871     if (Policy < 4) {
1872       iterator->wait_reenter_begin(this);
1873     }
1874 
1875     // _WaitSetLock protects the wait queue, not the EntryList.  We could
1876     // move the add-to-EntryList operation, above, outside the critical section
1877     // protected by _WaitSetLock.  In practice that's not useful.  With the
1878     // exception of  wait() timeouts and interrupts the monitor owner
1879     // is the only thread that grabs _WaitSetLock.  There's almost no contention
1880     // on _WaitSetLock so it's not profitable to reduce the length of the
1881     // critical section.
1882   }
1883 
1884   Thread::SpinRelease(&_WaitSetLock);
1885 
1886   if (Tally != 0 && ObjectMonitor::_sync_Notifications != NULL) {
1887     ObjectMonitor::_sync_Notifications->inc(Tally);
1888   }
1889 }
1890 
1891 // -----------------------------------------------------------------------------
1892 // Adaptive Spinning Support
1893 //
1894 // Adaptive spin-then-block - rational spinning
1895 //
1896 // Note that we spin "globally" on _owner with a classic SMP-polite TATAS
1897 // algorithm.  On high order SMP systems it would be better to start with
1898 // a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
1899 // a contending thread could enqueue itself on the cxq and then spin locally
1900 // on a thread-specific variable such as its ParkEvent._Event flag.
1901 // That's left as an exercise for the reader.  Note that global spinning is
1902 // not problematic on Niagara, as the L2 cache serves the interconnect and
1903 // has both low latency and massive bandwidth.
1904 //
1905 // Broadly, we can fix the spin frequency -- that is, the % of contended lock
1906 // acquisition attempts where we opt to spin --  at 100% and vary the spin count
1907 // (duration) or we can fix the count at approximately the duration of




 429 
 430 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 431 // Callers must compensate as needed.
 432 
 433 int ObjectMonitor::TryLock(Thread * Self) {
 434   void * own = _owner;
 435   if (own != NULL) return 0;
 436   if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
 437     // Either guarantee _recursions == 0 or set _recursions = 0.
 438     assert(_recursions == 0, "invariant");
 439     assert(_owner == Self, "invariant");
 440     return 1;
 441   }
 442   // The lock had been free momentarily, but we lost the race to the lock.
 443   // Interference -- the CAS failed.
 444   // We can either return -1 or retry.
 445   // Retry doesn't make as much sense because the lock was just acquired.
 446   return -1;
 447 }
 448 
 449 #define MAX_RECHECK_INTERVAL 1000
 450 
 451 void NOINLINE ObjectMonitor::EnterI(TRAPS) {
 452   Thread * const Self = THREAD;
 453   assert(Self->is_Java_thread(), "invariant");
 454   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 455 
 456   // Try the lock - TATAS
 457   if (TryLock (Self) > 0) {
 458     assert(_succ != Self, "invariant");
 459     assert(_owner == Self, "invariant");
 460     assert(_Responsible != Self, "invariant");
 461     return;
 462   }
 463 
 464   DeferredInitialize();
 465 
 466   // We try one round of spinning *before* enqueueing Self.
 467   //
 468   // If the _owner is ready but OFFPROC we could use a YieldTo()
 469   // operation to donate the remainder of this thread's quantum
 470   // to the owner.  This has subtle but beneficial affinity


 540 
 541   if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
 542     // Try to assume the role of responsible thread for the monitor.
 543     // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
 544     Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
 545   }
 546 
 547   // The lock might have been released while this thread was occupied queueing
 548   // itself onto _cxq.  To close the race and avoid "stranding" and
 549   // progress-liveness failure we must resample-retry _owner before parking.
 550   // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
 551   // In this case the ST-MEMBAR is accomplished with CAS().
 552   //
 553   // TODO: Defer all thread state transitions until park-time.
 554   // Since state transitions are heavy and inefficient we'd like
 555   // to defer the state transitions until absolutely necessary,
 556   // and in doing so avoid some transitions ...
 557 
 558   TEVENT(Inflated enter - Contention);
 559   int nWakeups = 0;
 560   int recheckInterval = 1;
 561 
 562   for (;;) {
 563 
 564     if (TryLock(Self) > 0) break;
 565     assert(_owner != Self, "invariant");
 566 
 567     if ((SyncFlags & 2) && _Responsible == NULL) {
 568       Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
 569     }
 570 
 571     // park self
 572     if (_Responsible == Self || (SyncFlags & 1)) {
 573       TEVENT(Inflated enter - park TIMED);
 574       Self->_ParkEvent->park((jlong) recheckInterval);
 575       // Increase the recheckInterval, but clamp the value.
 576       recheckInterval *= 8;
 577       if (recheckInterval > MAX_RECHECK_INTERVAL) {
 578         recheckInterval = MAX_RECHECK_INTERVAL;
 579       }
 580     } else {
 581       TEVENT(Inflated enter - park UNTIMED);
 582       Self->_ParkEvent->park();
 583     }
 584 
 585     if (TryLock(Self) > 0) break;
 586 
 587     // The lock is still contested.
 588     // Keep a tally of the # of futile wakeups.
 589     // Note that the counter is not protected by a lock or updated by atomics.
 590     // That is by design - we trade "lossy" counters which are exposed to
 591     // races during updates for a lower probe effect.
 592     TEVENT(Inflated enter - Futile wakeup);
 593     if (ObjectMonitor::_sync_FutileWakeups != NULL) {
 594       ObjectMonitor::_sync_FutileWakeups->inc();
 595     }
 596     ++nWakeups;
 597 
 598     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 599     // We can defer clearing _succ until after the spin completes


 712     ObjectWaiter::TStates v = SelfNode->TState;
 713     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 714     assert(_owner != Self, "invariant");
 715 
 716     if (TryLock(Self) > 0) break;
 717     if (TrySpin(Self) > 0) break;
 718 
 719     TEVENT(Wait Reentry - parking);
 720 
 721     // State transition wrappers around park() ...
 722     // ReenterI() wisely defers state transitions until
 723     // it's clear we must park the thread.
 724     {
 725       OSThreadContendState osts(Self->osthread());
 726       ThreadBlockInVM tbivm(jt);
 727 
 728       // cleared by handle_special_suspend_equivalent_condition()
 729       // or java_suspend_self()
 730       jt->set_suspend_equivalent();
 731       if (SyncFlags & 1) {
 732         Self->_ParkEvent->park((jlong)MAX_RECHECK_INTERVAL);
 733       } else {
 734         Self->_ParkEvent->park();
 735       }
 736 
 737       // were we externally suspended while we were waiting?
 738       for (;;) {
 739         if (!ExitSuspendEquivalent(jt)) break;
 740         if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
 741         jt->java_suspend_self();
 742         jt->set_suspend_equivalent();
 743       }
 744     }
 745 
 746     // Try again, but just so we distinguish between futile wakeups and
 747     // successful wakeups.  The following test isn't algorithmically
 748     // necessary, but it helps us maintain sensible statistics.
 749     if (TryLock(Self) > 0) break;
 750 
 751     // The lock is still contested.
 752     // Keep a tally of the # of futile wakeups.


1639   // check if the notification happened
1640   if (!WasNotified) {
1641     // no, it could be timeout or Thread.interrupt() or both
1642     // check for interrupt event, otherwise it is timeout
1643     if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1644       TEVENT(Wait - throw IEX from epilog);
1645       THROW(vmSymbols::java_lang_InterruptedException());
1646     }
1647   }
1648 
 1649   // NOTE: A spurious wake up will be treated as a timeout.
1650   // Monitor notify has precedence over thread interrupt.
1651 }
1652 
1653 
1654 // Consider:
1655 // If the lock is cool (cxq == null && succ == null) and we're on an MP system
1656 // then instead of transferring a thread from the WaitSet to the EntryList
1657 // we might just dequeue a thread from the WaitSet and directly unpark() it.
1658 
1659 void ObjectMonitor::INotify(Thread * Self) {
1660   const int policy = Knob_MoveNotifyee;
1661 
1662   Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notify");
1663   ObjectWaiter * iterator = DequeueWaiter();
1664   if (iterator != NULL) {
1665     TEVENT(Notify1 - Transfer);
1666     guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
1667     guarantee(iterator->_notified == 0, "invariant");
1668     // Disposition - what might we do with iterator ?
1669     // a.  add it directly to the EntryList - either tail or head.
1670     // b.  push it onto the front of the _cxq.
1671     // For now we use (a).
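         // Knob_MoveNotifyee legend (a summary of the dispatch below):
         //   0 - prepend the notified waiter to the EntryList
         //   1 - append it to the EntryList
         //   2 - prepend it to the cxq (if the EntryList is empty the waiter
         //       simply becomes the EntryList)
         //   3 - append it to the cxq
         //   other (e.g. 4) - mark it TS_RUN, fence, and unpark() it at once;
         //       it then re-contends for the monitor on its own.  Only when
         //       policy < 4 does the waiter get wait_reenter_begin()
         //       accounting.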
1672     if (policy != 4) {
1673       iterator->TState = ObjectWaiter::TS_ENTER;
1674     }
1675     iterator->_notified = 1;
1676     iterator->_notifier_tid = Self->osthread()->thread_id();
1677 
1678     ObjectWaiter * list = _EntryList;
1679     if (list != NULL) {
1680       assert(list->_prev == NULL, "invariant");
1681       assert(list->TState == ObjectWaiter::TS_ENTER, "invariant");
1682       assert(list != iterator, "invariant");
1683     }
1684 
1685     if (policy == 0) {       // prepend to EntryList
1686       if (list == NULL) {
1687         iterator->_next = iterator->_prev = NULL;
1688         _EntryList = iterator;
1689       } else {
1690         list->_prev = iterator;
1691         iterator->_next = list;
1692         iterator->_prev = NULL;
1693         _EntryList = iterator;
1694       }
1695     } else if (policy == 1) {      // append to EntryList
1696       if (list == NULL) {
1697         iterator->_next = iterator->_prev = NULL;
1698         _EntryList = iterator;
1699       } else {
1700         // CONSIDER:  finding the tail currently requires a linear-time walk of
1701         // the EntryList.  We can make tail access constant-time by converting to
1702         // a CDLL instead of using our current DLL.
1703         ObjectWaiter * tail;
1704         for (tail = list; tail->_next != NULL; tail = tail->_next) /* empty */;
1705         assert(tail != NULL && tail->_next == NULL, "invariant");
1706         tail->_next = iterator;
1707         iterator->_prev = tail;
1708         iterator->_next = NULL;
1709       }
1710     } else if (policy == 2) {      // prepend to cxq
1711       if (list == NULL) {
1712         iterator->_next = iterator->_prev = NULL;
1713         _EntryList = iterator;
1714       } else {
1715         iterator->TState = ObjectWaiter::TS_CXQ;
1716         for (;;) {
1717           ObjectWaiter * front = _cxq;
1718           iterator->_next = front;
1719           if (Atomic::cmpxchg_ptr(iterator, &_cxq, front) == front) {
1720             break;
1721           }
1722         }
1723       }
1724     } else if (policy == 3) {      // append to cxq
1725       iterator->TState = ObjectWaiter::TS_CXQ;
1726       for (;;) {
1727         ObjectWaiter * tail = _cxq;
1728         if (tail == NULL) {
1729           iterator->_next = NULL;
1730           if (Atomic::cmpxchg_ptr(iterator, &_cxq, NULL) == NULL) {
1731             break;
1732           }
1733         } else {
1734           while (tail->_next != NULL) tail = tail->_next;
1735           tail->_next = iterator;
1736           iterator->_prev = tail;
1737           iterator->_next = NULL;
1738           break;
1739         }
1740       }
1741     } else {
1742       ParkEvent * ev = iterator->_event;
1743       iterator->TState = ObjectWaiter::TS_RUN;
1744       OrderAccess::fence();
1745       ev->unpark();
1746     }
1747 
1748     // _WaitSetLock protects the wait queue, not the EntryList.  We could
1749     // move the add-to-EntryList operation, above, outside the critical section
1750     // protected by _WaitSetLock.  In practice that's not useful.  With the
1751     // exception of  wait() timeouts and interrupts the monitor owner
1752     // is the only thread that grabs _WaitSetLock.  There's almost no contention
1753     // on _WaitSetLock so it's not profitable to reduce the length of the
1754     // critical section.
1755 
1756     if (policy < 4) {
1757       iterator->wait_reenter_begin(this);
1758     }
1759   }
1760   Thread::SpinRelease(&_WaitSetLock);
1761 }
1762 
1763 // Consider: a not-uncommon synchronization bug is to use notify() when
1764 // notifyAll() is more appropriate, potentially resulting in stranded
1765 // threads; this is one example of a lost wakeup. A useful diagnostic
1766 // option is to force all notify() operations to behave as notifyAll().
1767 //
1768 // Note: We can also detect many such problems with a "minimum wait".
1769 // When the "minimum wait" is set to a small non-zero timeout value
1770 // and the program does not hang whereas it did absent "minimum wait",
1771 // that suggests a lost wakeup bug. The '-XX:SyncFlags=1' option uses
1772 // a "minimum wait" for all park() operations; see the recheckInterval
1773 // variable and MAX_RECHECK_INTERVAL.
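
The pattern described above is easy to reproduce with portable C++ primitives. The sketch below (invented names, std::condition_variable standing in for a Java monitor) shows a single notification being consumed by a waiter whose condition is not yet satisfied, stranding the waiter the notification was intended for:

    #include <condition_variable>
    #include <mutex>

    std::mutex              m;
    std::condition_variable cv;
    bool data_ready    = false;   // waited on by consumer()
    bool shutting_down = false;   // waited on by shutdown_watcher()

    void consumer() {
      std::unique_lock<std::mutex> lk(m);
      cv.wait(lk, [] { return data_ready; });      // may never wake up
      // ... consume the data ...
    }

    void shutdown_watcher() {
      std::unique_lock<std::mutex> lk(m);
      cv.wait(lk, [] { return shutting_down; });
      // ... tear down ...
    }

    void producer() {
      { std::lock_guard<std::mutex> lk(m); data_ready = true; }
      // BUG: the single wakeup may be delivered to shutdown_watcher(), whose
      // predicate is still false; it re-waits and consumer() is stranded --
      // a lost wakeup.  notify_all() (or notify() forced to behave as
      // notifyAll(), as suggested above) avoids the hang.
      cv.notify_one();
    }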
1774 
1775 void ObjectMonitor::notify(TRAPS) {
1776   CHECK_OWNER();
1777   if (_WaitSet == NULL) {
1778     TEVENT(Empty-Notify);
1779     return;
1780   }
1781   DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
1782   INotify(THREAD);
1783   if (ObjectMonitor::_sync_Notifications != NULL) {
1784     ObjectMonitor::_sync_Notifications->inc(1);
1785   }
1786 }
1787 
1788 
1789 // The current implementation of notifyAll() transfers the waiters one-at-a-time
1790 // from the waitset to the EntryList. This could be done more efficiently with a
1791 // single bulk transfer but in practice it's not time-critical. Beware too,
1792 // that in prepend-mode we invert the order of the waiters. Let's say that the
1793 // waitset is "ABCD" and the EntryList is "XYZ". After a notifyAll() in prepend
1794 // mode the waitset will be empty and the EntryList will be "DCBAXYZ".
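     // (Step by step: the waiters are dequeued in order and each is prepended
     // in turn -- A gives "AXYZ", B gives "BAXYZ", C gives "CBAXYZ", and D
     // finally gives "DCBAXYZ".)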
1795 
1796 void ObjectMonitor::notifyAll(TRAPS) {
1797   CHECK_OWNER();
1798   if (_WaitSet == NULL) {
1799     TEVENT(Empty-NotifyAll);
1800     return;
1801   }
1802 
1803   DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
1804   int tally = 0;
1805   while (_WaitSet != NULL) {
1806     tally++;
1807     INotify(THREAD);
1808   }
1809 
1810   if (tally != 0 && ObjectMonitor::_sync_Notifications != NULL) {
1811     ObjectMonitor::_sync_Notifications->inc(tally);
1812   }
1813 }
1814 
1815 // -----------------------------------------------------------------------------
1816 // Adaptive Spinning Support
1817 //
1818 // Adaptive spin-then-block - rational spinning
1819 //
1820 // Note that we spin "globally" on _owner with a classic SMP-polite TATAS
1821 // algorithm.  On high order SMP systems it would be better to start with
1822 // a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
1823 // a contending thread could enqueue itself on the cxq and then spin locally
1824 // on a thread-specific variable such as its ParkEvent._Event flag.
1825 // That's left as an exercise for the reader.  Note that global spinning is
1826 // not problematic on Niagara, as the L2 cache serves the interconnect and
1827 // has both low latency and massive bandwidth.
1828 //
1829 // Broadly, we can fix the spin frequency -- that is, the % of contended lock
1830 // acquisition attempts where we opt to spin --  at 100% and vary the spin count
1831 // (duration) or we can fix the count at approximately the duration of