
src/share/vm/runtime/objectMonitor.cpp (old version)

   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/vmSymbols.hpp"

  27 #include "jfr/jfrEvents.hpp"
  28 #include "jfr/support/jfrThreadId.hpp"

  29 #include "memory/resourceArea.hpp"
  30 #include "oops/markOop.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "runtime/handles.inline.hpp"
  33 #include "runtime/interfaceSupport.hpp"
  34 #include "runtime/mutexLocker.hpp"
  35 #include "runtime/objectMonitor.hpp"
  36 #include "runtime/objectMonitor.inline.hpp"
  37 #include "runtime/orderAccess.inline.hpp"
  38 #include "runtime/osThread.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "runtime/thread.inline.hpp"
  41 #include "services/threadService.hpp"
  42 #include "utilities/dtrace.hpp"
  43 #include "utilities/macros.hpp"
  44 #include "utilities/preserveException.hpp"
  45 #ifdef TARGET_OS_FAMILY_linux
  46 # include "os_linux.inline.hpp"
  47 #endif
  48 #ifdef TARGET_OS_FAMILY_solaris


 363      assert (_recursions == 0    , "invariant") ;
 364      assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
 365      Self->_Stalled = 0 ;
 366      return ;
 367   }
 368 
 369   assert (_owner != Self          , "invariant") ;
 370   assert (_succ  != Self          , "invariant") ;
 371   assert (Self->is_Java_thread()  , "invariant") ;
 372   JavaThread * jt = (JavaThread *) Self ;
 373   assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
 374   assert (jt->thread_state() != _thread_blocked   , "invariant") ;
 375   assert (this->object() != NULL  , "invariant") ;
 376   assert (_count >= 0, "invariant") ;
 377 
 378   // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
 379   // Ensure the object-monitor relationship remains stable while there's contention.
 380   Atomic::inc_ptr(&_count);
 381 
 382   JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)

 383   EventJavaMonitorEnter event;
 384   if (event.should_commit()) {
 385     event.set_monitorClass(((oop)this->object())->klass());
 386     event.set_address((uintptr_t)(this->object_addr()));
 387   }

 388 
 389   { // Change java thread status to indicate blocked on monitor enter.
 390     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 391 
 392     Self->set_current_pending_monitor(this);
 393 
 394     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
 395     if (JvmtiExport::should_post_monitor_contended_enter()) {
 396       JvmtiExport::post_monitor_contended_enter(jt, this);
 397 
 398       // The current thread does not yet own the monitor and does not
 399       // yet appear on any queues that would get it made the successor.
 400       // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
 401       // handler cannot accidentally consume an unpark() meant for the
 402       // ParkEvent associated with this ObjectMonitor.
 403     }
 404 
 405     OSThreadContendState osts(Self->osthread());
 406     ThreadBlockInVM tbivm(jt);
 407 


 455   // while we hold the monitor, increasing the length of the critical
 456   // section.  Amdahl's parallel speedup law comes vividly into play.
 457   //
 458   // Another option might be to aggregate the events (thread local or
 459   // per-monitor aggregation) and defer reporting until a more opportune
 460   // time -- such as next time some thread encounters contention but has
 461   // yet to acquire the lock.  While spinning, that thread could
 462   // increment JVMStat counters, etc.
 463 
 464   DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
 465   if (JvmtiExport::should_post_monitor_contended_entered()) {
 466     JvmtiExport::post_monitor_contended_entered(jt, this);
 467 
 468     // The current thread already owns the monitor and is not going to
 469     // call park() for the remainder of the monitor enter protocol. So
 470     // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
 471     // event handler consumed an unpark() issued by the thread that
 472     // just exited the monitor.
 473   }
 474 

 475   if (event.should_commit()) {
 476     event.set_previousOwner((uintptr_t)_previous_owner_tid);
 477     event.commit();
 478   }

 479 
 480   if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
 481      ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
 482   }
 483 }
 484 
 485 
 486 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 487 // Callers must compensate as needed.
 488 
 489 int ObjectMonitor::TryLock (Thread * Self) {
 490    for (;;) {
 491       void * own = _owner ;
 492       if (own != NULL) return 0 ;
 493       if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
 494          // Either guarantee _recursions == 0 or set _recursions = 0.
 495          assert (_recursions == 0, "invariant") ;
 496          assert (_owner == Self, "invariant") ;
 497          // CONSIDER: set or assert that OwnerIsThread == 1
 498          return 1 ;
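
The caveat above deserves a concrete illustration: a successful cmpxchg on _owner carries full two-way fence semantics, but a failed attempt may provide no ordering at all, so a caller that re-reads monitor state after a failed TryLock() must order those loads itself. A minimal standalone sketch of that compensation follows; the lock word and helper names are invented for illustration and are not part of this file (Atomic and OrderAccess are already in scope in this translation unit):

    static void * volatile _lock_word = NULL;    // stand-in for _owner

    static int try_lock_word(Thread * Self) {
      // A successful cmpxchg is serializing; a failed one may not be.
      return Atomic::cmpxchg_ptr(Self, &_lock_word, NULL) == NULL ? 1 : 0;
    }

    static void enter_with_compensation(Thread * Self) {
      while (try_lock_word(Self) == 0) {
        // Compensate for the non-serializing failure before re-sampling
        // any state that must be ordered with respect to the CAS.
        OrderAccess::fence();
        // ... spin, yield, or enqueue-and-park before retrying ...
      }
    }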


1431         THROW(vmSymbols::java_lang_IllegalMonitorStateException());               \
1432       }                                                                           \
1433     }                                                                             \
1434   } while (false)
1435 
1436 // check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
1437 // TODO-FIXME: remove check_slow() -- it's likely dead.
1438 
1439 void ObjectMonitor::check_slow(TRAPS) {
1440   TEVENT (check_slow - throw IMSX) ;
1441   assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
1442   THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
1443 }
1444 
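// Adjust() below: a lock-free CAS loop that adds 'dx' to '*adr' and returns
// the value observed immediately before the successful update.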
1445 static int Adjust (volatile int * adr, int dx) {
1446   int v ;
1447   for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
1448   return v ;
1449 }
1450 

1451 // helper method for posting a monitor wait event
1452 static void post_monitor_wait_event(EventJavaMonitorWait* event,
1453                                     ObjectMonitor* monitor,
1454                                     jlong notifier_tid,
1455                                     jlong timeout,
1456                                     bool timedout) {
1457   assert(monitor != NULL, "invariant");
1458   event->set_monitorClass(((oop)monitor->object())->klass());
1459   event->set_timeout(timeout);
1460   event->set_address((uintptr_t)monitor->object_addr());
1461   event->set_notifier((u8)notifier_tid);
1462   event->set_timedOut(timedout);
1463   event->commit();
1464 }

1465 
1466 // -----------------------------------------------------------------------------
1467 // Wait/Notify/NotifyAll
1468 //
1469 // Note: a subset of changes to ObjectMonitor::wait()
1470 // will need to be replicated in complete_exit above
1471 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
1472    Thread * const Self = THREAD ;
1473    assert(Self->is_Java_thread(), "Must be Java thread!");
1474    JavaThread *jt = (JavaThread *)THREAD;
1475 
1476    DeferredInitialize () ;
1477 
1478    // Throw IMSX or IEX.
1479    CHECK_OWNER();
1480 

1481    EventJavaMonitorWait event;

1482 
1483    // check for a pending interrupt
1484    if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1485      // post monitor waited event.  Note that this is past-tense, we are done waiting.
1486      if (JvmtiExport::should_post_monitor_waited()) {
1487         // Note: 'false' parameter is passed here because the
1488         // wait was not timed out due to thread interrupt.
1489         JvmtiExport::post_monitor_waited(jt, this, false);
1490 
1491         // In this short circuit of the monitor wait protocol, the
1492         // current thread never drops ownership of the monitor and
1493         // never gets added to the wait queue so the current thread
1494         // cannot be made the successor. This means that the
1495         // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
1496         // consume an unpark() meant for the ParkEvent associated with
1497         // this ObjectMonitor.
1498      }

1499      if (event.should_commit()) {
1500        post_monitor_wait_event(&event, this, 0, millis, false);
1501      }

1502      TEVENT (Wait - Throw IEX) ;
1503      THROW(vmSymbols::java_lang_InterruptedException());
1504      return ;
1505    }
1506 
1507    TEVENT (Wait) ;
1508 
1509    assert (Self->_Stalled == 0, "invariant") ;
1510    Self->_Stalled = intptr_t(this) ;
1511    jt->set_current_waiting_monitor(this);
1512 
1513    // create a node to be put into the queue
1514    // Critically, after we reset() the event but prior to park(), we must check
1515    // for a pending interrupt.
1516    ObjectWaiter node(Self);
1517    node.TState = ObjectWaiter::TS_WAIT ;
1518    Self->_ParkEvent->reset() ;
1519    OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
1520 
1521    // Enter the waiting queue, which is a circular doubly linked list in this case
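
For context, the enqueue implementing this comment (AddWaiter(), called under _WaitSetLock a few lines below, outside this excerpt) links the node onto the tail of that circular doubly linked list. Paraphrased from elsewhere in this file, roughly:

    inline void ObjectMonitor::AddWaiter(ObjectWaiter * node) {
      assert(node != NULL, "should not add NULL node");
      assert(node->_prev == NULL && node->_next == NULL, "node already in list");
      if (_WaitSet == NULL) {
        // First waiter: the node becomes a one-element circular list.
        _WaitSet = node;
        node->_prev = node;
        node->_next = node;
      } else {
        // Append at the tail, i.e. immediately before the current head.
        ObjectWaiter * head = _WaitSet;
        ObjectWaiter * tail = head->_prev;
        assert(tail->_next == head, "invariant");
        tail->_next = node;
        head->_prev = node;
        node->_next = head;
        node->_prev = tail;
      }
    }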


1623        if (node._notified != 0 && _succ == Self) {
1624          // In this part of the monitor wait-notify-reenter protocol it
1625          // is possible (and normal) for another thread to do a fastpath
1626          // monitor enter-exit while this thread is still trying to get
1627          // to the reenter portion of the protocol.
1628          //
1629          // The ObjectMonitor was notified and the current thread is
1630          // the successor which also means that an unpark() has already
1631          // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
1632          // consume the unpark() that was done when the successor was
1633          // set because the same ParkEvent is shared between Java
1634          // monitors and JVM/TI RawMonitors (for now).
1635          //
1636          // We redo the unpark() to ensure forward progress, i.e., we
1637          // don't want all pending threads hanging (parked) with none
1638          // entering the unlocked monitor.
1639          node._event->unpark();
1640        }
1641      }
1642 

1643      if (event.should_commit()) {
1644        post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
1645      }

1646 
1647      OrderAccess::fence() ;
1648 
1649      assert (Self->_Stalled != 0, "invariant") ;
1650      Self->_Stalled = 0 ;
1651 
1652      assert (_owner != Self, "invariant") ;
1653      ObjectWaiter::TStates v = node.TState ;
1654      if (v == ObjectWaiter::TS_RUN) {
1655          enter (Self) ;
1656      } else {
1657          guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
1658          ReenterI (Self, &node) ;
1659          node.wait_reenter_end(this);
1660      }
1661 
1662      // Self has reacquired the lock.
1663      // Lifecycle - the node representing Self must not appear on any queues.
1664      // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1665      // want residual elements associated with this thread left on any lists.


1707   CHECK_OWNER();
1708   if (_WaitSet == NULL) {
1709      TEVENT (Empty-Notify) ;
1710      return ;
1711   }
1712   DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
1713 
1714   int Policy = Knob_MoveNotifyee ;
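  // Knob_MoveNotifyee selects where the dequeued waiter ("notifyee") is placed:
  // 0 prepends and 1 appends to the EntryList, 2 prepends and 3 appends to the
  // cxq, and any other value unparks the notifyee directly.  Only the first two
  // branches are visible in this excerpt.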
1715 
1716   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
1717   ObjectWaiter * iterator = DequeueWaiter() ;
1718   if (iterator != NULL) {
1719      TEVENT (Notify1 - Transfer) ;
1720      guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
1721      guarantee (iterator->_notified == 0, "invariant") ;
1722      if (Policy != 4) {
1723         iterator->TState = ObjectWaiter::TS_ENTER ;
1724      }
1725      iterator->_notified = 1 ;
1726      Thread * Self = THREAD;

1727      iterator->_notifier_tid = JFR_THREAD_ID(Self);

1728 
1729      ObjectWaiter * List = _EntryList ;
1730      if (List != NULL) {
1731         assert (List->_prev == NULL, "invariant") ;
1732         assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1733         assert (List != iterator, "invariant") ;
1734      }
1735 
1736      if (Policy == 0) {       // prepend to EntryList
1737          if (List == NULL) {
1738              iterator->_next = iterator->_prev = NULL ;
1739              _EntryList = iterator ;
1740          } else {
1741              List->_prev = iterator ;
1742              iterator->_next = List ;
1743              iterator->_prev = NULL ;
1744              _EntryList = iterator ;
1745         }
1746      } else
1747      if (Policy == 1) {      // append to EntryList


1833 
1834   int Policy = Knob_MoveNotifyee ;
1835   int Tally = 0 ;
1836   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
1837 
1838   for (;;) {
1839      iterator = DequeueWaiter () ;
1840      if (iterator == NULL) break ;
1841      TEVENT (NotifyAll - Transfer1) ;
1842      ++Tally ;
1843 
1844      // Disposition - what might we do with iterator ?
1845      // a.  add it directly to the EntryList - either tail or head.
1846      // b.  push it onto the front of the _cxq.
1847      // For now we use (a).
1848 
1849      guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
1850      guarantee (iterator->_notified == 0, "invariant") ;
1851      iterator->_notified = 1 ;
1852      Thread * Self = THREAD;

1853      iterator->_notifier_tid = JFR_THREAD_ID(Self);

1854      if (Policy != 4) {
1855         iterator->TState = ObjectWaiter::TS_ENTER ;
1856      }
1857 
1858      ObjectWaiter * List = _EntryList ;
1859      if (List != NULL) {
1860         assert (List->_prev == NULL, "invariant") ;
1861         assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1862         assert (List != iterator, "invariant") ;
1863      }
1864 
1865      if (Policy == 0) {       // prepend to EntryList
1866          if (List == NULL) {
1867              iterator->_next = iterator->_prev = NULL ;
1868              _EntryList = iterator ;
1869          } else {
1870              List->_prev = iterator ;
1871              iterator->_next = List ;
1872              iterator->_prev = NULL ;
1873              _EntryList = iterator ;




src/share/vm/runtime/objectMonitor.cpp (new version, adding #if INCLUDE_JFR guards)

   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/vmSymbols.hpp"
  27 #if INCLUDE_JFR
  28 #include "jfr/jfrEvents.hpp"
  29 #include "jfr/support/jfrThreadId.hpp"
  30 #endif
  31 #include "memory/resourceArea.hpp"
  32 #include "oops/markOop.hpp"
  33 #include "oops/oop.inline.hpp"
  34 #include "runtime/handles.inline.hpp"
  35 #include "runtime/interfaceSupport.hpp"
  36 #include "runtime/mutexLocker.hpp"
  37 #include "runtime/objectMonitor.hpp"
  38 #include "runtime/objectMonitor.inline.hpp"
  39 #include "runtime/orderAccess.inline.hpp"
  40 #include "runtime/osThread.hpp"
  41 #include "runtime/stubRoutines.hpp"
  42 #include "runtime/thread.inline.hpp"
  43 #include "services/threadService.hpp"
  44 #include "utilities/dtrace.hpp"
  45 #include "utilities/macros.hpp"
  46 #include "utilities/preserveException.hpp"
  47 #ifdef TARGET_OS_FAMILY_linux
  48 # include "os_linux.inline.hpp"
  49 #endif
  50 #ifdef TARGET_OS_FAMILY_solaris


 365      assert (_recursions == 0    , "invariant") ;
 366      assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
 367      Self->_Stalled = 0 ;
 368      return ;
 369   }
 370 
 371   assert (_owner != Self          , "invariant") ;
 372   assert (_succ  != Self          , "invariant") ;
 373   assert (Self->is_Java_thread()  , "invariant") ;
 374   JavaThread * jt = (JavaThread *) Self ;
 375   assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
 376   assert (jt->thread_state() != _thread_blocked   , "invariant") ;
 377   assert (this->object() != NULL  , "invariant") ;
 378   assert (_count >= 0, "invariant") ;
 379 
 380   // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
 381   // Ensure the object-monitor relationship remains stable while there's contention.
 382   Atomic::inc_ptr(&_count);
 383 
 384   JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
 385 #if INCLUDE_JFR
 386   EventJavaMonitorEnter event;
 387   if (event.should_commit()) {
 388     event.set_monitorClass(((oop)this->object())->klass());
 389     event.set_address((uintptr_t)(this->object_addr()));
 390   }
 391 #endif
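
The guard just above is needed because the patch presumes EventJavaMonitorEnter (and the other JFR event types) are only declared when JFR is compiled in, whereas the JFR_ONLY(...) statement at line 384 above guards itself. A sketch of that convention, assuming the macros follow the usual INCLUDE_* pattern (e.g. in utilities/macros.hpp):

    #if INCLUDE_JFR
    #define JFR_ONLY(code) code    // JFR built in: the statement is emitted as-is
    #else
    #define JFR_ONLY(code)         // JFR excluded: the statement compiles away
    #endif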
 392 
 393   { // Change java thread status to indicate blocked on monitor enter.
 394     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 395 
 396     Self->set_current_pending_monitor(this);
 397 
 398     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
 399     if (JvmtiExport::should_post_monitor_contended_enter()) {
 400       JvmtiExport::post_monitor_contended_enter(jt, this);
 401 
 402       // The current thread does not yet own the monitor and does not
 403       // yet appear on any queues that would get it made the successor.
 404       // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
 405       // handler cannot accidentally consume an unpark() meant for the
 406       // ParkEvent associated with this ObjectMonitor.
 407     }
 408 
 409     OSThreadContendState osts(Self->osthread());
 410     ThreadBlockInVM tbivm(jt);
 411 


 459   // while we hold the monitor, increasing the length of the critical
 460   // section.  Amdahl's parallel speedup law comes vividly into play.
 461   //
 462   // Another option might be to aggregate the events (thread local or
 463   // per-monitor aggregation) and defer reporting until a more opportune
 464   // time -- such as next time some thread encounters contention but has
 465   // yet to acquire the lock.  While spinning, that thread could
 466   // increment JVMStat counters, etc.
 467 
 468   DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
 469   if (JvmtiExport::should_post_monitor_contended_entered()) {
 470     JvmtiExport::post_monitor_contended_entered(jt, this);
 471 
 472     // The current thread already owns the monitor and is not going to
 473     // call park() for the remainder of the monitor enter protocol. So
 474     // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
 475     // event handler consumed an unpark() issued by the thread that
 476     // just exited the monitor.
 477   }
 478 
 479 #if INCLUDE_JFR
 480   if (event.should_commit()) {
 481     event.set_previousOwner((uintptr_t)_previous_owner_tid);
 482     event.commit();
 483   }
 484 #endif
 485 
 486   if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
 487      ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
 488   }
 489 }
 490 
 491 
 492 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 493 // Callers must compensate as needed.
 494 
 495 int ObjectMonitor::TryLock (Thread * Self) {
 496    for (;;) {
 497       void * own = _owner ;
 498       if (own != NULL) return 0 ;
 499       if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
 500          // Either guarantee _recursions == 0 or set _recursions = 0.
 501          assert (_recursions == 0, "invariant") ;
 502          assert (_owner == Self, "invariant") ;
 503          // CONSIDER: set or assert that OwnerIsThread == 1
 504          return 1 ;


1437         THROW(vmSymbols::java_lang_IllegalMonitorStateException());               \
1438       }                                                                           \
1439     }                                                                             \
1440   } while (false)
1441 
1442 // check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
1443 // TODO-FIXME: remove check_slow() -- it's likely dead.
1444 
1445 void ObjectMonitor::check_slow(TRAPS) {
1446   TEVENT (check_slow - throw IMSX) ;
1447   assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
1448   THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
1449 }
1450 
1451 static int Adjust (volatile int * adr, int dx) {
1452   int v ;
1453   for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
1454   return v ;
1455 }
1456 
1457 #if INCLUDE_JFR
1458 // helper method for posting a monitor wait event
1459 static void post_monitor_wait_event(EventJavaMonitorWait* event,
1460                                     ObjectMonitor* monitor,
1461                                     jlong notifier_tid,
1462                                     jlong timeout,
1463                                     bool timedout) {
1464   assert(monitor != NULL, "invariant");
1465   event->set_monitorClass(((oop)monitor->object())->klass());
1466   event->set_timeout(timeout);
1467   event->set_address((uintptr_t)monitor->object_addr());
1468   event->set_notifier((u8)notifier_tid);
1469   event->set_timedOut(timedout);
1470   event->commit();
1471 }
1472 #endif
1473 
1474 // -----------------------------------------------------------------------------
1475 // Wait/Notify/NotifyAll
1476 //
1477 // Note: a subset of changes to ObjectMonitor::wait()
1478 // will need to be replicated in complete_exit above
1479 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
1480    Thread * const Self = THREAD ;
1481    assert(Self->is_Java_thread(), "Must be Java thread!");
1482    JavaThread *jt = (JavaThread *)THREAD;
1483 
1484    DeferredInitialize () ;
1485 
1486    // Throw IMSX or IEX.
1487    CHECK_OWNER();
1488 
1489 #if INCLUDE_JFR
1490    EventJavaMonitorWait event;
1491 #endif
1492 
1493    // check for a pending interrupt
1494    if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1495      // post monitor waited event.  Note that this is past-tense, we are done waiting.
1496      if (JvmtiExport::should_post_monitor_waited()) {
1497         // Note: 'false' parameter is passed here because the
1498         // wait was not timed out due to thread interrupt.
1499         JvmtiExport::post_monitor_waited(jt, this, false);
1500 
1501         // In this short circuit of the monitor wait protocol, the
1502         // current thread never drops ownership of the monitor and
1503         // never gets added to the wait queue so the current thread
1504         // cannot be made the successor. This means that the
1505         // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
1506         // consume an unpark() meant for the ParkEvent associated with
1507         // this ObjectMonitor.
1508      }
1509 #if INCLUDE_JFR
1510      if (event.should_commit()) {
1511        post_monitor_wait_event(&event, this, 0, millis, false);
1512      }
1513 #endif
1514      TEVENT (Wait - Throw IEX) ;
1515      THROW(vmSymbols::java_lang_InterruptedException());
1516      return ;
1517    }
1518 
1519    TEVENT (Wait) ;
1520 
1521    assert (Self->_Stalled == 0, "invariant") ;
1522    Self->_Stalled = intptr_t(this) ;
1523    jt->set_current_waiting_monitor(this);
1524 
1525    // create a node to be put into the queue
1526    // Critically, after we reset() the event but prior to park(), we must check
1527    // for a pending interrupt.
1528    ObjectWaiter node(Self);
1529    node.TState = ObjectWaiter::TS_WAIT ;
1530    Self->_ParkEvent->reset() ;
1531    OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
1532 
1533    // Enter the waiting queue, which is a circular doubly linked list in this case


1635        if (node._notified != 0 && _succ == Self) {
1636          // In this part of the monitor wait-notify-reenter protocol it
1637          // is possible (and normal) for another thread to do a fastpath
1638          // monitor enter-exit while this thread is still trying to get
1639          // to the reenter portion of the protocol.
1640          //
1641          // The ObjectMonitor was notified and the current thread is
1642          // the successor which also means that an unpark() has already
1643          // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
1644          // consume the unpark() that was done when the successor was
1645          // set because the same ParkEvent is shared between Java
1646          // monitors and JVM/TI RawMonitors (for now).
1647          //
1648          // We redo the unpark() to ensure forward progress, i.e., we
1649          // don't want all pending threads hanging (parked) with none
1650          // entering the unlocked monitor.
1651          node._event->unpark();
1652        }
1653      }
1654 
1655 #if INCLUDE_JFR
1656      if (event.should_commit()) {
1657        post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
1658      }
1659 #endif
1660 
1661      OrderAccess::fence() ;
1662 
1663      assert (Self->_Stalled != 0, "invariant") ;
1664      Self->_Stalled = 0 ;
1665 
1666      assert (_owner != Self, "invariant") ;
1667      ObjectWaiter::TStates v = node.TState ;
1668      if (v == ObjectWaiter::TS_RUN) {
1669          enter (Self) ;
1670      } else {
1671          guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
1672          ReenterI (Self, &node) ;
1673          node.wait_reenter_end(this);
1674      }
1675 
1676      // Self has reacquired the lock.
1677      // Lifecycle - the node representing Self must not appear on any queues.
1678      // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1679      // want residual elements associated with this thread left on any lists.


1721   CHECK_OWNER();
1722   if (_WaitSet == NULL) {
1723      TEVENT (Empty-Notify) ;
1724      return ;
1725   }
1726   DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
1727 
1728   int Policy = Knob_MoveNotifyee ;
1729 
1730   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
1731   ObjectWaiter * iterator = DequeueWaiter() ;
1732   if (iterator != NULL) {
1733      TEVENT (Notify1 - Transfer) ;
1734      guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
1735      guarantee (iterator->_notified == 0, "invariant") ;
1736      if (Policy != 4) {
1737         iterator->TState = ObjectWaiter::TS_ENTER ;
1738      }
1739      iterator->_notified = 1 ;
1740      Thread * Self = THREAD;
1741 #if INCLUDE_JFR
1742      iterator->_notifier_tid = JFR_THREAD_ID(Self);
1743 #endif
1744 
1745      ObjectWaiter * List = _EntryList ;
1746      if (List != NULL) {
1747         assert (List->_prev == NULL, "invariant") ;
1748         assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1749         assert (List != iterator, "invariant") ;
1750      }
1751 
1752      if (Policy == 0) {       // prepend to EntryList
1753          if (List == NULL) {
1754              iterator->_next = iterator->_prev = NULL ;
1755              _EntryList = iterator ;
1756          } else {
1757              List->_prev = iterator ;
1758              iterator->_next = List ;
1759              iterator->_prev = NULL ;
1760              _EntryList = iterator ;
1761         }
1762      } else
1763      if (Policy == 1) {      // append to EntryList


1849 
1850   int Policy = Knob_MoveNotifyee ;
1851   int Tally = 0 ;
1852   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
1853 
1854   for (;;) {
1855      iterator = DequeueWaiter () ;
1856      if (iterator == NULL) break ;
1857      TEVENT (NotifyAll - Transfer1) ;
1858      ++Tally ;
1859 
1860      // Disposition - what might we do with iterator ?
1861      // a.  add it directly to the EntryList - either tail or head.
1862      // b.  push it onto the front of the _cxq.
1863      // For now we use (a).
1864 
1865      guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
1866      guarantee (iterator->_notified == 0, "invariant") ;
1867      iterator->_notified = 1 ;
1868      Thread * Self = THREAD;
1869 #if INCLUDE_JFR
1870      iterator->_notifier_tid = JFR_THREAD_ID(Self);
1871 #endif
1872      if (Policy != 4) {
1873         iterator->TState = ObjectWaiter::TS_ENTER ;
1874      }
1875 
1876      ObjectWaiter * List = _EntryList ;
1877      if (List != NULL) {
1878         assert (List->_prev == NULL, "invariant") ;
1879         assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1880         assert (List != iterator, "invariant") ;
1881      }
1882 
1883      if (Policy == 0) {       // prepend to EntryList
1884          if (List == NULL) {
1885              iterator->_next = iterator->_prev = NULL ;
1886              _EntryList = iterator ;
1887          } else {
1888              List->_prev = iterator ;
1889              iterator->_next = List ;
1890              iterator->_prev = NULL ;
1891              _EntryList = iterator ;

